[
  {
    "path": ".config/nextest.toml",
    "content": "# Profiles to fail after a timeout, but continue with the other tests\n[profile.quick-10]\nslow-timeout = { period = \"10s\", terminate-after = 1 }\nfail-fast = false\n\n[profile.quick-30]\nslow-timeout = { period = \"30s\", terminate-after = 1 }\nfail-fast = false\n\n[profile.quick-60]\nslow-timeout = { period = \"60s\", terminate-after = 1 }\nfail-fast = false\n"
  },
  {
    "path": ".gitattributes",
    "content": "**/*.asm linguist-language=Rust\n**/*.pil linguist-language=Rust\n"
  },
  {
    "path": ".github/actions/init-testing-instance/action.yml",
    "content": "name: \"Init testing instance\"\ndescription: \"Initialises a testing instance with all required tools and fetches the precomputed tests archive named `tests_archive_cpu`\"\n\nruns:\n  using: \"composite\"\n  steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n      - name: Download build artifacts (CPU)\n        uses: actions/download-artifact@v4\n        with:\n          name: tests_archive_cpu\n      - name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)\n        shell: bash\n        run: rustup toolchain install nightly-2025-05-14 --component clippy,rustfmt,rust-src\n      - name: Install riscv target\n        shell: bash\n        run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-10-01\n      - name: Install test dependencies\n        shell: bash\n        run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld\n      - name: Install Rust deps\n        shell: bash\n        run: rustup install nightly-2025-10-01 --component rust-src\n      - name: Install Rust deps\n        shell: bash\n        run: rustup install nightly-2025-02-14 --component rust-src\n      - name: Install OpenVM guest toolchain (nightly-2025-08-02)\n        shell: bash\n        run: |\n          rustup toolchain install nightly-2025-08-02\n          rustup component add rust-src --toolchain nightly-2025-08-02\n      - uses: taiki-e/install-action@nextest"
  },
  {
    "path": ".github/actions/init-testing-instance-gpu/action.yml",
    "content": "name: \"Init testing instance (GPU)\"\ndescription: \"Initialises a testing instance with all required tools and fetches the precomputed tests archive named `tests_archive_gpu`\"\n\nruns:\n  using: \"composite\"\n  steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n      - name: Download build artifacts (GPU)\n        uses: actions/download-artifact@v4\n        with:\n          name: tests_archive_gpu\n      - name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)\n        shell: bash\n        run: rustup toolchain install nightly-2025-05-14 --component clippy,rustfmt,rust-src\n      - name: Install riscv target\n        shell: bash\n        run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-10-01\n      # TODO: runner on our GPU server has no sudo access, so we manually installed these; uncomment these once we have proper runners\n      # - name: Install test dependencies\n      #  shell: bash\n      #  run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld\n      - name: Install Rust deps\n        shell: bash\n        run: rustup install nightly-2025-10-01 --component rust-src\n      - name: Install Rust deps\n        shell: bash\n        run: rustup install nightly-2025-02-14 --component rust-src\n      - name: Install OpenVM guest toolchain (nightly-2025-08-02)\n        shell: bash\n        run: |\n          rustup toolchain install nightly-2025-08-02\n          rustup component add rust-src --toolchain nightly-2025-08-02\n      - uses: taiki-e/install-action@nextest\n\n\n"
  },
  {
    "path": ".github/actions/patch-openvm-reth-benchmark/action.yml",
    "content": "name: \"Patch openvm-reth-benchmark\"\ndescription: \"Checks out powdr-labs/openvm-reth-benchmark at a fixed ref and patches it to use local powdr crates\"\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Checkout openvm-reth-benchmark\n      uses: actions/checkout@v4\n      with:\n        repository: powdr-labs/openvm-reth-benchmark\n        # Set once here — no inputs required elsewhere\n        # Should always point to the latest main commit\n        ref: 4a697fec23cb00849039f0bcaab5432929e05b38\n        path: openvm-reth-benchmark\n\n    - name: Patch openvm-reth-benchmark to use local powdr\n      shell: bash\n      run: |\n        cd openvm-reth-benchmark\n        mkdir -p .cargo\n        cat <<'EOF' > .cargo/config.toml\n        [patch.\"https://github.com/powdr-labs/powdr.git\"]\n        powdr-openvm-riscv = { path = \"../openvm-riscv\" }\n        powdr-openvm = { path = \"../openvm\" }\n        powdr-riscv-elf = { path = \"../riscv-elf\" }\n        powdr-number = { path = \"../number\" }\n        powdr-autoprecompiles = { path = \"../autoprecompiles\" }\n        powdr-openvm-riscv-hints-circuit = { path = \"../openvm-riscv/extensions/hints-circuit\" }\n        EOF\n"
  },
  {
    "path": ".github/runner/Dockerfile",
    "content": "#\n# Runner for powdr github actions.\n# We don't automate runner token generation yet. This image should be used as follows:\n# - generate a runner token in github (valid for ~1h)\n# - build the docker image passing the token as argument:\n#   docker buildx build -t github-runner --build-arg TOKEN=THE_GENERATED_TOKEN .\n# - this will create an image already registered it with github\n# - the container will start the runner (./run.sh) by default.\n\n# this base image was taken from the Dockerfile in the github runner repo\nFROM mcr.microsoft.com/dotnet/runtime-deps:6.0-jammy AS build\n\nARG RUNNER_VERSION=2.319.1\n\nRUN apt-get update && apt install -y curl \\\n    sudo \\\n    libicu70 \\\n    liblttng-ust1 \\\n    libkrb5-3 \\\n    zlib1g \\\n    libssl3 \\\n    git \\\n    build-essential \\\n    clang-15 \\\n    nlohmann-json3-dev \\\n    libpqxx-dev \\\n    nasm \\\n    libgmp-dev \\\n    uuid-dev \\\n    zstd\n\nRUN adduser --disabled-password --uid 1001 runner \\\n    && usermod -aG sudo runner \\\n    && echo \"%sudo   ALL=(ALL:ALL) NOPASSWD:ALL\" > /etc/sudoers \\\n    && echo \"Defaults env_keep += \\\"DEBIAN_FRONTEND\\\"\" >> /etc/sudoers\n\nUSER runner\n\nWORKDIR /home/runner\n\nRUN curl -f -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz \\\n    && tar xzf ./runner.tar.gz \\\n    && rm runner.tar.gz\n\nRUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s - -y\n\nARG TOKEN\nRUN test -n \"$TOKEN\" || (echo \"must set github runner TOKEN: --build-arg TOKEN=XXX\" && false)\n\nRUN ./config.sh --name arch-server --work work --replace --url https://github.com/powdr-labs/powdr --token ${TOKEN}\n\n# anything that should be in the PATH of the runner must be setup here\nENV PATH=\"/home/runner/.cargo/bin:$PATH\"\n\nCMD [\"./run.sh\"]\n"
  },
  {
    "path": ".github/workflows/build-cache.yml",
    "content": "name: Generate rust cache for PR builds\non:\n  workflow_dispatch:\n  schedule:\n    - cron: '0 2 * * *' # run at 2 AM UTC\n\nenv:\n    CARGO_TERM_COLOR: always\n\njobs:\n  build:\n    runs-on: warp-ubuntu-2404-x64-4x\n\n    steps:\n    - uses: actions/checkout@v4\n      with:\n        submodules: recursive\n\n    - name: Save date of cache build\n      run: mkdir target && date -R -u > target/cache-build-date.txt\n    - name: Save commit hash of cache build\n      run: git rev-parse HEAD > target/cache-commit-hash.txt\n\n    ##### The block below is shared between cache build and PR build workflows #####\n    - name: Install Rust toolchain nightly-2025-10-01 (with clippy and rustfmt)\n      run: rustup toolchain install nightly-2025-10-01 --component clippy,rustfmt\n    - name: Install Rust toolchain\n      run: rustup toolchain install nightly-2025-02-14 --component rust-src\n    - name: Install Rust toolchain 1.90 (stable)\n      run: rustup toolchain install 1.90\n    - name: Set cargo to perform shallow clones\n      run: echo \"CARGO_NET_GIT_FETCH_WITH_CLI=true\" >> $GITHUB_ENV\n    - name: Format\n      run: cargo fmt --all --check --verbose\n    - name: Cargo check with Rust 1.90 (default features)\n      run: cargo +1.90 check --all-targets\n    - name: Lint no default features\n      run: cargo clippy --all --all-targets --no-default-features --profile pr-tests --verbose -- -D warnings\n    - name: Build\n      run: cargo build --all-targets --features metrics --all --profile pr-tests --verbose\n    ###############################################################################\n\n    - name: Delete the old cache\n      uses: WarpBuilds/cache@v1\n      with:\n        path: |\n          ~/.cargo/registry/index/\n          ~/.cargo/registry/cache/\n          ~/.cargo/git/db/\n          target/\n          Cargo.lock\n        key: ${{ runner.os }}-cargo-pr-tests\n        delete-cache: true\n    \n    - name: ⚡ Save rust cache\n      uses: 
WarpBuilds/cache/save@v1\n      with:\n        path: |\n          ~/.cargo/registry/index/\n          ~/.cargo/registry/cache/\n          ~/.cargo/git/db/\n          target/\n          Cargo.lock\n        key: ${{ runner.os }}-cargo-pr-tests\n"
  },
  {
    "path": ".github/workflows/dead-links.yml",
    "content": "name: Check markdown links\non: [pull_request, merge_group]\njobs:\n  markdown-link-check:\n    runs-on: ubuntu-24.04\n    steps:\n      - uses: actions/checkout@v4\n      - uses: gaurav-nelson/github-action-markdown-link-check@v1\n        with:\n          use-quiet-mode: 'no'\n          use-verbose-mode: 'yes'\n"
  },
  {
    "path": ".github/workflows/nightly-analyze.yml",
    "content": "name: Nightly Regression Analysis\n\non:\n  workflow_dispatch:\n  workflow_run:\n    workflows: [\"Nightly tests\"]\n    types:\n      - completed\n\njobs:\n  analyze:\n    runs-on: ubuntu-latest\n    # Only run if nightly tests completed successfully or failed (not skipped/cancelled)\n    if: >-\n      ${{\n        github.event_name == 'workflow_dispatch' ||\n        github.event.workflow_run.conclusion == 'success' ||\n        github.event.workflow_run.conclusion == 'failure'\n      }}\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - name: Set up Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: '3.12'\n\n      - name: Install dependencies\n        run: pip install pandas\n\n      - name: Run regression analysis\n        id: analysis\n        run: |\n          # Run analysis and capture stdout (report) separately from stderr (logs)\n          set +e\n          python ./scripts/analyze_nightly.py --regression-threshold 2 > analysis_report.md\n          EXIT_CODE=$?\n          set -e\n\n          # Set outputs for later steps\n          echo \"exit_code=$EXIT_CODE\" >> $GITHUB_OUTPUT\n\n          # Save report as output (using delimiter for multiline)\n          {\n            echo \"report<<EOF\"\n            cat analysis_report.md\n            echo \"EOF\"\n          } >> $GITHUB_OUTPUT\n\n          # Print report to logs as well\n          cat analysis_report.md\n\n      - name: Generate job summary\n        run: cat analysis_report.md >> $GITHUB_STEP_SUMMARY\n\n      - name: Check for regressions\n        if: ${{ steps.analysis.outputs.exit_code == '1' }}\n        run: echo \"::warning::Performance regressions detected! See job summary for details.\"\n\n      - name: Check for errors\n        if: ${{ steps.analysis.outputs.exit_code == '2' }}\n        run: echo \"::warning::Errors occurred during analysis. 
See job summary for details.\"\n\n      - name: Send report to Matrix\n        uses: fadenb/matrix-chat-message@v0.0.6\n        with:\n          homeserver: ${{ secrets.MATRIX_HOMESERVER }}\n          token: ${{ secrets.MATRIX_ACCESS_TOKEN }}\n          channel: ${{ secrets.MATRIX_ROOM_ID }}\n          message: |\n            ${{ steps.analysis.outputs.report }}\n\n            [View workflow run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})\n"
  },
  {
    "path": ".github/workflows/nightly-tests.yml",
    "content": "name: Nightly tests\non:\n  workflow_dispatch:\n  schedule:\n    - cron: \"0 23 * * *\" # run at 11pm UTC\n\nenv:\n  CARGO_TERM_COLOR: always\n  RUSTFLAGS: \"-C target-cpu=native\"\n  RUST_BACKTRACE: 1\n  JEMALLOC_SYS_WITH_MALLOC_CONF: \"retain:true,background_thread:true,metadata_thp:always,dirty_decay_ms:10000,muzzy_decay_ms:10000,abort_conf:true\"\n  POWDR_OPENVM_SEGMENT_DELTA: 50000\n\njobs:\n  bench:\n    runs-on: warp-ubuntu-2404-x64-4x\n    permissions:\n      contents: write\n      deployments: write\n      pull-requests: write\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n      - name: ⚡ Restore rust cache\n        id: cache\n        uses: WarpBuilds/cache/restore@v1\n        with:\n          path: |\n            ~/.cargo/registry/index/\n            ~/.cargo/registry/cache/\n            ~/.cargo/git/db/\n            target/\n            Cargo.lock\n          key: ${{ runner.os }}-cargo-pr-tests\n      - name: Install Rust toolchain nightly-2025-10-01 (with clippy and rustfmt)\n        run: rustup toolchain install nightly-2025-10-01 --component clippy,rustfmt,rust-src\n      - name: Install Rust toolchain 1.90\n        run: rustup toolchain install 1.90\n      - name: Install riscv target\n        run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-10-01\n      - name: Install test dependencies\n        run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld\n      - name: Run benchmarks\n        # we add `|| exit 1` to make sure the step fails if `cargo bench` fails\n        run: cargo bench --workspace --features \"metrics\" -- --output-format bencher | tee output.txt || exit 1\n      - name: Store benchmark result\n        uses: benchmark-action/github-action-benchmark@v1\n        with:\n          name: Benchmarks\n          tool: \"cargo\"\n          output-file-path: output.txt\n          github-token: ${{ secrets.GITHUB_TOKEN 
}}\n          auto-push: true\n          alert-threshold: \"120%\"\n          comment-on-alert: true\n          summary-always: true\n\n  test_apc:\n    runs-on: server-dev\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n\n      - name: ⚡ Cache rust\n        uses: actions/cache@v4\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-release-apc-${{ hashFiles('**/Cargo.toml') }}\n\n      - name: Build\n        run: cargo build --release -p powdr-openvm\n\n      - name: Install cargo openvm\n        # Rust 1.90 is needed by fresher versions of dependencies of cargo-openvm.\n        run: |\n          rustup toolchain install 1.90\n          cargo +1.90 install --git 'https://github.com/powdr-labs/openvm.git' --rev \"v1.4.2-powdr-rc.4\" --locked cargo-openvm\n\n      - name: Setup python venv\n        run: |\n          python3 -m venv .venv\n          source .venv/bin/activate\n          pip install -r openvm-riscv/scripts/requirements.txt\n          pip install -r autoprecompiles/scripts/requirements.txt\n\n      - name: Remove old results if present\n        run: |\n          rm -rf results\n          mkdir -p results\n\n      - name: Run guest benchmarks\n        run: |\n          source .venv/bin/activate\n          bash ./openvm-riscv/scripts/run_guest_benches.sh\n\n      - name: Patch benchmark\n        uses: ./.github/actions/patch-openvm-reth-benchmark\n\n      - name: Run reth benchmark\n        run: |\n          source .venv/bin/activate\n          cd openvm-reth-benchmark\n          RES_DIR=reth\n          mkdir -p $RES_DIR\n          echo \"export RPC_1=${{ secrets.RPC_1 }}\" >> .env\n\n          # prove with no APCs\n          ./run.sh --apc 0 --mode prove-stark || exit 1\n          # remove apc cache to not interfere with the next runs\n          rm -rf apc-cache\n          echo \"Finished proving with no 
APCs\"\n          mv metrics.json $RES_DIR/apc000.json\n          python ../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc000.png $RES_DIR/apc000.json > $RES_DIR/trace_cells_apc000.txt\n\n          # prove with 3 APCs\n          ./run.sh --apc 3 --mode prove-stark || exit 1\n          # remove apc cache to not interfere with the next runs\n          rm -rf apc-cache\n          echo \"Finished proving with 3 APCs\"\n          mv metrics.json $RES_DIR/apc003.json\n          python ../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc003.png $RES_DIR/apc003.json > $RES_DIR/trace_cells_apc003.txt\n\n          # prove with 10 APCs\n          ./run.sh --apc 10 --mode prove-stark || exit 1\n          # remove apc cache to not interfere with the next runs\n          rm -rf apc-cache\n          echo \"Finished proving with 10 APCs\"\n          mv metrics.json $RES_DIR/apc010.json\n          python ../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc010.png $RES_DIR/apc010.json > $RES_DIR/trace_cells_apc010.txt\n\n          # prove with 30 APCs\n          ./run.sh --apc 30 --mode prove-stark || exit 1\n          # remove apc cache to not interfere with the next runs\n          rm -rf apc-cache\n          echo \"Finished proving with 30 APCs\"\n          mv metrics.json $RES_DIR/apc030.json\n          python ../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc030.png $RES_DIR/apc030.json > $RES_DIR/trace_cells_apc030.txt\n\n          # prove with 100 APCs, recording mem usage\n          psrecord --include-children --interval 1 --log $RES_DIR/psrecord.csv --log-format csv --plot $RES_DIR/psrecord.png \"./run.sh --apc 100 --mode prove-stark\" || exit 1\n          # remove apc cache to not interfere with the next runs\n          rm -rf apc-cache\n          echo \"Finished proving with 100 APCs\"\n          mv metrics.json $RES_DIR/apc100.json\n          python 
../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc100.png $RES_DIR/apc100.json > $RES_DIR/trace_cells_apc100.txt\n\n          # The APC candidates would be the same for all runs, so just keep the last one\n          mv apcs/apc_candidates.json $RES_DIR/apc_candidates.json\n\n          python ../openvm-riscv/scripts/basic_metrics.py summary-table --csv $RES_DIR/apc000.json $RES_DIR/apc003.json $RES_DIR/apc010.json $RES_DIR/apc030.json $RES_DIR/apc100.json > $RES_DIR/basic_metrics.csv\n          python ../openvm-riscv/scripts/basic_metrics.py plot $RES_DIR/apc000.json $RES_DIR/apc003.json $RES_DIR/apc010.json $RES_DIR/apc030.json $RES_DIR/apc100.json -o $RES_DIR/proof_time_breakdown.png\n          python ../openvm-riscv/scripts/basic_metrics.py combine $RES_DIR/apc000.json $RES_DIR/apc003.json $RES_DIR/apc010.json $RES_DIR/apc030.json $RES_DIR/apc100.json > $RES_DIR/combined_metrics.json\n          python ../autoprecompiles/scripts/plot_effectiveness.py $RES_DIR/apc_candidates.json --output $RES_DIR/effectiveness.png\n\n          mv $RES_DIR ../results/\n\n      - name: Save revisions and run info\n        run: |\n          echo \"openvm-reth-benchmark: $(git -C openvm-reth-benchmark rev-parse HEAD)\" > results/run.txt\n          echo \"powdr: $(git rev-parse HEAD)\" >> results/run.txt\n          echo \"run: https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}\" >> results/run.txt\n\n      - name: upload result artifacts\n        uses: actions/upload-artifact@v4\n        with:\n          name: bench-results\n          path: |\n            results/*\n\n      - name: get the date/time\n        id: date\n        run: echo \"value=$(date +'%Y-%m-%d-%H%M')\" >> $GITHUB_OUTPUT\n\n      - name: Generate bench results README\n        run: |\n          python3 ./openvm-riscv/scripts/generate_bench_results_readme.py \\\n            ./results \\\n            \"${{ steps.date.outputs.value }}\" \\\n            --output 
./results/readme.md\n\n      - name: commit to bench results\n        uses: peaceiris/actions-gh-pages@v4\n        with:\n          personal_token: ${{ secrets.BENCH_RESULTS_TOKEN }}\n          external_repository: powdr-labs/bench-results\n          publish_dir: ./results\n          destination_dir: results/${{ steps.date.outputs.value }}/\n          keep_files: true\n          enable_jekyll: true\n\n  test_apc_gpu:\n    runs-on: [self-hosted, gpu-shared]\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n\n      - name: ⚡ Cache rust\n        uses: actions/cache@v4\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-release-apc-gpu-${{ hashFiles('**/Cargo.toml') }}\n\n      - name: Install cargo openvm\n        # Rust 1.90 is needed by fresher versions of dependencies of cargo-openvm.\n        run: |\n          rustup toolchain install 1.90\n          cargo +1.90 install --git 'https://github.com/powdr-labs/openvm.git' --rev \"v1.4.2-powdr-rc.4\" --locked cargo-openvm\n\n      - name: Setup python venv\n        run: |\n          python3 -m venv .venv\n          source .venv/bin/activate\n          pip install -r openvm-riscv/scripts/requirements.txt\n          pip install -r autoprecompiles/scripts/requirements.txt\n\n      - name: Remove old results if present\n        run: |\n          rm -rf results\n          mkdir -p results\n\n      - name: Patch benchmark\n        uses: ./.github/actions/patch-openvm-reth-benchmark\n\n      - name: Run reth benchmark (GPU)\n        run: |\n          source .venv/bin/activate\n          cd openvm-reth-benchmark\n          RES_DIR=reth_gpu\n          mkdir -p $RES_DIR\n          echo \"export RPC_1=${{ secrets.RPC_1 }}\" >> .env\n\n          # prove with no APCs\n          ./run.sh --cuda --apc 0 --mode prove-stark || exit 1\n          # remove apc cache to not interfere with the next 
runs\n          rm -rf apc-cache\n          echo \"Finished proving with no APCs\"\n          mv metrics.json $RES_DIR/apc000.json\n          python ../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc000.png $RES_DIR/apc000.json > $RES_DIR/trace_cells_apc000.txt\n\n          # prove with 10 APCs\n          ./run.sh --cuda --apc 10 --mode prove-stark || exit 1\n          # remove apc cache to not interfere with the next runs\n          rm -rf apc-cache\n          echo \"Finished proving with 10 APCs\"\n          mv metrics.json $RES_DIR/apc010.json\n          python ../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc010.png $RES_DIR/apc010.json > $RES_DIR/trace_cells_apc010.txt\n\n          # prove with 30 APCs\n          ./run.sh --cuda --apc 30 --mode prove-stark || exit 1\n          # remove apc cache to not interfere with the next runs\n          rm -rf apc-cache\n          echo \"Finished proving with 30 APCs\"\n          mv metrics.json $RES_DIR/apc030.json\n          python ../openvm-riscv/scripts/plot_trace_cells.py -o $RES_DIR/trace_cells_apc030.png $RES_DIR/apc030.json > $RES_DIR/trace_cells_apc030.txt\n\n          # The APC candidates would be the same for all runs, so just keep the last one\n          mv apcs/apc_candidates.json $RES_DIR/apc_candidates.json\n\n          python ../openvm-riscv/scripts/basic_metrics.py summary-table --csv $RES_DIR/apc000.json $RES_DIR/apc010.json $RES_DIR/apc030.json > $RES_DIR/basic_metrics.csv\n          python ../openvm-riscv/scripts/basic_metrics.py plot $RES_DIR/apc000.json $RES_DIR/apc010.json $RES_DIR/apc030.json -o $RES_DIR/proof_time_breakdown.png\n          python ../openvm-riscv/scripts/basic_metrics.py combine $RES_DIR/apc000.json $RES_DIR/apc010.json $RES_DIR/apc030.json > $RES_DIR/combined_metrics.json\n          python ../autoprecompiles/scripts/plot_effectiveness.py $RES_DIR/apc_candidates.json --output $RES_DIR/effectiveness.png\n\n          mv $RES_DIR 
../results/\n\n      - name: Save revisions and run info\n        run: |\n          echo \"openvm-reth-benchmark: $(git -C openvm-reth-benchmark rev-parse HEAD)\" > results/run.txt\n          echo \"powdr: $(git rev-parse HEAD)\" >> results/run.txt\n          echo \"run: https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}\" >> results/run.txt\n\n      - name: upload result artifacts\n        uses: actions/upload-artifact@v4\n        with:\n          name: bench-results-gpu\n          path: |\n            results/*\n\n      - name: get the date/time\n        id: date\n        run: echo \"value=$(date +'%Y-%m-%d-%H%M')\" >> $GITHUB_OUTPUT\n\n      - name: Generate bench results README\n        run: |\n          python3 ./openvm-riscv/scripts/generate_bench_results_readme.py \\\n            ./results \\\n            \"${{ steps.date.outputs.value }}-gpu\" \\\n            --output ./results/readme.md\n\n      - name: commit to bench results\n        uses: peaceiris/actions-gh-pages@v4\n        with:\n          personal_token: ${{ secrets.BENCH_RESULTS_TOKEN }}\n          external_repository: powdr-labs/bench-results\n          publish_dir: ./results\n          destination_dir: results/${{ steps.date.outputs.value }}-gpu/\n          keep_files: true\n          enable_jekyll: true\n"
  },
  {
    "path": ".github/workflows/post-merge-tests.yml",
    "content": "name: Post-merge APC tests\non:\n  workflow_dispatch:\n  push:\n    branches:\n      - main\n    paths:\n      - \"**.rs\"\n      - \"**.toml\"\n\nenv:\n  CARGO_TERM_COLOR: always\n  RUSTFLAGS: \"-C target-cpu=native\"\n  RUST_BACKTRACE: 1\n  JEMALLOC_SYS_WITH_MALLOC_CONF: \"retain:true,background_thread:true,metadata_thp:always,dirty_decay_ms:10000,muzzy_decay_ms:10000,abort_conf:true\"\n  POWDR_OPENVM_SEGMENT_DELTA: 50000\n\njobs:\n  test_guests_apc:\n    runs-on: server-dev\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n\n      - name: ⚡ Cache rust\n        uses: actions/cache@v4\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-release-apc-${{ hashFiles('**/Cargo.toml') }}\n\n      - name: Build\n        run: cargo build --release -p powdr-openvm\n\n      - name: Install cargo openvm\n        # Rust 1.90 is needed by fresher versions of dependencies of cargo-openvm.\n        run: |\n          rustup toolchain install 1.90\n          cargo +1.90 install --git 'https://github.com/powdr-labs/openvm.git' --rev \"v1.4.2-powdr-rc.4\" --locked cargo-openvm\n\n      - name: Run keccak with 100 APCs\n        run: /usr/bin/time -v cargo run --bin powdr_openvm_riscv -r prove guest-keccak --input 10000 --autoprecompiles 100 --recursion\n\n      - name: Run ECC with 100 APCs\n        run: /usr/bin/time -v cargo run --bin powdr_openvm_riscv -r prove guest-ecc-powdr-affine-hint --input 20 --autoprecompiles 100 --recursion\n\n      - name: Run ecrecover with 100 APCs\n        run: /usr/bin/time -v cargo run --bin powdr_openvm_riscv -r prove guest-ecrecover --input 20 --autoprecompiles 100 --recursion\n\n      - name: Patch benchmark\n        uses: ./.github/actions/patch-openvm-reth-benchmark\n\n      - name: Run reth benchmark\n        run: |\n          cd openvm-reth-benchmark\n          RES_DIR=reth\n          
mkdir -p $RES_DIR\n          echo \"export RPC_1=${{ secrets.RPC_1 }}\" >> .env\n\n          # prove with 100 APCs\n          /usr/bin/time -v ./run.sh --apc 100  --mode prove-stark || exit 1\n          echo \"Finished proving with 100 APCs\"\n\n      - name: Save revisions and run info\n        run: |\n          echo \"openvm-reth-benchmark: $(git -C openvm-reth-benchmark rev-parse HEAD)\" > run.txt\n          echo \"powdr: $(git rev-parse HEAD)\" >> run.txt\n          echo \"run: https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}\" >> run.txt\n\n      - name: upload artifacts\n        uses: actions/upload-artifact@v4\n        with:\n          name: bench-results\n          path: |\n            run.txt\n"
  },
  {
    "path": ".github/workflows/pr-tests-with-secrets.yml",
    "content": "name: PR tests (with secrets)\n\n# This workflow uses pull_request_target to allow external PRs to access secrets\n# after a maintainer approves the workflow run\n#\n# SECURITY NOTE: This workflow intentionally checks out untrusted code from PRs\n# to run tests with secrets. This is safe because:\n# 1. GitHub requires maintainer approval before running for external contributors\n# 2. The workflow code itself (this file) is controlled and runs from base branch\n# 3. We only run predefined build/test commands, not arbitrary PR code\n# 4. Cache poisoning risk is acceptable for these specific test jobs\non:\n  # also allow this to be run manually (so we can test changes to the workflow in a branch)\n  workflow_dispatch:\n  pull_request_target:\n    types: [opened, synchronize, reopened]\n\n# cancel any previous running workflows for the same branch\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}\n  cancel-in-progress: true\n\n# Minimal permissions for security\npermissions:\n  contents: read\n\nenv:\n  CARGO_TERM_COLOR: always\n  POWDR_OPENVM_SEGMENT_DELTA: 50000\n\njobs:\n  test_apc_reth_compilation:\n    runs-on: warp-ubuntu-2404-x64-8x\n\n    steps:\n      # IMPORTANT: Checkout the PR head, not the base branch\n      - uses: actions/checkout@v4\n        with:\n          ref: ${{ github.event.pull_request.head.sha }}\n          submodules: recursive\n      - name: ⚡ Cache rust\n        uses: actions/cache@v4\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-release-${{ hashFiles('**/Cargo.toml') }}\n      - name: Build\n        run: cargo build --release -p powdr-openvm\n\n      - name: Install cargo openvm\n        # Rust 1.90 is needed by fresher versions of dependencies of cargo-openvm.\n        run: |\n          rustup toolchain install 1.90\n          cargo +1.90 install --git 
'https://github.com/powdr-labs/openvm.git' --rev \"v1.4.2-powdr-rc.4\" --locked cargo-openvm\n\n      - name: Patch benchmark\n        uses: ./.github/actions/patch-openvm-reth-benchmark\n\n      - name: Run small execution test with APCs\n        run: |\n          cd openvm-reth-benchmark\n          echo \"export RPC_1=${{ secrets.RPC_1 }}\" >> .env\n          PGO_TYPE=\"instruction\" /usr/bin/time -v ./run.sh --apc 10 --mode compile\n\n      # Check that reth commit is on main.\n      # Do that after the actual test so that the step above passes when checking that a\n      # reth PR commit works with a powdr PR.\n      - name: Verify openvm-reth-benchmark ref is on main\n        shell: bash\n        run: |\n          cd openvm-reth-benchmark\n          if [ \"$(git rev-parse --is-shallow-repository)\" = \"true\" ]; then\n            git fetch --quiet --unshallow origin main\n          else\n            git fetch --quiet origin main\n          fi\n          if ! git merge-base --is-ancestor HEAD origin/main; then\n            echo \"Pinned ref is not in origin/main history.\"\n            echo \"HEAD: $(git rev-parse HEAD)\"\n            echo \"origin/main: $(git rev-parse origin/main)\"\n            exit 1\n          fi\n\n  test_apc_reth_app_proof:\n    runs-on: warp-ubuntu-2404-x64-32x\n\n    steps:\n      # IMPORTANT: Checkout the PR head, not the base branch\n      - uses: actions/checkout@v4\n        with:\n          ref: ${{ github.event.pull_request.head.sha }}\n          submodules: recursive\n\n      - name: ⚡ Cache rust\n        uses: actions/cache@v4\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-release-apc-reth-app-proof-${{ hashFiles('**/Cargo.toml') }}\n\n      - name: Build\n        run: cargo build --release -p powdr-openvm\n\n      - name: Install cargo openvm\n        # Rust 1.90 is needed by fresher versions of dependencies of cargo-openvm.\n  
      run: |\n          rustup toolchain install 1.90\n          cargo +1.90 install --git 'https://github.com/powdr-labs/openvm.git' --rev \"v1.4.2-powdr-rc.4\" --locked cargo-openvm\n\n      - name: Setup python venv\n        run: |\n          python3 -m venv .venv\n          source .venv/bin/activate\n          pip install -r openvm-riscv/scripts/requirements.txt\n          pip install -r autoprecompiles/scripts/requirements.txt\n\n      - name: Patch benchmark\n        uses: ./.github/actions/patch-openvm-reth-benchmark\n\n      - name: Run reth benchmark\n        run: |\n          source .venv/bin/activate\n          cd openvm-reth-benchmark\n          RES_DIR=reth\n          mkdir -p $RES_DIR\n          echo \"export RPC_1=${{ secrets.RPC_1 }}\" >> .env\n\n          # prove with 3 APCs\n          APC=3 ./run.sh --mode prove-app || exit 1\n          echo \"Finished proving with 3 APCs\"\n"
  },
  {
    "path": ".github/workflows/pr-tests.yml",
    "content": "name: PR tests\n\non:\n  workflow_dispatch:\n  pull_request:\n    types: [opened, synchronize, reopened, ready_for_review]\n  merge_group:\n  push:\n    branches:\n      - main\n\n# cancel any previous running workflows for the same branch\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}\n\nenv:\n  CARGO_TERM_COLOR: always\n  POWDR_OPENVM_SEGMENT_DELTA: 50000\n\njobs:\n  build_cpu:\n    runs-on: warp-ubuntu-2404-x64-8x\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n          submodules: recursive\n      - name: ⚡ Restore rust cache\n        id: cache\n        uses: WarpBuilds/cache/restore@v1\n        with:\n          path: |\n            ~/.cargo/registry/index/\n            ~/.cargo/registry/cache/\n            ~/.cargo/git/db/\n            target/\n            Cargo.lock\n          key: ${{ runner.os }}-cargo-pr-tests\n      - name: Date of the restored cache\n        run: cat target/cache-build-date.txt\n        continue-on-error: true\n      - name: Check out cache commit state and update mtime accordingly.\n        run: git checkout \"$(cat target/cache-commit-hash.txt || echo 'f02fd626e2bb9e46a22ea1cda96b4feb5c6bda43')\" && git ls-files -z | xargs -0 -n1 touch -d \"Fri, 18 Apr 2025 03:30:58 +0000\" && git checkout HEAD@{1}\n\n      ##### The block below is shared between cache build and PR build workflows #####\n      - name: Install Rust toolchain nightly-2025-10-01 (with clippy and rustfmt)\n        run: rustup toolchain install nightly-2025-10-01 --component clippy,rustfmt\n      - name: Install Rust toolchain 1.90 (stable)\n        run: rustup toolchain install 1.90\n      - name: Set cargo to perform shallow clones\n        run: echo \"CARGO_NET_GIT_FETCH_WITH_CLI=true\" >> $GITHUB_ENV\n      - name: Format\n        run: cargo fmt --all --check --verbose\n      - name: Cargo check with Rust 1.90 (default features)\n   
     run: cargo +1.90 check --all-targets\n      - name: Lint with metrics feature\n        run: cargo clippy --all --all-targets --features metrics --profile pr-tests --verbose -- -D warnings\n      - name: Build (CPU)\n        run: cargo build --all-targets --features metrics --all --profile pr-tests --verbose\n      ###############################################################################\n\n      - uses: taiki-e/install-action@nextest\n      - name: Create tests archive (CPU)\n        run: cargo nextest archive --archive-file tests_cpu.tar.zst --cargo-profile pr-tests --workspace --no-default-features\n      - name: Upload build artifacts (CPU)\n        uses: actions/upload-artifact@v4\n        with:\n          name: tests_archive_cpu\n          path: |\n            tests_cpu.tar.zst\n\n  test_quick_cpu:\n    needs: build_cpu\n    runs-on: ubuntu-24.04\n    strategy:\n      matrix:\n        test:\n          - \"1\"\n          - \"2\"\n          - \"3\"\n          - \"4\"\n          - \"5\"\n          - \"6\"\n          - \"7\"\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n      - name: Init testing instance\n        uses: ./.github/actions/init-testing-instance\n      - name: Run default tests\n        run: cargo nextest run --archive-file tests_cpu.tar.zst --workspace-remap . --verbose --partition count:\"${{ matrix.test }}\"/7 --no-tests=warn\n\n  test_medium_cpu:\n    needs: build_cpu\n    runs-on: warp-ubuntu-2404-x64-16x\n    strategy:\n      matrix:\n        test:\n          - \"1\"\n          - \"2\"\n          - \"3\"\n          - \"4\"\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n      - name: Init testing instance\n        uses: ./.github/actions/init-testing-instance\n      - name: Run medium tests (ignored tests except large ones)\n        run: cargo nextest run --archive-file tests_cpu.tar.zst --workspace-remap . 
--verbose --partition count:\"${{ matrix.test }}\"/4 --test-threads=4 -E 'not (test(_large))' --run-ignored only --no-tests=warn\n\n  test_large_cpu:\n    needs: build_cpu\n    runs-on: warp-ubuntu-2404-x64-32x\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n      - name: Init testing instance\n        uses: ./.github/actions/init-testing-instance\n      - name: Run large tests\n        run: cargo nextest run --archive-file tests_cpu.tar.zst --workspace-remap . --verbose -E 'test(_large)' --run-ignored only --no-tests=warn\n\n  udeps_cpu:\n    runs-on: ubuntu-22.04\n\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v4\n\n      - name: Install nightly toolchain\n        uses: actions-rs/toolchain@v1\n        with:\n          toolchain: nightly\n          override: true\n\n      - name: Install cargo-udeps\n        run: cargo install cargo-udeps --locked\n      - name: Run cargo-udeps (CPU)\n        run: cargo udeps --all-targets\n\n  # NOTE: test_apc_reth_compilation has been moved to pr-tests-with-secrets.yml\n  # This job requires secrets.RPC_1 and uses pull_request_target to work with external PRs\n\n  build_gpu:\n    if: github.event.pull_request.draft != true\n    runs-on: [self-hosted, gpu-shared]\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n          submodules: recursive\n\n      - name: Install Rust toolchain nightly-2025-10-01 (with clippy and rustfmt)\n        run: rustup toolchain install nightly-2025-10-01 --component clippy,rustfmt\n      - name: Install Rust toolchain 1.90 (stable)\n        run: rustup toolchain install 1.90\n      - name: Set cargo to perform shallow clones\n        run: echo \"CARGO_NET_GIT_FETCH_WITH_CLI=true\" >> $GITHUB_ENV\n      - name: Format\n        run: cargo fmt --all --check --verbose\n      - name: Cargo check with Rust 1.90 (default features)\n        run: cargo +1.90 check 
--all-targets\n      - name: Lint with cuda,metrics,aot features\n        run: cargo clippy --all --all-targets --features cuda,metrics,aot --profile pr-tests --verbose -- -D warnings\n      - name: Build (GPU)\n        run: cargo build --all-targets --features cuda,metrics,aot --all --profile pr-tests --verbose\n      - uses: taiki-e/install-action@nextest\n      - name: Create tests archive (GPU, features=cuda)\n        run: cargo nextest archive --archive-file tests_gpu.tar.zst --cargo-profile pr-tests --workspace --package powdr-openvm-riscv --features cuda\n      - name: Upload build artifacts (GPU)\n        uses: actions/upload-artifact@v4\n        with:\n          name: tests_archive_gpu\n          path: |\n            tests_gpu.tar.zst\n\n  test_quick_gpu:\n    if: github.event.pull_request.draft != true\n    needs: build_gpu\n    runs-on: [self-hosted, gpu-shared]\n    timeout-minutes: 30\n    # TODO: we only have one runner on our GPU server, so can't partition yet; uncomment these once we have proper runners\n    # strategy:\n    #   matrix:\n    #     test:\n    #       - \"1\"\n    #       - \"2\"\n    #       - \"3\"\n    #       - \"4\"\n    #       - \"5\"\n    #       - \"6\"\n    #       - \"7\"\n\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: recursive\n      - name: Init testing instance (GPU)\n        uses: ./.github/actions/init-testing-instance-gpu\n      - name: Run quick GPU tests from powdr-openvm-riscv only\n        run: cargo nextest run --archive-file tests_gpu.tar.zst --workspace-remap . --verbose --no-tests=warn\n        # run: cargo nextest run --archive-file tests_gpu.tar.zst --workspace-remap . --verbose --partition count:\"${{ matrix.test }}\"/7 --no-tests=warn\n\n  # NOTE: test_apc_reth_app_proof has been moved to pr-tests-with-secrets.yml\n  # This job requires secrets.RPC_1 and uses pull_request_target to work with external PRs\n\n"
  },
  {
    "path": ".gitignore",
    "content": "# Generated by Cargo\n# will have compiled files and executables\n/target/\n\n# Cargo configuration\n/.cargo/\n\n# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries\n# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html\nCargo.lock\n\n# These are backup files generated by rustfmt\n**/*.rs.bk\n\ncargo_target/\nriscv/tests/riscv_data/**/target\n"
  },
  {
    "path": "CLAUDE.md",
    "content": "# CLAUDE.md\n\nThis file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.\n\n## Project Overview\n\npowdr is a zkVM enhancement toolkit that provides autoprecompiles (automated synthesis of guest-specific precompiles) and a constraint solver. The codebase is tightly integrated with OpenVM and stark-backend from powdr-labs forks, providing autoprecompiles for the RISC-V zkVM OpenVM.\n\n## Build Commands\n\n```bash\n# Build the workspace (CPU)\ncargo build --all-targets --features metrics\n\n# Build with GPU support\ncargo build --all-targets --features cuda,metrics\n\n# Check compilation\ncargo check --all-targets\n\n# Format code\ncargo fmt --all\n\n# Lint\ncargo clippy --all --all-targets --features metrics -- -D warnings\n```\n\n## Testing\n\nAlways use `--release` for test runs. Use the `quick-10` profile as the default — it times out tests after 10s and continues. Check which tests timed out; only re-run them individually (without the quick profile) if you have good reason to believe your diff could affect them. 
All tests are run on CI anyway.\n\n```bash\n# Run all tests (default, with 10s timeout per test)\ncargo nextest run --release --profile quick-10\n\n# Run a single test\ncargo nextest run --release <test_name>\n\n# Run ignored (longer) tests\ncargo nextest run --release --run-ignored only\n\n# Run only large tests\ncargo nextest run --release -E 'test(_large)' --run-ignored only\n\n# Run tests in specific package\ncargo nextest run --release -p powdr-openvm\n\n# Available quick profiles (timeout per test, slow tests deprioritized):\n#   --profile quick-10   (10s timeout, good default)\n#   --profile quick-30   (30s timeout)\n#   --profile quick-60   (60s timeout)\n```\n\n## CLI Usage\n\nThe main CLI is `powdr_openvm_riscv` (in `cli-openvm-riscv/`):\n\n```bash\n# Compile a guest program with autoprecompiles\ncargo run -p cli-openvm -- compile guest-keccak --autoprecompiles 10 --pgo instruction --input 100\n\n# Execute a compiled program\ncargo run -p cli-openvm -- execute guest-keccak --autoprecompiles 10 --input 100\n\n# Prove (generate ZK proof)\ncargo run -p cli-openvm -- prove guest-keccak --autoprecompiles 1 --input 10\n\n# Mock prove (debug mode, verifies constraints without full proof)\ncargo run -p cli-openvm -- prove guest-keccak --mock --autoprecompiles 1 --input 10\n```\n\n## Architecture\n\n### Core Crates\n\n- **autoprecompiles** (`autoprecompiles/`): The main precompile synthesis engine. Analyzes basic blocks of agnostic assembly instructions and synthesizes optimized circuits (APCs - Autoprecompiles). Key modules:\n  - `optimizer.rs`: Constraint optimization pipeline\n  - `constraint_optimizer.rs`: Eliminates redundant constraints\n  - `symbolic_machine_generator.rs`: Converts instruction sequences to symbolic machines\n  - `pgo/`: Profile-guided optimization for APC selection\n\n- **constraint-solver** (`constraint-solver/`): Algebraic constraint analysis and solving. 
Provides:\n  - `grouped_expression.rs`: Expression representation for efficient manipulation\n  - `indexed_constraint_system.rs`: Efficient constraint system indexing\n  - `range_constraint.rs`: Range analysis for variables\n  - `inliner.rs`: Constraint inlining with degree bounds\n\n- **openvm** (`openvm/`): OpenVM integration layer. Connects powdr optimizations to the OpenVM zkVM:\n  - `customize_exe.rs`: Modifies OpenVM executables to use APCs\n  - `powdr_extension/`: OpenVM circuit extension for APCs\n  - `trace_generation.rs`: Generates execution traces for proving\n\n### Supporting Crates\n\n- **expression** (`expression/`): Core algebraic expression types (`AlgebraicExpression`, operators)\n- **number** (`number/`): Field element abstractions\n- **riscv-elf** (`riscv-elf/`): ELF file parsing for RISC-V binaries\n- **cli-openvm** (`cli-openvm/`): Command-line interface\n\n### Guest Programs\n\nExample guest programs in `openvm/guest-*` directories (keccak, sha256, ecc, pairing, etc.) are used for testing and benchmarking.\n\n## Key Concepts\n\n- **APC (Autoprecompile)**: An optimized circuit for a basic block of assembly instructions (often RISC-V)\n- **PGO (Profile-Guided Optimization)**: Uses execution profiling to select which basic blocks to optimize\n  - `PgoConfig::Cell`: Optimizes based on total cell count savings\n  - `PgoConfig::Instruction`: Optimizes based on instruction execution frequency\n- **Symbolic Machine**: Intermediate representation of constraints and bus interactions\n- **Bus Interactions**: Communication between different chips/machines in the OpenVM architecture\n\n## Coding Guidelines\n\n### Coding Style\n- Write idiomatic Rust code. Follow Rust conventions and best practices, and keep the style similar to existing code in the repository.\n- Try to minimize code, reusing existing functions and modules where possible.\n- Keep diffs small and focused. 
Avoid unrelated changes, unnecessary refactoring, or adding comments to unchanged code.\n- Use builder pattern with `with_*` methods for structs with optional configuration.\n\n### Before Returning to User\nAlways run these checks before claiming work is complete:\n1. Format code\n2. Check clippy\n3. Run relevant tests and / or end-to-end tests using the CLI\n\n### Git Workflow\n- Use `git push origin <branchname>`\n- Never use `git add .` - explicitly add modified files only\n\n### PR Workflow\n\nUse the GitHub CLI to interact with GitHub, for example:\n- Create PR (always use --draft): `gh pr create --repo https://github.com/powdr-labs/powdr --base main --draft --title \"...\" --body \"...\"`\n- Check CI status: `gh pr checks --repo https://github.com/powdr-labs/powdr <pr-number>`\n- View PR comments: `gh pr view --repo https://github.com/powdr-labs/powdr <pr-number> --comments`\n- View review comments on code: `gh api repos/powdr-labs/powdr/pulls/<pr-number>/comments`\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\n\nresolver = \"2\"\n\nmembers = [\n  \"number\",\n  \"constraint-solver\",\n  \"expression\",\n  \"riscv-elf\",\n  \"riscv-types\",\n  \"isa-utils\",\n  \"syscalls\",\n  \"autoprecompiles\",\n  \"openvm\",\n  \"openvm-bus-interaction-handler\",\n  \"openvm-riscv\",\n  \"cli-openvm-riscv\",\n  \"openvm-riscv/extensions/hints-guest\",\n  \"openvm-riscv/extensions/hints-transpiler\",\n  \"openvm-riscv/extensions/hints-circuit\",\n]\n\nexclude = [\"riscv-runtime\"]\n\n[workspace.package]\nversion = \"0.1.4\"\nedition = \"2021\"\nlicense = \"MIT\"\nhomepage = \"https://powdr.org\"\nrepository = \"https://github.com/powdr-labs/powdr\"\n\n[workspace.dependencies]\n# workspace crates\npowdr-constraint-solver = { path = \"./constraint-solver\", version = \"0.1.4\" }\npowdr-isa-utils = { path = \"./isa-utils\", version = \"0.1.4\" }\npowdr-expression = { path = \"./expression\", version = \"0.1.4\" }\npowdr-number = { path = \"./number\", version = \"0.1.4\" }\npowdr-riscv-elf = { path = \"./riscv-elf\", version = \"0.1.4\" }\npowdr-riscv-types = { path = \"./riscv-types\", version = \"0.1.4\" }\npowdr-syscalls = { path = \"./syscalls\", version = \"0.1.4\" }\npowdr-autoprecompiles = { path = \"./autoprecompiles\", version = \"0.1.4\" }\npowdr-openvm-riscv = { path = \"./openvm-riscv\", version = \"0.1.4\" }\npowdr-openvm-bus-interaction-handler = { path = \"./openvm-bus-interaction-handler\", version = \"0.1.4\" }\npowdr-openvm = { path = \"./openvm\", version = \"0.1.4\" }\n\npowdr-openvm-riscv-hints-guest = { path = \"./openvm-riscv/extensions/hints-guest\", version = \"0.1.4\" }\npowdr-openvm-riscv-hints-transpiler = { path = \"./openvm-riscv/extensions/hints-transpiler\", version = \"0.1.4\" }\npowdr-openvm-riscv-hints-circuit = { path = \"./openvm-riscv/extensions/hints-circuit\", version = \"0.1.4\" }\n\n# openvm\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-build = { git = 
\"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-rv32im-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-rv32im-transpiler = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-rv32im-guest = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false }\nopenvm-transpiler = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-circuit-derive = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-circuit-primitives = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-circuit-primitives-derive = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-instructions = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-instructions-derive = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-sdk = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false, features = [\n  \"parallel\",\n  \"jemalloc\",\n  \"nightly-features\",\n  \"evm-prove\",\n] }\nopenvm-ecc-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-ecc-transpiler = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-keccak256-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-keccak256-transpiler = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-sha256-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-sha256-transpiler = { 
git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-algebra-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-algebra-transpiler = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-bigint-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-bigint-transpiler = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-pairing-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-pairing-transpiler = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-native-circuit = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false }\nopenvm-native-recursion = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false }\nopenvm-platform = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-custom-insn = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\n\n# stark-backend\nopenvm-stark-sdk = { git = \"https://github.com/powdr-labs/stark-backend.git\", rev = \"v1.2.2-powdr-2026-03-20\", default-features = false, features = [\n  \"parallel\",\n  \"jemalloc\",\n  \"nightly-features\",\n] }\nopenvm-stark-backend = { git = \"https://github.com/powdr-labs/stark-backend.git\", rev = \"v1.2.2-powdr-2026-03-20\", default-features = false, features = [\n  \"parallel\",\n  \"jemalloc\",\n] }\nopenvm-cuda-backend = { git = \"https://github.com/powdr-labs/stark-backend.git\", rev = \"v1.2.2-powdr-2026-03-20\", default-features = false }\nopenvm-cuda-builder = { git = \"https://github.com/powdr-labs/stark-backend.git\", rev = \"v1.2.2-powdr-2026-03-20\", default-features = false }\nopenvm-cuda-common = { git = 
\"https://github.com/powdr-labs/stark-backend.git\", rev = \"v1.2.2-powdr-2026-03-20\", default-features = false }\n\n# external dependencies\nnum-traits = \"0.2.19\"\nitertools = \"0.14\"\nderive_more = { version = \"2\", features = [\"full\"] }\nlog = \"0.4.27\"\nserde = \"1.0.228\"\ntracing = \"0.1.40\"\neyre = \"0.6.12\"\nserde_cbor = \"0.11.2\"\nmetrics = \"0.23.0\"\nderivative = \"2.2.0\"\nserde_json = \"^1.0.140\"\n\n# dev dependencies\ntest-log = \"0.2.19\"\npretty_assertions = \"1.4.1\"\nenv_logger = \"0.11.8\"\n\n[profile.pr-tests]\ninherits = \"dev\"\nopt-level = 3\ndebug = \"line-tables-only\"\ndebug-assertions = true\noverflow-checks = true\npanic = 'unwind'\nincremental = true         # This is true because target is cached\ncodegen-units = 256\n\n[profile.release-with-debug]\ninherits = \"release\"\ndebug = true\n\n[workspace.lints.clippy]\nprint_stdout = \"deny\"\nuninlined_format_args = \"deny\"\niter_over_hash_type = \"deny\"\n\n# Uncomment both patches below for local stark-backend and openvm.\n# The local openvm also needs to have stark-backend patched so all types match.\n# [patch.\"https://github.com/powdr-labs/stark-backend.git\"]\n# openvm-stark-sdk = { path = \"../stark-backend/crates/stark-sdk\", default-features = false }\n# openvm-stark-backend = { path = \"../stark-backend/crates/stark-backend\", default-features = false }\n# openvm-cuda-backend = { path = \"../stark-backend/crates/cuda-backend\", default-features = false }\n# openvm-cuda-builder = { path = \"../stark-backend/crates/cuda-builder\", default-features = false }\n# openvm-cuda-common = { path = \"../stark-backend/crates/cuda-common\", default-features = false }\n\n# [patch.\"https://github.com/powdr-labs/openvm.git\"]\n# openvm = { path = \"../openvm/crates/toolchain/openvm\" }\n# openvm-build = { path = \"../openvm/crates/toolchain/build\" }\n# openvm-rv32im-circuit = { path = \"../openvm/extensions/rv32im/circuit/\" }\n# openvm-rv32im-transpiler = { path = 
\"../openvm/extensions/rv32im/transpiler\" }\n# openvm-rv32im-guest = { path = \"../openvm/extensions/rv32im/guest\" }\n# openvm-transpiler = { path = \"../openvm/crates/toolchain/transpiler\" }\n# openvm-circuit = { path = \"../openvm/crates/vm\" }\n# openvm-circuit-derive = { path = \"../openvm/crates/vm/derive\" }\n# openvm-circuit-primitives = { path = \"../openvm/crates/circuits/primitives\" }\n# openvm-circuit-primitives-derive = { path = \"../openvm/crates/circuits/primitives/derive\" }\n# openvm-instructions = { path = \"../openvm/crates/toolchain/instructions\" }\n# openvm-instructions-derive = { path = \"../openvm/crates/toolchain/instructions/derive\" }\n# openvm-sdk = { path = \"../openvm/crates/sdk\" }\n# openvm-ecc-circuit = { path = \"../openvm/extensions/ecc/circuit\" }\n# openvm-ecc-transpiler = { path = \"../openvm/extensions/ecc/transpiler\" }\n# openvm-keccak256-circuit = { path = \"../openvm/extensions/keccak256/circuit\" }\n# openvm-keccak256-transpiler = { path = \"../openvm/extensions/keccak256/transpiler\" }\n# openvm-sha256-circuit = { path = \"../openvm/extensions/sha256/circuit\" }\n# openvm-sha256-transpiler = { path = \"../openvm/extensions/sha256/transpiler\" }\n# openvm-algebra-circuit = { path = \"../openvm/extensions/algebra/circuit\" }\n# openvm-algebra-transpiler = { path = \"../openvm/extensions/algebra/transpiler\" }\n# openvm-bigint-circuit = { path = \"../openvm/extensions/bigint/circuit\" }\n# openvm-bigint-transpiler = { path = \"../openvm/extensions/bigint/transpiler\" }\n# openvm-pairing-circuit = { path = \"../openvm/extensions/pairing/circuit\" }\n# openvm-pairing-transpiler = { path = \"../openvm/extensions/pairing/transpiler\" }\n# openvm-native-circuit = { path = \"../openvm/extensions/native/circuit\" }\n# openvm-native-recursion = { path = \"../openvm/extensions/native/recursion\" }\n# openvm-platform = { path = \"../openvm/crates/toolchain/platform\" }\n# openvm-custom-insn = { path = 
\"../openvm/crates/toolchain/custom_insn\" }\n"
  },
  {
    "path": "LICENSE-APACHE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "LICENSE-MIT",
    "content": "MIT License\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "<p align=\"center\">\n  <img src=\"assets/powdr_wires.png\" width=\"600\">\n</p>\n\n# powdr\n\n[![Matrix Chat](https://img.shields.io/badge/Matrix%20-chat-brightgreen?style=plastic&logo=matrix)](https://matrix.to/#/#powdr:matrix.org)\n[![Twitter Follow](https://img.shields.io/twitter/follow/powdr_labs?style=plastic&logo=twitter)](https://twitter.com/powdr_labs)<!-- markdown-link-check-disable-line -->\n\n> WARNING: This codebase is experimental and has not been audited. DO NOT USE FOR PRODUCTION!\n\nIf you have any questions or want to contribute, feel free to write us in our [Matrix Chat](https://matrix.to/#/#powdr:matrix.org).\n\n*powdr* provides state-of-the-art performance and security to zkVMs, enhancing them with compiler-based techniques including static analysis and formal verification.\n\nThe main components are:\n\n- [Autoprecompiles](https://www.powdr.org/blog/auto-acc-circuits): automated synthesis of guest-specific precompiles.\n- Constraint Solver: compile-time solver used to detect potential optimizations and security issues.\n- powdr-OpenVM: powdr extensions for [OpenVM](https://github.com/openvm-org/openvm/).\n\n## powdr-legacy\n\nThe previous versions of powdr are now archived in the [powdr-legacy](https://github.com/powdr-labs/powdr-legacy) repository.\nIt contains all previous crates regarding provers, powdr-asm, powdr-pil, powdrVM, stdlib circuits and RISC-V support.\n\n### Project structure\n\nFor an overview of the project structure, run:\n\n```\ncargo doc --workspace --no-deps --open\n```\n\n## Contributing\n\nUnless you explicitly state otherwise, any contribution intentionally submitted\nfor inclusion in the work by you, as defined in the Apache-2.0 license, shall be\ndual licensed as below, without any additional terms or conditions.\n\n## License\n\nThis project is licensed under either of\n\n<!-- markdown-link-check-disable -->\n- [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) 
([`LICENSE-APACHE`](LICENSE-APACHE))\n- [MIT license](https://opensource.org/licenses/MIT) ([`LICENSE-MIT`](LICENSE-MIT))\n<!-- markdown-link-check-enable -->\n\nat your option.\n"
  },
  {
    "path": "autoprecompile-analyzer/Claude.md",
    "content": "# APC Effectiveness Analyzer\n\nSingle-page web app for visualizing Automatic Precompile (APC) candidate effectiveness in zkVM systems. Port of `plot_effectiveness.py` from powdr.\n\n## Project Structure\n```\nindex.html          # SPA with embedded JS/CSS (~2000 lines)\nCLAUDE.md           # This file\n```\n\n## Data Format\n\nCurrent version:\n\n**Version 4** (current): Each APC is a *superblock* composed of one or more basic blocks.\n```json\n{\n  \"version\": 4,\n  \"apcs\": [{\n    \"execution_frequency\": 50000,\n    \"original_blocks\": [\n      { \"start_pc\": 12345, \"instructions\": [\"instr1\", \"instr2\"] },\n      { \"start_pc\": 12360, \"instructions\": [\"instr3\"] }\n    ],\n    \"stats\": {\n      \"before\": { \"main_columns\": 100, \"constraints\": 200, \"bus_interactions\": 50 },\n      \"after\": { \"main_columns\": 50, \"constraints\": 100, \"bus_interactions\": 25 }\n    },\n    \"width_before\": 100,\n    \"value\": 5000,\n    \"cost_before\": 1000.0,\n    \"cost_after\": 500.0,\n  }],\n  \"labels\": { \"2099200\": [\"memset\"], \"2099448\": [\"memcpy\"] }\n}\n```\n\nAll older formats are normalized to `original_blocks` on load:\n- **Versions 2 & 3**: `original_block: { start_pc, instructions }` → wrapped in a 1-element array\n- **Version 1** (no `version` field): `original_block` with `statements` → `original_blocks[0]` with `instructions`\n- **Version 0** (bare array): same as v1 without wrapper, no labels\n\n**Visualization model**: A block's identity is its `block_id` — a comma-separated list of hex PCs (e.g., `0x3000,0x3050`). `start_pc` is the first basic block's PC (used for sorting/display). 
Multiple blocks may share the same basic block PC.\n\n## Testing\n\nStart server:\n```bash\npython3 -m http.server 8000 &\n```\n\nTest URL with real data (~11,300 APCs):\n```\nhttp://localhost:8000/?data=https%3A%2F%2Fgithub.com%2Fpowdr-labs%2Fbench-results%2Fblob%2Fgh-pages%2Fresults%2F2026-01-27-0453%2Freth%2Fapc_candidates.json\n```\n\nVerify:\n- Data loads (GitHub URLs auto-convert to raw)\n- Bar chart shows ~3.28x mean effectiveness\n- Value-cost plot reaches ~80% savings at 1000 APCs\n- Labels table expands with function names\n- Block selection syncs across all views\n\nCache-bust: append `&_t=1` to URL.\n\n## URL Parameters\n\n```\n?data=<url>           # Data source (required to load data)\n&plot=value-cost      # Show value-cost plot (omit for default bar chart)\n&block=0x2008f8       # Select block by PC address (hex)\n```\n\nExample - jump directly to value-cost plot with a block selected:\n```\nhttp://localhost:8000/?data=<url>&plot=value-cost&block=0x200af0\n```\n\nURL updates automatically as you interact with the app, enabling easy sharing of specific views.\n\n## Development Notes\n\n**D3.js chart redraw**: Charts are fully recreated on metric switch. Ensure `.remove()` is called on exit selections to prevent memory leaks.\n\n**State persistence**: `selectedBlock` must survive metric changes. Check selection still exists in new processed data.\n\n**GitHub URL conversion**: `loadFromUrl()` has regex converting blob URLs to raw URLs. Brittle - test after GitHub URL format changes.\n\n**Grouping threshold**: Blocks <0.1% of total cells grouped as \"Other\". 
Hardcoded in `createChart()`.\n\n**Weighted mean**: `sum(effectiveness * traceCells) / sum(traceCells)` - weights by trace cells, not block count.\n\n### Common Errors\n- **CORS**: GitHub blob URLs must convert to raw URLs\n- **D3 selections**: Use enter/update/exit patterns; don't forget `.remove()`\n- **Event handlers**: Remove old handlers when recreating charts\n- **Test with full dataset**: ~11K items, not small test data\n"
  },
  {
    "path": "autoprecompile-analyzer/index.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n\n<head>\n    <meta charset=\"UTF-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n    <title>APC Effectiveness Analyzer</title>\n\n    <!-- Bootstrap CSS -->\n    <link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css\" rel=\"stylesheet\">\n    <style>\n        .copy-icon:hover { color: white !important; }\n        .copy-icon .copy-check { display: none; }\n        .copy-icon .copy-default { display: inline; }\n        .copy-icon.copied .copy-check { display: inline; }\n        .copy-icon.copied .copy-default { display: none; }\n        .copy-icon.copied { color: #7ee787 !important; }\n    </style>\n\n    <!-- D3.js -->\n    <script src=\"https://d3js.org/d3.v7.min.js\"></script>\n\n    <style>\n        body {\n            background-color: #f8f9fa;\n        }\n\n        .drop-zone {\n            border: 3px dashed #dee2e6;\n            border-radius: 10px;\n            padding: 40px;\n            text-align: center;\n            cursor: pointer;\n            transition: all 0.3s;\n            background-color: white;\n        }\n\n        .drop-zone:hover,\n        .drop-zone.dragover {\n            border-color: #0d6efd;\n            background-color: #e7f1ff;\n        }\n\n        .chart-container {\n            background-color: white;\n            border-radius: 10px;\n            padding: 20px;\n            box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);\n        }\n\n        .bar-highlight {\n            stroke: #ffc10755 !important;\n            stroke-width: 2 !important;\n        }\n\n        .bar-selected {\n            stroke: #ffc107 !important;\n            stroke-width: 4 !important;\n        }\n\n        .code-panel-container {\n            position: relative;\n        }\n\n        .sticky-label-header {\n            position: sticky;\n            top: 0;\n            z-index: 100;\n            background-color: #e3f2fd;\n            
border-left: 4px solid #2196f3;\n            padding: 8px 10px;\n            font-weight: bold;\n            color: #1565c0;\n            font-family: 'Courier New', monospace;\n            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);\n            display: none;\n            cursor: pointer;\n            transition: background-color 0.2s;\n        }\n\n        .sticky-label-header.active {\n            display: block;\n        }\n\n        .sticky-label-header:hover {\n            background-color: #bbdefb;\n        }\n\n        .code-panel {\n            font-family: 'Courier New', monospace;\n            background-color: #f8f9fa;\n            border-radius: 5px;\n            padding: 15px;\n            max-height: 500px;\n            overflow-y: auto;\n            position: relative;\n        }\n\n        .label-line {\n            background-color: #e3f2fd;\n            border-left: 4px solid #2196f3;\n            padding: 8px 10px;\n            margin: 15px 0 5px 0;\n            font-weight: bold;\n            color: #1565c0;\n        }\n\n        .label-name {\n            font-size: 11px;\n            word-break: break-all;\n            line-height: 1.4;\n        }\n\n        .labels-table {\n            width: 100%;\n            margin-top: 10px;\n            font-size: 12px;\n        }\n\n        .labels-table th {\n            cursor: pointer;\n            user-select: none;\n            padding: 8px;\n            background-color: #f8f9fa;\n            border-bottom: 2px solid #dee2e6;\n            position: relative;\n        }\n\n        .labels-table th:hover {\n            background-color: #e9ecef;\n        }\n\n        .labels-table th.sorted-asc::after {\n            content: ' ▲';\n            font-size: 10px;\n        }\n\n        .labels-table th.sorted-desc::after {\n            content: ' ▼';\n            font-size: 10px;\n        }\n\n        .labels-table td {\n            padding: 8px;\n            border-bottom: 1px solid #dee2e6;\n        
}\n\n        .labels-table tbody tr {\n            cursor: pointer;\n        }\n\n        .labels-table tbody tr:hover {\n            background-color: #f8f9fa;\n        }\n\n        .label-cell {\n            max-width: 400px;\n            word-break: break-all;\n            font-family: 'Courier New', monospace;\n            font-size: 11px;\n        }\n\n        .collapsible-header {\n            cursor: pointer;\n            user-select: none;\n            display: flex;\n            align-items: center;\n            gap: 10px;\n        }\n\n        .collapsible-header:hover {\n            opacity: 0.8;\n        }\n\n        .collapse-icon {\n            transition: transform 0.3s;\n            font-size: 14px;\n        }\n\n        .collapse-icon.collapsed {\n            transform: rotate(-90deg);\n        }\n\n        .labels-table-wrapper {\n            max-height: 300px;\n            overflow-y: auto;\n            border: 1px solid #dee2e6;\n            border-radius: 5px;\n        }\n\n        .label-row {\n            position: relative;\n        }\n\n        .expand-icon {\n            display: inline-block;\n            width: 16px;\n            transition: transform 0.2s;\n            cursor: pointer;\n        }\n\n        .expand-icon.expanded {\n            transform: rotate(90deg);\n        }\n\n        .blocks-detail-row {\n            background-color: #f8f9fa;\n        }\n\n        .blocks-detail-row td:nth-child(2) {\n            padding-left: 30px !important;\n        }\n\n        .blocks-detail-row:hover {\n            background-color: #e9ecef;\n        }\n\n        .code-block {\n            border: 1px solid #dee2e6;\n            border-radius: 3px;\n            margin: 10px 0;\n            padding: 10px;\n            background-color: white;\n        }\n\n        .code-block-header {\n            font-weight: bold;\n            margin-bottom: 5px;\n            color: #495057;\n            font-size: 12px;\n        }\n\n        
.code-block.selected {\n            border-color: #ffc107;\n            background-color: #fff9e6;\n            box-shadow: 0 0 5px rgba(255, 193, 7, 0.3);\n        }\n\n        .code-line {\n            margin: 2px 0;\n            padding: 2px 5px;\n            cursor: pointer;\n        }\n\n        .code-line:hover {\n            background-color: #e9ecef;\n        }\n\n        .code-line.highlighted {\n            background-color: #fff3cd;\n        }\n\n        .tooltip {\n            position: absolute;\n            text-align: left;\n            padding: 10px;\n            font: 12px sans-serif;\n            background: rgba(0, 0, 0, 0.85);\n            color: white;\n            border-radius: 5px;\n            pointer-events: none;\n            z-index: 1000;\n        }\n\n        .bar {\n            stroke: black;\n            stroke-width: 0.5;\n            opacity: 0.8;\n            cursor: pointer;\n        }\n\n        .bar:hover {\n            opacity: 1;\n        }\n\n        .mean-line {\n            stroke: red;\n            stroke-width: 2;\n            stroke-dasharray: 5, 5;\n            opacity: 0.7;\n        }\n\n        .grid line {\n            stroke: #e0e0e0;\n            stroke-opacity: 0.7;\n        }\n\n        .grid path {\n            stroke-width: 0;\n        }\n    </style>\n</head>\n\n<body>\n    <nav class=\"navbar navbar-dark bg-primary\">\n        <div class=\"container-fluid\">\n            <span class=\"navbar-brand mb-0 h1\" id=\"pageTitle\" style=\"cursor: pointer;\">APC Effectiveness Analyzer</span>\n            <span class=\"text-white small ms-auto text-end\" id=\"dataSourceDisplay\"\n                style=\"display:none; flex:1 1 auto; max-width:calc(100vw - 220px); white-space:nowrap; overflow:hidden; text-overflow:ellipsis; font-family: 'Courier New', monospace;\"></span>\n        </div>\n    </nav>\n\n    <div class=\"container mt-4\">\n        <!-- File Upload Section -->\n        <div id=\"uploadSection\" 
class=\"row mb-4\">\n            <div class=\"col-12\">\n                <div class=\"drop-zone\" id=\"dropZone\">\n                    <h4>Drop JSON file here or click to upload</h4>\n                    <p class=\"text-muted\">Upload APC candidates JSON file</p>\n                    <input type=\"file\" id=\"fileInput\" accept=\".json\" style=\"display: none;\">\n                </div>\n                <div class=\"mt-3\">\n                    <div class=\"input-group\">\n                        <span class=\"input-group-text\">Or paste URL:</span>\n                        <input type=\"text\" id=\"urlInput\" class=\"form-control\"\n                            placeholder=\"https://example.com/data.json or GitHub link\">\n                        <button class=\"btn btn-primary\" id=\"loadUrlBtn\">Load from URL</button>\n                    </div>\n                    <small class=\"text-muted\">Supports direct JSON URLs and GitHub file links</small>\n                </div>\n            </div>\n        </div>\n\n        <!-- Main App Section (hidden initially) -->\n        <div id=\"appSection\" style=\"display: none;\">\n            <div class=\"row\">\n                <!-- Left column: plot + labels + code -->\n                <div class=\"col-12 col-lg-9\">\n                    <div id=\"vizSection\">\n                        <div class=\"row\">\n                            <div class=\"col-12\">\n                                <div class=\"chart-container\">\n                                    <ul class=\"nav nav-pills mb-3\" id=\"chartTabs\" role=\"tablist\">\n                                        <li class=\"nav-item\" role=\"presentation\">\n                                            <button class=\"nav-link active\" id=\"tab-effectiveness\"\n                                                type=\"button\">Effectiveness by Basic Block</button>\n                                        </li>\n                                        <li class=\"nav-item\" 
role=\"presentation\">\n                                            <button class=\"nav-link\" id=\"tab-secondary\" type=\"button\">Saved proving cost\n                                                vs added verifier cost</button>\n                                        </li>\n                                    </ul>\n                                    <div id=\"chartTabEffectiveness\">\n                                        <div id=\"chart\"></div>\n                                    </div>\n                                    <div id=\"chartTabSecondary\" style=\"display: none;\">\n                                        <div id=\"chartValueCost\"></div>\n                                    </div>\n                                </div>\n                            </div>\n                        </div>\n\n                        <!-- Labels Summary Section -->\n                        <div class=\"row mt-4\" id=\"labelsSummarySection\" style=\"display: none;\">\n                            <div class=\"col-12\">\n                                <div class=\"chart-container\">\n                                    <div class=\"collapsible-header\" id=\"labelsHeader\">\n                                        <span class=\"collapse-icon collapsed\">▼</span>\n                                        <h5 style=\"margin: 0;\">Labels</h5>\n                                    </div>\n                                    <div id=\"labelsContent\" style=\"display: none; margin-top: 10px;\">\n                                        <div class=\"labels-table-wrapper\">\n                                            <div id=\"labelsTableContainer\"></div>\n                                        </div>\n                                    </div>\n                                </div>\n                            </div>\n                        </div>\n\n                        <!-- Code Panel Section -->\n                        <div class=\"row mt-4\">\n                 
           <div class=\"col-12\">\n                                <div class=\"chart-container\">\n                                    <h5>Program Code</h5>\n                                    <div class=\"code-panel-container\">\n                                        <div id=\"stickyLabelHeader\" class=\"sticky-label-header\"></div>\n                                        <div id=\"codePanel\" class=\"code-panel\">\n                                            <span class=\"text-muted\">No data loaded</span>\n                                        </div>\n                                    </div>\n                                </div>\n                            </div>\n                        </div>\n                    </div>\n                </div>\n\n                <!-- Right column: controls -->\n                <div class=\"col-12 col-lg-3\">\n                    <div id=\"infoSection\" class=\"mb-4\" style=\"display: none;\">\n                        <div class=\"chart-container\">\n                            <h5>Info</h5>\n                            <p class=\"text-muted small\" id=\"plotInfoText\" style=\"margin-bottom: 0;\">\n                            </p>\n                        </div>\n                    </div>\n\n                    <div id=\"controlsSection\" class=\"mb-4\" style=\"display: none;\">\n                        <div class=\"chart-container\">\n                            <div class=\"row g-3\">\n                                <div class=\"col-12\">\n                                    <label for=\"effectivenessType\" class=\"form-label\">Cost Metric:</label>\n                                    <select id=\"effectivenessType\" class=\"form-select\">\n                                        <option value=\"cost\" selected>Total Cost</option>\n                                        <option value=\"main_columns\">Main Columns</option>\n                                        <option 
value=\"constraints\">Constraints</option>\n                                        <option value=\"bus_interactions\">Bus Interactions</option>\n                                    </select>\n                                </div>\n                                <div class=\"col-12\">\n                                    <label for=\"pcSearch\" class=\"form-label\">Block ID:</label>\n                                    <div class=\"input-group\">\n                                        <input type=\"text\" id=\"pcSearch\" class=\"form-control\">\n                                        <button class=\"btn btn-primary\" id=\"pcSearchBtn\">Go</button>\n                                    </div>\n                                </div>\n                            </div>\n                        </div>\n                    </div>\n\n                    <div id=\"selectedBlockSection\" class=\"mb-4\" style=\"display: none;\">\n                        <div class=\"chart-container\">\n                            <h5>Selected Block</h5>\n                            <div class=\"mb-2\">\n                                <span id=\"codeBlockInfo\" class=\"text-muted\">Click on a bar or code line to select a\n                                    block</span>\n                            </div>\n                        </div>\n                    </div>\n                </div>\n            </div>\n        </div>\n    </div>\n\n    <script>\n        let currentData = null;\n        let currentLabels = {};\n        let chart = null;\n        let selectedBlock = null;\n        let lastData = null;\n        let lastMeanEffectiveness = null;\n\n        // File handling\n        const dropZone = document.getElementById('dropZone');\n        const fileInput = document.getElementById('fileInput');\n        const uploadSection = document.getElementById('uploadSection');\n        const appSection = document.getElementById('appSection');\n        const controlsSection = 
document.getElementById('controlsSection');\n        const vizSection = document.getElementById('vizSection');\n        const effectivenessType = document.getElementById('effectivenessType');\n        const pageTitle = document.getElementById('pageTitle');\n        const dataSourceDisplay = document.getElementById('dataSourceDisplay');\n        const pcSearch = document.getElementById('pcSearch');\n        const pcSearchBtn = document.getElementById('pcSearchBtn');\n        const tabEffectiveness = document.getElementById('tab-effectiveness');\n        const tabSecondary = document.getElementById('tab-secondary');\n        const chartTabEffectiveness = document.getElementById('chartTabEffectiveness');\n        const chartTabSecondary = document.getElementById('chartTabSecondary');\n        const plotInfoText = document.getElementById('plotInfoText');\n\n        // Plot point sizing (value-cost plot)\n        const POINT_RADIUS = 3;\n        const HOVER_RADIUS = 6;\n        const SELECTED_RADIUS = 6;\n\n        const DATA_DISPLAY_MAX = 70;\n        const DATA_DISPLAY_HEAD = 35;\n        const DATA_DISPLAY_TAIL = 35;\n\n        function shortenUrl(urlText) {\n            if (urlText.length <= DATA_DISPLAY_MAX) return urlText;\n            return `${urlText.slice(0, DATA_DISPLAY_HEAD)}...${urlText.slice(-DATA_DISPLAY_TAIL)}`;\n        }\n\n        function updateDataSourceDisplay(source) {\n            if (source) {\n                const displayText = shortenUrl(source.replace(/^https?:\\/\\//i, ''));\n                const href = source;\n                dataSourceDisplay.innerHTML = `Data: <a href=\"${href}\" target=\"_blank\" rel=\"noopener noreferrer\" style=\"color: white; text-decoration: underline;\">${displayText}</a>`\n                    + ` <svg class=\"copy-icon\" onclick=\"navigator.clipboard.writeText('${href.replace(/'/g, \"\\\\'\")}').then(() => { this.classList.add('copied'); this.setAttribute('title','Copied!'); setTimeout(() => { 
this.classList.remove('copied'); this.setAttribute('title','Copy data URL'); }, 2000); })\" `\n                    + `title=\"Copy data URL\" width=\"16\" height=\"16\" viewBox=\"0 0 16 16\" fill=\"currentColor\" style=\"margin-left:0.4rem; cursor:pointer; color:rgba(255,255,255,0.7); vertical-align:middle;\">`\n                    + `<path class=\"copy-default\" d=\"M0 6.75C0 5.784.784 5 1.75 5h1.5a.75.75 0 010 1.5h-1.5a.25.25 0 00-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 00.25-.25v-1.5a.75.75 0 011.5 0v1.5A1.75 1.75 0 019.25 16h-7.5A1.75 1.75 0 010 14.25z\"/>`\n                    + `<path class=\"copy-default\" d=\"M5 1.75C5 .784 5.784 0 6.75 0h7.5C15.216 0 16 .784 16 1.75v7.5A1.75 1.75 0 0114.25 11h-7.5A1.75 1.75 0 015 9.25zm1.75-.25a.25.25 0 00-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 00.25-.25v-7.5a.25.25 0 00-.25-.25z\"/>`\n                    + `<path class=\"copy-check\" d=\"M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z\"/>`\n                    + `</svg>`;\n                dataSourceDisplay.style.display = 'inline';\n            } else {\n                dataSourceDisplay.innerHTML = '';\n                dataSourceDisplay.style.display = 'none';\n            }\n        }\n\n        // Drop zone events\n        dropZone.addEventListener('click', () => fileInput.click());\n\n        dropZone.addEventListener('dragover', (e) => {\n            e.preventDefault();\n            dropZone.classList.add('dragover');\n        });\n\n        dropZone.addEventListener('dragleave', () => {\n            dropZone.classList.remove('dragover');\n        });\n\n        dropZone.addEventListener('drop', (e) => {\n            e.preventDefault();\n            dropZone.classList.remove('dragover');\n            const files = e.dataTransfer.files;\n            if (files.length > 0) {\n                handleFile(files[0]);\n            }\n        });\n\n        
// --- UI event wiring: file upload, URL loading, tab switching, PC search, reset ---

// File-picker change: hand the first selected file to handleFile().
fileInput.addEventListener('change', (e) => {
    if (e.target.files.length > 0) {
        handleFile(e.target.files[0]);
    }
});

// URL loading functionality
const urlInput = document.getElementById('urlInput');
const loadUrlBtn = document.getElementById('loadUrlBtn');

// "Effectiveness" tab: show the main chart, hide the secondary one, and drop
// the `plot` query parameter so the URL reflects the default view again.
tabEffectiveness.addEventListener('click', () => {
    tabEffectiveness.classList.add('active');
    tabSecondary.classList.remove('active');
    chartTabEffectiveness.style.display = 'block';
    chartTabSecondary.style.display = 'none';
    updateInfoText();
    if (lastData) {
        // Re-render from the cached processed data rather than recomputing.
        const totalBefore = lastData.reduce((sum, d) => sum + d.metric_before, 0);
        createChart(lastData, totalBefore, lastMeanEffectiveness || 0);
    } else if (currentData) {
        updateVisualization();
    }
    const newUrl = new URL(window.location);
    newUrl.searchParams.delete('plot');
    window.history.replaceState({}, document.title, newUrl);
});

// "Secondary" tab: show the value/cost plot and record `plot=value-cost` in the URL.
tabSecondary.addEventListener('click', () => {
    tabSecondary.classList.add('active');
    tabEffectiveness.classList.remove('active');
    chartTabEffectiveness.style.display = 'none';
    chartTabSecondary.style.display = 'block';
    updateInfoText();
    if (lastData) {
        createValueCostPlot(lastData);
    } else if (currentData) {
        createValueCostPlot(processData());
    }
    const newUrl = new URL(window.location);
    newUrl.searchParams.set('plot', 'value-cost');
    window.history.replaceState({}, document.title, newUrl);
});
loadUrlBtn.addEventListener('click', loadFromUrl);
// NOTE(review): 'keypress' is deprecated in favor of 'keydown'; behavior kept as-is.
urlInput.addEventListener('keypress', (e) => {
    if (e.key === 'Enter') {
        loadFromUrl();
    }
});

// Fetch a JSON data file from the URL in `urlInput`, load it into the app,
// and record the source URL in the page's query string. GitHub "blob" URLs
// are rewritten to raw.githubusercontent.com so the fetch returns raw JSON.
// Alerts (rather than throws) on any fetch or parse failure.
async function loadFromUrl() {
    const url = urlInput.value.trim();
    if (!url) {
        alert('Please enter a URL');
        return;
    }

    // Convert GitHub URLs to raw URLs
    let fetchUrl = url;
    if (url.includes('github.com') && !url.includes('raw.githubusercontent.com')) {
        // Convert github.com/user/repo/blob/branch/file to raw.githubusercontent.com/user/repo/branch/file
        // NOTE(review): replace() only rewrites the first 'github.com' occurrence;
        // subdomains such as gist.github.com would be rewritten oddly — confirm inputs.
        fetchUrl = url.replace('github.com', 'raw.githubusercontent.com').replace('/blob/', '/');
    }

    try {
        // Disable the button while the request is in flight to prevent double loads.
        loadUrlBtn.disabled = true;
        loadUrlBtn.textContent = 'Loading...';

        const response = await fetch(fetchUrl);
        if (!response.ok) {
            throw new Error(`HTTP error! status: ${response.status}`);
        }

        const text = await response.text();
        try {
            const jsonData = JSON.parse(text);
            loadData(jsonData);
            updateDataSourceDisplay(url);
            uploadSection.style.display = 'none';
            appSection.style.display = 'block';
            controlsSection.style.display = 'block';
            vizSection.style.display = 'block';
            updateVisualization();

            // Update URL to include the data parameter
            const newUrl = new URL(window.location);
            newUrl.searchParams.set('data', url);
            window.history.replaceState({}, document.title, newUrl);
        } catch (parseError) {
            alert('Error parsing JSON from URL: ' + parseError.message);
        }
    } catch (error) {
        alert('Error loading URL: ' + error.message);
    } finally {
        // Always restore the button, whether the load succeeded or failed.
        loadUrlBtn.disabled = false;
        loadUrlBtn.textContent = 'Load from URL';
    }
}

// Metric selector change: re-render and try to keep the same block selected
// (block identity is its block_id, which survives reprocessing).
effectivenessType.addEventListener('change', () => {
    if (currentData) {
        const selectedId = selectedBlock ? selectedBlock.block_id : null;
        updateVisualization();

        // Restore selection if it exists
        if (selectedId) {
            const data = processData();
            const block = data.find(d => d.block_id === selectedId);
            if (block) {
                selectBlock(block);
            }
        }
    }
});

// PC search functionality
pcSearchBtn.addEventListener('click', searchForPC);
pcSearch.addEventListener('keypress', (e) => {
    if (e.key === 'Enter') {
        searchForPC();
    }
});

// Find and select the (super)block whose basic-block PC range contains the
// address typed into the search box. Accepts hex ("0x...") or decimal input;
// empty input clears the current selection.
function searchForPC() {
    if (!currentData) {
        alert('Please load data first');
        return;
    }

    const input = pcSearch.value.trim();
    if (!input) {
        // Empty input means unselect
        selectBlock(null);
        return;
    }

    // Parse hex (0x...) or decimal
    let pcValue;
    if (input.toLowerCase().startsWith('0x')) {
        pcValue = parseInt(input, 16);
    } else {
        pcValue = parseInt(input, 10);
    }

    if (isNaN(pcValue)) {
        alert('Invalid PC address. Please enter a valid hex (0x...) or decimal number.');
        return;
    }

    // Find the superblock that contains this PC in any of its basic blocks
    // Each instruction is 4 bytes, so PC advances by 4
    const data = processData();
    const block = data.find(d => {
        if (d.is_other) return false; // Skip "Other" grouped blocks
        return d.original_blocks.some(b => {
            const endPc = b.start_pc + b.instructions.length * 4;
            return pcValue >= b.start_pc && pcValue < endPc;
        });
    });

    if (block) {
        selectBlock(block);
    } else {
        alert(`No block found containing PC: 0x${pcValue.toString(16)} (${pcValue})`);
    }
}

// Clicking the page title resets the app to its initial "upload" state and
// clears all query parameters.
pageTitle.addEventListener('click', () => {
    currentData = null;
    currentLabels = {};
    selectedBlock = null;
    uploadSection.style.display = 'block';
    appSection.style.display = 'none';
    controlsSection.style.display = 'none';
    document.getElementById('selectedBlockSection').style.display = 'none';
    document.getElementById('infoSection').style.display = 'none';
    vizSection.style.display = 'none';
    fileInput.value = '';
    urlInput.value = '';
    updateDataSourceDisplay('');
    // Clear URL parameters when returning to upload screen
    window.history.replaceState({}, document.title, window.location.pathname);
});

// Check for URL parameter on page load
// `url`/`data` pre-loads a dataset; `plot=value-cost` pre-selects the secondary tab.
window.addEventListener('DOMContentLoaded', () => {
    const urlParams = new URLSearchParams(window.location.search);
    const dataUrl = urlParams.get('url') || urlParams.get('data');
    const plotParam = urlParams.get('plot');

    if (dataUrl) {
        urlInput.value = dataUrl;
        loadFromUrl();
    }
    if (plotParam === 'value-cost') {
        tabSecondary.click();
    } else {
        updateInfoText();
    }
});

// Read an uploaded .json file, parse it, load it into the app state, and
// reveal the main UI sections. Alerts on parse failure.
function handleFile(file) {
    if (!file.name.endsWith('.json')) {
        alert('Please upload a JSON file');
        return;
    }

    const reader = new FileReader();
    reader.onload = (e) => {
        try {
            const jsonData = JSON.parse(e.target.result);
            loadData(jsonData);
            updateDataSourceDisplay('');
            uploadSection.style.display = 'none';
            appSection.style.display = 'block';
            controlsSection.style.display = 'block';
            vizSection.style.display = 'block';
            updateVisualization();
        } catch (error) {
            alert('Error parsing JSON file: ' + error.message);
        }
    };
    reader.readAsText(file);
}

// Upgrade a v0/v1 APC record in place: wrap the legacy single `original_block`
// (whose instruction list is named `statements`) into the newer
// `original_blocks` array shape. Returns the same (mutated) object.
function updateApcDataV1(apc) {
    // Normalize old single-block format into original_blocks array
    apc.original_blocks = [{
        start_pc: apc.original_block.start_pc,
        instructions: apc.original_block.statements,
    }];
    return apc;
}

// json is a direct list of APCs
function loadDataV0(jsonData) {
    return { data: jsonData.map(updateApcDataV1), labels: {} };
}

// json with APCs and labels but no version
function loadDataV1(jsonData) {
    return { data: jsonData.apcs.map(updateApcDataV1), labels: jsonData.labels };
}
// Loader for format versions 2 and 3. The only difference between them is the
// `apc_candidate_file` field (absent in one), which the visualization never
// reads, so a single loader covers both. Wraps each APC's single
// `original_block` into the newer `original_blocks` array shape.
function loadDataV2V3(jsonData) {
    const normalized = jsonData.apcs.map(apc =>
        Object.assign(apc, { original_blocks: [apc.original_block] })
    );
    return { data: normalized, labels: jsonData.labels };
}

// Loader for format version 4: `original_blocks` is already a list of
// BasicBlocks (possibly more than one), so no normalization is needed.
function loadDataV4(jsonData) {
    const { apcs, labels } = jsonData;
    return { data: apcs, labels };
}

// Parse any supported input format, store the result in the module-level
// `currentData`/`currentLabels`, and reveal the right-hand panes.
// Dispatch rules (backward compatibility with older json formats):
//   - bare array            -> v0
//   - object, no `version`  -> v1
//   - version 2/3/4         -> dedicated loaders
//   - anything else         -> throws
function loadData(jsonData) {
    let result;
    if (Array.isArray(jsonData)) {
        result = loadDataV0(jsonData);
    } else if (!('version' in jsonData)) {
        result = loadDataV1(jsonData);
    } else if (jsonData.version === 2 || jsonData.version === 3) {
        result = loadDataV2V3(jsonData);
    } else if (jsonData.version === 4) {
        result = loadDataV4(jsonData);
    } else {
        throw new Error(`Unsupported version: ${jsonData.version}`);
    }

    currentData = result.data;
    currentLabels = result.labels;

    // Show right-side panes when data is loaded.
    document.getElementById('selectedBlockSection').style.display = 'block';
    document.getElementById('infoSection').style.display = 'block';
    controlsSection.style.display = 'block';
    updateInfoText();
}

// Return the raw before/after values of the selected metric for one APC.
// 'cost' lives directly on the item; the other three metrics live under
// item.stats.before / item.stats.after using the same key names.
// Throws on an unrecognized metric name.
function getMetricValues(item, effType) {
    if (effType === 'cost') {
        return { before: item.cost_before, after: item.cost_after };
    }
    if (effType === 'main_columns' || effType === 'constraints' || effType === 'bus_interactions') {
        return { before: item.stats.before[effType], after: item.stats.after[effType] };
    }
    throw new Error(`Unknown effectiveness type: ${effType}`);
}

// Effectiveness of one APC for the chosen metric: before / after ratio.
function calculateEffectiveness(item, effType) {
    const metrics = getMetricValues(item, effType);
    return metrics.before / metrics.after;
}

// Human-readable metric formatting: 1.5K / 2.3M / 4.1B, plain integer below 1000.
function formatMetric(count) {
    const scales = [
        [1e9, 'B'],
        [1e6, 'M'],
        [1e3, 'K'],
    ];
    for (const [scale, suffix] of scales) {
        if (count >= scale) {
            return (count / scale).toFixed(1) + suffix;
        }
    }
    return count.toFixed(0);
}

// Turn `currentData` into the flat row objects the charts consume, using the
// currently selected metric, and sort rows by descending frequency-weighted
// "before" metric (matches the Python tooling).
function processData() {
    const metricKind = effectivenessType.value;

    const rows = currentData.map(apc => {
        const { before, after } = getMetricValues(apc, metricKind);
        const freq = apc.execution_frequency;
        const blocks = apc.original_blocks;

        const metric_before = before * freq;
        const metric_after = after * freq;
        const value = (before - after) * freq;
        // NOTE(review): the numerator is frequency-scaled while the divisor
        // `after` is raw — expression preserved exactly as in the original;
        // confirm the intended density definition.
        const density = metric_after > 0 ? value / after : 0;

        const tail = blocks[blocks.length - 1];
        const blockPcs = blocks.map(b => b.start_pc);

        return {
            start_pc: blocks[0].start_pc,
            block_id: blockPcs.map(pc => '0x' + pc.toString(16)).join(','),
            block_pcs: blockPcs,
            original_blocks: blocks,
            end_pc: tail.start_pc + tail.instructions.length * 4,
            effectiveness: before / after,
            instructions: blocks.reduce((sum, b) => sum + b.instructions.length, 0),
            software_version_cells: metric_before, // alias used throughout charting
            metric_before,
            metric_after,
            metric_before_raw: before,
            metric_after_raw: after,
            value,
            density,
            width_before: apc.width_before,
            execution_frequency: freq,
            statements: blocks.flatMap(b => b.instructions), // flat list for legacy uses
            stats_after: apc.stats.after // keep APC-level stats
        };
    });

    // Sort by metric_before (matches Python).
    rows.sort((a, b) => b.metric_before - a.metric_before);

    return rows;
}
// Recompute processed rows from `currentData`, refresh both charts and the
// side panels, cache the result in `lastData`, and honor a `block=` query
// parameter by selecting the matching block.
function updateVisualization() {
    const data = processData();
    const totalBefore = data.reduce((sum, d) => sum + d.metric_before, 0);
    const totalAfter = data.reduce((sum, d) => sum + d.metric_after, 0);

    // Calculate weighted mean (weight by cost after, matches Python)
    const meanEffectiveness = totalAfter > 0
        ? data.reduce((sum, d) => sum + d.effectiveness * d.metric_after, 0) / totalAfter
        : 0;

    // Cache for cheap re-rendering on tab switches.
    lastData = data;
    lastMeanEffectiveness = meanEffectiveness;

    // Create visualizations
    createChart(data, totalBefore, meanEffectiveness);
    if (chartTabSecondary.style.display !== 'none') {
        // Only redraw the secondary plot when its tab is actually visible.
        createValueCostPlot(data);
    }

    // Show all code in the code panel
    showAllCode();

    // Create labels summary table if there are labels
    createLabelsSummary();

    // Check for block parameter in URL and select it
    const urlParams = new URLSearchParams(window.location.search);
    const blockParam = urlParams.get('block');
    if (blockParam) {
        // blockParam is a comma-separated list of hex PCs (block_id)
        const block = data.find(d => d.block_id === blockParam);
        if (block) {
            selectBlock(block);
        }
    }
}

// Group APCs under source labels and aggregate per-label statistics.
// Blocks are walked in ascending start-PC order; a label "opens" at the first
// block whose PC appears in `currentLabels` and stays active (absorbing
// subsequent blocks) until another labeled block replaces it.
// Returns one entry per label with totals and per-block detail rows.
function aggregateLabelData() {
    const effType = effectivenessType.value;
    const labelStats = {};

    // Sort blocks by PC to process them in order
    const sortedBlocks = [...currentData].sort((a, b) =>
        a.original_blocks[0].start_pc - b.original_blocks[0].start_pc
    );

    // Track current active label(s)
    let currentActiveLabels = [];

    // Iterate through all blocks in PC order
    sortedBlocks.forEach(item => {
        // Check all basic block PCs for label matches
        const matchingPcKey = item.original_blocks
            .map(b => b.start_pc.toString())
            .find(key => currentLabels[key] && currentLabels[key].length > 0);

        // Check if this block starts a new label
        if (matchingPcKey) {
            currentActiveLabels = currentLabels[matchingPcKey];

            // Initialize label stats if needed
            currentActiveLabels.forEach(label => {
                if (!labelStats[label]) {
                    labelStats[label] = {
                        label: label,
                        pc: item.original_blocks[0].start_pc,
                        blocks: [],
                        totalTraceCells: 0,
                        totalCostBefore: 0,
                        totalCostAfter: 0
                    };
                }
            });
        }

        // Assign this block to all current active labels
        currentActiveLabels.forEach(label => {
            labelStats[label].blocks.push(item);

            // Add trace cells (same as x-axis in chart)
            labelStats[label].totalTraceCells += item.width_before * item.execution_frequency;

            // Aggregate costs based on effectiveness type
            // NOTE(review): unlike the trace-cell sum above, these cost sums are
            // NOT weighted by execution_frequency — confirm that is intentional.
            switch (effType) {
                case 'cost':
                    labelStats[label].totalCostBefore += item.cost_before;
                    labelStats[label].totalCostAfter += item.cost_after;
                    break;
                case 'main_columns':
                    labelStats[label].totalCostBefore += item.stats.before.main_columns;
                    labelStats[label].totalCostAfter += item.stats.after.main_columns;
                    break;
                case 'constraints':
                    labelStats[label].totalCostBefore += item.stats.before.constraints;
                    labelStats[label].totalCostAfter += item.stats.after.constraints;
                    break;
                case 'bus_interactions':
                    labelStats[label].totalCostBefore += item.stats.before.bus_interactions;
                    labelStats[label].totalCostAfter += item.stats.after.bus_interactions;
                    break;
            }
        });
    });

    // Calculate effectiveness for each label (weighted by cost)
    const labelArray = Object.values(labelStats).map(stat => {
        // Process and sort blocks
        const processedBlocks = stat.blocks.map(block => ({
            pc: block.original_blocks[0].start_pc,
            block_pcs: block.original_blocks.map(b => b.start_pc),
            traceCells: block.width_before * block.execution_frequency,
            effectiveness: calculateEffectiveness(block, effType),
            instructions: block.original_blocks.reduce((sum, b) => sum + b.instructions.length, 0)
        }));

        // Sort blocks by the same criteria as the current label sort
        sortBlocksInLabel(processedBlocks);

        return {
            label: stat.label,
            pc: stat.pc,
            traceCells: stat.totalTraceCells,
            costBefore: stat.totalCostBefore,
            costAfter: stat.totalCostAfter,
            effectiveness: stat.totalCostBefore / stat.totalCostAfter,
            blockCount: stat.blocks.length,
            blocks: processedBlocks
        };
    });

    return labelArray;
}
// Render the "value vs. cost" plot into #chartValueCost: APCs sorted by
// density (best value per cost first), plotted as a cumulative curve of
// saved prover cost (y, linear) against added verifier cost (x, log).
// Includes reference lines at fractions of total software cost, point labels
// at round APC counts, hover guides/tooltips, and click-to-select.
function createValueCostPlot(data) {
    const container = document.getElementById('chartValueCost');
    if (!container) return;
    // Clear any previous rendering of this chart.
    d3.select(container).selectAll('*').remove();

    // Sort by density (value / cost_after)
    const sorted = [...data].sort((a, b) => b.density - a.density);

    if (sorted.length === 0) {
        container.innerHTML = '<span class="text-muted">No data available for value-cost plot.</span>';
        return;
    }

    // Build cumulative arrays (cost scaled by execution frequency)
    let cumulativeCost = 0;
    let cumulativeValue = 0;
    let cumulativeBefore = 0;
    const series = sorted.map((d, idx) => {
        cumulativeCost += d.metric_after_raw;
        cumulativeValue += d.value;
        cumulativeBefore += d.metric_before;
        return { x: cumulativeCost, y: cumulativeValue, beforeCum: cumulativeBefore, block: d, idx };
    });
    const totalSoftwareCost = data.reduce((sum, d) => sum + d.metric_before, 0);

    const margin = { top: 20, right: 20, bottom: 40, left: 60 };
    const containerWidth = container.clientWidth || 300;
    const width = containerWidth - margin.left - margin.right;
    const height = 270 - margin.top - margin.bottom;

    // Avoid log(0): start domain at min positive cost
    const minCost = d3.min(series, d => d.x) || 1;
    const maxCost = d3.max(series, d => d.x) || minCost;
    const xScale = d3.scaleLog()
        .domain([Math.max(1, minCost), maxCost])
        .range([0, width]);

    const yScale = d3.scaleLinear()
        .domain([0, Math.max(d3.max(series, d => d.y), totalSoftwareCost)])
        .range([height, 0]);

    const svg = d3.select(container)
        .append('svg')
        .attr('width', width + margin.left + margin.right)
        .attr('height', height + margin.top + margin.bottom)
        .append('g')
        .attr('transform', `translate(${margin.left},${margin.top})`);

    // Hover guide lines (hidden until a point is hovered).
    const guideH = svg.append('line')
        .attr('stroke', '#bbbbbb')
        .attr('stroke-dasharray', '4 4')
        .attr('stroke-width', 1)
        .style('display', 'none');
    const guideV = svg.append('line')
        .attr('stroke', '#bbbbbb')
        .attr('stroke-dasharray', '4 4')
        .attr('stroke-width', 1)
        .style('display', 'none');
    const guideHText = svg.append('text')
        .style('fill', '#555')
        .style('font-size', '11px')
        .style('display', 'none');
    const guideVText = svg.append('text')
        .style('fill', '#555')
        .style('font-size', '11px')
        .style('display', 'none');

    const line = d3.line()
        .x(d => xScale(d.x))
        .y(d => yScale(d.y))
        .curve(d3.curveLinear);

    // Background for deselect on click (send behind everything)
    svg.append('rect')
        .attr('width', width)
        .attr('height', height)
        .attr('fill', 'transparent')
        .attr('pointer-events', 'all')
        .on('click', () => selectBlock(null))
        .lower();

    // The cumulative value/cost curve itself.
    svg.append('path')
        .datum(series)
        .attr('fill', 'none')
        .attr('stroke', '#0d6efd')
        .attr('stroke-width', 2)
        .attr('d', line);

    // Horizontal lines at percentages of total software cost (upper bound)
    if (totalSoftwareCost > 0) {
        const percentages = [0.2, 0.4, 0.6, 0.8, 1.0];
        percentages.forEach(pct => {
            const yVal = totalSoftwareCost * pct;
            svg.append('line')
                .attr('x1', 0)
                .attr('x2', width)
                .attr('y1', yScale(yVal))
                .attr('y2', yScale(yVal))
                .attr('stroke', '#888')
                .attr('stroke-dasharray', '4 4')
                .attr('stroke-width', pct === 1 ? 1.5 : 1);

            const label = pct === 1
                ? `Software cost: ${formatMetric(yVal)}`
                : `${Math.round(pct * 100)}%`;

            svg.append('text')
                .attr('x', 5)
                .attr('y', yScale(yVal) - 6)
                .attr('text-anchor', 'start')
                .style('fill', '#666')
                .style('font-size', '11px')
                .text(label);
        });
    }

    // Point labels for selected counts (powers of 10 and 3×powers of 10)
    const refCounts = [];
    const maxCount = series.length;
    for (let p = 0; Math.pow(10, p) <= maxCount; p++) {
        const pow = Math.pow(10, p);
        if (p > 0) {
            refCounts.push(pow);
        }
        const three = 3 * pow;
        if (three <= maxCount) refCounts.push(three);
    }
    const labelYOffset = 14;
    // NOTE(review): the `idx` parameter below is unused — harmless, kept as-is.
    refCounts.forEach((count, idx) => {
        if (count > series.length || count < 1) return;
        const point = series[count - 1];

        svg.append('text')
            .attr('x', xScale(point.x))
            .attr('y', yScale(point.y) - labelYOffset)
            .attr('text-anchor', 'middle')
            .style('fill', '#555')
            .style('font-size', '11px')
            .text(`${count} APCs`);
    });

    // Tooltip
    // NOTE(review): this appends a new div to <body> on every call and never
    // removes the previous one — repeated re-renders appear to accumulate
    // tooltip divs; confirm and consider reusing/removing.
    const tooltip = d3.select('body').append('div')
        .attr('class', 'tooltip')
        .style('opacity', 0);

    // Secondary hover line text
    const guideHText2 = svg.append('text')
        .style('fill', '#555')
        .style('font-size', '11px')
        .style('display', 'none');

    const baseColor = '#0d6efd';
    const hoverColor = '#0b5ed7';
    const selectedColor = '#d32f2f';

    // Points
    svg.selectAll('.value-point')
        .data(series)
        .enter()
        .append('circle')
        .attr('class', 'value-point')
        .attr('cx', d => xScale(d.x))
        .attr('cy', d => yScale(d.y))
        .attr('r', POINT_RADIUS)
        .attr('fill', baseColor)
        .attr('stroke', baseColor)
        .attr('stroke-width', 1)
        .on('mouseover', function (event, d) {
            const isSelected = selectedBlock && !selectedBlock.is_other && selectedBlock.block_id === d.block.block_id;
            d3.select(this)
                .attr('r', isSelected ? SELECTED_RADIUS : HOVER_RADIUS)
                .attr('fill', isSelected ? selectedColor : hoverColor)
                .attr('stroke', isSelected ? selectedColor : hoverColor);
            tooltip.transition().duration(200).style('opacity', 0.9);
            tooltip.html(blockTooltipHtml(d.block))
                .style('left', (event.pageX + 10) + 'px')
                .style('top', (event.pageY + 10) + 'px');

            // Guides
            guideH
                .attr('x1', 0)
                .attr('x2', width)
                .attr('y1', yScale(d.y))
                .attr('y2', yScale(d.y))
                .style('display', 'block');
            guideV
                .attr('x1', xScale(d.x))
                .attr('x2', xScale(d.x))
                .attr('y1', 0)
                .attr('y2', height)
                .style('display', 'block');

            const pctSaved = totalSoftwareCost > 0 ? (d.y / totalSoftwareCost * 100) : 0;
            const pctAccel = totalSoftwareCost > 0 ? (d.beforeCum / totalSoftwareCost * 100) : 0;
            const reductionFactor = (100 - pctSaved) > 0 ? (100 / (100 - pctSaved)) : Infinity;

            const isFirstHalf = xScale(d.x) < (width / 2);
            // Horizontal labels: right side for first half, left side for second half
            const hLabelX = isFirstHalf ? width - 5 : 5;
            const hAnchor = isFirstHalf ? 'end' : 'start';
            guideHText
                .attr('x', hLabelX)
                .attr('y', yScale(d.y) - 6)
                .attr('text-anchor', hAnchor)
                .style('display', 'block')
                .text(`Accelerated prover cost: ${formatMetric(d.beforeCum)} (${pctAccel.toFixed(1)}%)`);

            guideHText2
                .attr('x', hLabelX)
                .attr('y', yScale(d.y) + 14)
                .attr('text-anchor', hAnchor)
                .style('display', 'block')
                .text(`Saved prover cost: ${formatMetric(d.y)} (${pctSaved.toFixed(1)}%, ${reductionFactor === Infinity ? '∞' : reductionFactor.toFixed(2) + 'x'} reduction)`);

            guideVText
                .attr('x', isFirstHalf ? xScale(d.x) + 4 : xScale(d.x) - 4)
                .attr('y', 20)
                .attr('text-anchor', isFirstHalf ? 'start' : 'end')
                .style('display', 'block')
                .text(`Verifier cost: ${formatMetric(d.x)} · APCs: ${d.idx + 1}`);
        })
        .on('mouseout', function () {
            tooltip.transition().duration(300).style('opacity', 0);
            updateValueCostPointStyles();
            guideH.style('display', 'none');
            guideV.style('display', 'none');
            guideHText.style('display', 'none');
            guideHText2.style('display', 'none');
            guideVText.style('display', 'none');
        })
        .on('click', function (event, d) {
            event.stopPropagation();
            selectBlock(d.block);
        });

    // Apply selection styling initially
    updateValueCostPointStyles();

    // Axes
    svg.append('g')
        .attr('transform', `translate(0,${height})`)
        .call(d3.axisBottom(xScale).ticks(5, "~s"));

    svg.append('g')
        .call(d3.axisLeft(yScale).ticks(5).tickFormat(formatMetric));

    // Labels
    svg.append('text')
        .attr('transform', `translate(${width / 2}, ${height + margin.bottom - 5})`)
        .style('text-anchor', 'middle')
        .text('Added verifier cost (log)');

    svg.append('text')
        .attr('transform', 'rotate(-90)')
        .attr('y', 0 - margin.left + 15)
        .attr('x', 0 - (height / 2))
        .style('text-anchor', 'middle')
        .text('Saved prover cost');
}
// Sort a label's block rows in place according to the global sort state
// (`currentLabelSort.column` / `.direction`). The 'blockCount' column and any
// unknown column deliberately fall back to sorting by traceCells, matching
// the default.
function sortBlocksInLabel(blocks) {
    // Pick the sort key for one row based on the active column.
    const keyOf = (row) => {
        switch (currentLabelSort.column) {
            case 'effectiveness':
                return row.effectiveness;
            case 'pc':
                return row.pc;
            default:
                // 'traceCells', 'blockCount', and anything unrecognized.
                return row.traceCells;
        }
    };

    blocks.sort((left, right) => {
        const leftKey = keyOf(left);
        const rightKey = keyOf(right);
        const ascCompare = leftKey < rightKey ? -1 : (leftKey > rightKey ? 1 : 0);
        return currentLabelSort.direction === 'asc' ? ascCompare : -ascCompare;
    });
}

// Active sort state for the labels table: trace cells, largest first.
let currentLabelSort = { column: 'traceCells', direction: 'desc' };
data\n            sortLabelData(labelData, currentLabelSort.column, currentLabelSort.direction);\n\n            // Get metric name for column header\n            const effType = effectivenessType.options[effectivenessType.selectedIndex].text;\n\n            // Create table\n            let tableHtml = `\n                <table class=\"table labels-table\">\n                    <thead>\n                        <tr>\n                            <th style=\"width: 30px;\"></th>\n                            <th data-column=\"pc\">PC</th>\n                            <th data-column=\"label\">Label</th>\n                            <th data-column=\"blockCount\">Blocks</th>\n                            <th data-column=\"traceCells\">Trace Cells</th>\n                            <th data-column=\"effectiveness\">Effectiveness (${effType})</th>\n                        </tr>\n                    </thead>\n                    <tbody>\n            `;\n\n            labelData.forEach((row, idx) => {\n                const labelId = `label-${idx}`;\n                tableHtml += `\n                    <tr class=\"label-row\" data-pc=\"${row.pc}\" data-label-id=\"${labelId}\">\n                        <td><span class=\"expand-icon\">►</span></td>\n                        <td>0x${row.pc.toString(16)}</td>\n                        <td class=\"label-cell\">${escapeHtml(row.label)}</td>\n                        <td>${row.blockCount}</td>\n                        <td>${formatMetric(row.traceCells)}</td>\n                        <td>${row.effectiveness.toFixed(2)}</td>\n                    </tr>\n                `;\n\n                // Add blocks as separate rows\n                row.blocks.forEach(block => {\n                    tableHtml += `\n                        <tr class=\"blocks-detail-row\" id=\"${labelId}-detail-${block.pc}\" style=\"display: none;\" data-block-pc=\"${block.pc}\" data-label-id=\"${labelId}\">\n                            <td></td>\n                        
    <td>0x${block.pc.toString(16)}</td>\n                            <td>...</td>\n                            <td></td>\n                            <td>${formatMetric(block.traceCells)}</td>\n                            <td>${block.effectiveness.toFixed(2)}</td>\n                        </tr>\n                    `;\n                });\n            });\n\n            tableHtml += `\n                    </tbody>\n                </table>\n            `;\n\n            labelsTableContainer.innerHTML = tableHtml;\n\n            // Add sort indicators\n            document.querySelectorAll('.labels-table th').forEach(th => {\n                const column = th.getAttribute('data-column');\n                th.classList.remove('sorted-asc', 'sorted-desc');\n                if (column === currentLabelSort.column) {\n                    th.classList.add(`sorted-${currentLabelSort.direction}`);\n                }\n            });\n\n            // Add click handlers for sorting\n            document.querySelectorAll('.labels-table th').forEach(th => {\n                th.addEventListener('click', function () {\n                    const column = this.getAttribute('data-column');\n                    if (currentLabelSort.column === column) {\n                        // Toggle direction\n                        currentLabelSort.direction = currentLabelSort.direction === 'asc' ? 
'desc' : 'asc';\n                    } else {\n                        // New column, default to descending\n                        currentLabelSort.column = column;\n                        currentLabelSort.direction = 'desc';\n                    }\n                    createLabelsSummary();\n                });\n            });\n\n            // Add click handlers for label rows (expand/collapse)\n            document.querySelectorAll('.label-row').forEach(tr => {\n                const expandIcon = tr.querySelector('.expand-icon');\n                const labelId = tr.getAttribute('data-label-id');\n\n                // Click on expand icon or first cell to expand\n                const expandCell = tr.querySelector('td:first-child');\n                expandCell.addEventListener('click', function (e) {\n                    e.stopPropagation();\n\n                    // Find all detail rows for this label\n                    const detailRows = document.querySelectorAll(`[data-label-id=\"${labelId}\"].blocks-detail-row`);\n                    const isExpanded = expandIcon.classList.contains('expanded');\n\n                    if (isExpanded) {\n                        detailRows.forEach(row => row.style.display = 'none');\n                        expandIcon.classList.remove('expanded');\n                    } else {\n                        detailRows.forEach(row => row.style.display = 'table-row');\n                        expandIcon.classList.add('expanded');\n                    }\n                });\n\n                // Click on rest of row to select first block\n                tr.addEventListener('click', function (e) {\n                    if (e.target.closest('td:first-child')) return; // Ignore if clicking expand cell\n                    const pc = parseInt(this.getAttribute('data-pc'));\n                    const data = processData();\n                    const block = data.find(d => d.block_pcs && d.block_pcs.includes(pc));\n                    if 
(block) {\n                        selectBlock(block);\n                    }\n                });\n            });\n\n            // Add click handlers for individual block rows\n            document.querySelectorAll('.blocks-detail-row').forEach(tr => {\n                tr.addEventListener('click', function () {\n                    const pc = parseInt(this.getAttribute('data-block-pc'));\n                    const data = processData();\n                    const block = data.find(d => d.block_pcs && d.block_pcs.includes(pc));\n                    if (block) {\n                        selectBlock(block);\n                    }\n                });\n            });\n        }\n\n        function sortLabelData(data, column, direction) {\n            data.sort((a, b) => {\n                let aVal = a[column];\n                let bVal = b[column];\n\n                // Special handling for label (case-insensitive string sort)\n                if (column === 'label') {\n                    aVal = aVal.toLowerCase();\n                    bVal = bVal.toLowerCase();\n                }\n\n                if (direction === 'asc') {\n                    return aVal < bVal ? -1 : aVal > bVal ? 1 : 0;\n                } else {\n                    return aVal > bVal ? -1 : aVal < bVal ? 
1 : 0;\n                }\n            });\n        }\n\n        function blockTooltipHtml(b) {\n            if (b.is_other) {\n                return `<strong>Other (${b.count} APCs)</strong><br/>` +\n                    `Execution frequency: ${formatMetric(b.execution_frequency)}<br/>` +\n                    `Cost (software version): ${formatMetric(b.software_version_cells)}<br/>` +\n                    `Cost (accelerated): ${formatMetric(b.metric_after || 0)}<br/>` +\n                    `Effectiveness: ${b.effectiveness.toFixed(2)}<br/>` +\n                    `Verifier cost (accelerated): n/a<br/>` +\n                    `APC size (total): ${b.stats_after.main_columns || 'N/A'} cols, ${b.stats_after.bus_interactions || 'N/A'} bus, ${b.stats_after.constraints || 'N/A'} constraints`;\n            }\n            const pcsDisplay = b.block_pcs.length === 1\n                ? `0x${b.start_pc.toString(16)}`\n                : b.block_pcs.map(pc => '0x' + pc.toString(16)).join(', ');\n            return `<strong>PC: ${pcsDisplay}</strong><br/>` +\n                `Execution frequency: ${formatMetric(b.execution_frequency)}<br/>` +\n                `Instructions: ${b.instructions}<br/>` +\n                `Cost (software version): ${formatMetric(b.metric_before)}<br/>` +\n                `Cost (accelerated): ${formatMetric(b.metric_after)}<br/>` +\n                `Effectiveness: ${b.effectiveness.toFixed(2)}<br/>` +\n                `Verifier cost (accelerated): ${formatMetric(b.metric_after_raw)}<br/>` +\n                `Density (saved cost / verifier cost): ${formatMetric(b.density)}<br/>` +\n                `APC size: ${b.stats_after.main_columns} cols, ${b.stats_after.bus_interactions} bus, ${b.stats_after.constraints} constraints`;\n        }\n\n        function highlightElements(blockId, isHover = true) {\n            // Highlight corresponding bar\n            d3.selectAll('.bar')\n                .classed('bar-highlight', d => d.block_id === blockId);\n      
  }\n\n        function clearHighlights() {\n            d3.selectAll('.bar').classed('bar-highlight', false);\n        }\n\n        function updateBarSelection() {\n            if (!selectedBlock || selectedBlock.is_other) return;\n            d3.selectAll('.bar')\n                .classed('bar-selected', d => d.block_id === selectedBlock.block_id);\n        }\n\n        function updateInfoText() {\n            if (!plotInfoText) return;\n            const showingEffectiveness = chartTabSecondary.style.display === 'none';\n            if (showingEffectiveness) {\n                plotInfoText.innerHTML = 'The plot shows each basic block, sorted by the proving cost it causes in the software execution:<br>• The <strong>width</strong> corresponds to its cost before acceleration.<br>• The <strong>height</strong> shows the factor by which the cost is reduced after acceleration.';\n            } else {\n                plotInfoText.innerHTML = 'The plot shows the trade-off between added verifier cost and saved proving cost for accelerating basic blocks:<br>• The <strong>x-axis</strong> shows the cumulative verifier cost of accelerated blocks. APCs are added by decreasing density, i.e., the saved proving cost divided by the added verification cost.<br>• The <strong>y-axis</strong> shows the cumulative saved cost achieved by accelerating those blocks.';\n            }\n        }\n\n        function updateValueCostPointStyles() {\n            const baseColor = '#0d6efd';\n            const selectedColor = '#d32f2f';\n            d3.selectAll('.value-point').each(function (d) {\n                const isSelected = selectedBlock && !selectedBlock.is_other && selectedBlock.block_id === d.block.block_id;\n                d3.select(this)\n                    .attr('fill', isSelected ? selectedColor : baseColor)\n                    .attr('stroke', isSelected ? selectedColor : baseColor)\n                    .attr('stroke-width', 1)\n                    .attr('r', isSelected ? 
SELECTED_RADIUS : POINT_RADIUS);\n            });\n        }\n\n        function selectBlock(blockData) {\n            // Clear previous selection\n            d3.selectAll('.bar').classed('bar-selected', false);\n\n            // Set new selection\n            selectedBlock = blockData;\n\n            // Update URL\n            const newUrl = new URL(window.location);\n            if (blockData && !blockData.is_other) {\n                // Highlight selected bar\n                d3.selectAll('.bar')\n                    .classed('bar-selected', d => d.block_id === blockData.block_id);\n\n                // Update URL with block parameter (block_id is comma-separated hex PCs)\n                newUrl.searchParams.set('block', blockData.block_id);\n\n                // Update the PC search input field with the entry PC\n                pcSearch.value = '0x' + blockData.start_pc.toString(16);\n            } else {\n                // Clear block parameter if deselecting or selecting \"Other\"\n                newUrl.searchParams.delete('block');\n\n                // Clear the PC search input field\n                pcSearch.value = '';\n            }\n            window.history.replaceState({}, document.title, newUrl);\n\n            // Update code panel selection\n            updateCodePanelSelection(blockData);\n            // Update value-cost plot point styling\n            updateValueCostPointStyles();\n            // Update bar selection styling (for re-rendered charts)\n            updateBarSelection();\n        }\n\n        function showAllCode() {\n            const codePanel = document.getElementById('codePanel');\n\n            // Get all blocks sorted by start_pc\n            const allBlocks = processData().sort((a, b) => a.start_pc - b.start_pc);\n\n            let codeHtml = '';\n            allBlocks.forEach((block, index) => {\n                if (block.is_other) return; // Skip \"Other\" grouped blocks\n\n                // Check for gap between 
previous superblock and current superblock\n                if (index > 0) {\n                    const prevBlock = allBlocks[index - 1];\n                    if (!prevBlock.is_other) {\n                        const prevBlockEnd = prevBlock.end_pc;\n                        const gap = block.start_pc - prevBlockEnd;\n                        const missingInstructions = gap / 4;\n\n                        if (missingInstructions > 0) {\n                            codeHtml += `\n                                <div style=\"padding: 10px; margin: 10px 0; background-color: #f0f0f0; border-left: 3px solid #999; font-style: italic; color: #666;\">\n                                    ... ${missingInstructions} instruction${missingInstructions !== 1 ? 's' : ''} not shown (0x${prevBlockEnd.toString(16)} - 0x${(block.start_pc - 4).toString(16)}) ...\n                                </div>\n                            `;\n                        }\n                    }\n                }\n\n                // Check all basic block PCs for labels\n                let labelData = '';\n                for (const basicBlock of block.original_blocks) {\n                    const pcKey = basicBlock.start_pc.toString();\n                    if (currentLabels[pcKey] && currentLabels[pcKey].length > 0) {\n                        const labels = currentLabels[pcKey];\n                        const labelsHtml = labels.map(label =>\n                            `<div class=\"label-name\">${escapeHtml(label)}</div>`\n                        ).join('');\n                        labelData = JSON.stringify(labels);\n                        codeHtml += `\n                            <div class=\"label-line\" data-pc=\"${block.start_pc}\" data-labels='${labelData}'>\n                                ${labelsHtml}\n                            </div>\n                        `;\n                        break; // Use first matching label for the sticky header\n                    }\n               
 }\n\n                const blockId = `block-${block.block_pcs.join('_')}`;\n                const pcsText = block.block_pcs.length === 1\n                    ? `PC: 0x${block.start_pc.toString(16)}`\n                    : `PCs: ${block.block_pcs.map(pc => '0x' + pc.toString(16)).join(', ')}`;\n                const headerText = `${pcsText} | Cost (software): ${formatMetric(block.metric_before)} | Effectiveness: ${block.effectiveness.toFixed(2)} | Instructions: ${block.instructions}`;\n\n                // Render each basic block as a sub-section within the superblock\n                const linesHtml = block.original_blocks.map((basicBlock, bbIdx) => {\n                    const subHeader = block.original_blocks.length > 1\n                        ? `<div style=\"padding: 2px 8px; font-size: 0.8em; color: #888; border-top: 1px dashed #ccc; margin-top: 2px;\">basic block 0x${basicBlock.start_pc.toString(16)}</div>`\n                        : '';\n                    const instrs = basicBlock.instructions.map((stmt, idx) =>\n                        `<div class=\"code-line\" data-pc=\"${basicBlock.start_pc}\" data-line=\"${idx}\">${escapeHtml(stmt)}</div>`\n                    ).join('');\n                    return subHeader + instrs;\n                }).join('');\n\n                codeHtml += `\n                    <div class=\"code-block\" id=\"${blockId}\" data-pc=\"${block.start_pc}\" data-labels='${labelData}'>\n                        <div class=\"code-block-header\">${headerText}</div>\n                        ${linesHtml}\n                    </div>\n                `;\n            });\n\n            codePanel.innerHTML = codeHtml;\n\n            // Add click handlers to code lines\n            document.querySelectorAll('.code-line').forEach(line => {\n                line.addEventListener('click', function () {\n                    const pc = parseInt(this.getAttribute('data-pc'));\n                    const blockData = allBlocks.find(b => b.block_pcs && 
b.block_pcs.includes(pc));\n                    if (blockData) {\n                        selectBlock(blockData);\n                    }\n                });\n            });\n\n            // Set up scroll listener to update sticky header\n            updateStickyLabel();\n            codePanel.removeEventListener('scroll', updateStickyLabel);\n            codePanel.addEventListener('scroll', updateStickyLabel);\n\n            // Set up click handler for sticky header\n            const stickyHeader = document.getElementById('stickyLabelHeader');\n            if (stickyHeader) {\n                stickyHeader.removeEventListener('click', handleStickyLabelClick);\n                stickyHeader.addEventListener('click', handleStickyLabelClick);\n            }\n        }\n\n        function handleStickyLabelClick() {\n            const stickyHeader = document.getElementById('stickyLabelHeader');\n            const pc = stickyHeader.getAttribute('data-pc');\n\n            if (pc) {\n                const pcValue = parseInt(pc);\n                const data = processData();\n                const block = data.find(d => d.block_pcs && d.block_pcs.includes(pcValue));\n\n                if (block) {\n                    selectBlock(block);\n                }\n            }\n        }\n\n        function updateStickyLabel() {\n            const codePanel = document.getElementById('codePanel');\n            const stickyHeader = document.getElementById('stickyLabelHeader');\n\n            if (!codePanel || !stickyHeader) return;\n\n            // Find the first visible block or label\n            const scrollTop = codePanel.scrollTop;\n            const panelTop = codePanel.getBoundingClientRect().top;\n\n            // Get all blocks and labels\n            const elements = codePanel.querySelectorAll('.label-line, .code-block');\n            let currentLabels = null;\n            let currentPc = null;\n\n            for (let i = 0; i < elements.length; i++) {\n                
const element = elements[i];\n                const elementTop = element.offsetTop;\n\n                // If this element is past our scroll position, use the previous label\n                if (elementTop > scrollTop) {\n                    break;\n                }\n\n                // Check if this element has labels\n                const labelsAttr = element.getAttribute('data-labels');\n                if (labelsAttr && labelsAttr !== '\"\"' && labelsAttr !== '') {\n                    try {\n                        currentLabels = JSON.parse(labelsAttr);\n                        currentPc = element.getAttribute('data-pc');\n                    } catch (e) {\n                        // Ignore parse errors\n                    }\n                }\n            }\n\n            // Update sticky header\n            if (currentLabels && currentLabels.length > 0 && currentPc) {\n                const labelHtml = currentLabels.map(label =>\n                    `<div class=\"label-name\">${escapeHtml(label)}</div>`\n                ).join('');\n                stickyHeader.innerHTML = labelHtml;\n                stickyHeader.setAttribute('data-pc', currentPc);\n                stickyHeader.classList.add('active');\n            } else {\n                stickyHeader.classList.remove('active');\n                stickyHeader.removeAttribute('data-pc');\n            }\n        }\n\n        function updateCodePanelSelection(blockData) {\n            const codeInfo = document.getElementById('codeBlockInfo');\n\n            // Remove all previous selections\n            document.querySelectorAll('.code-block').forEach(block => {\n                block.classList.remove('selected');\n            });\n            document.querySelectorAll('.code-line').forEach(line => {\n                line.classList.remove('highlighted');\n            });\n\n            if (!blockData) {\n                codeInfo.innerHTML = '<span class=\"text-muted\">Click on a bar or code line to select 
a block</span>';\n                return;\n            }\n\n            codeInfo.innerHTML = blockTooltipHtml(blockData);\n\n            if (blockData.is_other) return;\n\n            // Highlight the selected superblock\n            const blockElement = document.getElementById(`block-${blockData.block_pcs.join('_')}`);\n            if (blockElement) {\n                blockElement.classList.add('selected');\n\n                // Highlight all lines in this block\n                blockElement.querySelectorAll('.code-line').forEach(line => {\n                    line.classList.add('highlighted');\n                });\n\n                // Scroll within the code panel only, not the whole page\n                const codePanel = document.getElementById('codePanel');\n                const blockTop = blockElement.offsetTop;\n                codePanel.scrollTo({ top: blockTop, behavior: 'smooth' });\n            }\n        }\n\n        function clearCode() {\n            // Only clear if there's no selected block\n            if (!selectedBlock) {\n                document.getElementById('codeBlockInfo').innerHTML = '<span class=\"text-muted\">Click on a bar or code line to select a block</span>';\n                showAllCode();\n            }\n        }\n\n        function escapeHtml(text) {\n            const div = document.createElement('div');\n            div.textContent = text;\n            return div.innerHTML;\n        }\n\n        function createChart(data, totalCells, meanEffectiveness) {\n            // Clear existing chart\n            d3.select('#chart').selectAll('*').remove();\n\n            // Set dimensions and margins\n            const margin = { top: 40, right: 120, bottom: 60, left: 80 };\n            const containerWidth = document.getElementById('chart').clientWidth || 1200;\n            const width = containerWidth - margin.left - margin.right;\n            const height = 270 - margin.top - margin.bottom;\n\n            // Group small blocks (< 
0.1% threshold)\n            const threshold = totalCells * 0.001;\n            const largeBlocks = data.filter(d => d.software_version_cells >= threshold);\n            const smallBlocks = data.filter(d => d.software_version_cells < threshold);\n\n            let plotData = [...largeBlocks];\n\n            if (smallBlocks.length > 0) {\n                const otherCells = smallBlocks.reduce((sum, d) => sum + d.software_version_cells, 0);\n                const otherEffectiveness = smallBlocks.reduce((sum, d) => sum + d.effectiveness * d.software_version_cells, 0) / otherCells;\n                const otherExecFreq = smallBlocks.reduce((sum, d) => sum + d.execution_frequency, 0);\n                const otherStatsAfter = smallBlocks.reduce((acc, d) => {\n                    if (d.stats_after) {\n                        acc.main_columns = (acc.main_columns || 0) + d.stats_after.main_columns;\n                        acc.bus_interactions = (acc.bus_interactions || 0) + d.stats_after.bus_interactions;\n                        acc.constraints = (acc.constraints || 0) + d.stats_after.constraints;\n                    }\n                    return acc;\n                }, {});\n                plotData.push({\n                    effectiveness: otherEffectiveness,\n                    software_version_cells: otherCells,\n                    execution_frequency: otherExecFreq,\n                    instructions: -1,\n                    is_other: true,\n                    count: smallBlocks.length,\n                    statements: [],  // No individual statements for grouped blocks\n                    stats_after: otherStatsAfter\n                });\n            }\n\n            // Calculate positions\n            let xPos = 0;\n            plotData.forEach(d => {\n                d.x = xPos;\n                d.width = d.software_version_cells;\n                xPos += d.width;\n            });\n\n            // Create SVG\n            const svg = d3.select('#chart')\n     
           .append('svg')\n                .attr('width', width + margin.left + margin.right)\n                .attr('height', height + margin.top + margin.bottom)\n                .on('click', function (event) {\n                    // If clicking on the background (not a bar), deselect\n                    if (event.target.tagName === 'svg') {\n                        selectBlock(null);\n                    }\n                })\n                .append('g')\n                .attr('transform', `translate(${margin.left},${margin.top})`);\n\n            // Add background rect to capture clicks\n            svg.append('rect')\n                .attr('width', width)\n                .attr('height', height)\n                .style('fill', 'none')\n                .style('pointer-events', 'all')\n                .on('click', function (event) {\n                    event.stopPropagation();\n                    selectBlock(null);\n                });\n\n            // Create scales\n            const xScale = d3.scaleLinear()\n                .domain([0, totalCells])\n                .range([0, width]);\n\n            // Calculate 99th percentile effectiveness by trace cells\n            // Sort by effectiveness ascending\n            const sortedByEffectiveness = [...plotData].sort((a, b) => a.effectiveness - b.effectiveness);\n\n            // Find the effectiveness value at 99th percentile weighted by trace cells\n            let cumulativeCells = 0;\n            const p99Threshold = totalCells * 0.99;\n            let maxEffectivenessP99 = 0;\n\n            for (const d of sortedByEffectiveness) {\n                cumulativeCells += d.software_version_cells;\n                maxEffectivenessP99 = d.effectiveness;\n                if (cumulativeCells >= p99Threshold) {\n                    break;\n                }\n            }\n\n            // Use 99th percentile for y-axis scaling\n            const yScale = d3.scaleLinear()\n                .domain([0, 
maxEffectivenessP99 * 1.1])\n                .range([height, 0]);\n\n            // Color scale for instructions (log scale)\n            const validInstructions = plotData.filter(d => !d.is_other && d.instructions > 0).map(d => d.instructions);\n            const instrMin = d3.min(validInstructions) || 1;\n            const instrMax = d3.max(validInstructions) || instrMin;\n            const colorScale = d3.scaleSequentialLog()\n                .domain([instrMin, instrMax])\n                .interpolator(d3.interpolateRdYlGn);\n\n            // Add grid\n            svg.append('g')\n                .attr('class', 'grid')\n                .attr('transform', `translate(0,${height})`)\n                .call(d3.axisBottom(xScale)\n                    .tickSize(-height)\n                    .tickFormat(''));\n\n            svg.append('g')\n                .attr('class', 'grid')\n                .call(d3.axisLeft(yScale)\n                    .tickSize(-width)\n                    .tickFormat(''));\n\n            // Create tooltip\n            const tooltip = d3.select('body').append('div')\n                .attr('class', 'tooltip')\n                .style('opacity', 0);\n\n            // Add bars\n            svg.selectAll('.bar')\n                .data(plotData)\n                .enter().append('rect')\n                .attr('class', 'bar')\n                .attr('x', d => xScale(d.x))\n                .attr('y', d => Math.max(0, yScale(d.effectiveness)))\n                .attr('width', d => xScale(d.width) - xScale(0))\n                .attr('height', d => height - Math.max(0, yScale(d.effectiveness)))\n                .style('fill', d => d.is_other ? 
'lightgray' : colorScale(d.instructions))\n                .on('mouseover', function (event, d) {\n                    // Show tooltip\n                    tooltip.transition()\n                        .duration(200)\n                        .style('opacity', .9);\n\n                    const content = blockTooltipHtml(d);\n\n                    tooltip.html(content)\n                        .style('left', (event.pageX - 50) + 'px')\n                        .style('top', (event.pageY + 20) + 'px');\n\n                    // Highlight elements (but don't change code on hover if something is selected)\n                    if (!d.is_other) {\n                        highlightElements(d.block_id, true);\n                    }\n                })\n                .on('mouseout', function (d) {\n                    // Hide tooltip\n                    tooltip.transition()\n                        .duration(500)\n                        .style('opacity', 0);\n\n                    // Clear highlights\n                    clearHighlights();\n                })\n                .on('click', function (event, d) {\n                    event.stopPropagation();\n                    // Select this superblock on click\n                    selectBlock(d);\n                });\n\n            // Note: Selection is handled in the selectBlock function after chart is created\n\n            // Add \"Other\" label for wide enough other blocks\n            plotData.filter(d => d.is_other && d.width > totalCells * 0.02).forEach(d => {\n                svg.append('text')\n                    .attr('x', xScale(d.x + d.width / 2))\n                    .attr('y', yScale(d.effectiveness / 2))\n                    .attr('text-anchor', 'middle')\n                    .attr('font-size', '10px')\n                    .attr('font-weight', 'bold')\n                    .text(`Other (${d.count} APCs)`);\n            });\n\n            // Add mean line\n            svg.append('line')\n                
.attr('class', 'mean-line')\n                .attr('x1', 0)\n                .attr('x2', width)\n                .attr('y1', yScale(meanEffectiveness))\n                .attr('y2', yScale(meanEffectiveness));\n\n            // Add axes\n            svg.append('g')\n                .attr('transform', `translate(0,${height})`)\n                .call(d3.axisBottom(xScale)\n                    .tickFormat(d => formatMetric(d)));\n\n            svg.append('g')\n                .call(d3.axisLeft(yScale));\n\n            // Add labels\n            const effType = effectivenessType.options[effectivenessType.selectedIndex].text;\n            svg.append('text')\n                .attr('transform', 'rotate(-90)')\n                .attr('y', 0 - margin.left)\n                .attr('x', 0 - (height / 2))\n                .attr('dy', '1em')\n                .style('text-anchor', 'middle')\n                .text('Effectiveness');\n\n            svg.append('text')\n                .attr('transform', `translate(${width / 2}, ${height + margin.bottom})`)\n                .style('text-anchor', 'middle')\n                .text(`Cumulative ${effType.toLowerCase()} before (software version)`);\n\n            // Add title\n            svg.append('text')\n                .attr('x', width / 2)\n                .attr('y', 0 - margin.top / 2)\n                .attr('text-anchor', 'middle')\n                .style('font-size', '16px')\n                .style('font-weight', 'bold')\n                .text(`Effectiveness by Basic Block (reduction in ${effType})`);\n\n            // Add mean text box\n            svg.append('rect')\n                .attr('x', 5)\n                .attr('y', 5)\n                .attr('width', 80)\n                .attr('height', 25)\n                .style('fill', 'wheat')\n                .style('opacity', 0.8)\n                .style('stroke', 'gray')\n                .style('stroke-width', 1)\n                .style('rx', 3);\n\n            svg.append('text')\n   
             .attr('x', 45)\n                .attr('y', 22)\n                .attr('text-anchor', 'middle')\n                .style('font-size', '12px')\n                .text(`Mean: ${meanEffectiveness.toFixed(2)}`);\n\n            // Add color legend\n            if (validInstructions.length > 0) {\n                const legendWidth = 20;\n                const legendHeight = 200;\n\n                const legendScale = d3.scaleLinear()\n                    .domain([Math.log10(instrMin), Math.log10(instrMax)])\n                    .range([legendHeight, 0]);\n\n                const legendAxis = d3.axisRight(legendScale)\n                    .ticks(5)\n                    .tickFormat(d => Math.pow(10, d).toFixed(0));\n\n                const legend = svg.append('g')\n                    .attr('transform', `translate(${width + 40}, ${height / 2 - legendHeight / 2})`);\n\n                // Create gradient\n                const gradientId = 'instruction-gradient';\n                const gradient = svg.append('defs')\n                    .append('linearGradient')\n                    .attr('id', gradientId)\n                    .attr('x1', '0%')\n                    .attr('y1', '100%')\n                    .attr('x2', '0%')\n                    .attr('y2', '0%');\n\n                const steps = 20;\n                for (let i = 0; i <= steps; i++) {\n                    const t = i / steps;\n                    const value = instrMin * Math.pow(instrMax / instrMin, t);\n                    gradient.append('stop')\n                        .attr('offset', `${t * 100}%`)\n                        .style('stop-color', colorScale(value));\n                }\n\n                legend.append('rect')\n                    .attr('width', legendWidth)\n                    .attr('height', legendHeight)\n                    .style('fill', `url(#${gradientId})`);\n\n                legend.append('g')\n                    .attr('transform', `translate(${legendWidth}, 0)`)\n         
           .call(legendAxis);\n\n                legend.append('text')\n                    .attr('transform', `rotate(90)`)\n                    .attr('y', -legendWidth - 30)\n                    .attr('x', legendHeight / 2)\n                    .style('text-anchor', 'middle')\n                    .style('font-size', '12px')\n                    .text('Instructions (log)');\n            }\n\n            // Apply selection styling if a block is already selected\n            updateBarSelection();\n        }\n    </script>\n</body>\n\n</html>"
  },
  {
    "path": "autoprecompiles/Cargo.toml",
    "content": "[package]\nname = \"powdr-autoprecompiles\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[dependencies]\npowdr-expression.workspace = true\npowdr-number.workspace = true\npowdr-constraint-solver.workspace = true\n\nitertools.workspace = true\nlog.workspace = true\nnum-traits.workspace = true\nserde.workspace = true\ntracing.workspace = true\ntracing-subscriber = { version = \"0.3.17\", features = [\"std\", \"env-filter\"] }\nserde_json.workspace = true\nrayon = \"1.10.0\"\nstrum = { version = \"0.27.0\", features = [\"derive\"] }\npriority-queue = \"2.7.0\"\n\nmetrics.workspace = true\ndeepsize2 = \"0.1.0\"\nderive_more.workspace = true\nderivative.workspace = true\n\n[dev-dependencies]\nexpect-test = \"1.5.1\"\nflate2 = \"1.1.2\"\npowdr-openvm-bus-interaction-handler.workspace = true\ntest-log.workspace = true\ncriterion = { version = \"0.4\", features = [\"html_reports\"] }\n\n[package.metadata.cargo-udeps.ignore]\ndevelopment = [\"env_logger\"]\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n\n[[bench]]\nname = \"optimizer_benchmark\"\nharness = false\n"
  },
  {
    "path": "autoprecompiles/benches/optimizer_benchmark.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion};\nuse powdr_autoprecompiles::{\n    bus_map::BusMap,\n    export::{ApcWithBusMap, SimpleInstruction},\n    optimizer::optimize,\n    Apc, ColumnAllocator, DegreeBound,\n};\nuse powdr_number::BabyBearField;\n\nuse powdr_openvm_bus_interaction_handler::{\n    bus_map::OpenVmBusType, memory_bus_interaction::OpenVmMemoryBusInteraction,\n    OpenVmBusInteractionHandler,\n};\n\ntype TestApc = Apc<BabyBearField, SimpleInstruction<BabyBearField>, (), ()>;\n\nconst DEFAULT_DEGREE_BOUND: DegreeBound = DegreeBound {\n    identities: 3,\n    bus_interactions: 2,\n};\n\n/// Benching the `test_optimize` test\nfn optimize_keccak_benchmark(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"optimize-keccak\");\n    group.sample_size(10);\n\n    let file = std::fs::File::open(\"tests/keccak_apc_pre_opt.json.gz\").unwrap();\n    let reader = flate2::read::GzDecoder::new(file);\n    let apc: ApcWithBusMap<TestApc, BusMap<OpenVmBusType>> =\n        serde_json::from_reader(reader).unwrap();\n\n    group.bench_function(\"optimize\", |b| {\n        b.iter_batched(\n            || {\n                (\n                    apc.apc.machine.clone(),\n                    ColumnAllocator::from_max_poly_id_of_machine(&apc.apc.machine),\n                )\n            },\n            |(machine, column_allocator)| {\n                optimize::<_, _, _, OpenVmMemoryBusInteraction<_, _>>(\n                    black_box(machine),\n                    OpenVmBusInteractionHandler::default(),\n                    DEFAULT_DEGREE_BOUND,\n                    &apc.bus_map,\n                    column_allocator,\n                    &mut Default::default(),\n                )\n                .unwrap()\n            },\n            criterion::BatchSize::SmallInput,\n        );\n    });\n    group.finish();\n}\n\ncriterion_group!(benches, optimize_keccak_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "autoprecompiles/scripts/plot_effectiveness.py",
    "content": "#!/usr/bin/env python3\n\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport argparse\n\ndef load_apc_data(json_path, effectiveness_type='cost'):\n    \"\"\"Load APC candidates and compute effectiveness.\"\"\"\n    with open(json_path, 'r') as f:\n        data = json.load(f)[\"apcs\"]\n    \n    def get_before_after_cost(item, eff_type):\n        if eff_type == 'cost':\n            return (item['cost_before'], item['cost_after'])\n        elif eff_type == 'main_columns':\n            return (item['stats']['before']['main_columns'], item['stats']['after']['main_columns'])\n        elif eff_type == 'constraints':\n            return (item['stats']['before']['constraints'], item['stats']['after']['constraints'])\n        elif eff_type == 'bus_interactions':\n            return (item['stats']['before']['bus_interactions'], item['stats']['after']['bus_interactions'])\n        else:\n            raise ValueError(f\"Unknown effectiveness type: {eff_type}\")\n        \n    rows = []\n    for item in data:\n        cost_before, cost_after = get_before_after_cost(item, effectiveness_type)\n        rows.append({\n            'start_pcs': [b['start_pc'] for b in item['original_blocks']],\n            'cost_before': cost_before * item['execution_frequency'],\n            'cost_after': cost_after * item['execution_frequency'],\n            'effectiveness': cost_before / cost_after,\n            'instructions': sum(len(b['instructions']) for b in item['original_blocks']),\n        })\n\n    return pd.DataFrame(rows)\n\ndef format_cell_count(count):\n    \"\"\"Format cell count with appropriate units.\"\"\"\n    if count >= 1e9:\n        return f\"{count/1e9:.1f}B\"\n    elif count >= 1e6:\n        return f\"{count/1e6:.1f}M\"\n    elif count >= 1e3:\n        return f\"{count/1e3:.1f}K\"\n    else:\n        return f\"{count:.0f}\"\n\ndef plot_effectiveness(json_path, filename=None, 
effectiveness_type='cost'):\n    \"\"\"Generate bar plot of effectiveness data.\"\"\"\n    df = load_apc_data(json_path, effectiveness_type)\n    total_cost_before = df['cost_before'].sum()\n    total_cost_after = df['cost_after'].sum()\n\n    # Print top 10 basic blocks\n    top10 = df.nlargest(10, 'cost_before')[['start_pcs', 'cost_before', 'effectiveness', 'instructions']]\n    print(top10)\n    top10['cost_before'] = top10['cost_before'].apply(format_cell_count)\n    top10.columns = ['Start PCs', 'Cost before', 'Effectiveness', 'Instructions']\n    print(f\"\\nTop 10 Basic Blocks by {effectiveness_type}:\")\n    print(top10.to_string(index=False))\n    print()\n    \n    # Calculate weighted mean effectiveness, corresponding to the overall effectiveness\n    # assuming that all basic blocks are accelerated.\n    mean_effectiveness = (df['effectiveness'] * df['cost_after']).sum() / total_cost_after\n    print(f\"Mean effectiveness: {mean_effectiveness:.2f}\")\n    \n    # Separate large and small APCs (< 0.1% threshold)\n    threshold = total_cost_before * 0.001\n    df_large = df[df['cost_before'] >= threshold].copy()\n    df_small = df[df['cost_before'] < threshold]\n    \n    # Sort large APCs by cost\n    df_large = df_large.sort_values('cost_before', ascending=False)\n    \n    # Create 'Other' entry if there are small APCs\n    if len(df_small) > 0:\n        other_cost = df_small['cost_before'].sum()\n        other_effectiveness = (df_small['effectiveness'] * df_small['cost_before']).sum() / other_cost\n        other_row = pd.DataFrame([{\n            'effectiveness': other_effectiveness,\n            'cost_before': other_cost,\n            'instructions': -1,  # Special marker for Other\n            'is_other': True\n        }])\n        df_plot = pd.concat([df_large.assign(is_other=False), other_row], ignore_index=True)\n    else:\n        df_plot = df_large.assign(is_other=False)\n    \n    # Create plot\n    fig, ax = plt.subplots(figsize=(12, 6))\n    
\n    # Set up color mapping with log scale\n    valid_instructions = df_plot[~df_plot['is_other']]['instructions']\n    if len(valid_instructions) > 0:\n        norm = mcolors.LogNorm(vmin=valid_instructions.min(), vmax=valid_instructions.max())\n        cmap = plt.cm.RdYlGn  # Red-Yellow-Green colormap\n    \n    # Plot bars\n    x_pos = 0\n    for idx, row in df_plot.iterrows():\n        width = row['cost_before']\n        \n        if row.get('is_other', False):\n            color = 'lightgray'\n        else:\n            color = cmap(norm(row['instructions']))\n        \n        ax.bar(x_pos + width/2, row['effectiveness'], width=width,\n               color=color, edgecolor='black', linewidth=0.5, alpha=0.8)\n        \n        # Label 'Other' box if it's wide enough\n        if row.get('is_other', False) and width > total_cost_before * 0.02:  # Only label if > 2% of total width\n            ax.text(x_pos + width/2, row['effectiveness']/2, \n                   f'Other\\n({len(df_small)} APCs)',\n                   ha='center', va='center', fontsize=10, \n                   color='black', weight='bold')\n        \n        x_pos += width\n    \n    # Formatting\n    ax.set_xlabel('Cumulative cost before (software version)', fontsize=12)\n    ax.set_ylabel('Effectiveness', fontsize=12)\n    ax.set_title(f\"Effectiveness by Basic Block (reduction in {effectiveness_type})\", fontsize=14)\n    ax.grid(True, alpha=0.3, axis='y')\n    ax.axhline(mean_effectiveness, color='red', linestyle='--', linewidth=2, alpha=0.7)\n    \n    # Format x-axis\n    ax.set_xlim(0, total_cost_before)\n    x_ticks = ax.get_xticks()\n    ax.set_xticklabels([format_cell_count(x) for x in x_ticks])\n    \n    # Add colorbar for instruction count\n    if len(valid_instructions) > 0:\n        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n        sm.set_array([])\n        cbar = plt.colorbar(sm, ax=ax, pad=0.02)\n        cbar.set_label('Instructions (log scale)', rotation=270, 
labelpad=20)\n    \n    # Add mean text\n    ax.text(0.02, 0.97, f'Mean: {mean_effectiveness:.2f}', \n            transform=ax.transAxes, fontsize=10, verticalalignment='top',\n            bbox=dict(boxstyle='round,pad=0.5', facecolor='wheat', alpha=0.8))\n    \n    plt.tight_layout()\n    \n    # Save or show\n    if filename:\n        plt.savefig(filename, dpi=300, bbox_inches='tight')\n    else:\n        plt.show()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Plot effectiveness analysis from APC candidates JSON file.\")\n    parser.add_argument(\"json_path\", help=\"Path to the APC candidates JSON file\")\n    parser.add_argument(\"-o\", \"--output\", help=\"Optional file name to save the plot\", default=None)\n    parser.add_argument(\"-e\", \"--effectiveness\", \n                       choices=['cost', 'main_columns', 'constraints', 'bus_interactions'],\n                       default='cost',\n                       help=\"Type of effectiveness calculation (default: cost_before/cost_after)\")\n    args = parser.parse_args()\n    \n    plot_effectiveness(args.json_path, args.output, args.effectiveness)\n"
  },
  {
    "path": "autoprecompiles/scripts/rank_apc_candidates.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nSimple APC Candidates JSON Parser\n\nThis script parses the apc_candidates.json file and extracts key information\nin a concise format.\n\"\"\"\n\nimport json\nimport sys\nimport argparse\nfrom pathlib import Path\nfrom tabulate import tabulate\n\n\ndef main():\n    \"\"\"Parse APC candidates and show key information.\"\"\"\n    parser = argparse.ArgumentParser(description=\"Parse APC candidates and show key information.\")\n    parser.add_argument(\"json_file\", help=\"Path to the APC candidates JSON file\")\n    parser.add_argument(\"-o\", \"--output\", help=\"Output file (default: stdout)\", default=None)\n    args = parser.parse_args()\n    \n    json_file = Path(args.json_file)\n    output_file = args.output\n    \n    if not json_file.exists():\n        print(f\"Error: File {json_file} not found!\")\n        sys.exit(1)\n    \n    try:\n        with open(json_file, 'r') as f:\n            data = json.load(f)[\"apcs\"]\n    except Exception as e:\n        print(f\"Error reading file: {e}\")\n        sys.exit(1)\n    \n    # Capture output to write to file\n    output_lines = []\n    \n    # Process and calculate densitys for each candidate\n    candidates_with_densitys = []\n    \n    for i, candidate in enumerate(data):\n        start_pcs = [b[\"start_pc\"] for b in candidate[\"original_blocks\"]]\n        freq = candidate[\"execution_frequency\"]\n        num_instructions = sum(len(b[\"instructions\"]) for b in candidate[\"original_blocks\"])\n        \n        # Get optimization stats\n        before_constraints = candidate[\"stats\"][\"before\"][\"constraints\"]\n        after_constraints = candidate[\"stats\"][\"after\"][\"constraints\"]\n        before_main_columns = candidate[\"stats\"][\"before\"][\"main_columns\"]\n        after_main_columns = candidate[\"stats\"][\"after\"][\"main_columns\"]\n        before_bus_interactions = candidate[\"stats\"][\"before\"][\"bus_interactions\"]\n        
after_bus_interactions = candidate[\"stats\"][\"after\"][\"bus_interactions\"]\n        value = candidate[\"value\"]\n        cost_before = candidate[\"cost_before\"]\n        cost_after = candidate[\"cost_after\"]\n        \n        # Calculate improvements as factors (before/after ratios)\n        cost_improvement_factor = cost_before / cost_after\n        constraint_improvement_factor = before_constraints / after_constraints\n        main_columns_improvement_factor = before_main_columns / after_main_columns\n        bus_interactions_improvement_factor = before_bus_interactions / after_bus_interactions\n\n        # Calculate density used for ranking candidates\n        density = value / cost_after\n        \n        candidates_with_densitys.append({\n            'index': i + 1,\n            'start_pcs': start_pcs,\n            'freq': freq,\n            'num_instructions': num_instructions,\n            'before_constraints': before_constraints,\n            'after_constraints': after_constraints,\n            'before_main_columns': before_main_columns,\n            'after_main_columns': after_main_columns,\n            'before_bus_interactions': before_bus_interactions,\n            'after_bus_interactions': after_bus_interactions,\n            'cost_improvement_factor': cost_improvement_factor,\n            'constraint_improvement_factor': constraint_improvement_factor,\n            'main_columns_improvement_factor': main_columns_improvement_factor,\n            'bus_interactions_improvement_factor': bus_interactions_improvement_factor,\n            'value': value,\n            'cost_before': cost_before,\n            'cost_after': cost_after,\n            'density': density,\n        })\n    \n    # Sort by descending density\n    candidates_with_densitys.sort(key=lambda x: x['density'], reverse=True)\n    \n    # Summary statistics (moved to top)\n    output_lines.append(\"\")\n    output_lines.append(\"=\" * 120)\n    output_lines.append(f\"SUMMARY STATISTICS 
OVER ALL APC CANDIDATES\")\n    output_lines.append(\"=\" * 120)\n    \n    total_candidates = len(data)\n    total_instructions = sum(len(b[\"instructions\"]) for c in data for b in c[\"original_blocks\"])\n    \n    total_cost_before = sum(c[\"cost_before\"] for c in data)\n    total_cost_after = sum(c[\"cost_after\"] for c in data)\n    total_cost_improvement_factor = total_cost_before / total_cost_after\n    \n    total_before_constraints = sum(c[\"stats\"][\"before\"][\"constraints\"] for c in data)\n    total_after_constraints = sum(c[\"stats\"][\"after\"][\"constraints\"] for c in data)\n    total_constraint_improvement_factor = total_before_constraints / total_after_constraints\n    \n    total_before_main_columns = sum(c[\"stats\"][\"before\"][\"main_columns\"] for c in data)\n    total_after_main_columns = sum(c[\"stats\"][\"after\"][\"main_columns\"] for c in data)\n    main_columns_improvement_factor = total_before_main_columns / total_after_main_columns\n    \n    total_before_bus_interactions = sum(c[\"stats\"][\"before\"][\"bus_interactions\"] for c in data)\n    total_after_bus_interactions = sum(c[\"stats\"][\"after\"][\"bus_interactions\"] for c in data)\n    total_bus_interactions_improvement_factor = total_before_bus_interactions / total_after_bus_interactions\n    \n    output_lines.append(f\"# of APC Candidates: {total_candidates}\")\n    output_lines.append(f\"Sum of Instructions: {total_instructions}\")\n    output_lines.append(f\"Average Instructions per APC Candidate: {total_instructions / total_candidates:.1f}\")\n    output_lines.append(\"\")\n    output_lines.append(f\"Sum of Cost: {total_cost_before} → {total_cost_after} ({total_cost_improvement_factor:.2f}x reduction)\")\n    output_lines.append(f\"Sum of Main Columns: {total_before_main_columns} → {total_after_main_columns} ({main_columns_improvement_factor:.2f}x reduction)\")\n    output_lines.append(f\"Sum of Constraints: {total_before_constraints} → {total_after_constraints} 
({total_constraint_improvement_factor:.2f}x reduction)\")\n    output_lines.append(f\"Sum of Bus Interactions: {total_before_bus_interactions} → {total_after_bus_interactions} ({total_bus_interactions_improvement_factor:.2f}x reduction)\")\n    \n    # Statement count distribution\n    stmt_dist = {}\n    for c in data:\n        stmt_count = sum(len(b[\"instructions\"]) for b in c[\"original_blocks\"])\n        stmt_dist[stmt_count] = stmt_dist.get(stmt_count, 0) + 1\n    \n    output_lines.append(\"\")\n    output_lines.append(\"# of Instructions Distribution:\")\n    stmt_table_data = []\n    for stmt_count in sorted(stmt_dist.keys()):\n        count = stmt_dist[stmt_count]\n        percentage = (count / total_candidates) * 100\n        stmt_table_data.append([stmt_count, count, f\"{percentage:.1f}%\"])\n    \n    stmt_table_headers = [\"Instructions\", \"# of Candidates\", \"Percentage\"]\n    stmt_table_output = tabulate(stmt_table_data, headers=stmt_table_headers, tablefmt=\"grid\")\n    output_lines.append(stmt_table_output)\n    \n    # Frequency distribution\n    freq_dist = {}\n    for c in data:\n        freq = c[\"execution_frequency\"]\n        freq_dist[freq] = freq_dist.get(freq, 0) + 1\n    \n    output_lines.append(\"\")\n    output_lines.append(\"Execution Frequency Distribution:\")\n    freq_table_data = []\n    for freq in sorted(freq_dist.keys()):\n        count = freq_dist[freq]\n        percentage = (count / total_candidates) * 100\n        freq_table_data.append([f\"{freq}x\", count, f\"{percentage:.1f}%\"])\n    \n    freq_table_headers = [\"Frequency\", \"# of Candidates\", \"Percentage\"]\n    freq_table_output = tabulate(freq_table_data, headers=freq_table_headers, tablefmt=\"grid\")\n    output_lines.append(freq_table_output)\n    \n    # Show sorted candidates by density using tabulate\n    output_lines.append(\"\")\n    output_lines.append(\"=\" * 120)\n    output_lines.append(\"APC CANDIDATES RANKED BY DENSITY (VALUE / 
COST_AFTER)\")\n    output_lines.append(\"=\" * 120)\n    \n    # Prepare table data for tabulate\n    table_headers = [\n        \"Rank\", \"Start PCs\", \"# of Instr\", \"Freq\", \"Value\", \"Cost Before -> After (Redux)\", \n        \"Density\", \"Main Cols Before -> After (Redux)\",\n        \"Constraints Before -> After (Redux)\", \"Bus Int Before -> After (Redux)\"\n    ]\n    \n    table_data = []\n    for i, candidate in enumerate(candidates_with_densitys):\n        row = [\n            i + 1,\n            str(candidate['start_pcs']),\n            candidate['num_instructions'],\n            f\"{candidate['freq']}x\",\n            f\"{candidate['value']:.0f}\",\n            f\"{candidate['cost_before']:.0f} -> {candidate['cost_after']:.0f} ({candidate['cost_improvement_factor']:.1f}x)\",\n            f\"{candidate['density']:.2f}\",\n            f\"{candidate['before_main_columns']} -> {candidate['after_main_columns']} ({candidate['main_columns_improvement_factor']:.1f}x)\",\n            f\"{candidate['before_constraints']} -> {candidate['after_constraints']} ({candidate['constraint_improvement_factor']:.1f}x)\",\n            f\"{candidate['before_bus_interactions']} -> {candidate['after_bus_interactions']} ({candidate['bus_interactions_improvement_factor']:.1f}x)\"\n        ]\n        table_data.append(row)\n    \n    # Generate table using tabulate\n    table_output = tabulate(table_data, headers=table_headers, tablefmt=\"grid\")\n    output_lines.append(table_output)\n    \n    # Write output to file or stdout\n    try:\n        if output_file:\n            with open(output_file, 'w') as f:\n                for line in output_lines:\n                    f.write(line + '\\n')\n            print(f\"Output written to: {output_file}\")\n        else:\n            # Write to stdout\n            for line in output_lines:\n                print(line)\n    except Exception as e:\n        print(f\"Error writing to output file: {e}\")\n        # Fallback to console 
output\n        for line in output_lines:\n            print(line)\n\n\nif __name__ == \"__main__\":\n    main() \n"
  },
  {
    "path": "autoprecompiles/scripts/readme.md",
    "content": "### Scripts\n\nSet up (from the project root):\n\n```bash\npython3 -m venv .venv\nsource .venv/bin/activate\npip install -r autoprecompiles/scripts/requirements.txt\n```"
  },
  {
    "path": "autoprecompiles/scripts/requirements.txt",
    "content": "pandas\nmatplotlib\ntabulate"
  },
  {
    "path": "autoprecompiles/src/adapter.rs",
    "content": "use powdr_constraint_solver::constraint_system::BusInteractionHandler;\nuse std::collections::BTreeMap;\nuse std::hash::Hash;\nuse std::{fmt::Display, sync::Arc};\n\nuse powdr_number::FieldElement;\nuse serde::{Deserialize, Serialize};\n\nuse crate::blocks::{detect_superblocks, ExecutionBlocks, SuperBlock};\nuse crate::empirical_constraints::EmpiricalConstraints;\nuse crate::evaluation::EvaluationResult;\nuse crate::execution::{ExecutionState, OptimisticConstraint, OptimisticConstraints};\nuse crate::execution_profile::ExecutionProfile;\nuse crate::{\n    blocks::{BasicBlock, Instruction, Program},\n    constraint_optimizer::IsBusStateful,\n    memory_optimizer::MemoryBusInteraction,\n    range_constraint_optimizer::RangeConstraintHandler,\n    Apc, InstructionHandler, PowdrConfig, VmConfig,\n};\n\n#[derive(Serialize, Deserialize)]\npub struct ApcWithStats<F, I, A, V, S> {\n    apc: Arc<Apc<F, I, A, V>>,\n    stats: S,\n    evaluation_result: EvaluationResult,\n}\nimpl<F, I, A, V, S> ApcWithStats<F, I, A, V, S> {\n    pub fn new(apc: Arc<Apc<F, I, A, V>>, stats: S, evaluation_result: EvaluationResult) -> Self {\n        Self {\n            apc,\n            stats,\n            evaluation_result,\n        }\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub fn into_parts(self) -> (Arc<Apc<F, I, A, V>>, S, EvaluationResult) {\n        (self.apc, self.stats, self.evaluation_result)\n    }\n\n    pub fn apc(&self) -> &Apc<F, I, A, V> {\n        &self.apc\n    }\n\n    pub fn stats(&self) -> &S {\n        &self.stats\n    }\n\n    pub fn evaluation_result(&self) -> EvaluationResult {\n        self.evaluation_result\n    }\n}\n\npub trait PgoAdapter {\n    type Adapter: Adapter;\n\n    fn filter_blocks_and_create_apcs_with_pgo(\n        &self,\n        blocks: Vec<AdapterBasicBlock<Self::Adapter>>,\n        config: &PowdrConfig,\n        vm_config: AdapterVmConfig<Self::Adapter>,\n        labels: BTreeMap<u64, Vec<String>>,\n        
empirical_constraints: EmpiricalConstraints,\n    ) -> Vec<AdapterApcWithStats<Self::Adapter>> {\n        let blocks = if let Some(prof) = self.execution_profile() {\n            detect_superblocks(config, &prof.pc_list, blocks)\n        } else {\n            let superblocks = blocks\n                .into_iter()\n                .map(SuperBlock::from)\n                // filter invalid APC candidates\n                .filter(|sb| sb.instructions().count() > 1)\n                .collect();\n            ExecutionBlocks::new_without_pgo(superblocks)\n        };\n\n        self.create_apcs_with_pgo(blocks, config, vm_config, labels, empirical_constraints)\n    }\n\n    fn create_apcs_with_pgo(\n        &self,\n        exec_blocks: AdapterExecutionBlocks<Self::Adapter>,\n        config: &PowdrConfig,\n        vm_config: AdapterVmConfig<Self::Adapter>,\n        labels: BTreeMap<u64, Vec<String>>,\n        empirical_constraints: EmpiricalConstraints,\n    ) -> Vec<AdapterApcWithStats<Self::Adapter>>;\n\n    fn execution_profile(&self) -> Option<&ExecutionProfile> {\n        None\n    }\n\n    fn pc_execution_count(&self, pc: u64) -> Option<u32> {\n        self.execution_profile()\n            .and_then(|prof| prof.pc_count.get(&pc).cloned())\n    }\n}\n\npub trait Adapter: Sized\nwhere\n    Self::InstructionHandler:\n        InstructionHandler<Field = Self::Field, Instruction = Self::Instruction>,\n{\n    type Field: Serialize + for<'de> Deserialize<'de> + Send + Sync + Clone;\n    type PowdrField: FieldElement;\n    type InstructionHandler: InstructionHandler + Sync;\n    type BusInteractionHandler: BusInteractionHandler<Self::PowdrField>\n        + Clone\n        + IsBusStateful<Self::PowdrField>\n        + RangeConstraintHandler<Self::PowdrField>\n        + Sync;\n    type Program: Program<Self::Instruction> + Send;\n    type Instruction: Instruction<Self::Field> + Serialize + for<'de> Deserialize<'de> + Send + Sync;\n    type MemoryBusInteraction<V: Ord + Clone + Eq 
+ Display + Hash>: MemoryBusInteraction<\n        Self::PowdrField,\n        V,\n    >;\n    type CustomBusTypes: Clone\n        + Display\n        + Sync\n        + Eq\n        + PartialEq\n        + Serialize\n        + for<'de> Deserialize<'de>;\n    type ApcStats: Send + Sync;\n    type AirId: Eq + Hash + Send + Sync;\n    type ExecutionState: ExecutionState;\n\n    fn into_field(e: Self::PowdrField) -> Self::Field;\n\n    fn from_field(e: Self::Field) -> Self::PowdrField;\n\n    /// Given the autoprecompile and the original instructions, return the stats\n    fn apc_stats(\n        apc: Arc<AdapterApc<Self>>,\n        instruction_handler: &Self::InstructionHandler,\n    ) -> Self::ApcStats;\n\n    fn is_branching(instr: &Self::Instruction) -> bool;\n\n    fn is_allowed(instr: &Self::Instruction) -> bool;\n}\n\npub type AdapterApcWithStats<A> = ApcWithStats<\n    <A as Adapter>::Field,\n    <A as Adapter>::Instruction,\n    <<A as Adapter>::ExecutionState as ExecutionState>::RegisterAddress,\n    <<A as Adapter>::ExecutionState as ExecutionState>::Value,\n    <A as Adapter>::ApcStats,\n>;\npub type ApcStats<A> = <A as Adapter>::ApcStats;\npub type AdapterApc<A> = Apc<\n    <A as Adapter>::Field,\n    <A as Adapter>::Instruction,\n    <<A as Adapter>::ExecutionState as ExecutionState>::RegisterAddress,\n    <<A as Adapter>::ExecutionState as ExecutionState>::Value,\n>;\npub type AdapterApcOverPowdrField<A> = Apc<\n    <A as Adapter>::PowdrField,\n    <A as Adapter>::Instruction,\n    <<A as Adapter>::ExecutionState as ExecutionState>::RegisterAddress,\n    <<A as Adapter>::ExecutionState as ExecutionState>::Value,\n>;\npub type AdapterVmConfig<'a, A> = VmConfig<\n    'a,\n    <A as Adapter>::InstructionHandler,\n    <A as Adapter>::BusInteractionHandler,\n    <A as Adapter>::CustomBusTypes,\n>;\npub type AdapterExecutionState<A> = <A as Adapter>::ExecutionState;\npub type AdapterOptimisticConstraints<A> = OptimisticConstraints<\n    <<A as 
Adapter>::ExecutionState as ExecutionState>::RegisterAddress,\n    <<A as Adapter>::ExecutionState as ExecutionState>::Value,\n>;\npub type AdapterOptimisticConstraint<A> = OptimisticConstraint<\n    <<A as Adapter>::ExecutionState as ExecutionState>::RegisterAddress,\n    <<A as Adapter>::ExecutionState as ExecutionState>::Value,\n>;\npub type AdapterBasicBlock<A> = BasicBlock<<A as Adapter>::Instruction>;\npub type AdapterSuperBlock<A> = SuperBlock<<A as Adapter>::Instruction>;\npub type AdapterExecutionBlocks<A> = ExecutionBlocks<<A as Adapter>::Instruction>;\n"
  },
  {
    "path": "autoprecompiles/src/blocks/detection.rs",
    "content": "use std::collections::BTreeSet;\n\nuse crate::{\n    adapter::Adapter,\n    blocks::{BasicBlock, Program},\n};\n\n/// Collects basic blocks from a program\npub fn collect_basic_blocks<A: Adapter>(\n    program: &A::Program,\n    jumpdest_set: &BTreeSet<u64>,\n) -> Vec<BasicBlock<A::Instruction>> {\n    let mut blocks = Vec::new();\n    let mut curr_block = BasicBlock {\n        start_pc: program.instruction_index_to_pc(0),\n        instructions: Vec::new(),\n    };\n    for (i, instr) in program.instructions().enumerate() {\n        let is_target = jumpdest_set.contains(&program.instruction_index_to_pc(i));\n        let is_branching = A::is_branching(&instr);\n        let is_allowed = A::is_allowed(&instr);\n\n        // If this opcode cannot be in an apc, we make sure it's alone in a BB.\n        if !is_allowed {\n            // If not empty, push the current block.\n            if !curr_block.instructions.is_empty() {\n                blocks.push(curr_block);\n            }\n            // Push the instruction itself\n            blocks.push(BasicBlock {\n                start_pc: program.instruction_index_to_pc(i),\n                instructions: vec![instr.clone()],\n            });\n            // Skip the instruction and start a new block from the next instruction.\n            curr_block = BasicBlock {\n                start_pc: program.instruction_index_to_pc(i + 1),\n                instructions: Vec::new(),\n            };\n        } else {\n            // If the instruction is a target, we need to close the previous block\n            // as is if not empty and start a new block from this instruction.\n            if is_target {\n                if !curr_block.instructions.is_empty() {\n                    blocks.push(curr_block);\n                }\n                curr_block = BasicBlock {\n                    start_pc: program.instruction_index_to_pc(i),\n                    instructions: Vec::new(),\n                };\n            }\n  
          curr_block.instructions.push(instr.clone());\n            // If the instruction is a branch, we need to close this block\n            // with this instruction and start a new block from the next one.\n            if is_branching {\n                blocks.push(curr_block); // guaranteed to be non-empty because an instruction was just pushed\n                curr_block = BasicBlock {\n                    start_pc: program.instruction_index_to_pc(i + 1),\n                    instructions: Vec::new(),\n                };\n            }\n        }\n    }\n\n    if !curr_block.instructions.is_empty() {\n        blocks.push(curr_block);\n    }\n\n    tracing::info!(\n        \"Got {} basic blocks from `collect_basic_blocks`\",\n        blocks.len()\n    );\n\n    blocks\n}\n"
  },
  {
    "path": "autoprecompiles/src/blocks/mod.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashMap},\n    fmt::Display,\n};\n\nuse itertools::Itertools;\nuse rayon::iter::{\n    IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,\n};\nuse serde::{Deserialize, Serialize};\n\n/// Tools to detect basic blocks in a program\nmod detection;\n\npub use detection::collect_basic_blocks;\n\nuse crate::PowdrConfig;\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\n/// A sequence of instructions starting at a given PC.\npub struct BasicBlock<I> {\n    /// The program counter of the first instruction in this block.\n    pub start_pc: u64,\n    pub instructions: Vec<I>,\n}\n\nimpl<I: PcStep> BasicBlock<I> {\n    /// Returns an iterator over the program counters of the instructions in this block.\n    pub fn pcs(&self) -> impl Iterator<Item = u64> + '_ {\n        (0..self.instructions.len()).map(move |i| self.start_pc + (i as u64 * I::pc_step() as u64))\n    }\n\n    /// Returns an iterator over the program counters of the instructions in this block.\n    pub fn instructions(&self) -> impl Iterator<Item = (u64, &I)> + '_ {\n        self.instructions\n            .iter()\n            .enumerate()\n            .map(|(index, i)| (self.start_pc + (index as u64 * I::pc_step() as u64), i))\n    }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\n/// A sequence of basic blocks that can be made into an autoprecompile.\n/// A single basic block is represented as a SuperBlock with one element.\npub struct SuperBlock<I> {\n    blocks: Vec<BasicBlock<I>>,\n}\n\nimpl<I> From<BasicBlock<I>> for SuperBlock<I> {\n    fn from(basic_block: BasicBlock<I>) -> Self {\n        SuperBlock {\n            blocks: vec![basic_block],\n        }\n    }\n}\n\nimpl<I> From<Vec<BasicBlock<I>>> for SuperBlock<I> {\n    fn from(blocks: Vec<BasicBlock<I>>) -> Self {\n        assert!(!blocks.is_empty());\n        SuperBlock { blocks }\n    }\n}\n\nimpl<I> SuperBlock<I> {\n    pub fn is_basic_block(&self) 
-> bool {\n        self.blocks.len() == 1\n    }\n\n    pub fn try_as_basic_block(&self) -> Option<&BasicBlock<I>> {\n        if self.is_basic_block() {\n            Some(&self.blocks[0])\n        } else {\n            None\n        }\n    }\n}\n\nimpl<I> SuperBlock<I> {\n    /// Sequence of basic block start PCs, uniquely identifies this superblock\n    pub fn start_pcs(&self) -> Vec<u64> {\n        self.blocks.iter().map(|b| b.start_pc).collect()\n    }\n\n    /// For each basic block in the superblock, returns the index of its first instruction\n    /// (within the superblock's flat instruction list) together with the block's start PC.\n    pub fn instruction_indexed_start_pcs(&self) -> Vec<(usize, u64)> {\n        let mut idx = 0;\n        self.blocks\n            .iter()\n            .map(|b| {\n                let elem = (idx, b.start_pc);\n                idx += b.instructions.len();\n                elem\n            })\n            .collect()\n    }\n\n    /// Sequence of basic blocks composing this superblock\n    pub fn blocks(&self) -> impl Iterator<Item = &BasicBlock<I>> {\n        self.blocks.iter()\n    }\n\n    /// Apply fn to every instruction in this superblock, returning a new superblock with the transformed instructions.\n    pub fn map_instructions<F, I2>(self, f: F) -> SuperBlock<I2>\n    where\n        F: Fn(I) -> I2 + Clone,\n    {\n        SuperBlock {\n            blocks: self\n                .blocks\n                .into_iter()\n                .map(|b| BasicBlock {\n                    start_pc: b.start_pc,\n                    instructions: b.instructions.into_iter().map(f.clone()).collect(),\n                })\n                .collect(),\n        }\n    }\n}\n\nimpl<I: PcStep> SuperBlock<I> {\n    /// Returns an iterator over the program counters of the instructions in this block.\n    pub fn pcs(&self) -> impl Iterator<Item = u64> + '_ {\n        self.blocks.iter().flat_map(BasicBlock::pcs)\n    }\n\n    /// Sequence of 
instructions across all basic blocks in this superblock\n    pub fn instructions(&self) -> impl Iterator<Item = (u64, &I)> {\n        self.blocks.iter().flat_map(BasicBlock::instructions)\n    }\n\n    /// Parallel iterator over instructions across all basic blocks in this superblock\n    pub fn par_instructions(&self) -> impl IndexedParallelIterator<Item = (u64, &I)>\n    where\n        I: Sync,\n    {\n        // note: we need collect_vec() because parallel flat_map does not implement IndexedParallelIterator\n        self.instructions().collect_vec().into_par_iter()\n    }\n}\n\nimpl<I: Display> Display for SuperBlock<I> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        if let Some(bb) = self.try_as_basic_block() {\n            return bb.fmt(f);\n        }\n        writeln!(f, \"SuperBlock(\")?;\n        let mut insn_idx = 0;\n        for block in &self.blocks {\n            writeln!(f, \"   pc: {}, statements: [\", block.start_pc)?;\n            for instr in block.instructions.iter() {\n                writeln!(f, \"      instr {insn_idx:>3}:   {instr}\")?;\n                insn_idx += 1;\n            }\n            write!(f, \"   ],\")?;\n        }\n        write!(f, \")\")\n    }\n}\n\nimpl<I: Display> Display for BasicBlock<I> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        writeln!(f, \"BasicBlock(start_pc: {}, statements: [\", self.start_pc)?;\n        for (i, instr) in self.instructions.iter().enumerate() {\n            writeln!(f, \"   instr {i:>3}:   {instr}\")?;\n        }\n        write!(f, \"])\")\n    }\n}\n\npub trait Program<I: PcStep> {\n    /// Returns the base program counter.\n    fn base_pc(&self) -> u64;\n\n    /// Converts an instruction index to a program counter.\n    fn instruction_index_to_pc(&self, idx: usize) -> u64 {\n        self.base_pc() + (idx as u64 * I::pc_step() as u64)\n    }\n\n    /// Returns an iterator over the instructions in the program.\n    fn 
instructions(&self) -> Box<dyn Iterator<Item = I> + '_>;\n\n    /// Returns the number of instructions in the program.\n    fn length(&self) -> u32;\n}\n\npub trait PcStep {\n    fn pc_step() -> u32;\n}\n\npub trait Instruction<T>: Clone + Display + PcStep {\n    /// Returns a list of concrete values that the LHS of the PC lookup should be assigned to.\n    fn pc_lookup_row(&self, pc: u64) -> Vec<T>;\n}\n\n/// A sequence of basic blocks seen in the execution, identified by their start PCs.\n/// A run is interrupted by an invalid APC block (i.e., single instruction).\n#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]\npub struct ExecutionBasicBlockRun(pub Vec<u64>);\n\n/// A superblock present in the program, together with execution statistics (if PGO is enabled)\npub struct BlockAndStats<I> {\n    pub block: SuperBlock<I>,\n    /// amount of times this block appears in the execution\n    pub count: u32,\n}\n\n/// The result of superblock generation: a set of blocks with optional statistics for PGO.\npub struct ExecutionBlocks<I> {\n    /// Superblocks seen in the execution.\n    pub blocks: Vec<BlockAndStats<I>>,\n    /// Basic block runs in the execution (if PGO is enabled).\n    /// Each run is paired with the number of times it was seen.\n    pub execution_bb_runs: Vec<(ExecutionBasicBlockRun, u32)>,\n}\n\nimpl<I> ExecutionBlocks<I> {\n    pub fn new_without_pgo(blocks: Vec<SuperBlock<I>>) -> Self {\n        Self {\n            blocks: blocks\n                .into_iter()\n                .map(|block| BlockAndStats { block, count: 0 })\n                .collect(),\n            execution_bb_runs: vec![],\n        }\n    }\n}\n\n/// Find the starting indices of non-overlapping occurrences of `needle` in `haystack`.\n/// (e.g. 
`aba` is found at indices [0, 4] in `abababa`).\npub fn find_non_overlapping<T: Eq>(haystack: &[T], needle: &[T]) -> Vec<usize> {\n    let mut indices = vec![];\n    let mut pos = 0;\n    while pos + needle.len() <= haystack.len() {\n        if haystack[pos..pos + needle.len()] == needle[..] {\n            indices.push(pos);\n            pos += needle.len();\n        } else {\n            pos += 1;\n        }\n    }\n    indices\n}\n\n/// Find basic block runs in the execution.\n/// A run is interrupted upon hitting an invalid APC basic block (i.e., a single-instruction block).\n/// Returns a list of the runs, coupled with how many times each appears (a run may repeat in the execution).\nfn detect_execution_bb_runs<I>(\n    // start PC to basic blocks. Should include every basic block in the program, including those with len=1 (invalid APC)\n    start_pc_to_bb: &HashMap<u64, BasicBlock<I>>,\n    execution: &[u64],\n) -> Vec<(ExecutionBasicBlockRun, u32)> {\n    // Basic block runs in the execution.\n    // The same run can appear multiple times in the execution, so we keep a count using a hashmap.\n    // Each BB is identified by its starting PC.\n    let mut execution_bb_runs = BTreeMap::new();\n    let mut current_run = vec![];\n\n    let mut pos = 0;\n    while pos < execution.len() {\n        let pc = execution[pos];\n        let bb = start_pc_to_bb\n            .get(&pc)\n            .expect(\"PC in execution not part of any basic blocks\");\n        assert!(!bb.instructions.is_empty());\n        if bb.instructions.len() == 1 {\n            // if starting a single instruction BB (i.e., invalid for APC), end current run\n            if !current_run.is_empty() {\n                *execution_bb_runs\n                    .entry(std::mem::take(&mut current_run))\n                    .or_insert(0) += 1;\n            }\n        } else {\n            // extend the run with this basic block\n            current_run.push(pc);\n        }\n        // move to next bb\n      
  pos += bb.instructions.len();\n    }\n    if !current_run.is_empty() {\n        *execution_bb_runs\n            .entry(std::mem::take(&mut current_run))\n            .or_insert(0) += 1;\n    }\n\n    execution_bb_runs\n        .into_iter()\n        .map(|(run, count)| (ExecutionBasicBlockRun(run), count))\n        .collect()\n}\n\n/// Find all superblocks up to max_len in the basic block run and count their occurrences.\n/// Returns a map from superblock to its count.\nfn count_superblocks_in_run(\n    bb_run: &ExecutionBasicBlockRun,\n    max_len: usize,\n) -> BTreeMap<Vec<u64>, u32> {\n    let mut superblocks_in_run = BTreeMap::new();\n    // first, we identify the superblocks in this run\n    for len in 1..=std::cmp::min(max_len, bb_run.0.len()) {\n        superblocks_in_run.extend(bb_run.0.windows(len).map(|w| (w.to_vec(), 0)));\n    }\n    // then we count their occurrences\n    for (sblock, count) in superblocks_in_run.iter_mut() {\n        *count = find_non_overlapping(&bb_run.0, sblock).len() as u32;\n    }\n    superblocks_in_run\n}\n\n/// Find all superblocks up to max_len in the execution and count their occurrences.\n/// Returns a map from superblock to its count.\nfn count_superblocks_in_execution(\n    execution_bb_runs: &[(ExecutionBasicBlockRun, u32)],\n    max_len: usize,\n) -> BTreeMap<Vec<u64>, u32> {\n    let sblocks = execution_bb_runs\n        .par_iter()\n        .map(|(run, run_count)| {\n            count_superblocks_in_run(run, max_len)\n                .into_iter()\n                .map(|(sblock, sblock_occurrences_in_run)| {\n                    (sblock, sblock_occurrences_in_run * run_count)\n                })\n                .collect()\n        })\n        .reduce(BTreeMap::new, |mut sblocks_a, sblocks_b| {\n            // merge counts of b into a\n            for (sblock, count) in sblocks_b {\n                *sblocks_a.entry(sblock).or_insert(0) += count;\n            }\n            sblocks_a\n        });\n    sblocks\n}\n\n/// 
Detect basic blocks and superblocks present in the given execution.\n/// Returns the detected blocks, together with their execution information.\n/// Does not return invalid APC blocks (i.e., single instruction) and blocks that are never executed.\npub fn detect_superblocks<I: Clone + PcStep>(\n    cfg: &PowdrConfig,\n    // program execution as a sequence of PCs\n    execution_pc_list: &[u64],\n    // all program basic blocks (including single instruction ones), in no particular order\n    basic_blocks: Vec<BasicBlock<I>>,\n) -> ExecutionBlocks<I> {\n    tracing::info!(\n        \"Detecting superblocks with <= {} basic blocks, over the sequence of {} PCs\",\n        cfg.superblock_max_bb_count,\n        execution_pc_list.len()\n    );\n\n    let start = std::time::Instant::now();\n\n    // index basic blocks by start PC\n    let start_pc_to_bb: HashMap<_, _> = basic_blocks\n        .into_iter()\n        .map(|bb| (bb.start_pc, bb))\n        .collect();\n\n    let execution_bb_runs = detect_execution_bb_runs(&start_pc_to_bb, execution_pc_list);\n\n    let blocks_found =\n        count_superblocks_in_execution(&execution_bb_runs, cfg.superblock_max_bb_count as usize);\n\n    tracing::info!(\n        \"Found {} blocks in {} basic block runs. 
Took {:?}\",\n        blocks_found.len(),\n        execution_bb_runs.len(),\n        start.elapsed(),\n    );\n\n    // build the result\n    let mut block_stats = vec![];\n    let mut skipped_exec_count = 0;\n    let mut skipped_max_insn = 0;\n    blocks_found.into_iter().for_each(|(sblock_pcs, count)| {\n        let block = SuperBlock::from(\n            sblock_pcs\n                .iter()\n                .map(|start_pc| start_pc_to_bb[start_pc].clone())\n                .collect_vec(),\n        );\n\n        // skip superblocks that were executed less than the cutoff\n        if count < cfg.apc_exec_count_cutoff {\n            skipped_exec_count += 1;\n            return;\n        }\n\n        // skip superblocks with too many instructions\n        if block.instructions().count() > cfg.apc_max_instructions as usize {\n            skipped_max_insn += 1;\n            return;\n        }\n\n        block_stats.push(BlockAndStats { block, count });\n    });\n\n    tracing::info!(\n        \"Skipped blocks: {} to execution cutoff, {} to instruction count\",\n        skipped_exec_count,\n        skipped_max_insn,\n    );\n\n    tracing::info!(\n        \"Of the {} remaining blocks, {} are basic blocks and {} are superblocks\",\n        block_stats.len(),\n        block_stats\n            .iter()\n            .filter(|b| b.block.is_basic_block())\n            .count(),\n        block_stats\n            .iter()\n            .filter(|b| !b.block.is_basic_block())\n            .count(),\n    );\n\n    ExecutionBlocks {\n        blocks: block_stats,\n        execution_bb_runs,\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use std::collections::BTreeMap;\n\n    use crate::{DegreeBound, PowdrConfig};\n\n    use super::*;\n\n    #[derive(Clone)]\n    struct TestInstruction;\n\n    impl PcStep for TestInstruction {\n        fn pc_step() -> u32 {\n            1\n        }\n    }\n\n    #[test]\n    fn test_find_non_overlapping() {\n        assert_eq!(find_non_overlapping(&[1, 2, 
1, 2, 1], &[1, 2, 1]), vec![0]);\n        assert_eq!(find_non_overlapping(&[1, 2, 3], &[1, 2, 3]), vec![0]);\n        assert_eq!(find_non_overlapping(&[1, 2, 3], &[4]), vec![] as Vec<usize>);\n        assert_eq!(find_non_overlapping(&[1, 1, 1], &[1]), vec![0, 1, 2]);\n    }\n\n    #[test]\n    fn test_superblocks_in_run() {\n        let run = ExecutionBasicBlockRun(vec![4, 1, 2, 3, 5, 1, 2, 3, 4]);\n        let max_len = 3;\n        let counts = count_superblocks_in_run(&run, max_len);\n        assert_eq!(\n            counts.len(),\n            5 + // size 1\n            6 + // size 2\n            6 // size 3\n        );\n        assert_eq!(counts[&vec![1]], 2);\n        assert_eq!(counts[&vec![1, 2]], 2);\n        assert_eq!(counts[&vec![4]], 2);\n        assert_eq!(counts[&vec![5]], 1);\n        assert_eq!(counts[&vec![4, 1, 2]], 1);\n        assert_eq!(counts[&vec![1, 2, 3]], 2);\n        assert_eq!(counts[&vec![2, 3, 4]], 1);\n    }\n\n    #[test]\n    fn test_detect_superblocks_counts_and_execution_runs() {\n        let bb = |start_pc: u64, len: usize| BasicBlock {\n            start_pc,\n            instructions: vec![TestInstruction; len],\n        };\n\n        let cfg = PowdrConfig::new(\n            10,\n            0,\n            DegreeBound {\n                identities: 2,\n                bus_interactions: 2,\n            },\n        )\n        .with_superblocks(2, None, None);\n\n        let basic_blocks = vec![bb(100, 2), bb(200, 2), bb(300, 1), bb(400, 3), bb(500, 2)];\n\n        let execution = vec![100, 101, 200, 201, 300, 400, 401, 402, 100, 101, 200, 201];\n\n        let result = detect_superblocks(&cfg, &execution, basic_blocks);\n\n        assert_eq!(\n            result.execution_bb_runs,\n            vec![\n                (ExecutionBasicBlockRun(vec![100, 200]), 1),\n                (ExecutionBasicBlockRun(vec![400, 100, 200]), 1),\n            ]\n        );\n\n        let counts = result\n            .blocks\n            .into_iter()\n  
          .map(|entry| (entry.block.start_pcs(), entry.count))\n            .collect::<BTreeMap<_, _>>();\n\n        assert_eq!(counts.get(&vec![100]), Some(&2));\n        assert_eq!(counts.get(&vec![200]), Some(&2));\n        assert_eq!(counts.get(&vec![400]), Some(&1));\n        assert_eq!(counts.get(&vec![100, 200]), Some(&2));\n        assert_eq!(counts.get(&vec![400, 100]), Some(&1));\n        assert!(!counts.contains_key(&vec![300]));\n        assert!(!counts.contains_key(&vec![500]));\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/bus_map.rs",
    "content": "use serde::{Deserialize, Serialize};\nuse std::{collections::BTreeMap, fmt::Display};\n\n#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]\npub enum BusType<C> {\n    /// In a no-CPU architecture, instruction AIRs receive the current state and send the next state.\n    /// Typically the state would include the current time stamp and program counter, but powdr does\n    /// not make any assumptions about the state.\n    ExecutionBridge,\n    /// Memory bus for reading and writing memory.\n    Memory,\n    /// A lookup to fetch the instruction arguments for a given PC.\n    PcLookup,\n    /// Other types, specific to the VM integration. Powdr largely ignores these.\n    Other(C),\n}\n\nimpl<C: Display> std::fmt::Display for BusType<C> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        let name = match self {\n            BusType::ExecutionBridge => \"EXECUTION_BRIDGE\",\n            BusType::Memory => \"MEMORY\",\n            BusType::PcLookup => \"PC_LOOKUP\",\n            BusType::Other(other_type) => &other_type.to_string(),\n        };\n        write!(f, \"{name}\")\n    }\n}\n\n#[derive(Clone, Deserialize, Serialize)]\npub struct BusMap<C> {\n    bus_ids: BTreeMap<u64, BusType<C>>,\n}\n\nimpl<C: PartialEq + Eq + Clone + Display> BusMap<C> {\n    /// Construct a new `BusMap`, ensuring the same id is not used for different `BusType`s\n    pub fn from_id_type_pairs(pairs: impl IntoIterator<Item = (u64, BusType<C>)>) -> Self {\n        let mut bus_ids = BTreeMap::new();\n        for (k, v) in pairs.into_iter() {\n            bus_ids.entry(k).and_modify(|existing| {\n                if existing != &v {\n                    panic!(\"BusType `{v}` already exists under ID `{existing}`, cannot map to `{v}`\");\n                }\n            }).or_insert(v);\n        }\n\n        BusMap { bus_ids }\n    }\n\n    /// Lookup the `BusType` for a given ID.\n    pub fn bus_type(&self, bus_id: u64) 
-> BusType<C> {\n        self.bus_ids.get(&bus_id).cloned().unwrap_or_else(|| {\n            panic!(\"No bus type found for ID: {bus_id}\");\n        })\n    }\n\n    /// View the entire map.\n    pub fn all_types_by_id(&self) -> &BTreeMap<u64, BusType<C>> {\n        &self.bus_ids\n    }\n\n    /// Find the ID for a given `BusType` (if any).\n    pub fn get_bus_id(&self, bus_type: &BusType<C>) -> Option<u64> {\n        self.bus_ids\n            .iter()\n            .find_map(|(id, bus)| if bus == bus_type { Some(*id) } else { None })\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/constraint_optimizer.rs",
    "content": "use std::{\n    collections::{HashMap, HashSet},\n    fmt::Display,\n    hash::Hash,\n    iter::once,\n};\n\nuse itertools::Itertools;\nuse num_traits::Zero;\nuse powdr_constraint_solver::{\n    constraint_system::{\n        AlgebraicConstraint, BusInteractionHandler, ConstraintRef, ConstraintSystem,\n    },\n    grouped_expression::GroupedExpression,\n    indexed_constraint_system::IndexedConstraintSystem,\n    inliner::DegreeBound,\n    reachability::reachable_variables,\n    rule_based_optimizer::rule_based_optimization,\n    solver::Solver,\n};\nuse powdr_number::FieldElement;\nuse serde::Serialize;\n\nuse crate::{\n    export::ExportOptions,\n    low_degree_bus_interaction_optimizer::LowDegreeBusInteractionOptimizer,\n    memory_optimizer::{optimize_memory, MemoryBusInteraction},\n    range_constraint_optimizer::RangeConstraintHandler,\n    stats_logger::StatsLogger,\n};\n\n#[derive(Debug)]\npub enum Error {\n    ConstraintSolverError(powdr_constraint_solver::solver::Error),\n}\n\nimpl From<powdr_constraint_solver::solver::Error> for Error {\n    fn from(err: powdr_constraint_solver::solver::Error) -> Self {\n        Error::ConstraintSolverError(err)\n    }\n}\n\n/// Simplifies the constraints as much as possible.\n/// This function is similar to powdr_pilopt::qse_opt::run_qse_optimization, except it:\n/// - Runs on the entire constraint system, including bus interactions.\n/// - Panics if the solver fails.\n/// - Removes trivial constraints (e.g. 
`0 = 0` or bus interaction with multiplicity `0`)\n///   from the constraint system.\n#[allow(clippy::too_many_arguments)]\npub fn optimize_constraints<\n    P: FieldElement,\n    V: Ord + Clone + Eq + Hash + Display + Serialize,\n    M: MemoryBusInteraction<P, V>,\n>(\n    constraint_system: IndexedConstraintSystem<P, V>,\n    solver: &mut impl Solver<P, V>,\n    bus_interaction_handler: impl BusInteractionHandler<P>\n        + IsBusStateful<P>\n        + RangeConstraintHandler<P>\n        + Clone,\n    stats_logger: &mut StatsLogger,\n    memory_bus_id: Option<u64>,\n    degree_bound: DegreeBound,\n    new_var: &mut impl FnMut(&str) -> V,\n    export_options: &mut ExportOptions,\n) -> Result<ConstraintSystem<P, V>, Error> {\n    let constraint_system = solver_based_optimization(constraint_system, solver, export_options)?;\n    stats_logger.log(\"solver-based optimization\", &constraint_system);\n    export_options.export_optimizer_inner_constraint_system(constraint_system.system(), \"solver\");\n\n    let constraint_system = remove_trivial_constraints(constraint_system);\n    stats_logger.log(\"removing trivial constraints\", &constraint_system);\n    export_options\n        .export_optimizer_inner_constraint_system(constraint_system.system(), \"remove_trivial\");\n\n    let constraint_system =\n        remove_free_variables(constraint_system, solver, bus_interaction_handler.clone());\n    stats_logger.log(\"removing free variables\", &constraint_system);\n    export_options\n        .export_optimizer_inner_constraint_system(constraint_system.system(), \"remove_free\");\n\n    let constraint_system =\n        remove_disconnected_columns(constraint_system, solver, bus_interaction_handler.clone());\n    stats_logger.log(\"removing disconnected columns\", &constraint_system);\n    export_options.export_optimizer_inner_constraint_system(\n        constraint_system.system(),\n        \"remove_disconnected\",\n    );\n\n    let constraint_system = 
trivial_simplifications(\n        constraint_system,\n        bus_interaction_handler.clone(),\n        stats_logger,\n    );\n    export_options\n        .export_optimizer_inner_constraint_system(constraint_system.system(), \"trivial_simp\");\n\n    let (constraint_system, assignments) = rule_based_optimization(\n        constraint_system,\n        &*solver,\n        bus_interaction_handler.clone(),\n        new_var,\n        // No degree bound given, i.e. only perform replacements that\n        // do not increase the degree.\n        None,\n    );\n    solver.add_algebraic_constraints(assignments.iter().map(|(v, val)| {\n        AlgebraicConstraint::assert_eq(\n            GroupedExpression::from_unknown_variable(v.clone()),\n            val.clone(),\n        )\n    }));\n    stats_logger.log(\"rule-based optimization\", &constraint_system);\n    export_options.register_substituted_variables(assignments);\n    export_options\n        .export_optimizer_inner_constraint_system(constraint_system.system(), \"rule_based\");\n\n    // At this point, we throw away the index and only keep the constraint system, since the rest of the optimisations are defined on the system alone\n    let constraint_system: ConstraintSystem<P, V> = constraint_system.into();\n\n    let constraint_system = substitute_bus_interaction_fields(solver, constraint_system);\n    stats_logger.log(\n        \"substituting fields in bus interactions\",\n        &constraint_system,\n    );\n    export_options.export_optimizer_inner_constraint_system(\n        &constraint_system,\n        \"substitute_bus_interactio_fields\",\n    );\n\n    let constraint_system = optimize_memory::<_, _, M>(constraint_system, solver, memory_bus_id);\n\n    stats_logger.log(\"memory optimization\", &constraint_system);\n    export_options.export_optimizer_inner_constraint_system(&constraint_system, \"memory\");\n\n    let constraint_system = LowDegreeBusInteractionOptimizer::new(\n        solver,\n        
bus_interaction_handler.clone(),\n        degree_bound,\n    )\n    .optimize(constraint_system);\n    stats_logger.log(\n        \"low degree bus interaction optimization\",\n        &constraint_system,\n    );\n    export_options.export_optimizer_inner_constraint_system(&constraint_system, \"low_degree_bus\");\n\n    Ok(constraint_system)\n}\n\n/// Tries to replace each bus interaction field by a constant, if that expression\n/// is known to be constant to the solver.\n/// For each such field, also adds an algebraic constraint asserting that the field\n/// expression is equal to the constant, because this is needed for soundness in some\n/// situations.\n/// For simple situations, this constraint will be optimizer away in subsequent stages.\nfn substitute_bus_interaction_fields<P: FieldElement, V: Ord + Clone + Eq + Hash + Display>(\n    solver: &mut impl Solver<P, V>,\n    mut constraint_system: ConstraintSystem<P, V>,\n) -> ConstraintSystem<P, V> {\n    for field in constraint_system\n        .bus_interactions\n        .iter_mut()\n        .flat_map(|bi| bi.fields_mut())\n    {\n        // If we have an expression of the form `a * x + b` that is known to be constant,\n        // then we would already know the value of `x`.\n        if field.is_affine() && field.linear_components().len() <= 1 {\n            continue;\n        }\n        if let Some(v) = solver.try_to_equivalent_constant(field) {\n            let constr =\n                AlgebraicConstraint::assert_eq(field.clone(), GroupedExpression::from_number(v));\n            *field = GroupedExpression::from_number(v);\n            constraint_system.algebraic_constraints.push(constr);\n        }\n    }\n    constraint_system\n}\n\n/// Performs some very easy simplifications that only remove constraints.\npub fn trivial_simplifications<P: FieldElement, V: Ord + Clone + Eq + Hash + Display>(\n    constraint_system: IndexedConstraintSystem<P, V>,\n    bus_interaction_handler: impl BusInteractionHandler<P>\n    
    + IsBusStateful<P>\n        + RangeConstraintHandler<P>\n        + Clone,\n    stats_logger: &mut StatsLogger,\n) -> IndexedConstraintSystem<P, V> {\n    let constraint_system = remove_trivial_constraints(constraint_system);\n    stats_logger.log(\"removing trivial constraints\", &constraint_system);\n\n    let constraint_system =\n        remove_equal_bus_interactions(constraint_system, bus_interaction_handler.clone());\n    stats_logger.log(\"removing equal bus interactions\", &constraint_system);\n\n    let constraint_system = remove_redundant_constraints(constraint_system);\n    stats_logger.log(\"removing redundant constraints\", &constraint_system);\n\n    let constraint_system = remove_unreferenced_derived_variables(constraint_system);\n    stats_logger.log(\n        \"removing unreferenced derived variables\",\n        &constraint_system,\n    );\n\n    constraint_system\n}\n\nfn solver_based_optimization<T: FieldElement, V: Clone + Ord + Hash + Display + Serialize>(\n    mut constraint_system: IndexedConstraintSystem<T, V>,\n    solver: &mut impl Solver<T, V>,\n    export_options: &mut ExportOptions,\n) -> Result<IndexedConstraintSystem<T, V>, Error> {\n    let assignments = solver.solve()?;\n    log::trace!(\"Solver figured out the following assignments:\");\n    if log::log_enabled!(log::Level::Trace) {\n        for (var, value) in assignments.iter() {\n            log::trace!(\"  {var} = {value}\");\n        }\n    }\n    // Assert that all substitutions are affine so that the degree\n    // does not increase.\n    assert!(assignments.iter().all(|(_, expr)| expr.is_affine()));\n    export_options.register_substituted_variables(\n        assignments\n            .iter()\n            .map(|(v, expr)| (v.clone(), expr.clone())),\n    );\n    constraint_system.apply_substitutions(assignments);\n\n    // Now try to replace bus interaction fields that the solver knows to be constant\n    let mut bus_interactions = vec![];\n    let mut 
new_algebraic_constraints = vec![];\n    // We remove all bus interactions because we do not want to change the order.\n    constraint_system.retain_bus_interactions(|bus_interaction| {\n        let mut modified = false;\n        let replacement = bus_interaction\n            .fields()\n            .map(|field| {\n                if let Some(n) = try_replace_by_number(field, solver) {\n                    modified = true;\n                    new_algebraic_constraints\n                        .push(AlgebraicConstraint::assert_eq(n.clone(), field.clone()));\n                    n\n                } else {\n                    field.clone()\n                }\n            })\n            .collect();\n        if modified {\n            log::trace!(\"Replacing bus interaction {bus_interaction} with {replacement}\");\n        }\n        bus_interactions.push(replacement);\n        false\n    });\n    constraint_system.add_bus_interactions(bus_interactions);\n    constraint_system.add_algebraic_constraints(new_algebraic_constraints);\n    Ok(constraint_system)\n}\n\n/// Tries to find a number that is equivalent to the expression and returns it\n/// as a GroupedExpression.\n/// Returns None if it was unsuccessful or if the expression already is a number.\nfn try_replace_by_number<T: FieldElement, V: Clone + Ord + Hash + Display>(\n    expr: &GroupedExpression<T, V>,\n    solver: &impl Solver<T, V>,\n) -> Option<GroupedExpression<T, V>> {\n    if expr.try_to_number().is_some() {\n        return None;\n    }\n    Some(GroupedExpression::from_number(\n        solver\n            .range_constraint_for_expression(expr)\n            .try_to_single_value()?,\n    ))\n}\n\n/// Removes free variables from the constraint system, under some conditions.\n///\n/// Motivation: Suppose there is a constraint `2 * foo = bar` and `foo` only appears in this constraint.\n/// Then, if we assume that all constraints are satisfiable, the prover would be able to satisfy it for\n/// any value of 
`bar` by solving for `foo`. Therefore, the constraint can be removed.\n/// The same would be true for a *stateless* bus interaction, e.g. `[foo * bar] in [BYTES]`.\n///\n/// This function removes *some* constraints like this (see TODOs below).\nfn remove_free_variables<T: FieldElement, V: Clone + Ord + Eq + Hash + Display>(\n    mut constraint_system: IndexedConstraintSystem<T, V>,\n    solver: &mut impl Solver<T, V>,\n    bus_interaction_handler: impl IsBusStateful<T> + Clone,\n) -> IndexedConstraintSystem<T, V> {\n    let all_variables = constraint_system\n        .system()\n        .referenced_unknown_variables()\n        .cloned()\n        .collect::<HashSet<_>>();\n\n    let variables_to_delete = all_variables\n        .iter()\n        // Find variables that are referenced in exactly one constraint\n        .filter_map(|variable| {\n            constraint_system\n                .constraints_referencing_variables(once(variable))\n                .exactly_one()\n                .ok()\n                .map(|constraint| (variable.clone(), constraint))\n        })\n        .filter(|(variable, constraint)| match constraint {\n            // Remove the algebraic constraint if we can solve for the variable.\n            ConstraintRef::AlgebraicConstraint(constr) => {\n                can_always_be_satisfied_via_free_variable(*constr, variable)\n            }\n            ConstraintRef::BusInteraction(bus_interaction) => {\n                let bus_id = bus_interaction.bus_id.try_to_number().unwrap();\n                // Only stateless bus interactions can be removed.\n                let is_stateless = !bus_interaction_handler.is_stateful(bus_id);\n                // TODO: This is overly strict.\n                // We assume that the bus interaction is satisfiable. 
Given that it is, there\n                // will be at least one assignment of the payload fields that satisfies it.\n                // If the prover has the freedom to choose each payload field, it can always find\n                // a satisfying assignment.\n                // This could be generalized to multiple unknown fields, but it would be more complicated,\n                // because *each* field would need a *different* free variable.\n                let has_one_unknown_field = bus_interaction\n                    .payload\n                    .iter()\n                    .filter(|field| field.try_to_number().is_none())\n                    .count()\n                    == 1;\n                // If the expression is linear in the free variable, the prover would be able to solve for it\n                // to satisfy the constraint. Otherwise, this is not necessarily the case.\n                // Note that if the above check is true, there will only be one field of degree > 0.\n                let all_degrees_at_most_one = bus_interaction\n                    .payload\n                    .iter()\n                    .all(|field| field.degree_of_variable(variable) <= 1);\n                is_stateless && has_one_unknown_field && all_degrees_at_most_one\n            }\n        })\n        .map(|(variable, _constraint)| variable.clone())\n        .collect::<HashSet<_>>();\n\n    let variables_to_keep = all_variables\n        .difference(&variables_to_delete)\n        .cloned()\n        .collect::<HashSet<_>>();\n\n    solver.retain_variables(&variables_to_keep);\n\n    constraint_system.retain_algebraic_constraints(|constraint| {\n        constraint\n            .referenced_unknown_variables()\n            .all(|var| variables_to_keep.contains(var))\n    });\n\n    constraint_system.retain_bus_interactions(|bus_interaction| {\n        let bus_id = bus_interaction.bus_id.try_to_number().unwrap();\n        bus_interaction_handler.is_stateful(bus_id)\n          
  || bus_interaction\n                .referenced_unknown_variables()\n                .all(|var| variables_to_keep.contains(var))\n    });\n\n    constraint_system\n}\n\n/// Returns true if the given constraint can always be made to be satisfied by setting the\n/// free variable, regardless of the values of other variables.\nfn can_always_be_satisfied_via_free_variable<\n    T: FieldElement,\n    V: Clone + Hash + Eq + Ord + Display,\n>(\n    constraint: AlgebraicConstraint<&GroupedExpression<T, V>>,\n    free_variable: &V,\n) -> bool {\n    if constraint.try_solve_for(free_variable).is_some() {\n        true\n    } else if let Some((left, right)) = constraint.expression.try_as_single_product() {\n        // If either `left` or `right` can be set to 0, the constraint is satisfied.\n        can_always_be_satisfied_via_free_variable(AlgebraicConstraint::from(left), free_variable)\n            || can_always_be_satisfied_via_free_variable(\n                AlgebraicConstraint::from(right),\n                free_variable,\n            )\n    } else {\n        false\n    }\n}\n\n/// Removes any columns that are not connected to *stateful* bus interactions (e.g. memory),\n/// because those are the only way to interact with the rest of the zkVM (e.g. other\n/// instructions).\n/// We assume that the input constraint system is satisfiable. 
Because the removed constraints\n/// are not connected to rest of the system, the prover can always satisfy them, so removing\n/// them is safe.\n/// Note that if there were unsatisfiable constraints, they might also be removed, which would\n/// change the statement being proven.\npub fn remove_disconnected_columns<T: FieldElement, V: Clone + Ord + Eq + Hash + Display>(\n    mut constraint_system: IndexedConstraintSystem<T, V>,\n    solver: &mut impl Solver<T, V>,\n    bus_interaction_handler: impl IsBusStateful<T> + Clone,\n) -> IndexedConstraintSystem<T, V> {\n    let initial_variables = variables_in_stateful_bus_interactions(\n        constraint_system.system(),\n        bus_interaction_handler.clone(),\n    )\n    .cloned();\n    let variables_to_keep = reachable_variables(initial_variables, &constraint_system);\n\n    solver.retain_variables(&variables_to_keep);\n\n    constraint_system.retain_algebraic_constraints(|constraint| {\n        constraint\n            .referenced_unknown_variables()\n            .any(|var| variables_to_keep.contains(var))\n    });\n\n    constraint_system.retain_bus_interactions(|bus_interaction| {\n        let bus_id = bus_interaction.bus_id.try_to_number().unwrap();\n        let has_vars_to_keep = bus_interaction\n            .referenced_unknown_variables()\n            .any(|var| variables_to_keep.contains(var));\n        // has_vars_to_keep would also be false for bus interactions containing only\n        // constants, so we also check again whether it is stateful.\n        bus_interaction_handler.is_stateful(bus_id) || has_vars_to_keep\n    });\n\n    constraint_system\n}\n\n/// Returns an iterator over all variables that are referenced in stateful bus interactions.\nfn variables_in_stateful_bus_interactions<'a, P: FieldElement, V: Ord + Clone + Eq + Hash>(\n    constraint_system: &'a ConstraintSystem<P, V>,\n    bus_interaction_handler: impl IsBusStateful<P> + 'a,\n) -> impl Iterator<Item = &'a V> {\n    constraint_system\n    
    .bus_interactions\n        .iter()\n        .filter(move |bus_interaction| {\n            let bus_id = bus_interaction.bus_id.try_to_number().unwrap();\n            bus_interaction_handler.is_stateful(bus_id)\n        })\n        .flat_map(|bus_interaction| bus_interaction.referenced_unknown_variables())\n}\n\nfn remove_trivial_constraints<P: FieldElement, V: PartialEq + Clone + Hash + Ord>(\n    mut constraint_system: IndexedConstraintSystem<P, V>,\n) -> IndexedConstraintSystem<P, V> {\n    constraint_system.retain_algebraic_constraints(|constraint| !constraint.is_redundant());\n    constraint_system\n        .retain_bus_interactions(|bus_interaction| !bus_interaction.multiplicity.is_zero());\n    constraint_system\n}\n\nfn remove_equal_bus_interactions<P: FieldElement, V: Ord + Clone + Eq + Hash>(\n    mut constraint_system: IndexedConstraintSystem<P, V>,\n    bus_interaction_handler: impl IsBusStateful<P>,\n) -> IndexedConstraintSystem<P, V> {\n    let mut seen = HashSet::new();\n    constraint_system.retain_bus_interactions(|interaction| {\n        // We only touch interactions with non-stateful buses.\n        if let Some(bus_id) = interaction.bus_id.try_to_number() {\n            if !bus_interaction_handler.is_stateful(bus_id) && !seen.insert(interaction.clone()) {\n                return false;\n            }\n        }\n        true\n    });\n    constraint_system\n}\n\npub trait IsBusStateful<T: FieldElement> {\n    /// Returns true if the bus with the given ID is stateful, i.e., whether there is any\n    /// interaction with the rest of the zkVM. Examples of stateful buses are memory and\n    /// execution bridge. 
Examples of non-stateful buses are fixed lookups.\n    fn is_stateful(&self, bus_id: T) -> bool;\n}\n\n/// Removes constraints that are factors of other constraints.\nfn remove_redundant_constraints<P: FieldElement, V: Clone + Ord + Hash + Display>(\n    constraint_system: IndexedConstraintSystem<P, V>,\n) -> IndexedConstraintSystem<P, V> {\n    // First, remove duplicate factors from the constraints.\n    let mut constraint_system = remove_duplicate_factors(constraint_system);\n\n    // Maps each factor to the set of constraints that contain it.\n    let mut constraints_by_factor = HashMap::new();\n    // Turns each constraint into a set of factors.\n    let constraints_as_factors = constraint_system\n        .algebraic_constraints()\n        .iter()\n        .enumerate()\n        .map(|(i, c)| {\n            let factors = c.expression.to_factors();\n            assert!(!factors.is_empty());\n            for f in &factors {\n                constraints_by_factor\n                    .entry(f.clone())\n                    .or_insert_with(HashSet::new)\n                    .insert(i);\n            }\n            factors\n        })\n        .collect_vec();\n\n    let mut redundant_constraints = HashSet::<usize>::new();\n    for (i, factors) in constraints_as_factors.iter().enumerate() {\n        // Go through all factors `f` and compute the intersection of all\n        // constraints in `constraints_by_factor[f]`. These constraints\n        // are multiples of the current constraint, so they are redundant\n        // if they are proper multiples, i.e. 
have at least one more factor.\n        let mut redundant = factors\n            .iter()\n            .map(|f| constraints_by_factor[f].clone())\n            .reduce(|a, b| a.intersection(&b).copied().collect())\n            .unwrap();\n        // Only remove constraints that have the same factors if their index\n        // is larger than the current one.\n        // Counting the factors is sufficient here.\n        redundant.retain(|j| {\n            let other_factors = &constraints_as_factors[*j];\n            // This assertion can fail if `remove_duplicate_factors` is not called at the start of this function.\n            assert!(other_factors.len() >= factors.len());\n            other_factors.len() > factors.len() || *j > i\n        });\n        redundant_constraints.extend(redundant);\n    }\n    let mut counter = 0;\n    constraint_system.retain_algebraic_constraints(|_| {\n        let retain = !redundant_constraints.contains(&counter);\n        counter += 1;\n        retain\n    });\n    constraint_system\n}\n\n/// If a constraint contains the same factor multiple times removes the duplicate factors.\nfn remove_duplicate_factors<P: FieldElement, V: Clone + Ord + Hash + Display>(\n    mut constraint_system: IndexedConstraintSystem<P, V>,\n) -> IndexedConstraintSystem<P, V> {\n    let mut constraint_to_add = vec![];\n    constraint_system.retain_algebraic_constraints(|constraint| {\n        let factors = constraint.expression.to_factors();\n        assert!(!factors.is_empty());\n        let factor_count = factors.len();\n        let unique_factors = factors.into_iter().unique().collect_vec();\n        if unique_factors.len() < factor_count {\n            constraint_to_add.push(AlgebraicConstraint::assert_zero(\n                unique_factors\n                    .into_iter()\n                    .reduce(|acc, factor| acc * factor)\n                    .unwrap(),\n            ));\n            false\n        } else {\n            true\n        }\n    });\n    
constraint_system.add_algebraic_constraints(constraint_to_add);\n    constraint_system\n}\n\nfn remove_unreferenced_derived_variables<P: FieldElement, V: Clone + Ord + Hash + Display>(\n    mut constraint_system: IndexedConstraintSystem<P, V>,\n) -> IndexedConstraintSystem<P, V> {\n    // Note that `referenced_unknown_variables` only returns variables referenced in constraints.\n    let referenced_variables = constraint_system\n        .referenced_unknown_variables()\n        .cloned()\n        .collect::<HashSet<_>>();\n\n    constraint_system.retain_derived_variables(|derived_var| {\n        referenced_variables.contains(&derived_var.variable)\n    });\n    constraint_system\n}\n"
  },
  {
    "path": "autoprecompiles/src/empirical_constraints.rs",
    "content": "use std::collections::btree_map::Entry;\nuse std::collections::BTreeMap;\nuse std::fmt::Debug;\nuse std::hash::Hash;\n\nuse itertools::Itertools;\nuse serde::{Deserialize, Serialize};\n\npub use crate::equivalence_classes::{EquivalenceClass, Partition};\n\nuse crate::{\n    adapter::Adapter,\n    blocks::{PcStep, SuperBlock},\n    expression::{AlgebraicExpression, AlgebraicReference},\n    optimistic::{\n        algebraic_references::BlockCellAlgebraicReferenceMapper,\n        config::optimistic_precompile_config,\n    },\n    symbolic_machine::SymbolicConstraint,\n};\n\n/// \"Constraints\" that were inferred from execution statistics. They hold empirically\n/// (most of the time), but are not guaranteed to hold in all cases.\n#[derive(Serialize, Deserialize, Default, Debug)]\npub struct EmpiricalConstraints {\n    /// For each program counter, the range constraints for each column.\n    /// The range might not hold in 100% of cases.\n    pub column_ranges_by_pc: BTreeMap<u32, Vec<(u32, u32)>>,\n    /// For each basic block (identified by its starting PC), the equivalence classes of columns.\n    pub equivalence_classes_by_block: BTreeMap<u64, Partition<BlockCell>>,\n    pub debug_info: DebugInfo,\n    /// Count of how many times each program counter was executed in the sampled executions.\n    /// This can be used to set a threshold for applying constraints only to frequently executed PCs.\n    pub pc_counts: BTreeMap<u32, u64>,\n}\n\n/// Empirical constraints for a specific basic block.\npub struct BlockEmpiricalConstraints {\n    /// The pcs this block executes\n    pcs: Vec<u64>,\n    /// For each program counter in the block, the range constraints for each column, if any.\n    /// The range might not hold in 100% of cases.\n    pub column_ranges_by_pc: BTreeMap<u32, BTreeMap<usize, (u32, u32)>>,\n    /// The equivalence classes of columns in the block.\n    pub equivalence_classes: Partition<BlockCell>,\n}\n\n/// Debug information mapping AIR 
ids to program counters and column names.\n#[derive(Serialize, Deserialize, Default, Debug, Clone)]\npub struct DebugInfo {\n    /// Mapping from program counter to the ID of the AIR implementing this instruction.\n    pub air_id_by_pc: BTreeMap<u32, usize>,\n    /// Mapping from AIR ID to column names.\n    pub column_names_by_air_id: BTreeMap<usize, Vec<String>>,\n}\n\nimpl EmpiricalConstraints {\n    /// Combines the empirical constraints with another set of empirical constraints.\n    /// The resulting constraints are the most conservative combination of both.\n    pub fn combine_with(&mut self, other: EmpiricalConstraints) {\n        // Combine column ranges by PC\n        for (pc, ranges) in other.column_ranges_by_pc {\n            self.column_ranges_by_pc\n                .entry(pc)\n                .and_modify(|existing_ranges| {\n                    for (i, (min, max)) in ranges.iter().enumerate() {\n                        if let Some((existing_min, existing_max)) = existing_ranges.get_mut(i) {\n                            *existing_min = (*existing_min).min(*min);\n                            *existing_max = (*existing_max).max(*max);\n                        }\n                    }\n                })\n                .or_insert(ranges);\n        }\n\n        // Combine equivalence classes by block\n        for (block_pc, classes) in other.equivalence_classes_by_block {\n            // Compute the new equivalence classes for this block\n            let new_equivalence_class = match self.equivalence_classes_by_block.entry(block_pc) {\n                Entry::Vacant(_) => classes,\n                Entry::Occupied(e) => e.remove().intersected_with(classes),\n            };\n            assert!(self\n                .equivalence_classes_by_block\n                .insert(block_pc, new_equivalence_class)\n                .is_none());\n        }\n\n        self.debug_info.combine_with(other.debug_info);\n\n        // Combine pc counts\n        for (pc, count) 
in other.pc_counts {\n            *self.pc_counts.entry(pc).or_insert(0) += count;\n        }\n    }\n\n    /// Extracts the empirical constraints relevant for a specific basic block.\n    pub fn for_block<I: PcStep>(&self, block: &SuperBlock<I>) -> BlockEmpiricalConstraints {\n        let pcs = block.pcs().collect_vec();\n\n        let column_ranges_by_pc = pcs\n            .iter()\n            .filter_map(|pc| {\n                self.column_ranges_by_pc\n                    .get(&(*pc as u32))\n                    .cloned()\n                    .map(|ranges| (*pc as u32, ranges.into_iter().enumerate().collect()))\n            })\n            .collect();\n\n        let bb_independent_equivalence_classes = block\n            .instruction_indexed_start_pcs()\n            .into_iter()\n            .map(|(insn_idx, bb_pc)| {\n                self.equivalence_classes_by_block\n                    .get(&bb_pc)\n                    .cloned()\n                    .unwrap_or_default()\n                    // shift instructions indices according to index in super block\n                    .map_elements(|mut elem| {\n                        elem.instruction_idx += insn_idx;\n                        elem\n                    })\n            });\n\n        let equivalence_classes = bb_independent_equivalence_classes\n            .into_iter()\n            .reduce(|bb1, bb2| bb1.combine(bb2))\n            .unwrap();\n\n        BlockEmpiricalConstraints {\n            pcs,\n            column_ranges_by_pc,\n            equivalence_classes,\n        }\n    }\n\n    /// Returns a new `EmpiricalConstraints` instance containing only the constraints\n    /// (both range and equivalence) that are based on a number of executions greater\n    /// than or equal to a threshold passed in the `POWDR_OP_EXECUTION_COUNT_THRESHOLD`\n    /// environment variable (or `DEFAULT_EXECUTION_COUNT_THRESHOLD`).\n    /// This should mitigate overfitting to rare execution paths.\n    pub fn 
apply_pc_threshold(self) -> Self {\n        let threshold = optimistic_precompile_config().execution_count_threshold;\n        EmpiricalConstraints {\n            column_ranges_by_pc: self\n                .column_ranges_by_pc\n                .into_iter()\n                .filter(|(pc, _)| self.pc_counts.get(pc).cloned().unwrap_or(0) >= threshold)\n                .collect(),\n            equivalence_classes_by_block: self\n                .equivalence_classes_by_block\n                .into_iter()\n                .filter(|&(block_pc, _)| {\n                    // For equivalence classes, it is enough to check the pc_counts of the first\n                    // instruction in the block, as all other instruction will be executed at least\n                    // as often.\n                    self.pc_counts.get(&(block_pc as u32)).cloned().unwrap_or(0) >= threshold\n                })\n                .collect(),\n            pc_counts: self.pc_counts.clone(),\n            debug_info: self.debug_info.clone(),\n        }\n    }\n}\n\nimpl BlockEmpiricalConstraints {\n    /// Returns a new `BlockEmpiricalConstraints` instance containing only the\n    /// constraints (both range and equivalence) for which the provided\n    /// predicate on `BlockCell`s returns true.\n    pub fn filtered(self, predicate: impl Fn(&BlockCell) -> bool) -> Self {\n        let column_ranges_by_pc = self\n            .column_ranges_by_pc\n            .into_iter()\n            .map(|(pc, ranges)| {\n                // with superblocks, there might be multiple instructions with the same PC\n                let pc_instruction_indices = self\n                    .pcs\n                    .iter()\n                    .enumerate()\n                    .filter(|(_, &opc)| opc == pc as u64)\n                    .map(|(idx, _)| idx)\n                    .collect_vec();\n                let ranges = ranges\n                    .into_iter()\n                    .enumerate()\n                    
.filter_map(|(col_idx, range)| {\n                        // check that the predicate holds in all matching instructions\n                        pc_instruction_indices\n                            .iter()\n                            .all(|idx| predicate(&BlockCell::new(*idx, col_idx)))\n                            .then_some(range)\n                    })\n                    .collect();\n                (pc, ranges)\n            })\n            .collect();\n        let equivalence_classes = self\n            .equivalence_classes\n            .to_classes()\n            .into_iter()\n            .map(|class| {\n                // Remove cells from the equivalence class for which the predicate does not hold\n                class\n                    .into_iter()\n                    .filter(|cell| predicate(cell))\n                    .collect_vec()\n            })\n            .collect();\n        Self {\n            pcs: self.pcs,\n            column_ranges_by_pc,\n            equivalence_classes,\n        }\n    }\n}\n\nimpl DebugInfo {\n    pub fn combine_with(&mut self, other: DebugInfo) {\n        merge_maps(&mut self.air_id_by_pc, other.air_id_by_pc);\n        merge_maps(\n            &mut self.column_names_by_air_id,\n            other.column_names_by_air_id,\n        );\n    }\n\n    pub fn take(&mut self) -> Self {\n        Self {\n            air_id_by_pc: std::mem::take(&mut self.air_id_by_pc),\n            column_names_by_air_id: std::mem::take(&mut self.column_names_by_air_id),\n        }\n    }\n}\n\n/// Merges two maps, asserting that existing keys map to equal values.\nfn merge_maps<K: Ord, V: Eq + Debug>(map1: &mut BTreeMap<K, V>, map2: BTreeMap<K, V>) {\n    for (key, value) in map2 {\n        match map1.entry(key) {\n            Entry::Vacant(v) => {\n                v.insert(value);\n            }\n            Entry::Occupied(existing) => {\n                assert_eq!(*existing.get(), value,);\n            }\n        }\n    
}\n}\n\n#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Copy, Clone)]\npub struct BlockCell {\n    /// Instruction index within the basic block\n    instruction_idx: usize,\n    /// The column index within the instruction air\n    column_idx: usize,\n}\n\nimpl BlockCell {\n    pub fn new(instruction_idx: usize, column_idx: usize) -> Self {\n        Self {\n            instruction_idx,\n            column_idx,\n        }\n    }\n}\n\n/// Generates symbolic constraints based on empirical constraints for a given block.\npub struct ConstraintGenerator<'a, A: Adapter> {\n    empirical_constraints: BlockEmpiricalConstraints,\n    algebraic_references: BlockCellAlgebraicReferenceMapper,\n    block: &'a SuperBlock<A::Instruction>,\n}\n\nimpl<'a, A: Adapter> ConstraintGenerator<'a, A> {\n    /// Creates a new `ConstraintGenerator`.\n    ///\n    /// Arguments:\n    /// - `empirical_constraints`: The empirical constraints to use.\n    /// - `algebraic_references`: The mapping from block cells to algebraic references.\n    /// - `block`: The block for which to generate constraints.\n    pub fn new(\n        empirical_constraints: BlockEmpiricalConstraints,\n        algebraic_references: BlockCellAlgebraicReferenceMapper,\n        block: &'a SuperBlock<A::Instruction>,\n    ) -> Self {\n        Self {\n            empirical_constraints,\n            algebraic_references,\n            block,\n        }\n    }\n\n    fn get_algebraic_reference(&self, block_cell: &BlockCell) -> AlgebraicReference {\n        self.algebraic_references\n            .get_algebraic_reference(block_cell)\n            .cloned()\n            .unwrap_or_else(|| {\n                panic!(\n                    \"Missing reference in block {:?}: {block_cell:?}\",\n                    self.block.start_pcs()\n                )\n            })\n    }\n\n    /// Generates all equality constraints\n    pub fn generate_constraints(&self) -> Vec<EqualityConstraint<A::PowdrField>> {\n   
     self.range_constraints()\n            .into_iter()\n            .chain(self.equivalence_constraints())\n            .collect_vec()\n    }\n\n    /// Generates constraints of the form `var = <value>` for columns whose value is\n    /// always the same empirically.\n    // TODO: We could also enforce looser range constraints.\n    // This is a bit more complicated though, because we'd have to add bus interactions\n    // to actually enforce them.\n    fn range_constraints(&self) -> Vec<EqualityConstraint<A::PowdrField>> {\n        let mut constraints = Vec::new();\n\n        for (idx, pc) in self.block.pcs().enumerate() {\n            let pc = pc as u32;\n            let Some(range_constraints) = self.empirical_constraints.column_ranges_by_pc.get(&pc)\n            else {\n                continue;\n            };\n            for (col_index, (min, max)) in range_constraints {\n                let block_cell = BlockCell::new(idx, *col_index);\n                if min == max {\n                    let value = A::PowdrField::from(*min as u64);\n                    let reference = self.get_algebraic_reference(&block_cell);\n\n                    constraints.push(EqualityConstraint {\n                        left: EqualityExpression::Reference(reference),\n                        right: EqualityExpression::Number(value),\n                    });\n                }\n            }\n        }\n\n        constraints\n    }\n\n    fn equivalence_constraints(&self) -> Vec<EqualityConstraint<A::PowdrField>> {\n        let mut constraints = Vec::new();\n\n        for equivalence_class in self.empirical_constraints.equivalence_classes.to_classes() {\n            let first = equivalence_class.first().unwrap();\n            let first_ref = self.get_algebraic_reference(first);\n            for other in equivalence_class.iter().skip(1) {\n                let other_ref = self.get_algebraic_reference(other);\n                constraints.push(EqualityConstraint {\n                    
left: EqualityExpression::Reference(first_ref.clone()),\n                    right: EqualityExpression::Reference(other_ref.clone()),\n                });\n            }\n        }\n\n        constraints\n    }\n}\n\n/// An expression used in equality constraints.\n/// This is a simplified version of `AlgebraicExpression` that only allows\n/// references and numbers.\npub enum EqualityExpression<T> {\n    Reference(AlgebraicReference),\n    Number(T),\n}\n\nimpl<T> From<EqualityExpression<T>> for AlgebraicExpression<T> {\n    fn from(expr: EqualityExpression<T>) -> Self {\n        match expr {\n            EqualityExpression::Reference(r) => AlgebraicExpression::Reference(r),\n            EqualityExpression::Number(n) => AlgebraicExpression::Number(n),\n        }\n    }\n}\n\n/// An equality constraint between two `EqualityExpression`s.\npub struct EqualityConstraint<T> {\n    pub left: EqualityExpression<T>,\n    pub right: EqualityExpression<T>,\n}\n\nimpl<T> From<EqualityConstraint<T>> for SymbolicConstraint<T> {\n    fn from(constraint: EqualityConstraint<T>) -> Self {\n        SymbolicConstraint {\n            expr: AlgebraicExpression::from(constraint.left)\n                - AlgebraicExpression::from(constraint.right),\n        }\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/equivalence_classes.rs",
    "content": "use std::collections::{BTreeSet, HashMap};\nuse std::hash::Hash;\n\nuse derivative::Derivative;\nuse itertools::Itertools;\nuse rayon::prelude::*;\nuse serde::{Deserialize, Deserializer, Serialize, Serializer};\n\n/// An equivalence class, i.e, a set of values of type `T` which are considered equivalent\npub type EquivalenceClass<T> = BTreeSet<T>;\n\n/// A collection of equivalence classes where all classes are guaranteed to have at least two elements.\n/// This is enforced by construction of this type only happening through collection, where we ignore empty and singleton classes.\n///\n/// Internally represented as a map from element to class ID for efficient intersection operations.\n/// Serializes as Vec<Vec<T>> for JSON compatibility (JSON requires string keys in objects).\n#[derive(Debug, Clone, Derivative)]\n#[derivative(Default(bound = \"\"))]\npub struct Partition<T> {\n    /// Maps each element to its class ID (0..num_classes)\n    /// If an element is not present, it is in a singleton class.\n    class_of: HashMap<T, usize>,\n    /// Number of classes\n    num_classes: usize,\n}\n\nimpl<T: Eq + Hash + Serialize + Clone> Serialize for Partition<T> {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        // Serialize as Vec<Vec<T>> for JSON compatibility\n        self.to_classes().serialize(serializer)\n    }\n}\n\nimpl<'de, T: Eq + Hash + Deserialize<'de>> Deserialize<'de> for Partition<T> {\n    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {\n        // Deserialize from Vec<Vec<T>>, reusing FromIterator logic\n        let classes: Vec<Vec<T>> = Vec::deserialize(deserializer)?;\n        Ok(classes.into_iter().collect())\n    }\n}\n\nimpl<T: Eq + Hash, C: IntoIterator<Item = T>> FromIterator<C> for Partition<T>\nwhere\n    C::IntoIter: ExactSizeIterator,\n{\n    fn from_iter<I: IntoIterator<Item = C>>(iter: I) -> Self {\n        let mut class_of = HashMap::new();\n      
  let mut num_classes = 0;\n\n        for class in iter {\n            let class_iter = class.into_iter();\n            // Ignore classes with 0 or 1 elements as they are useless\n            if class_iter.len() > 1 {\n                for element in class_iter {\n                    assert!(class_of.insert(element, num_classes).is_none());\n                }\n                num_classes += 1;\n            }\n        }\n\n        Self {\n            class_of,\n            num_classes,\n        }\n    }\n}\n\nimpl<T: Eq + Hash + Clone> Partition<T> {\n    /// Returns all equivalence classes as a Vec<Vec<T>>.\n    /// Singleton classes are omitted.\n    /// This is O(n) where n is the number of elements.\n    #[allow(clippy::iter_over_hash_type)] // Order within classes doesn't matter semantically\n    pub fn to_classes(&self) -> Vec<Vec<T>> {\n        let mut classes: Vec<Vec<T>> = vec![Vec::new(); self.num_classes];\n        for (elem, &class_id) in &self.class_of {\n            classes[class_id].push(elem.clone());\n        }\n        classes\n    }\n\n    /// Intersects multiple partitions of the same universe into a single partition.\n    /// In other words, two elements are in the same equivalence class in the resulting partition\n    /// if and only if they are in the same equivalence class in all input partitions.\n    /// Singleton equivalence classes are omitted from the result.\n    pub fn intersect_many(partitions: impl IntoIterator<Item = Self>) -> Self {\n        // Pairwise intersection: fold over partitions, intersecting two at a time.\n        // This is more efficient than building Vec<usize> signatures because:\n        // 1. We only hash (usize, usize) tuples instead of Vec<usize>\n        // 2. 
The result shrinks after each intersection, making later steps faster\n        partitions\n            .into_iter()\n            .reduce(Partition::intersected_with)\n            .expect(\"expected at least one element\")\n    }\n\n    /// Intersects two partitions.\n    pub fn intersected_with(self, other: Self) -> Self {\n        // Group elements by (class_in_self, class_in_other)\n        // Elements with the same pair end up in the same result class\n        self.class_of\n            .into_iter()\n            // Note that if an element is not in self or other, it is a\n            // singleton and will also not be in the intersection.\n            .filter_map(|(elem, class_a)| {\n                other\n                    .class_of\n                    .get(&elem)\n                    .map(|&class_b| ((class_a, class_b), elem.clone()))\n            })\n            .into_group_map()\n            .into_values()\n            .collect()\n    }\n\n    /// Combine two partitions of disjoint universes into a single partition.\n    /// Elements from the two partitions must also not Eq collide.\n    pub fn combine(mut self, other: Self) -> Self {\n        let class_shift = self.num_classes;\n        #[allow(clippy::iter_over_hash_type)]\n        for (elem, class) in other.class_of {\n            if self.class_of.insert(elem, class + class_shift).is_some() {\n                panic!(\"Partition combine element collision\");\n            }\n        }\n        self.num_classes += other.num_classes;\n        self\n    }\n\n    /// Modify elements while keeping their original class.\n    /// The mapped elements must not Eq collide with each other.\n    pub fn map_elements<T2: Eq + Hash + Clone, F: Fn(T) -> T2>(self, f: F) -> Partition<T2> {\n        let mut new_class_of: HashMap<T2, usize> = Default::default();\n        #[allow(clippy::iter_over_hash_type)]\n        for (elem, class) in self.class_of {\n            if new_class_of.insert(f(elem), class).is_some() {\n        
        panic!(\"Partition element mapping collision\");\n            }\n        }\n        Partition::<T2> {\n            class_of: new_class_of,\n            num_classes: self.num_classes,\n        }\n    }\n}\n\n/// Number of partitions to combine in each chunk before parallelizing.\nconst CHUNK_SIZE: usize = 64;\n\nimpl<T: Eq + Hash + Copy + Send + Sync> Partition<T> {\n    /// Intersects multiple partitions in parallel using a chunked tree reduction.\n    ///\n    /// Partitions are grouped into chunks, each chunk is intersected sequentially,\n    /// then the chunk results are combined recursively in parallel.\n    pub fn parallel_intersect(partitions: impl IndexedParallelIterator<Item = Self>) -> Self {\n        if partitions.len() <= CHUNK_SIZE {\n            // Base case: We only have one chunk, intersect sequentially\n            let partitions = partitions.collect::<Vec<_>>();\n            return Self::intersect_many(partitions);\n        }\n\n        // Chunk partitions and intersect each chunk in parallel\n        let chunk_results = partitions\n            .chunks(CHUNK_SIZE)\n            .map(Self::intersect_many)\n            // Not collecting here causes the type checker to hit the recursion limit...\n            .collect::<Vec<_>>();\n\n        // Recursively combine chunk results\n        Self::parallel_intersect(chunk_results.into_par_iter())\n    }\n}\n\n/// Equality implementation that converts to canonical form for comparison.\n/// This is intentionally simple (not optimized) since it's only used in tests.\nimpl<T: Eq + Hash + Ord + Clone> PartialEq for Partition<T> {\n    fn eq(&self, other: &Self) -> bool {\n        self.to_canonical() == other.to_canonical()\n    }\n}\n\nimpl<T: Eq + Hash + Ord + Clone> Eq for Partition<T> {}\n\nimpl<T: Eq + Hash + Ord + Clone> Partition<T> {\n    /// Converts to a canonical BTreeSet<BTreeSet<T>> form for equality comparison.\n    fn to_canonical(&self) -> BTreeSet<BTreeSet<T>> {\n        
self.to_classes()\n            .into_iter()\n            .map(|class| class.into_iter().collect())\n            .collect()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::equivalence_classes::Partition;\n\n    fn partition(sets: Vec<Vec<u32>>) -> Partition<u32> {\n        sets.into_iter().collect()\n    }\n\n    #[test]\n    fn test_intersect_partitions() {\n        let partition1 = partition(vec![\n            // Two classes: {1,2,3,4} and {5,6,7,8,9}\n            vec![1, 2, 3, 4],\n            vec![5, 6, 7, 8, 9],\n        ]);\n        let partition2 = partition(vec![\n            // Classes: {2,3}, {4,5}, {6,7,8} (1 and 9 are singletons)\n            vec![2, 3],\n            vec![4, 5],\n            vec![6, 7, 8],\n        ]);\n        let partition3 = partition(vec![\n            // Classes: {2,3}, {6,7}, {8,9} (splits {6,7,8} into {6,7} and {8})\n            vec![2, 3],\n            vec![6, 7],\n            vec![8, 9],\n        ]);\n\n        let result = Partition::intersect_many([partition1, partition2, partition3]);\n\n        // After intersecting all three:\n        // - {2,3} survives (in same class in all three)\n        // - {6,7} survives (6,7,8 in p2 intersected with 6,7 in p3)\n        // - 8 becomes singleton (was with 6,7 in p2, but with 9 in p3, and 9 not in p1's class)\n        let expected = partition(vec![vec![2, 3], vec![6, 7]]);\n\n        assert_eq!(result, expected);\n    }\n\n    #[test]\n    fn test_default_partition_yields_no_classes() {\n        // The default partition puts every element in its own singleton class,\n        // which are omitted in the list of equivalence classes.\n        let partition: Partition<u32> = Partition::default();\n        assert_eq!(partition.to_classes().len(), 0);\n    }\n\n    #[test]\n    fn test_map_elements() {\n        let p = partition(vec![vec![1u32, 2], vec![3, 4]]);\n        let mapped: Partition<String> = p.map_elements(|x| x.to_string());\n        let expected: Partition<String> = 
vec![vec![\"1\", \"2\"], vec![\"3\", \"4\"]]\n            .into_iter()\n            .map(|v| v.into_iter().map(str::to_string))\n            .collect();\n        assert_eq!(mapped, expected);\n    }\n\n    #[test]\n    #[should_panic(expected = \"Partition element mapping collision\")]\n    fn test_map_elements_panics_on_collision() {\n        let p = partition(vec![vec![1, 2]]);\n        p.map_elements(|_| 0u32);\n    }\n\n    #[test]\n    fn test_combine() {\n        let p1 = partition(vec![vec![1, 2], vec![3, 4]]);\n        let p2 = partition(vec![vec![5, 6], vec![7, 8]]);\n        let combined = p1.combine(p2);\n        let expected = partition(vec![vec![1, 2], vec![3, 4], vec![5, 6], vec![7, 8]]);\n        assert_eq!(combined, expected);\n    }\n\n    #[test]\n    #[should_panic(expected = \"Partition combine element collision\")]\n    fn test_combine_panics_on_collision() {\n        let p1 = partition(vec![vec![1, 2]]);\n        let p2 = partition(vec![vec![1, 3]]);\n        p1.combine(p2);\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/evaluation.rs",
    "content": "use std::{fmt::Display, iter::Sum, ops::Add, sync::Arc};\n\nuse crate::{\n    adapter::{Adapter, AdapterApc, AdapterApcWithStats},\n    InstructionHandler, SymbolicMachine,\n};\n\nuse serde::{Deserialize, Serialize};\n\n#[derive(Clone, Copy, PartialEq, Default, Eq, Debug, Serialize, Deserialize)]\n/// Statistics of an AIR\npub struct AirStats {\n    /// The number of main columns\n    pub main_columns: usize,\n    /// The number of polynomial constraints\n    pub constraints: usize,\n    /// The number of bus interactions. Note that in some proof systems, they might\n    /// translate to a number of columns. The exact number depends on many factors,\n    /// including the degree of the bus interaction fields, which is not measured here.\n    pub bus_interactions: usize,\n}\n\nimpl AirStats {\n    pub fn new<F>(machine: &SymbolicMachine<F>) -> Self {\n        Self {\n            main_columns: machine.main_columns().count(),\n            constraints: machine.constraints.len(),\n            bus_interactions: machine.bus_interactions.len(),\n        }\n    }\n}\n\nimpl Add for AirStats {\n    type Output = AirStats;\n    fn add(self, rhs: AirStats) -> AirStats {\n        AirStats {\n            main_columns: self.main_columns + rhs.main_columns,\n            constraints: self.constraints + rhs.constraints,\n            bus_interactions: self.bus_interactions + rhs.bus_interactions,\n        }\n    }\n}\n\nimpl Sum<AirStats> for AirStats {\n    fn sum<I: Iterator<Item = AirStats>>(iter: I) -> AirStats {\n        iter.fold(AirStats::default(), Add::add)\n    }\n}\n\n#[derive(Clone, Copy, Serialize, Deserialize)]\n/// Evaluation result of an APC evaluation\npub struct EvaluationResult {\n    /// Statistics before optimizations, i.e., the sum of the AIR stats\n    /// of all AIRs that *would* be involved in proving this block\n    /// if it was run in software.\n    pub before: AirStats,\n    /// The AIR stats of the APC.\n    pub after: AirStats,\n}\n\n/// 
Evaluate an APC by comparing its cost to the cost of executing the original instructions in software.\n/// This is used by different pgo strategies in different stages. For example, for cell PGO, this is done before selection, and for instruction PGO, it is done after.\npub fn evaluate_apc<A: Adapter>(\n    instruction_handler: &A::InstructionHandler,\n    apc: AdapterApc<A>,\n) -> AdapterApcWithStats<A> {\n    let before = apc\n        .block\n        .instructions()\n        .map(|(_, instruction)| instruction_handler.get_instruction_air_stats(instruction))\n        .sum();\n    let after = AirStats::new(apc.machine());\n    let evaluation_result = EvaluationResult { before, after };\n\n    let apc = Arc::new(apc);\n    let apc_stats = A::apc_stats(apc.clone(), instruction_handler);\n\n    AdapterApcWithStats::<A>::new(apc, apc_stats, evaluation_result)\n}\n\nimpl Display for EvaluationResult {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        let EvaluationResult { before, after } = self;\n        write!(\n            f,\n            \"APC advantage:\\n  - Main columns: {}\\n  - Bus interactions: {}\\n  - Constraints: {}\",\n            render_stat(before.main_columns, after.main_columns),\n            render_stat(before.bus_interactions, after.bus_interactions),\n            render_stat(before.constraints, after.constraints)\n        )\n    }\n}\n\nfn render_stat(before: usize, after: usize) -> String {\n    let effectiveness = before as f64 / after as f64;\n    format!(\"{before} -> {after} ({effectiveness:.2}x reduction)\")\n}\n"
  },
  {
    "path": "autoprecompiles/src/execution/ast.rs",
    "content": "use std::iter;\n\nuse itertools::Itertools;\nuse powdr_expression::visitors::{AllChildren, Children};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{execution::ExecutionState, powdr::UniqueReferences};\n\n#[derive(Debug, Serialize, Deserialize, deepsize2::DeepSizeOf, PartialEq, Eq, Clone)]\npub struct OptimisticConstraint<A, V> {\n    pub left: OptimisticExpression<A, V>,\n    pub right: OptimisticExpression<A, V>,\n}\n\nimpl<A, V> Children<OptimisticExpression<A, V>> for OptimisticConstraint<A, V> {\n    fn children(&self) -> Box<dyn Iterator<Item = &OptimisticExpression<A, V>> + '_> {\n        Box::new([&self.left, &self.right].into_iter())\n    }\n\n    fn children_mut(&mut self) -> Box<dyn Iterator<Item = &mut OptimisticExpression<A, V>> + '_> {\n        Box::new([&mut self.left, &mut self.right].into_iter())\n    }\n}\n\nimpl<\n        'a,\n        A: 'a + Copy + PartialEq + Eq + std::hash::Hash,\n        V: 'a,\n        E: AllChildren<OptimisticExpression<A, V>>,\n    > UniqueReferences<'a, (A, V), OptimisticLiteral<A>> for E\n{\n    fn unique_references(&'a self) -> impl Iterator<Item = OptimisticLiteral<A>> {\n        self.all_children()\n            .filter_map(|e| {\n                if let OptimisticExpression::Literal(r) = e {\n                    Some(*r)\n                } else {\n                    None\n                }\n            })\n            .unique()\n    }\n}\n\nimpl<A, V> AllChildren<OptimisticExpression<A, V>> for OptimisticExpression<A, V> {\n    fn all_children(&self) -> Box<dyn Iterator<Item = &OptimisticExpression<A, V>> + '_> {\n        Box::new(iter::once(self).chain(self.children().flat_map(|e| e.all_children())))\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, deepsize2::DeepSizeOf, PartialEq, Eq)]\npub enum OptimisticExpression<A, V> {\n    Number(V),\n    Literal(OptimisticLiteral<A>),\n}\n\nimpl<A, V> OptimisticExpression<A, V> {\n    fn children(&self) -> Box<dyn Iterator<Item = 
&OptimisticExpression<A, V>> + '_> {\n        match self {\n            OptimisticExpression::Literal(_) | OptimisticExpression::Number(_) => {\n                Box::new(iter::empty())\n            }\n        }\n    }\n}\n\n#[derive(\n    Debug, Clone, Copy, Serialize, Deserialize, deepsize2::DeepSizeOf, PartialEq, Eq, Hash,\n)]\npub enum LocalOptimisticLiteral<A> {\n    /// A register limb value. Limbs are indexed in little-endian order.\n    RegisterLimb(A, usize),\n    Pc,\n}\n\nimpl<A> From<LocalOptimisticLiteral<A>> for LocalFetch<A> {\n    fn from(value: LocalOptimisticLiteral<A>) -> Self {\n        match value {\n            LocalOptimisticLiteral::RegisterLimb(a, _) => Self::Register(a),\n            LocalOptimisticLiteral::Pc => Self::Pc,\n        }\n    }\n}\n\n#[derive(\n    Debug, Clone, Copy, Serialize, Deserialize, deepsize2::DeepSizeOf, PartialEq, Eq, Hash,\n)]\npub enum LocalFetch<A> {\n    Register(A),\n    Pc,\n}\n\nimpl<A> LocalFetch<A> {\n    pub fn get<E: ExecutionState<RegisterAddress = A>>(&self, state: &E) -> E::Value {\n        match self {\n            LocalFetch::Register(a) => state.reg(a),\n            LocalFetch::Pc => state.pc(),\n        }\n    }\n}\n\n#[derive(\n    Debug, Clone, Copy, Serialize, Deserialize, deepsize2::DeepSizeOf, PartialEq, Eq, Hash,\n)]\npub struct Fetch<A> {\n    pub instr_idx: usize,\n    pub val: LocalFetch<A>,\n}\n\nimpl<A> From<OptimisticLiteral<A>> for Fetch<A> {\n    fn from(value: OptimisticLiteral<A>) -> Self {\n        Self {\n            instr_idx: value.instr_idx,\n            val: value.val.into(),\n        }\n    }\n}\n\n#[derive(\n    Debug, Clone, Copy, Serialize, Deserialize, deepsize2::DeepSizeOf, PartialEq, Eq, Hash,\n)]\npub struct OptimisticLiteral<A> {\n    pub instr_idx: usize,\n    pub val: LocalOptimisticLiteral<A>,\n}\n"
  },
  {
    "path": "autoprecompiles/src/execution/candidates.rs",
    "content": "use std::cmp::Ordering;\n\nuse itertools::Itertools;\n\nuse crate::execution::{\n    evaluator::OptimisticConstraintFailed, ExecutionState, OptimisticConstraintEvaluator,\n    OptimisticConstraints,\n};\n\n/// An APC candidate tracker\n/// During execution, it keeps track of possible parts of the trace that can be assigned to APCs\npub struct ApcCandidates<E: ExecutionState, A, S> {\n    apcs: Vec<A>,\n    candidates: Vec<ApcCandidate<E, S>>,\n}\n\n/// A selected APC call\n#[derive(\n    Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize, deepsize2::DeepSizeOf,\n)]\npub struct ApcCall<S> {\n    /// The index of the APC that this call runs\n    pub apc_id: usize,\n    /// A snapshot before this APC\n    pub from: S,\n    /// A snapshot after this APC\n    pub to: S,\n}\n\nimpl<E: ExecutionState, A: Apc<E>, S> ApcCandidates<E, A, S> {\n    pub fn new(apcs: Vec<A>) -> Self {\n        Self {\n            apcs,\n            candidates: Default::default(),\n        }\n    }\n\n    /// Given the current state of execution, retain the candidates whose constraints are still\n    /// verified\n    pub fn check_conditions(&mut self, state: &E, snapshot_callback: impl Fn() -> S) {\n        // Filter out failing candidates and upgrade the ones that are done\n        self.candidates\n            .retain_mut(|candidate| match &mut candidate.status {\n                // Check the conditions for unconfirmed candidates\n                CandidateStatus::InProgress(optimistic_constraint_evaluator) => {\n                    if optimistic_constraint_evaluator\n                        .try_next_execution_step(\n                            state,\n                            self.apcs[candidate.apc_id].optimistic_constraints(),\n                        )\n                        .is_err()\n                    {\n                        return false;\n                    }\n                    // If we went through the whole block, confirm it\n              
      if candidate.total_check_count\n                        == optimistic_constraint_evaluator.instruction_index()\n                    {\n                        candidate.status = CandidateStatus::Done(Clocked::new(\n                            state.global_clk(),\n                            snapshot_callback(),\n                        ));\n                    }\n                    true\n                }\n                _ => true,\n            });\n    }\n\n    /// Abort all candidates that are in progress.\n    /// This is useful at the end of a segment, where some candidates being in progress block other candidates that are done from being extracted.\n    /// Since we reached the end of the segment, we know that the candidates that are in progress will not be valid, so it's safe to drop them.\n    pub fn abort_in_progress(&mut self) -> Vec<usize> {\n        self.candidates\n            .extract_if(.., |f| matches!(f.status, CandidateStatus::InProgress(_)))\n            .map(|candidate| candidate.apc_id)\n            .collect()\n    }\n\n    /// If no more candidates are in progress, return a set of non-overlapping calls\n    pub fn extract_calls(&mut self) -> Vec<ApcCall<S>> {\n        let are_any_in_progress = self\n            .candidates\n            .iter()\n            .any(|c| matches!(c.status, CandidateStatus::InProgress(_)));\n\n        // If any candidates are in progress, return nothing\n        if are_any_in_progress {\n            return vec![];\n        }\n\n        // Now we have no more candidates in progress\n\n        // We need to solve conflicts to make sure we do not return overlapping candidates\n\n        // Collect metadata needed to resolve overlaps in a single pass\n        let meta = self.candidates.iter().enumerate().map(|(idx, candidate)| {\n            let range = Self::candidate_range(candidate);\n            (\n                CandidateRank {\n                    candidate_id: idx,\n                    len: range.1 - 
range.0,\n                    priority: self.apcs[candidate.apc_id].priority(),\n                },\n                range,\n            )\n        });\n\n        // Find which candidates to discard by going through all pairs\n        let discard = meta.tuple_combinations().fold(\n            vec![false; self.candidates.len()],\n            |mut discard, ((rank_left, range_left), (rank_right, range_right))| {\n                let (rank_left, range_left) = (rank_left, range_left);\n                let (rank_right, range_right) = (rank_right, range_right);\n                let idx_left = rank_left.candidate_id;\n                let idx_right = rank_right.candidate_id;\n\n                // If one of the two is already discarded, or they do not overlap, do nothing\n                if discard[idx_left]\n                    || discard[idx_right]\n                    || !Self::ranges_overlap(range_left, range_right)\n                {\n                    return discard;\n                }\n\n                // Otherwise, discard the one with lower priority\n                match rank_left.cmp(&rank_right) {\n                    Ordering::Greater => discard[idx_right] = true,\n                    Ordering::Less => discard[idx_left] = true,\n                    Ordering::Equal => unreachable!(\"by construction, two ranks cannot be equal\"),\n                }\n\n                discard\n            },\n        );\n\n        // Keep all candidates that were not marked as discarded\n        self.candidates\n            .drain(..)\n            .zip_eq(discard)\n            .filter_map(|(candidate, discard)| (!discard).then_some(candidate))\n            .map(|candidate| {\n                let CandidateStatus::Done(to) = candidate.status else {\n                    unreachable!()\n                };\n                ApcCall {\n                    apc_id: candidate.apc_id,\n                    from: candidate.from_snapshot.snapshot,\n                    to: to.snapshot,\n       
         }\n            })\n            .collect()\n    }\n\n    /// Try to insert a new candidate.\n    /// This can fail if the current state is incompatible with the optimistic constraints of the candidate\n    pub fn try_insert(\n        &mut self,\n        state: &E,\n        apc_id: usize,\n        snapshot: impl Fn() -> S,\n    ) -> Result<(), OptimisticConstraintFailed> {\n        let apc_candidate = {\n            let apc = &self.apcs[apc_id];\n            let mut evaluator = OptimisticConstraintEvaluator::new();\n            evaluator.try_next_execution_step(state, apc.optimistic_constraints())?;\n            Ok(ApcCandidate {\n                total_check_count: apc.cycle_count() + 1,\n                apc_id,\n                from_snapshot: Clocked::new(state.global_clk(), snapshot()),\n                status: CandidateStatus::InProgress(evaluator),\n            })\n        }?;\n        self.candidates.push(apc_candidate);\n        Ok(())\n    }\n\n    fn candidate_range(candidate: &ApcCandidate<E, S>) -> (usize, usize) {\n        let start = candidate.from_snapshot.global_clk;\n        let end = match &candidate.status {\n            CandidateStatus::Done(snapshot) => snapshot.global_clk,\n            CandidateStatus::InProgress(_) => {\n                unreachable!(\"candidate_range called on candidate still in progress\")\n            }\n        };\n        (start, end)\n    }\n\n    fn ranges_overlap((start_a, end_a): (usize, usize), (start_b, end_b): (usize, usize)) -> bool {\n        start_a < end_b && start_b < end_a\n    }\n}\n\n#[derive(Clone, Copy, Debug, Eq, PartialEq)]\nstruct CandidateRank {\n    /// Priority of this candidate. Higher is better.\n    priority: usize,\n    /// Length (number of cycles) covered by this candidate. Higher is better.\n    len: usize,\n    /// Index of the candidate within the current list. 
Lower is better.\n    candidate_id: usize,\n}\n\nimpl Ord for CandidateRank {\n    fn cmp(&self, other: &Self) -> Ordering {\n        self.priority\n            .cmp(&other.priority)\n            .then_with(|| self.len.cmp(&other.len))\n            .then_with(|| other.candidate_id.cmp(&self.candidate_id))\n    }\n}\n\nimpl PartialOrd for CandidateRank {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\n#[derive(Debug)]\nstruct ApcCandidate<E: ExecutionState, S> {\n    /// The total number of steps to run\n    /// This is the number of steps plus one, because we check the state before and after\n    total_check_count: usize,\n    /// The id of the apc candidate being run\n    pub apc_id: usize,\n    /// The state of the execution when this candidate was introduced\n    pub from_snapshot: Clocked<S>,\n    /// The status of this candidate\n    pub status: CandidateStatus<E, S>,\n}\n\n#[derive(Debug)]\nstruct Clocked<S> {\n    global_clk: usize,\n    snapshot: S,\n}\n\nimpl<S> Clocked<S> {\n    fn new(global_clk: usize, snapshot: S) -> Self {\n        Self {\n            global_clk,\n            snapshot,\n        }\n    }\n}\n\n/// A trait to represent APCs at execution time\npub trait Apc<E: ExecutionState> {\n    /// Gets a reference to the optimistic constraints\n    fn optimistic_constraints(&self) -> &OptimisticConstraints<E::RegisterAddress, E::Value>;\n\n    /// The number of cycles to go through this APC\n    fn cycle_count(&self) -> usize;\n\n    /// Larger priority wins when APC execution ranges overlap.\n    fn priority(&self) -> usize;\n}\n\n#[derive(Debug)]\nenum CandidateStatus<E: ExecutionState, S> {\n    /// We don't know yet if this apc candidate is valid. 
The conditions must be verified\n    InProgress(OptimisticConstraintEvaluator<E::RegisterAddress, E::Value>),\n    /// We know the candidate is valid until the given `Snapshot`\n    Done(Clocked<S>),\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::execution::{OptimisticConstraint, OptimisticExpression, OptimisticLiteral};\n\n    use super::*;\n\n    #[derive(Default, Clone, PartialEq, Debug)]\n    struct TestApc {\n        priority: usize,\n        len: usize,\n        optimistic_constraints: OptimisticConstraints<(), usize>,\n    }\n\n    impl TestApc {\n        fn a(len: usize) -> Self {\n            Self {\n                len,\n                ..Default::default()\n            }\n        }\n\n        fn p(mut self, priority: usize) -> Self {\n            self.priority = priority;\n            self\n        }\n\n        fn c(mut self, optimistic_constraints: OptimisticConstraints<(), usize>) -> Self {\n            self.optimistic_constraints = optimistic_constraints;\n            self\n        }\n    }\n\n    fn a(len: usize) -> TestApc {\n        TestApc::a(len)\n    }\n\n    impl Apc<TestExecutionState> for TestApc {\n        fn cycle_count(&self) -> usize {\n            self.len\n        }\n\n        fn priority(&self) -> usize {\n            self.priority\n        }\n\n        fn optimistic_constraints(&self) -> &OptimisticConstraints<(), usize> {\n            &self.optimistic_constraints\n        }\n    }\n\n    #[derive(Clone, Copy, PartialEq, Debug, Default)]\n    struct TestExecutionState {\n        pc: usize,\n        instret: usize,\n    }\n\n    impl TestExecutionState {\n        fn incr(&mut self) {\n            self.jump(self.pc + 1)\n        }\n\n        fn jump(&mut self, pc: usize) {\n            self.pc = pc;\n            self.instret += 1;\n        }\n\n        fn snapshot(&self) -> TestSnapshot {\n            TestSnapshot {\n                instret: self.instret,\n            }\n        }\n    }\n\n    impl ExecutionState for 
TestExecutionState {\n        type RegisterAddress = ();\n\n        type Value = usize;\n\n        fn value_limb(_: Self::Value, _: usize) -> Self::Value {\n            todo!(\"Limbs are currently untested\")\n        }\n\n        fn pc(&self) -> Self::Value {\n            self.pc\n        }\n\n        fn reg(&self, _address: &Self::RegisterAddress) -> Self::Value {\n            todo!(\"Constraints on register values is currently untested\")\n        }\n\n        fn global_clk(&self) -> usize {\n            self.instret\n        }\n    }\n\n    #[derive(Clone, PartialEq, Debug, Copy)]\n    struct TestSnapshot {\n        instret: usize,\n    }\n\n    fn s(instret: usize) -> TestSnapshot {\n        TestSnapshot { instret }\n    }\n\n    struct TestVm {\n        state: TestExecutionState,\n        candidates: ApcCandidates<TestExecutionState, TestApc, TestSnapshot>,\n    }\n\n    impl TestVm {\n        fn try_add_candidate(&mut self, apc_id: usize) -> Result<(), OptimisticConstraintFailed> {\n            self.candidates\n                .try_insert(&self.state, apc_id, || self.state.snapshot())\n        }\n\n        // A helper function to go to the next execution state, check the conditions on it, and extract the calls\n        fn incr(&mut self) -> Vec<ApcCall<TestSnapshot>> {\n            self.state.incr();\n            self.candidates\n                .check_conditions(&self.state, || self.state.snapshot());\n            self.candidates.extract_calls()\n        }\n\n        fn jump(&mut self, pc: usize) -> Vec<ApcCall<TestSnapshot>> {\n            self.state.jump(pc);\n            self.candidates\n                .check_conditions(&self.state, || self.state.snapshot());\n            self.candidates.extract_calls()\n        }\n\n        fn count_done(&self) -> usize {\n            self.candidates\n                .candidates\n                .iter()\n                .filter(|c| matches!(c.status, CandidateStatus::Done(_)))\n                .count()\n        }\n\n   
     fn count_in_progress(&self) -> usize {\n            self.candidates\n                .candidates\n                .iter()\n                .filter(|c| matches!(c.status, CandidateStatus::InProgress(_)))\n                .count()\n        }\n\n        fn new(apcs: impl IntoIterator<Item = TestApc>) -> Self {\n            Self {\n                state: Default::default(),\n                candidates: ApcCandidates::new(apcs.into_iter().collect()),\n            }\n        }\n    }\n\n    #[test]\n    fn single_candidate() {\n        // an apc with 3 steps\n        let mut vm = TestVm::new([a(3).p(1)]);\n        let snapshot = s(0);\n        let final_snapshot = s(3);\n        // it will be checked in 4 steps, because we have conditions on the state before and after. The first check is included in `try_insert`.\n        vm.try_add_candidate(0).unwrap();\n        assert!(vm.incr().is_empty());\n        assert!(vm.incr().is_empty());\n        let output = vm.incr();\n        assert_eq!(output.len(), 1);\n        assert_eq!(\n            output[0],\n            ApcCall {\n                apc_id: 0,\n                from: snapshot,\n                to: final_snapshot,\n            }\n        );\n    }\n\n    #[test]\n    fn single_candidate_final_state_failure() {\n        // single apc with a constraint that fails on the final (step 2) state\n        let failing_constraints =\n            OptimisticConstraints::from_constraints(vec![OptimisticConstraint {\n                left: OptimisticExpression::Literal(OptimisticLiteral {\n                    instr_idx: 2,\n                    val: crate::execution::LocalOptimisticLiteral::Pc,\n                }),\n                right: OptimisticExpression::Number(99),\n            }]);\n        let apc = a(2).p(1).c(failing_constraints);\n        let mut vm = TestVm::new([apc]);\n        vm.try_add_candidate(0).unwrap();\n        assert!(vm.incr().is_empty());\n        assert_eq!(vm.count_in_progress(), 1);\n        let 
extracted = vm.incr();\n        assert!(extracted.is_empty());\n        assert_eq!(vm.count_in_progress(), 0);\n        assert_eq!(vm.count_done(), 0);\n    }\n\n    #[test]\n    fn two_candidates_same_length() {\n        // insert two apcs with 3 steps each, but different priority\n        let low_priority = a(3).p(1);\n        let high_priority = a(3).p(2);\n        let mut vm = TestVm::new([low_priority, high_priority]);\n        let low_priority_id = 0;\n        let high_priority_id = 1;\n        let snapshot = s(0);\n        let final_snapshot = s(3);\n        vm.try_add_candidate(low_priority_id).unwrap();\n        vm.try_add_candidate(high_priority_id).unwrap();\n        assert!(vm.incr().is_empty());\n        assert!(vm.incr().is_empty());\n        let output = vm.incr();\n        assert_eq!(output.len(), 1);\n        assert_eq!(\n            output[0],\n            ApcCall {\n                apc_id: high_priority_id,\n                from: snapshot,\n                to: final_snapshot\n            }\n        );\n    }\n\n    #[test]\n    fn superblock_success() {\n        // insert two apcs with different length and priority\n        // the superblock (longer block) apc has higher priority and succeeds so it should be picked\n        let low_priority = a(3).p(1);\n        let high_priority = a(4).p(2);\n        let mut vm = TestVm::new([low_priority, high_priority]);\n        let low_priority_id = 0;\n        let high_priority_id = 1;\n        let snapshot: TestSnapshot = s(0);\n        // The final snapshot is the one at the end of the high priority apc, since it succeeds\n        let final_snapshot = s(4);\n        vm.try_add_candidate(low_priority_id).unwrap();\n        vm.try_add_candidate(high_priority_id).unwrap();\n        assert!(vm.incr().is_empty());\n        assert!(vm.incr().is_empty());\n        // Both are still running\n        assert_eq!(vm.count_done(), 0);\n        assert!(vm.incr().is_empty());\n        // The first apc is done\n        
assert_eq!(vm.count_done(), 1);\n        let output = vm.incr();\n        assert_eq!(output.len(), 1);\n        assert_eq!(\n            output[0],\n            ApcCall {\n                apc_id: high_priority_id,\n                from: snapshot,\n                to: final_snapshot,\n            }\n        );\n    }\n\n    #[test]\n    fn superblock_failure() {\n        // insert two apcs with different length and priority\n        // the superblock (longer block) apc has higher priority but fails the branching condition, so the low priority apc should be picked\n        let low_priority = a(3).p(1);\n        let failing_constraints =\n            OptimisticConstraints::from_constraints(vec![OptimisticConstraint {\n                left: OptimisticExpression::Literal(OptimisticLiteral {\n                    instr_idx: 3,\n                    val: crate::execution::LocalOptimisticLiteral::Pc,\n                }),\n                right: OptimisticExpression::Number(1234),\n            }]);\n        let high_priority = a(4).p(2).c(failing_constraints);\n        let mut vm = TestVm::new([low_priority, high_priority]);\n        let low_priority_id = 0;\n        let high_priority_id = 1;\n        let snapshot: TestSnapshot = s(0);\n        // The final snapshot is the one at the end of the low priority apc, as the other one failed\n        let final_snapshot = s(3);\n        vm.try_add_candidate(low_priority_id).unwrap();\n        // The high priority candidate requires a jump to pc 1234 for the last cycle. 
This means the pc at step 3 (before instruction 4) should be 1234.\n        vm.try_add_candidate(high_priority_id).unwrap();\n        assert!(vm.incr().is_empty());\n        assert!(vm.incr().is_empty());\n        // Both apcs are still running\n        assert_eq!(vm.count_done(), 0);\n        // In this check, the low priority apc completes and the high priority one fails (as the jump did not happen)\n        let output = vm.incr();\n        assert_eq!(output.len(), 1);\n        assert_eq!(\n            output[0],\n            ApcCall {\n                apc_id: low_priority_id,\n                from: snapshot,\n                to: final_snapshot,\n            }\n        );\n    }\n\n    #[test]\n    fn superblock_failure_keeps_non_overlapping_calls() {\n        // A and B are separate blocks; ABC spans A+B+C but fails within C.\n        // When ABC fails, A and B should both be emitted since their ranges do not overlap.\n        let a_len = 2;\n        let b_len = 2;\n        let c_len = 2;\n        let abc_len = a_len + b_len + c_len;\n        let fail_instr_idx = a_len + b_len + 1;\n        let failing_constraints =\n            OptimisticConstraints::from_constraints(vec![OptimisticConstraint {\n                left: OptimisticExpression::Literal(OptimisticLiteral {\n                    instr_idx: fail_instr_idx,\n                    val: crate::execution::LocalOptimisticLiteral::Pc,\n                }),\n                right: OptimisticExpression::Number(999),\n            }]);\n        let apc_a = a(a_len).p(1);\n        let apc_b = a(b_len).p(1);\n        let apc_abc = a(abc_len).p(2).c(failing_constraints);\n        let mut vm = TestVm::new([apc_a, apc_b, apc_abc]);\n\n        let apc_a_id = 0;\n        let apc_b_id = 1;\n        let apc_abc_id = 2;\n\n        vm.try_add_candidate(apc_a_id).unwrap();\n        vm.try_add_candidate(apc_abc_id).unwrap();\n\n        for _ in 0..a_len {\n            assert!(vm.incr().is_empty());\n        }\n\n        
vm.try_add_candidate(apc_b_id).unwrap();\n\n        for _ in 0..b_len {\n            assert!(vm.incr().is_empty());\n        }\n\n        let output = vm.incr();\n        assert_eq!(\n            output,\n            vec![\n                ApcCall {\n                    apc_id: apc_a_id,\n                    from: s(0),\n                    to: s(2),\n                },\n                ApcCall {\n                    apc_id: apc_b_id,\n                    from: s(2),\n                    to: s(4),\n                },\n            ]\n        );\n    }\n\n    #[test]\n    fn two_candidates_different_start() {\n        // define two apcs with different priorities\n        let low_priority = a(3).p(1);\n        let high_priority = a(3).p(2);\n        let mut vm = TestVm::new([low_priority, high_priority]);\n        let low_priority_id = 0;\n        let high_priority_id = 1;\n        let high_priority_snapshot = s(1);\n        let final_snapshot = s(4);\n        // insert the low priority apc\n        vm.try_add_candidate(low_priority_id).unwrap();\n        assert!(vm.incr().is_empty());\n        // candidate is still running\n        assert_eq!(vm.count_in_progress(), 1);\n        // insert the high priority apc\n        vm.try_add_candidate(high_priority_id).unwrap();\n        assert!(vm.incr().is_empty());\n        // Both are still running\n        assert_eq!(vm.count_in_progress(), 2);\n        assert!(vm.incr().is_empty());\n        // The first apc is done\n        assert_eq!(vm.count_done(), 1);\n        let output = vm.incr();\n        assert_eq!(output.len(), 1);\n        assert_eq!(\n            output[0],\n            ApcCall {\n                apc_id: high_priority_id,\n                from: high_priority_snapshot,\n                to: final_snapshot,\n            }\n        );\n    }\n\n    #[test]\n    fn abort_in_progress_returns_shorter_candidate() {\n        let short_low_priority = a(2).p(1);\n        let long_high_priority = a(4).p(2);\n        let 
mut vm = TestVm::new([short_low_priority, long_high_priority]);\n        let short_low_priority_id = 0;\n        let short_snapshot = s(0);\n        let short_final_snapshot = s(2);\n\n        vm.try_add_candidate(short_low_priority_id).unwrap();\n        vm.try_add_candidate(1).unwrap();\n\n        for _ in 0..2 {\n            assert!(vm.incr().is_empty());\n        }\n\n        assert_eq!(vm.count_done(), 1);\n        assert_eq!(vm.count_in_progress(), 1);\n\n        vm.candidates.abort_in_progress();\n\n        let extracted = vm.candidates.extract_calls();\n        assert_eq!(extracted.len(), 1);\n        assert_eq!(\n            extracted[0],\n            ApcCall {\n                apc_id: short_low_priority_id,\n                from: short_snapshot,\n                to: short_final_snapshot,\n            }\n        );\n    }\n\n    #[test]\n    fn abort_in_progress_after_segment_end_picks_shorter_candidate() {\n        let short_low_priority = a(2).p(1);\n        let long_high_priority = a(4).p(2);\n        let mut vm = TestVm::new([short_low_priority, long_high_priority]);\n        let short_low_priority_id = 0;\n        let short_snapshot = s(0);\n        let short_final_snapshot = s(2);\n\n        vm.try_add_candidate(short_low_priority_id).unwrap();\n        vm.try_add_candidate(1).unwrap();\n\n        for _ in 0..2 {\n            assert!(vm.incr().is_empty());\n        }\n\n        // The short one is done, the long one is still in progress\n        assert_eq!(vm.count_done(), 1);\n        assert_eq!(vm.count_in_progress(), 1);\n\n        // Segment ends, abort the one in progress\n        vm.candidates.abort_in_progress();\n\n        // The extracted one should be the short one\n        let extracted = vm.candidates.extract_calls();\n        assert_eq!(extracted.len(), 1);\n        assert_eq!(\n            extracted[0],\n            ApcCall {\n                apc_id: short_low_priority_id,\n                from: short_snapshot,\n                to: 
short_final_snapshot,\n            }\n        );\n    }\n\n    #[test]\n    fn jump_back_and_readd_candidate_does_not_overlap() {\n        // We have a program like\n        // 0: NOOP\n        // 1: JUMP 0\n\n        // We create an apc for the range, and check that calls do not overlap: the first call finishes before the second call starts\n\n        let mut vm = TestVm::new([a(2).p(1)]);\n        let apc_id = 0;\n\n        // pc = 0, add the candidate\n        vm.try_add_candidate(apc_id).unwrap();\n        assert_eq!(vm.count_in_progress(), 1);\n\n        assert!(vm.incr().is_empty());\n        // pc = 1, candidate still in progress\n        let output = vm.jump(0);\n        // pc = 0, first candidate should be done\n        assert_eq!(output.len(), 1);\n        assert_eq!(\n            output[0],\n            ApcCall {\n                apc_id,\n                from: s(0),\n                to: s(2),\n            }\n        );\n\n        // done with the first call, haven't started the second call, clean state.\n        assert_eq!(vm.count_in_progress(), 0);\n        assert_eq!(vm.count_done(), 0);\n\n        // start over\n        vm.try_add_candidate(apc_id).unwrap();\n        assert_eq!(vm.count_in_progress(), 1);\n\n        assert!(vm.incr().is_empty());\n        let output = vm.jump(0);\n        assert_eq!(output.len(), 1);\n        assert_eq!(\n            output[0],\n            ApcCall {\n                apc_id,\n                from: s(2),\n                to: s(4),\n            }\n        );\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/execution/evaluator.rs",
    "content": "use std::collections::HashMap;\n\nuse itertools::Itertools;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    execution::{\n        ast::{\n            LocalOptimisticLiteral, OptimisticConstraint, OptimisticExpression, OptimisticLiteral,\n        },\n        ExecutionState, Fetch, LocalFetch,\n    },\n    powdr::UniqueReferences,\n};\n\n/// A collection of optimistic constraints over the intermediate execution states of a block, to be accessed in chronological order\n#[derive(Debug, Serialize, Deserialize, deepsize2::DeepSizeOf, PartialEq, Clone, Default)]\npub struct OptimisticConstraints<A, V> {\n    /// For each step, the execution values we need to remember for future constraints, excluding this step\n    fetches_by_step: HashMap<usize, Vec<LocalFetch<A>>>,\n    /// For each step, the constraints that must be satisfied\n    constraints_to_check_by_step: HashMap<usize, Vec<OptimisticConstraint<A, V>>>,\n}\n\nimpl<A, V> OptimisticConstraints<A, V> {\n    pub fn empty() -> Self {\n        Self {\n            fetches_by_step: Default::default(),\n            constraints_to_check_by_step: Default::default(),\n        }\n    }\n}\n\nimpl<A: std::hash::Hash + PartialEq + Eq + Copy, V> OptimisticConstraints<A, V> {\n    pub fn from_constraints(constraints: Vec<OptimisticConstraint<A, V>>) -> Self {\n        // Extract each constraint together with the literals it references and the step\n        // at which the constraint becomes evaluable (i.e. 
when all referenced literals\n        // are available).\n        let constraint_data = constraints\n            .into_iter()\n            .map(|constraint| {\n                let references: Vec<_> = constraint.unique_references().collect();\n                let first_evaluable_step = references\n                    .iter()\n                    .map(|r| r.instr_idx)\n                    .max()\n                    .unwrap_or_default();\n                (first_evaluable_step, references, constraint)\n            })\n            .collect_vec();\n\n        // For every literal that is referenced in a *future* step, schedule a fetch at\n        // the step in which it first appears so it can be cached for later comparisons.\n        let fetches_by_step = constraint_data\n            .iter()\n            .flat_map(|(constraint_step, references, _)| {\n                references\n                    .iter()\n                    .filter(move |literal| *constraint_step > literal.instr_idx)\n                    .map(|literal| (literal.instr_idx, literal.val.into()))\n            })\n            .into_group_map()\n            .into_iter()\n            .sorted_by_key(|(instruction_index, _)| *instruction_index)\n            .collect();\n\n        // The constraint itself can only be checked once all its literals exist.\n        let constraints_to_check_by_step = constraint_data\n            .into_iter()\n            .map(|(first_evaluable_step, _, constraint)| (first_evaluable_step, constraint))\n            .into_group_map()\n            .into_iter()\n            .sorted_by_key(|(instruction_index, _)| *instruction_index)\n            .collect();\n\n        Self {\n            fetches_by_step,\n            constraints_to_check_by_step,\n        }\n    }\n}\n\n/// An evaluator over a set of constraints\n/// The expected use is to\n/// - store the APC's set of optimistic constraints in the program\n/// - when an APC is executed, create an instance of this evaluator over the 
APC's optimistic constraints\n/// - as we go through the original instructions, call `OptimisticConstraintEvaluator::try_next_execution_step`\n/// - if a constraint fails, stop checking the constraints\n#[derive(Debug)]\npub struct OptimisticConstraintEvaluator<A, V> {\n    /// The current instruction index in the execution\n    instruction_index: usize,\n    /// The values from previous intermediate states which we still need\n    memory: HashMap<Fetch<A>, V>,\n}\n\n#[derive(Debug)]\npub struct OptimisticConstraintFailed;\n\nimpl<A, V> Default for OptimisticConstraintEvaluator<A, V> {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\nimpl<A, V> OptimisticConstraintEvaluator<A, V> {\n    pub fn new() -> Self {\n        Self {\n            instruction_index: 0,\n            memory: HashMap::default(),\n        }\n    }\n\n    pub fn instruction_index(&self) -> usize {\n        self.instruction_index\n    }\n\n    /// Check all constraints that can be checked at this stage, returning a new instance iff they are verified\n    pub fn try_next_execution_step<E>(\n        &mut self,\n        state: &E,\n        optimistic_constraints: &OptimisticConstraints<A, V>,\n    ) -> Result<(), OptimisticConstraintFailed>\n    where\n        E: ExecutionState<RegisterAddress = A, Value = V>,\n        A: std::hash::Hash + PartialEq + Eq + Copy,\n        V: Copy,\n    {\n        // Get the constraints that can first be checked at this step\n        let constraints = optimistic_constraints\n            .constraints_to_check_by_step\n            .get(&self.instruction_index);\n\n        if let Some(constraints) = constraints {\n            // Check the constraints based on the current state and the memory of the previous states\n            let evaluator =\n                StepOptimisticConstraintEvaluator::new(self.instruction_index, state, &self.memory);\n            if !constraints\n                .iter()\n                .all(|constraint| 
evaluator.evaluate_constraint(constraint))\n            {\n                return Err(OptimisticConstraintFailed);\n            }\n        }\n\n        // Get the values we need to store from the state to check constraints in the future\n        let fetches = optimistic_constraints\n            .fetches_by_step\n            .get(&self.instruction_index);\n\n        if let Some(fetches) = fetches {\n            // fetch the values them in memory\n            for fetch in fetches {\n                let value = fetch.get(state);\n                let key = Fetch {\n                    instr_idx: self.instruction_index,\n                    val: *fetch,\n                };\n                self.memory.insert(key, value);\n            }\n        }\n\n        self.instruction_index += 1;\n\n        Ok(())\n    }\n}\n\n/// A constraint evaluator using the current execution state as well as the memory of previous states\nstruct StepOptimisticConstraintEvaluator<'a, E: ExecutionState> {\n    step: usize,\n    state: &'a E,\n    memory: &'a HashMap<Fetch<E::RegisterAddress>, E::Value>,\n}\nimpl<'a, E: ExecutionState> StepOptimisticConstraintEvaluator<'a, E> {\n    fn new(\n        step: usize,\n        state: &'a E,\n        memory: &'a HashMap<\n            Fetch<<E as ExecutionState>::RegisterAddress>,\n            <E as ExecutionState>::Value,\n        >,\n    ) -> Self {\n        Self {\n            step,\n            memory,\n            state,\n        }\n    }\n}\n\nimpl<'a, E: ExecutionState> StepOptimisticConstraintEvaluator<'a, E> {\n    fn evaluate_constraint(&self, c: &OptimisticConstraint<E::RegisterAddress, E::Value>) -> bool {\n        self.evaluate_expression(&c.left) == self.evaluate_expression(&c.right)\n    }\n\n    fn evaluate_expression(\n        &self,\n        e: &OptimisticExpression<E::RegisterAddress, E::Value>,\n    ) -> E::Value {\n        match e {\n            OptimisticExpression::Number(v) => *v,\n            
OptimisticExpression::Literal(optimistic_literal) => {\n                self.evaluate_literal(optimistic_literal)\n            }\n        }\n    }\n\n    fn evaluate_literal(&self, l: &OptimisticLiteral<E::RegisterAddress>) -> E::Value {\n        // By construction, the literals involved should only be from past states or the current state\n        debug_assert!(l.instr_idx <= self.step);\n        let fetch_value = self.fetch(&(*l).into());\n        match l.val {\n            LocalOptimisticLiteral::RegisterLimb(_, limb_index) => {\n                E::value_limb(fetch_value, limb_index)\n            }\n            LocalOptimisticLiteral::Pc => fetch_value,\n        }\n    }\n\n    fn fetch(&self, f: &Fetch<E::RegisterAddress>) -> E::Value {\n        if f.instr_idx == self.step {\n            // Hit the state for the current step\n            f.val.get(self.state)\n        } else {\n            // Hit the memory for the previous steps\n            self.memory[f]\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    struct TestExecutionState<const LIMB_WIDTH: usize> {\n        mem: [u8; 2],\n        pc: u8,\n    }\n\n    impl<const LIMB_WIDTH: usize> ExecutionState for TestExecutionState<LIMB_WIDTH> {\n        type RegisterAddress = u8;\n\n        type Value = u8;\n\n        fn pc(&self) -> Self::Value {\n            self.pc\n        }\n\n        fn reg(&self, address: &Self::RegisterAddress) -> Self::Value {\n            self.mem[*address as usize]\n        }\n\n        fn value_limb(value: Self::Value, limb_index: usize) -> Self::Value {\n            value >> (limb_index * LIMB_WIDTH) & (!0u8 >> (8 - LIMB_WIDTH))\n        }\n\n        fn global_clk(&self) -> usize {\n            todo!()\n        }\n    }\n\n    // An execution state with a single limb of 8 bits\n    type SingleLimbExecutionState = TestExecutionState<8>;\n\n    fn literal(instr_idx: usize, val: LocalOptimisticLiteral<u8>) -> OptimisticLiteral<u8> {\n        OptimisticLiteral { 
instr_idx, val }\n    }\n\n    fn literal_expr(\n        instr_idx: usize,\n        val: LocalOptimisticLiteral<u8>,\n    ) -> OptimisticExpression<u8, u8> {\n        OptimisticExpression::Literal(literal(instr_idx, val))\n    }\n\n    // This is used in the cases where the value has a single limb, so we access the first limb\n    fn mem(instr_idx: usize, addr: u8) -> OptimisticExpression<u8, u8> {\n        mem_limb(instr_idx, addr, 0)\n    }\n\n    fn mem_limb(instr_idx: usize, addr: u8, limb_index: usize) -> OptimisticExpression<u8, u8> {\n        literal_expr(\n            instr_idx,\n            LocalOptimisticLiteral::RegisterLimb(addr, limb_index),\n        )\n    }\n\n    fn pc(instr_idx: usize) -> OptimisticExpression<u8, u8> {\n        literal_expr(instr_idx, LocalOptimisticLiteral::Pc)\n    }\n\n    fn value(value: u8) -> OptimisticExpression<u8, u8> {\n        OptimisticExpression::Number(value)\n    }\n\n    fn eq(\n        left: OptimisticExpression<u8, u8>,\n        right: OptimisticExpression<u8, u8>,\n    ) -> OptimisticConstraint<u8, u8> {\n        OptimisticConstraint { left, right }\n    }\n\n    fn equality_constraints() -> OptimisticConstraints<u8, u8> {\n        OptimisticConstraints::from_constraints(vec![\n            eq(mem(0, 0), mem(0, 1)),\n            eq(mem(1, 0), mem(1, 1)),\n            eq(mem(2, 0), mem(2, 1)),\n        ])\n    }\n\n    fn cross_step_memory_constraint() -> OptimisticConstraints<u8, u8> {\n        OptimisticConstraints::from_constraints(vec![eq(mem(0, 0), mem(1, 1))])\n    }\n\n    fn cross_step_pc_constraint() -> OptimisticConstraints<u8, u8> {\n        OptimisticConstraints::from_constraints(vec![eq(pc(0), pc(1))])\n    }\n\n    fn initial_to_final_constraint(final_instr_idx: usize) -> OptimisticConstraints<u8, u8> {\n        OptimisticConstraints::from_constraints(vec![eq(mem(0, 0), mem(final_instr_idx, 1))])\n    }\n\n    #[test]\n    fn constraints_succeed_when_all_states_match() {\n        let evaluator = 
OptimisticConstraintEvaluator::new();\n\n        let states = [\n            SingleLimbExecutionState { mem: [0, 0], pc: 0 },\n            SingleLimbExecutionState { mem: [1, 1], pc: 1 },\n            SingleLimbExecutionState { mem: [2, 2], pc: 2 },\n        ];\n\n        let res = states.iter().try_fold(evaluator, |mut evaluator, state| {\n            evaluator\n                .try_next_execution_step(state, &equality_constraints())\n                .map(|_| evaluator)\n        });\n\n        assert!(res.is_ok());\n    }\n\n    #[test]\n    fn checks_equality_constraints() {\n        let mut evaluator = OptimisticConstraintEvaluator::new();\n\n        let states = [\n            (SingleLimbExecutionState { mem: [0, 0], pc: 0 }, true),\n            (SingleLimbExecutionState { mem: [1, 1], pc: 1 }, true),\n            (SingleLimbExecutionState { mem: [2, 0], pc: 2 }, false),\n        ];\n\n        for (state, should_succeed) in &states {\n            assert_eq!(\n                evaluator\n                    .try_next_execution_step(state, &equality_constraints())\n                    .is_ok(),\n                *should_succeed\n            );\n        }\n    }\n\n    #[test]\n    fn reuses_values_from_previous_steps() {\n        let constraints = cross_step_memory_constraint();\n        let mut evaluator = OptimisticConstraintEvaluator::new();\n\n        let first_state = SingleLimbExecutionState { mem: [5, 0], pc: 0 };\n        evaluator\n            .try_next_execution_step(&first_state, &constraints)\n            .unwrap();\n\n        let second_state = SingleLimbExecutionState { mem: [0, 5], pc: 1 };\n\n        assert!(evaluator\n            .try_next_execution_step(&second_state, &constraints)\n            .is_ok());\n    }\n\n    #[test]\n    fn detects_mismatch_for_stored_values() {\n        let constraints = cross_step_memory_constraint();\n        let mut evaluator = OptimisticConstraintEvaluator::new();\n\n        let first_state = 
SingleLimbExecutionState { mem: [9, 0], pc: 0 };\n        evaluator\n            .try_next_execution_step(&first_state, &constraints)\n            .unwrap();\n\n        let second_state = SingleLimbExecutionState { mem: [0, 3], pc: 1 };\n\n        assert!(evaluator\n            .try_next_execution_step(&second_state, &constraints)\n            .is_err());\n    }\n\n    #[test]\n    fn compares_program_counter_across_steps() {\n        let constraints = cross_step_pc_constraint();\n        let mut evaluator = OptimisticConstraintEvaluator::new();\n\n        let first_state = SingleLimbExecutionState { mem: [0; 2], pc: 7 };\n        evaluator\n            .try_next_execution_step(&first_state, &constraints)\n            .unwrap();\n\n        let second_state = SingleLimbExecutionState { mem: [0; 2], pc: 7 };\n        assert!(evaluator\n            .try_next_execution_step(&second_state, &constraints)\n            .is_ok());\n\n        let mut failing_evaluator = OptimisticConstraintEvaluator::new();\n        failing_evaluator\n            .try_next_execution_step(&first_state, &constraints)\n            .unwrap();\n\n        let mismatched_pc = SingleLimbExecutionState { mem: [0; 2], pc: 8 };\n        assert!(failing_evaluator\n            .try_next_execution_step(&mismatched_pc, &constraints)\n            .is_err());\n    }\n\n    #[test]\n    fn links_initial_and_final_state() {\n        let final_step = 2;\n        let constraints = initial_to_final_constraint(final_step);\n        let mut evaluator = OptimisticConstraintEvaluator::new();\n\n        let initial_state = SingleLimbExecutionState {\n            mem: [11, 0],\n            pc: 0,\n        };\n        evaluator\n            .try_next_execution_step(&initial_state, &constraints)\n            .unwrap();\n\n        let middle_state = SingleLimbExecutionState { mem: [0; 2], pc: 1 };\n        evaluator\n            .try_next_execution_step(&middle_state, &constraints)\n            .unwrap();\n\n        let 
final_state = SingleLimbExecutionState {\n            mem: [0, 11],\n            pc: 2,\n        };\n        assert!(evaluator\n            .try_next_execution_step(&final_state, &constraints)\n            .is_ok());\n\n        let mut failing_evaluator = OptimisticConstraintEvaluator::new();\n        failing_evaluator\n            .try_next_execution_step(&initial_state, &constraints)\n            .unwrap();\n        failing_evaluator\n            .try_next_execution_step(&middle_state, &constraints)\n            .unwrap();\n\n        let mismatched_final_state = SingleLimbExecutionState { mem: [0, 3], pc: 2 };\n        assert!(failing_evaluator\n            .try_next_execution_step(&mismatched_final_state, &constraints)\n            .is_err());\n    }\n\n    #[test]\n    fn compares_memory_to_literal_value() {\n        let constraints = OptimisticConstraints::from_constraints(vec![eq(mem(0, 0), value(99))]);\n        let mut evaluator = OptimisticConstraintEvaluator::new();\n\n        let passing_state = SingleLimbExecutionState {\n            mem: [99, 0],\n            pc: 0,\n        };\n        assert!(evaluator\n            .try_next_execution_step(&passing_state, &constraints)\n            .is_ok());\n\n        let failing_constraints =\n            OptimisticConstraints::from_constraints(vec![eq(mem(0, 0), value(10))]);\n        let mut failing_evaluator = OptimisticConstraintEvaluator::new();\n        let failing_state = SingleLimbExecutionState {\n            mem: [12, 0],\n            pc: 0,\n        };\n        assert!(failing_evaluator\n            .try_next_execution_step(&failing_state, &failing_constraints)\n            .is_err());\n    }\n\n    #[test]\n    fn accesses_register_limbs() {\n        let constraints = OptimisticConstraints::from_constraints(vec![\n            eq(mem_limb(0, 0, 0), value(0b10)),\n            eq(mem_limb(0, 0, 1), value(0b01)),\n            eq(mem_limb(0, 0, 2), value(0b11)),\n            eq(mem_limb(0, 0, 3), 
value(0b10)),\n        ]);\n        let mut evaluator = OptimisticConstraintEvaluator::new();\n\n        // We use an execution state where each limb is two bits, so 4 limbs in total\n        let state = TestExecutionState::<2> {\n            mem: [0b1011_0110, 0],\n            pc: 0,\n        };\n\n        assert!(evaluator\n            .try_next_execution_step(&state, &constraints)\n            .is_ok());\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/execution/mod.rs",
    "content": "use serde::{Deserialize, Serialize};\n\nmod ast;\nmod candidates;\nmod evaluator;\n\npub use ast::*;\npub use candidates::{Apc, ApcCall, ApcCandidates};\npub use evaluator::{OptimisticConstraintEvaluator, OptimisticConstraints};\npub trait ExecutionState {\n    type RegisterAddress: PartialEq\n        + Eq\n        + std::hash::Hash\n        + Clone\n        + Copy\n        + std::fmt::Debug\n        + Serialize\n        + for<'a> Deserialize<'a>\n        + Send\n        + Sync;\n    type Value: PartialEq\n        + TryFrom<u64>\n        + Eq\n        + std::fmt::Debug\n        + Serialize\n        + for<'a> Deserialize<'a>\n        + Clone\n        + Copy\n        + Send\n        + Sync;\n\n    /// Return the pc at this point\n    fn pc(&self) -> Self::Value;\n\n    fn value_limb(value: Self::Value, limb_index: usize) -> Self::Value;\n\n    /// Read a register at this point\n    fn reg(&self, address: &Self::RegisterAddress) -> Self::Value;\n\n    /// Return the value of a the clock. The returned value must be strictly increasing within this execution.\n    fn global_clk(&self) -> usize;\n}\n"
  },
  {
    "path": "autoprecompiles/src/execution_profile.rs",
    "content": "use crate::adapter::Adapter;\nuse crate::blocks::Program;\nuse std::collections::HashMap;\nuse std::sync::Arc;\nuse std::sync::Mutex;\nuse tracing::dispatcher::Dispatch;\nuse tracing::field::Field as TracingField;\nuse tracing::{Event, Level, Subscriber};\nuse tracing_subscriber::{\n    layer::Context,\n    prelude::*,\n    registry::{LookupSpan, Registry},\n    Layer,\n};\n\n#[derive(Clone)]\n/// Program execution information for PGO\npub struct ExecutionProfile {\n    /// execution count of each pc\n    pub pc_count: HashMap<u64, u32>,\n    /// list of pcs executed in order\n    pub pc_list: Vec<u64>,\n}\n\n/// Produces information about the program's execution for PGO.\n/// Used in Pgo::Cell and Pgo::Instruction to help rank basic blocks to create APCs for.\npub fn execution_profile<A: Adapter>(\n    program: &A::Program,\n    execute_fn: impl FnOnce(),\n) -> ExecutionProfile {\n    // in memory collector storage\n    let collector = PgoCollector::new();\n\n    // build subscriber\n    let subscriber = Registry::default().with(collector.clone());\n\n    // dispatch constructs a local subscriber at trace level that is invoked during data collection but doesn't override the global one at info level\n    let dispatch = Dispatch::new(subscriber);\n    tracing::dispatcher::with_default(&dispatch, execute_fn);\n\n    let pc_list = collector.take_pc_list();\n\n    // Extract the collected data\n    let pc_count = pc_list.iter().fold(HashMap::new(), |mut counts, pc| {\n        *counts.entry(*pc).or_insert(0) += 1;\n        counts\n    });\n\n    // the smallest pc is the same as the base_pc if there's no stdin\n    let pc_min = pc_count.keys().min().unwrap();\n    tracing::debug!(\"pc_min: {}; base_pc: {}\", pc_min, program.base_pc());\n\n    // print the total and by pc counts\n    tracing::debug!(\"Pgo captured {} pc's\", pc_count.len());\n\n    if tracing::enabled!(Level::TRACE) {\n        // print pc_index map in descending order of pc_index count\n  
      let mut pc_index_count_sorted: Vec<_> = pc_count.iter().collect();\n        pc_index_count_sorted.sort_by(|a, b| b.1.cmp(a.1));\n        pc_index_count_sorted.iter().for_each(|(pc, count)| {\n            tracing::trace!(\"pc_index {}: {}\", pc, count);\n        });\n    }\n\n    ExecutionProfile { pc_count, pc_list }\n}\n\n// holds basic type fields of execution objects captured in trace by subscriber\n#[derive(Default)]\nstruct PgoData {\n    pc: Option<u64>,\n}\n\nimpl tracing::field::Visit for PgoData {\n    // when we receive a u64 field, they are parsed into fields of the pgo data\n    fn record_u64(&mut self, field: &tracing::field::Field, value: u64) {\n        if field.name() == \"pc\" {\n            self.pc = Some(value);\n        }\n    }\n\n    // required for implementation, but in practice we will only receive u64 fields\n    // the fields we receive are determined by the instruction trace print out of our openvm fork during execution\n    fn record_debug(&mut self, _: &TracingField, _: &dyn std::fmt::Debug) {}\n}\n\n// A Layer that collects data we are interested in using for the pgo from the trace fields.\n#[derive(Clone)]\nstruct PgoCollector {\n    pc_list: Arc<Mutex<Vec<u64>>>,\n}\n\nimpl PgoCollector {\n    fn new() -> Self {\n        Self {\n            pc_list: Arc::new(Mutex::new(Vec::new())),\n        }\n    }\n\n    fn increment(&self, pc: u64) {\n        self.pc_list.lock().unwrap().push(pc);\n    }\n\n    fn take_pc_list(&self) -> Vec<u64> {\n        std::mem::take(&mut self.pc_list.lock().unwrap())\n    }\n}\n\nimpl<S> Layer<S> for PgoCollector\nwhere\n    S: Subscriber + for<'a> LookupSpan<'a>,\n{\n    fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) {\n        // build a visitor to parse and hold trace fields we are interested in\n        let mut visitor = PgoData::default();\n        event.record(&mut visitor);\n\n        // because our subscriber is at the trace level, for trace print outs that don't match PgoData,\n  
      // the visitor can't parse them, and these cases are filtered out automatically\n        if let Some(pc) = visitor.pc {\n            self.increment(pc);\n        }\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/export.rs",
    "content": "use std::{\n    fmt::Display,\n    io::{BufWriter, Write},\n    path::PathBuf,\n};\n\nuse itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::ConstraintSystem;\nuse powdr_number::FieldElement;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    adapter::{Adapter, AdapterApcOverPowdrField, AdapterOptimisticConstraints},\n    blocks::{Instruction, PcStep, SuperBlock},\n    bus_map::BusMap,\n    execution::ExecutionState,\n    expression::AlgebraicReference,\n    symbolic_machine::constraint_system_to_symbolic_machine,\n    Apc, ColumnAllocator, SymbolicMachine,\n};\n\n/// Configuration for exporting the state of the autoprecompile\n/// generation and optimization as json at different stages.\n#[derive(Default)]\npub struct ExportOptions {\n    pub path: Option<PathBuf>,\n    pub level: ExportLevel,\n    sequence_number: usize,\n    substituted_variables: Vec<String>,\n}\n\n#[derive(Default)]\npub enum ExportLevel {\n    /// Export the unoptimized and optimized autoprecompile.\n    #[default]\n    OnlyAPC,\n    /// In addition to the above, also export the state at each\n    /// optimization loop iteration.\n    APCAndOptimizerLoop,\n    /// In addition to the above, also export the state at each\n    /// optimization step.\n    APCAndOptimizerSteps,\n}\n\nimpl ExportOptions {\n    /// Creates a new export options instance. Does not export anything unless\n    /// a path is given. 
`path` is a path to a file name prefix.\n    /// During export, a sequence number and an extension will be appended.\n    pub fn new(path: Option<PathBuf>, start_pcs: &[u64], level: ExportLevel) -> Self {\n        ExportOptions {\n            path: path.map(|p| p.join(format!(\"apc_candidate_{}\", start_pcs.iter().join(\"_\")))),\n            level,\n            sequence_number: 0,\n            substituted_variables: Vec::new(),\n        }\n    }\n    /// Constructs export options from environment variables.\n    pub fn from_env_vars(\n        export_path: Option<String>,\n        export_level: Option<String>,\n        start_pcs: &[u64],\n    ) -> Self {\n        let path = export_path.map(PathBuf::from);\n        let level = match export_level.as_deref() {\n            Some(\"1\") => ExportLevel::OnlyAPC,\n            Some(\"2\") => ExportLevel::APCAndOptimizerLoop,\n            Some(\"3\") => ExportLevel::APCAndOptimizerSteps,\n            _ => ExportLevel::OnlyAPC,\n        };\n        ExportOptions::new(path, start_pcs, level)\n    }\n\n    pub fn export_requested(&self) -> bool {\n        self.path.is_some()\n    }\n\n    pub fn export_apc<A: Adapter>(\n        &mut self,\n        apc: &AdapterApcOverPowdrField<A>,\n        suffix: Option<&str>,\n        bus_map: &BusMap<A::CustomBusTypes>,\n    ) {\n        let apc = instructions_to_powdr_field::<A>(apc.clone());\n        let path = self.write_to_next_file(&ApcWithBusMap { apc: &apc, bus_map }, suffix);\n\n        // For debugging, also serialize a human-readable version of the final precompile\n        let rendered = apc.machine.render(bus_map);\n        let path = path.with_file_name(format!(\n            \"{}.txt\",\n            path.file_stem().unwrap().to_string_lossy()\n        ));\n        std::fs::write(path, rendered).unwrap();\n    }\n\n    pub fn export_apc_from_machine<A: Adapter>(\n        &mut self,\n        block: SuperBlock<A::Instruction>,\n        machine: SymbolicMachine<A::PowdrField>,\n   
     column_allocator: &ColumnAllocator,\n        bus_map: &BusMap<A::CustomBusTypes>,\n        suffix: Option<&str>,\n    ) {\n        assert!(self.export_requested());\n        let apc = Apc::new(\n            block,\n            machine,\n            AdapterOptimisticConstraints::<A>::empty(),\n            column_allocator,\n        );\n        self.export_apc::<A>(&apc, suffix, bus_map);\n    }\n\n    pub fn export_optimizer_outer(&mut self, data: &impl serde::Serialize, suffix: &str) {\n        match self.level {\n            ExportLevel::APCAndOptimizerLoop | ExportLevel::APCAndOptimizerSteps => {\n                self.write_to_next_file(data, Some(suffix));\n            }\n            _ => {}\n        }\n    }\n\n    pub fn export_optimizer_outer_constraint_system<T: FieldElement>(\n        &mut self,\n        constraint_system: &ConstraintSystem<T, AlgebraicReference>,\n        suffix: &str,\n    ) {\n        match self.level {\n            ExportLevel::APCAndOptimizerLoop | ExportLevel::APCAndOptimizerSteps => {\n                let machine = constraint_system_to_symbolic_machine(constraint_system.clone());\n                self.write_to_next_file(&machine, Some(suffix));\n            }\n            _ => {}\n        }\n    }\n\n    pub fn export_optimizer_inner(&mut self, data: &impl serde::Serialize, suffix: &str) {\n        if let ExportLevel::APCAndOptimizerSteps = self.level {\n            self.write_to_next_file(data, Some(suffix));\n        }\n    }\n\n    pub fn export_optimizer_inner_constraint_system<T, V>(\n        &mut self,\n        constraint_system: &ConstraintSystem<T, V>,\n        suffix: &str,\n    ) where\n        T: FieldElement,\n        V: Ord + Clone + serde::Serialize,\n    {\n        if let ExportLevel::APCAndOptimizerSteps = self.level {\n            self.write_to_next_file(&constraint_system, Some(suffix));\n        }\n    }\n\n    /// Registers a sequence of variables that have been substituted during optimization,\n    /// so 
that they can be exported together with the final export.\n    pub fn register_substituted_variables<Var, Expr>(\n        &mut self,\n        vars: impl IntoIterator<Item = (Var, Expr)>,\n    ) where\n        Var: serde::Serialize,\n        Expr: serde::Serialize,\n    {\n        if self.export_requested() {\n            self.substituted_variables.extend(\n                vars.into_iter()\n                    .map(|(v, e)| serde_json::to_string(&(v, e)).unwrap()),\n            );\n        }\n    }\n\n    /// Exports the registered substituted variables to a separate json file.\n    pub fn export_substituted_variables(&mut self) {\n        if self.export_requested() {\n            let path = self.path.clone().unwrap();\n            let file_stub = path.file_name().unwrap().to_string_lossy();\n            let path = path.with_file_name(format!(\"{file_stub}_substitutions.json\"));\n            let mut writer = create_full_path(&path);\n            write!(&mut writer, \"[{}]\", self.substituted_variables.join(\",\")).unwrap();\n            writer.flush().unwrap();\n        }\n    }\n\n    /// Path to the next file to export to. 
Uses an increasing sequence number\n    /// and also adds the `info` into the file name.\n    fn next_path(&mut self, info: Option<&str>) -> PathBuf {\n        let seq = self.sequence_number;\n        self.sequence_number += 1;\n        let path = self.path.clone().unwrap();\n        let file_stub = path.file_name().unwrap().to_string_lossy();\n        path.with_file_name(format!(\n            \"{file_stub}_{seq:03}{}.json\",\n            info.map(|i| format!(\"_{i}\")).unwrap_or_default(),\n        ))\n    }\n\n    fn write_to_next_file(&mut self, data: &impl serde::Serialize, info: Option<&str>) -> PathBuf {\n        let path = self.next_path(info);\n        self.write_to_file(data, path.clone());\n        path\n    }\n\n    fn write_to_file(&mut self, data: &impl serde::Serialize, path: PathBuf) {\n        let mut writer = create_full_path(&path);\n        serde_json::to_writer(&mut writer, data).unwrap();\n        writer.flush().unwrap();\n    }\n}\n\nfn create_full_path(path: &PathBuf) -> BufWriter<std::fs::File> {\n    if let Some(parent) = path.parent() {\n        std::fs::create_dir_all(parent).unwrap();\n    }\n    BufWriter::new(std::fs::File::create(path).unwrap())\n}\n\n/// Converts the APC to use an instruction type that stores field elements\n/// using a powdr type, so that we do not need to export in Montgomery form.\n#[allow(clippy::type_complexity)]\nfn instructions_to_powdr_field<A: Adapter>(\n    apc: AdapterApcOverPowdrField<A>,\n) -> Apc<\n    <A as Adapter>::PowdrField,\n    SimpleInstruction<<A as Adapter>::PowdrField>,\n    <<A as Adapter>::ExecutionState as ExecutionState>::RegisterAddress,\n    <<A as Adapter>::ExecutionState as ExecutionState>::Value,\n> {\n    let block = apc.block.map_instructions(|instr| {\n        SimpleInstruction(\n            // Extract the data by providing a dummy pc\n            // and removing it again.\n            instr\n                .pc_lookup_row(778)\n                .iter()\n                .skip(1)\n  
              .map(|x| A::from_field(x.clone()))\n                .collect(),\n        )\n    });\n\n    Apc {\n        block,\n        machine: apc.machine,\n        subs: apc.subs,\n        optimistic_constraints: apc.optimistic_constraints,\n    }\n}\n\n/// Dummy instruction type that is used to store the converted field type.\n#[derive(Serialize, Deserialize, Clone)]\npub struct SimpleInstruction<T>(Vec<T>);\n\nimpl<T: Display> Display for SimpleInstruction<T> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.0.iter().format(\", \"))\n    }\n}\n\nimpl<T: Display + Clone> Instruction<T> for SimpleInstruction<T> {\n    fn pc_lookup_row(&self, _pc: u64) -> Vec<T> {\n        self.0.clone()\n    }\n}\n\nimpl<T> PcStep for SimpleInstruction<T> {\n    fn pc_step() -> u32 {\n        unimplemented!()\n    }\n}\n\n#[derive(Serialize, Deserialize)]\npub struct ApcWithBusMap<Apc, BusMap> {\n    #[serde(flatten)]\n    pub apc: Apc,\n    pub bus_map: BusMap,\n}\n"
  },
  {
    "path": "autoprecompiles/src/expression.rs",
    "content": "//! In this module, we instantiate `powdr_expression::AlgebraicExpression` using a\n//! custom `AlgebraicReference` type.\nuse core::ops::{Add, Mul, Neg, Sub};\nuse powdr_number::ExpressionConvertible;\nuse serde::{Deserialize, Serialize};\nuse std::{collections::BTreeMap, hash::Hash, marker::PhantomData, sync::Arc};\n\nuse crate::symbolic_machine::{SymbolicBusInteraction, SymbolicConstraint};\n\npub type AlgebraicExpression<T> = powdr_expression::AlgebraicExpression<T, AlgebraicReference>;\n\n#[derive(Debug, Clone, Eq)]\npub struct AlgebraicReference {\n    /// Name of the polynomial - just for informational purposes.\n    /// Comparisons are based on the ID.\n    pub name: Arc<String>,\n    /// Identifier for a reference.\n    pub id: u64,\n}\n\nimpl std::fmt::Display for AlgebraicReference {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.name)\n    }\n}\n\nimpl PartialOrd for AlgebraicReference {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for AlgebraicReference {\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        self.id.cmp(&other.id)\n    }\n}\n\nimpl PartialEq for AlgebraicReference {\n    fn eq(&self, other: &Self) -> bool {\n        self.id == other.id\n    }\n}\n\nimpl Hash for AlgebraicReference {\n    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {\n        self.id.hash(state);\n    }\n}\n\nimpl Serialize for AlgebraicReference {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: serde::Serializer,\n    {\n        serializer.serialize_str(&format!(\"{}@{}\", self.name, self.id))\n    }\n}\n\nimpl<'de> Deserialize<'de> for AlgebraicReference {\n    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>\n    where\n        D: serde::Deserializer<'de>,\n    {\n        let s = String::deserialize(deserializer)?;\n        let 
Some(separator_pos) = s.rfind('@') else {\n            return Err(serde::de::Error::custom(format!(\n                \"Invalid format for AlgebraicReference: {s}\",\n            )));\n        };\n        let name = Arc::new(s[..separator_pos].to_string());\n        let id: u64 = s[separator_pos + 1..].parse().map_err(|_| {\n            serde::de::Error::custom(format!(\n                \"Invalid ID in AlgebraicReference: {}\",\n                &s[separator_pos + 1..]\n            ))\n        })?;\n        Ok(AlgebraicReference { name, id })\n    }\n}\n\n/// Tries to convert a `powdr_expression::AlgebraicExpression<T, R>` into a\n/// `powdr_expression::AlgebraicExpression<T, AlgebraicReference>`.\npub fn try_convert<T, R: TryInto<AlgebraicReference>>(\n    expr: powdr_expression::AlgebraicExpression<T, R>,\n) -> Result<AlgebraicExpression<T>, R::Error> {\n    match expr {\n        powdr_expression::AlgebraicExpression::Reference(reference) => Ok(\n            powdr_expression::AlgebraicExpression::Reference(reference.try_into()?),\n        ),\n        powdr_expression::AlgebraicExpression::Number(n) => {\n            Ok(powdr_expression::AlgebraicExpression::Number(n))\n        }\n        powdr_expression::AlgebraicExpression::BinaryOperation(binary) => {\n            Ok(powdr_expression::AlgebraicExpression::BinaryOperation(\n                powdr_expression::AlgebraicBinaryOperation {\n                    left: Box::new(try_convert(*binary.left)?),\n                    op: binary.op,\n                    right: Box::new(try_convert(*binary.right)?),\n                },\n            ))\n        }\n        powdr_expression::AlgebraicExpression::UnaryOperation(unary) => {\n            Ok(powdr_expression::AlgebraicExpression::UnaryOperation(\n                powdr_expression::AlgebraicUnaryOperation {\n                    op: unary.op,\n                    expr: Box::new(try_convert(*unary.expr)?),\n                },\n            ))\n        }\n    }\n}\n\n/// 
Evaluate an `AlgebraicExpression` to a generic type, which for example can be an expression or a concrete value.\npub trait AlgebraicEvaluator<F, E>\nwhere\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = F> + Neg<Output = F> + Copy,\n    E: Add<E, Output = E> + Sub<E, Output = E> + Mul<E, Output = E> + Neg<Output = E>,\n{\n    fn eval_const(&self, c: F) -> E;\n    fn eval_var(&self, algebraic_var: &AlgebraicReference) -> E;\n\n    fn eval_expr(&self, algebraic_expr: &AlgebraicExpression<F>) -> E {\n        algebraic_expr.to_expression(&|n| self.eval_const(*n), &|var| self.eval_var(var))\n    }\n    fn eval_bus_interaction<'a, 'b>(\n        &'a self,\n        bus_interaction: &'b SymbolicBusInteraction<F>,\n    ) -> ConcreteBusInteraction<E, impl Iterator<Item = E> + 'b>\n    where\n        'a: 'b,\n    {\n        let mult = self.eval_expr(&bus_interaction.mult);\n        let args = bus_interaction.args.iter().map(|arg| self.eval_expr(arg));\n        ConcreteBusInteraction {\n            id: bus_interaction.id,\n            mult,\n            args,\n        }\n    }\n\n    fn eval_constraint(&self, constraint: &SymbolicConstraint<F>) -> ConcreteConstraint<E> {\n        ConcreteConstraint {\n            expr: self.eval_expr(&constraint.expr),\n        }\n    }\n}\n\n/// Evaluates an `AlgebraicExpression` to a concrete value by substituting the polynomial references by known values.\npub struct RowEvaluator<'a, F>\nwhere\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = F> + Neg<Output = F> + Copy,\n{\n    pub row: &'a [F],\n}\n\nimpl<'a, F> RowEvaluator<'a, F>\nwhere\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = F> + Neg<Output = F> + Copy,\n{\n    pub fn new(row: &'a [F]) -> Self {\n        Self { row }\n    }\n}\n\nimpl<F> AlgebraicEvaluator<F, F> for RowEvaluator<'_, F>\nwhere\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = F> + Neg<Output = F> + Copy,\n{\n    fn eval_const(&self, c: F) -> F {\n        c\n    }\n\n    fn 
eval_var(&self, algebraic_var: &AlgebraicReference) -> F {\n        self.row[algebraic_var.id as usize]\n    }\n}\n\n/// Evaluates an `AlgebraicExpression` to a concrete value by substituting the polynomial references by known values where known value is looked up via a column index mapping.\npub struct MappingRowEvaluator<'a, F>\nwhere\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = F> + Neg<Output = F> + Copy,\n{\n    pub row: &'a [F],\n    pub witness_id_to_index: &'a BTreeMap<u64, usize>,\n}\n\nimpl<'a, F> MappingRowEvaluator<'a, F>\nwhere\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = F> + Neg<Output = F> + Copy,\n{\n    pub fn new(row: &'a [F], witness_id_to_index: &'a BTreeMap<u64, usize>) -> Self {\n        Self {\n            row,\n            witness_id_to_index,\n        }\n    }\n}\n\nimpl<F> AlgebraicEvaluator<F, F> for MappingRowEvaluator<'_, F>\nwhere\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = F> + Neg<Output = F> + Copy,\n{\n    fn eval_const(&self, c: F) -> F {\n        c\n    }\n\n    fn eval_var(&self, algebraic_var: &AlgebraicReference) -> F {\n        let index = self.witness_id_to_index[&(algebraic_var.id)];\n        self.row[index]\n    }\n}\n\npub struct ConcreteBusInteraction<E, I> {\n    pub id: u64,\n    pub mult: E,\n    pub args: I,\n}\n\npub struct ConcreteConstraint<E> {\n    pub expr: E,\n}\n\n/// Evaluates by substituting the polynomial references by known values, potentially changing the expression type in the process.\npub struct WitnessEvaluator<'a, V, F, E> {\n    pub witness: &'a BTreeMap<u64, V>,\n    _phantom: PhantomData<(F, E)>,\n}\n\nimpl<'a, V, F, E> WitnessEvaluator<'a, V, F, E> {\n    pub fn new(witness: &'a BTreeMap<u64, V>) -> Self {\n        Self {\n            witness,\n            _phantom: PhantomData,\n        }\n    }\n}\n\nimpl<V, F, E> AlgebraicEvaluator<F, E> for WitnessEvaluator<'_, V, F, E>\nwhere\n    V: Into<E> + Copy,\n    F: Add<Output = F> + Sub<Output = F> + Mul<Output = 
F> + Neg<Output = F> + Into<E> + Copy,\n    E: Add<E, Output = E> + Sub<E, Output = E> + Mul<E, Output = E> + Neg<Output = E>,\n{\n    fn eval_const(&self, c: F) -> E {\n        c.into()\n    }\n\n    fn eval_var(&self, algebraic_var: &AlgebraicReference) -> E {\n        (*self.witness.get(&algebraic_var.id).unwrap()).into()\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/expression_conversion.rs",
    "content": "use powdr_constraint_solver::{\n    grouped_expression::{GroupedExpression, GroupedExpressionComponent},\n    runtime_constant::RuntimeConstant,\n};\nuse powdr_expression::{AlgebraicExpression, AlgebraicUnaryOperation, AlgebraicUnaryOperator};\nuse powdr_number::{ExpressionConvertible, FieldElement};\n\n/// Turns an algebraic expression into a grouped expression,\n/// assuming all [`AlgebraicReference`]s are unknown variables.\npub fn algebraic_to_grouped_expression<T, V>(\n    expr: &AlgebraicExpression<T, V>,\n) -> GroupedExpression<T, V>\nwhere\n    T: FieldElement,\n    V: Ord + Clone,\n{\n    expr.to_expression(&|n| GroupedExpression::from_number(*n), &|reference| {\n        GroupedExpression::from_unknown_variable(reference.clone())\n    })\n}\n\n/// Turns a grouped expression back into an algebraic expression.\n/// Tries to simplify the expression wrt negation and constant factors\n/// to aid human readability.\npub fn grouped_expression_to_algebraic<T, V>(\n    expr: GroupedExpression<T, V>,\n) -> powdr_expression::AlgebraicExpression<T, V>\nwhere\n    T: FieldElement,\n    V: Ord + Clone,\n{\n    // Turn the expression into a list of to-be-summed items and try to\n    // simplify on the way.\n    let items = expr.into_summands().filter_map(|c| match c {\n        GroupedExpressionComponent::Quadratic(l, r) => {\n            let l = grouped_expression_to_algebraic(l);\n            let (l, l_negated) = extract_negation_if_possible(l);\n            let r = grouped_expression_to_algebraic(r);\n            let (r, r_negated) = extract_negation_if_possible(r);\n            Some(if l_negated == r_negated {\n                l * r\n            } else {\n                -(l * r)\n            })\n        }\n        GroupedExpressionComponent::Linear(v, c) => Some(if c.is_one() {\n            AlgebraicExpression::Reference(v.clone())\n        } else if (-c).is_one() {\n            -AlgebraicExpression::Reference(v.clone())\n        } else if 
c.is_in_lower_half() {\n            AlgebraicExpression::from(c) * AlgebraicExpression::Reference(v.clone())\n        } else {\n            -(AlgebraicExpression::from(-c) * AlgebraicExpression::Reference(v.clone()))\n        }),\n        GroupedExpressionComponent::Constant(constant) => {\n            (!constant.is_known_zero()).then(|| field_element_to_algebraic_expression(constant))\n        }\n    });\n\n    // Now order the items by negated and non-negated.\n    let mut positive = vec![];\n    let mut negated = vec![];\n    for item in items {\n        let (item, item_negated) = extract_negation_if_possible(item);\n        if item_negated {\n            negated.push(item);\n        } else {\n            positive.push(item);\n        }\n    }\n    let positive = positive.into_iter().reduce(|acc, item| acc + item);\n    let negated = negated.into_iter().reduce(|acc, item| acc + item);\n    match (positive, negated) {\n        (Some(positive), Some(negated)) => positive - negated,\n        (Some(positive), None) => positive,\n        (None, Some(negated)) => -negated,\n        (None, None) => AlgebraicExpression::from(T::zero()),\n    }\n}\n\nfn field_element_to_algebraic_expression<T: FieldElement, V>(v: T) -> AlgebraicExpression<T, V> {\n    if v.is_in_lower_half() {\n        AlgebraicExpression::from(v)\n    } else {\n        -AlgebraicExpression::from(-v)\n    }\n}\n\n/// If `e` is negated, returns the expression without negation and `true`,\n/// otherwise returns the un-modified expression and `false`.\nfn extract_negation_if_possible<T, V>(\n    e: AlgebraicExpression<T, V>,\n) -> (AlgebraicExpression<T, V>, bool) {\n    match e {\n        AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation {\n            op: AlgebraicUnaryOperator::Minus,\n            expr,\n        }) => (*expr, true),\n        _ => (e, false),\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/lib.rs",
    "content": "use crate::adapter::{Adapter, AdapterApc, AdapterVmConfig};\nuse crate::blocks::{PcStep, SuperBlock};\nuse crate::bus_map::{BusMap, BusType};\nuse crate::empirical_constraints::{ConstraintGenerator, EmpiricalConstraints};\nuse crate::evaluation::AirStats;\nuse crate::execution::OptimisticConstraints;\nuse crate::export::ExportOptions;\nuse crate::expression_conversion::algebraic_to_grouped_expression;\nuse crate::optimistic::algebraic_references::BlockCellAlgebraicReferenceMapper;\nuse crate::optimistic::config::optimistic_precompile_config;\nuse crate::optimistic::execution_constraint_generator::generate_execution_constraints;\nuse crate::optimistic::execution_literals::optimistic_literals;\nuse crate::symbolic_machine::{SymbolicConstraint, SymbolicMachine};\nuse crate::symbolic_machine_generator::convert_apc_field_type;\nuse adapter::AdapterOptimisticConstraint;\nuse execution::{\n    ExecutionState, LocalOptimisticLiteral, OptimisticConstraint, OptimisticExpression,\n    OptimisticLiteral,\n};\nuse expression::{AlgebraicExpression, AlgebraicReference};\nuse itertools::Itertools;\nuse powdr::UniqueReferences;\nuse powdr_constraint_solver::constraint_system::{ComputationMethod, DerivedVariable};\nuse powdr_expression::{\n    AlgebraicBinaryOperation, AlgebraicBinaryOperator, AlgebraicUnaryOperation,\n};\nuse serde::{Deserialize, Serialize};\nuse std::collections::BTreeSet;\nuse std::path::{Path, PathBuf};\nuse std::sync::Arc;\nuse symbolic_machine_generator::statements_to_symbolic_machine;\n\nuse powdr_number::FieldElement;\n\npub mod adapter;\npub mod blocks;\npub mod bus_map;\npub mod constraint_optimizer;\npub mod empirical_constraints;\npub mod evaluation;\npub mod execution_profile;\npub mod expression;\npub mod expression_conversion;\npub mod low_degree_bus_interaction_optimizer;\npub mod memory_optimizer;\npub mod optimizer;\npub mod pgo;\npub mod powdr;\npub mod range_constraint_optimizer;\nmod stats_logger;\npub mod symbolic_machine;\npub 
mod symbolic_machine_generator;\npub use pgo::{PgoConfig, PgoType};\npub use powdr_constraint_solver::inliner::DegreeBound;\npub mod equivalence_classes;\npub mod execution;\npub mod export;\npub mod optimistic;\npub mod trace_handler;\n\n#[derive(Clone)]\npub struct PowdrConfig {\n    /// Number of autoprecompiles to generate.\n    pub autoprecompiles: u64,\n    /// Number of basic blocks to skip for autoprecompiles.\n    /// This is either the largest N if no PGO, or the costliest N with PGO.\n    pub skip_autoprecompiles: u64,\n    /// Maximum number of basic blocks included in a superblock.\n    /// Default of 1 means only basic blocks are considered.\n    pub superblock_max_bb_count: u8,\n    /// Maximum number of instructions included in an Apc.\n    pub apc_max_instructions: u32,\n    /// Apcs executed less than the cutoff are ignored.\n    pub apc_exec_count_cutoff: u32,\n    /// Max degree of constraints.\n    pub degree_bound: DegreeBound,\n    /// The path to the APC candidates dir, if any.\n    pub apc_candidates_dir_path: Option<PathBuf>,\n    /// Whether to use optimistic precompiles.\n    pub should_use_optimistic_precompiles: bool,\n}\n\nimpl PowdrConfig {\n    pub fn new(autoprecompiles: u64, skip_autoprecompiles: u64, degree_bound: DegreeBound) -> Self {\n        Self {\n            autoprecompiles,\n            skip_autoprecompiles,\n            // superblocks disabled by default\n            superblock_max_bb_count: 1,\n            apc_max_instructions: u32::MAX,\n            apc_exec_count_cutoff: 1,\n            degree_bound,\n            apc_candidates_dir_path: None,\n            should_use_optimistic_precompiles: false,\n        }\n    }\n\n    pub fn with_superblocks(\n        mut self,\n        max_bb_count: u8,\n        max_instructions: Option<u32>,\n        exec_count_cutoff: Option<u32>,\n    ) -> Self {\n        assert!(\n            max_bb_count > 0,\n            \"superblock_max_bb_count must be greater than 0\"\n        );\n       
 self.superblock_max_bb_count = max_bb_count;\n        if let Some(max_instructions) = max_instructions {\n            self.apc_max_instructions = max_instructions;\n        }\n        if let Some(exec_count_cutoff) = exec_count_cutoff {\n            self.apc_exec_count_cutoff = exec_count_cutoff;\n        }\n        self\n    }\n\n    pub fn with_apc_candidates_dir<P: AsRef<Path>>(mut self, path: P) -> Self {\n        self.apc_candidates_dir_path = Some(path.as_ref().to_path_buf());\n        self\n    }\n\n    pub fn with_optimistic_precompiles(mut self, should_use_optimistic_precompiles: bool) -> Self {\n        self.should_use_optimistic_precompiles = should_use_optimistic_precompiles;\n        self\n    }\n}\n\n#[derive(Debug, Clone)]\npub enum InstructionKind {\n    Normal,\n    ConditionalBranch,\n    UnconditionalBranch,\n}\n\n/// A configuration of a VM in which execution is happening.\npub struct VmConfig<'a, M, B, C> {\n    /// Maps an opcode to its AIR.\n    pub instruction_handler: &'a M,\n    /// The bus interaction handler, used by the constraint solver to reason about bus interactions.\n    pub bus_interaction_handler: B,\n    /// The bus map that maps bus id to bus type\n    pub bus_map: BusMap<C>,\n}\n\n// We implement Clone manually because deriving it adds a Clone bound to the `InstructionMachineHandler`\nimpl<'a, M, B: Clone, C: Clone> Clone for VmConfig<'a, M, B, C> {\n    fn clone(&self) -> Self {\n        VmConfig {\n            instruction_handler: self.instruction_handler,\n            bus_interaction_handler: self.bus_interaction_handler.clone(),\n            bus_map: self.bus_map.clone(),\n        }\n    }\n}\n\npub trait InstructionHandler {\n    type Field;\n    type Instruction;\n    type AirId;\n\n    /// Returns the degree bound used for the instructions\n    fn degree_bound(&self) -> DegreeBound;\n\n    /// Returns the AIR for the given instruction.\n    fn get_instruction_air_and_id(\n        &self,\n        instruction: 
&Self::Instruction,\n    ) -> (Self::AirId, &SymbolicMachine<Self::Field>);\n\n    /// Returns the AIR stats for the given instruction.\n    fn get_instruction_air_stats(&self, instruction: &Self::Instruction) -> AirStats;\n}\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct Substitution {\n    /// The index of the original column in the original air\n    pub original_poly_index: usize,\n    /// The `poly_id` of the target column in the APC air\n    pub apc_poly_id: u64,\n}\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct Apc<T, I, A, V> {\n    /// The block this APC is based on\n    pub block: SuperBlock<I>,\n    /// The symbolic machine for this APC\n    pub machine: SymbolicMachine<T>,\n    /// For each original instruction, the substitutions from original columns to APC columns\n    pub subs: Vec<Vec<Substitution>>,\n    /// The optimistic constraints to be satisfied for this apc to be run\n    pub optimistic_constraints: OptimisticConstraints<A, V>,\n}\n\nimpl<T, I: PcStep, A, V> Apc<T, I, A, V> {\n    pub fn subs(&self) -> &[Vec<Substitution>] {\n        &self.subs\n    }\n\n    pub fn machine(&self) -> &SymbolicMachine<T> {\n        &self.machine\n    }\n\n    /// The instructions in the block.\n    pub fn instructions(&self) -> impl Iterator<Item = &I> {\n        self.block.instructions().map(|(_, i)| i)\n    }\n\n    /// The PCs of the original basic blocks composing this APC. 
Can be used to identify the APC.\n    pub fn start_pcs(&self) -> Vec<u64> {\n        self.block.start_pcs()\n    }\n\n    /// Create a new APC based on the given super block, symbolic machine and column allocator\n    /// The column allocator only issues the subs which are actually used in the machine\n    fn new(\n        block: SuperBlock<I>,\n        machine: SymbolicMachine<T>,\n        optimistic_constraints: OptimisticConstraints<A, V>,\n        column_allocator: &ColumnAllocator,\n    ) -> Self {\n        // Get all poly_ids in the machine\n        let all_references = machine\n            .unique_references()\n            .map(|r| r.id)\n            .collect::<BTreeSet<_>>();\n        // Only keep substitutions from the column allocator if the target poly_id is used in the machine\n        let subs = column_allocator\n            .subs\n            .iter()\n            .map(|subs| {\n                subs.iter()\n                    .enumerate()\n                    .filter_map(|(original_poly_index, apc_poly_id)| {\n                        all_references\n                            .contains(apc_poly_id)\n                            .then_some(Substitution {\n                                original_poly_index,\n                                apc_poly_id: *apc_poly_id,\n                            })\n                    })\n                    .collect_vec()\n            })\n            .collect();\n        Self {\n            block,\n            machine,\n            subs,\n            optimistic_constraints,\n        }\n    }\n}\n\n/// Allocates global poly_ids and keeps track of substitutions\npub struct ColumnAllocator {\n    /// For each original air, for each original column index, the associated poly_id in the APC air\n    subs: Vec<Vec<u64>>,\n    /// The next poly_id to issue\n    next_poly_id: u64,\n}\n\nimpl ColumnAllocator {\n    pub fn from_max_poly_id_of_machine(machine: &SymbolicMachine<impl FieldElement>) -> Self {\n        Self {\n       
     subs: Vec::new(),\n            next_poly_id: machine.main_columns().map(|c| c.id).max().unwrap_or(0) + 1,\n        }\n    }\n\n    pub fn issue_next_poly_id(&mut self) -> u64 {\n        let id = self.next_poly_id;\n        self.next_poly_id += 1;\n        id\n    }\n\n    /// Returns whether the given poly_id is known (i.e., was issued by this allocator)\n    pub fn is_known_id(&self, poly_id: u64) -> bool {\n        poly_id < self.next_poly_id\n    }\n}\n\npub fn build<A: Adapter>(\n    block: SuperBlock<A::Instruction>,\n    vm_config: AdapterVmConfig<A>,\n    degree_bound: DegreeBound,\n    mut export_options: ExportOptions,\n    empirical_constraints: &EmpiricalConstraints,\n) -> Result<AdapterApc<A>, crate::constraint_optimizer::Error> {\n    let start = std::time::Instant::now();\n\n    let (mut machine, column_allocator) = statements_to_symbolic_machine::<A>(\n        &block,\n        vm_config.instruction_handler,\n        &vm_config.bus_map,\n    );\n\n    // Generate constraints for optimistic precompiles.\n    let should_generate_execution_constraints =\n        optimistic_precompile_config().restrict_optimistic_precompiles;\n    let algebraic_references =\n        BlockCellAlgebraicReferenceMapper::new(&column_allocator.subs, machine.main_columns());\n    let empirical_constraints = empirical_constraints.for_block(&block);\n\n    // TODO: Use execution constraints\n    let (empirical_constraints, _execution_constraints) = if should_generate_execution_constraints {\n        // Filter constraints to only contain execution-checkable columns,\n        // generate execution constraints for them.\n        let optimistic_literals = optimistic_literals::<A>(&block, &vm_config, &degree_bound);\n\n        let empirical_constraints = empirical_constraints.filtered(|block_cell| {\n            let algebraic_reference = algebraic_references\n                .get_algebraic_reference(block_cell)\n                .unwrap();\n            
optimistic_literals.contains_key(algebraic_reference)\n        });\n\n        let empirical_constraints =\n            ConstraintGenerator::<A>::new(empirical_constraints, algebraic_references, &block)\n                .generate_constraints();\n\n        let execution_constraints =\n            generate_execution_constraints(&empirical_constraints, &optimistic_literals);\n        (empirical_constraints, execution_constraints)\n    } else {\n        // Don't filter empirical constraints, return empty execution constraints.\n        let empirical_constraints =\n            ConstraintGenerator::<A>::new(empirical_constraints, algebraic_references, &block)\n                .generate_constraints();\n        (empirical_constraints, vec![])\n    };\n\n    // Add empirical constraints to the baseline\n    machine\n        .constraints\n        .extend(empirical_constraints.into_iter().map(Into::into));\n\n    if export_options.export_requested() {\n        export_options.export_apc_from_machine::<A>(\n            block.clone(),\n            machine.clone(),\n            &column_allocator,\n            &vm_config.bus_map,\n            Some(\"unopt\"),\n        );\n    }\n\n    let labels = [(\"apc_start_pc\", block.start_pcs().into_iter().join(\"_\"))];\n    metrics::counter!(\"before_opt_cols\", &labels)\n        .absolute(machine.unique_references().count() as u64);\n    metrics::counter!(\"before_opt_constraints\", &labels)\n        .absolute(machine.constraints.len() as u64);\n    metrics::counter!(\"before_opt_interactions\", &labels)\n        .absolute(machine.bus_interactions.len() as u64);\n\n    let (machine, column_allocator) = optimizer::optimize::<_, _, _, A::MemoryBusInteraction<_>>(\n        machine,\n        vm_config.bus_interaction_handler,\n        degree_bound,\n        &vm_config.bus_map,\n        column_allocator,\n        &mut export_options,\n    )?;\n\n    // add guards to constraints that are not satisfied by zeroes\n    let (machine, 
column_allocator) = add_guards(machine, column_allocator);\n\n    metrics::counter!(\"after_opt_cols\", &labels)\n        .absolute(machine.unique_references().count() as u64);\n    metrics::counter!(\"after_opt_constraints\", &labels)\n        .absolute(machine.constraints.len() as u64);\n    metrics::counter!(\"after_opt_interactions\", &labels)\n        .absolute(machine.bus_interactions.len() as u64);\n\n    // TODO: for now, we only include optimistic constraints related to superblock PCs.\n    // Optimistic constraints from empirical constraints are still missing.\n    let pc_constraints = superblock_pc_constraints::<A>(&block);\n    let optimistic_constraints = OptimisticConstraints::from_constraints(pc_constraints);\n\n    let apc = Apc::new(block, machine, optimistic_constraints, &column_allocator);\n\n    if export_options.export_requested() {\n        export_options.export_apc::<A>(&apc, None, &vm_config.bus_map);\n    }\n\n    let apc = convert_apc_field_type(apc, &A::into_field);\n\n    metrics::gauge!(\"apc_gen_time_ms\", &labels).set(start.elapsed().as_millis() as f64);\n\n    Ok(apc)\n}\n\n/// Generate optimistic constraints for superblock jumps\nfn superblock_pc_constraints<A: Adapter>(\n    block: &SuperBlock<A::Instruction>,\n) -> Vec<AdapterOptimisticConstraint<A>> {\n    block\n        .instruction_indexed_start_pcs()\n        .into_iter()\n        .map(|(instr_idx, pc)| {\n            let left = OptimisticExpression::Literal(OptimisticLiteral {\n                instr_idx,\n                val: LocalOptimisticLiteral::Pc,\n            });\n            let Ok(pc_value) =\n                <<A as Adapter>::ExecutionState as ExecutionState>::Value::try_from(pc)\n            else {\n                panic!(\"PC doesn't fit in Value type\");\n            };\n            let right = OptimisticExpression::Number(pc_value);\n            OptimisticConstraint { left, right }\n        })\n        .collect()\n}\n\nfn satisfies_zero_witness<T: 
FieldElement>(expr: &AlgebraicExpression<T>) -> bool {\n    let mut zeroed_expr = expr.clone();\n    powdr::make_refs_zero(&mut zeroed_expr);\n    let zeroed_expr = algebraic_to_grouped_expression(&zeroed_expr);\n    zeroed_expr.try_to_number().unwrap().is_zero()\n}\n\n/// Adds `is_valid` guards to constraints without increasing its degree.\n/// This implementation always guards the LHS of multiplications.\n/// In the future this could be changed to minimize the number of guards added.\n/// Assumption:\n/// - `expr` is already simplified, i.e., expressions like (3 + 4) and (x * 1) do not appear.\nfn add_guards_constraint<T: FieldElement>(\n    expr: AlgebraicExpression<T>,\n    is_valid: &AlgebraicExpression<T>,\n) -> AlgebraicExpression<T> {\n    if satisfies_zero_witness(&expr) {\n        return expr;\n    }\n\n    match expr {\n        AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation { left, op, right }) => {\n            let left = add_guards_constraint(*left, is_valid);\n            let right = match op {\n                AlgebraicBinaryOperator::Add | AlgebraicBinaryOperator::Sub => {\n                    Box::new(add_guards_constraint(*right, is_valid))\n                }\n                AlgebraicBinaryOperator::Mul => right,\n            };\n            AlgebraicExpression::new_binary(left, op, *right)\n        }\n        AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation { op, expr }) => {\n            let inner = add_guards_constraint(*expr, is_valid);\n            AlgebraicExpression::new_unary(op, inner)\n        }\n        AlgebraicExpression::Number(..) 
=> expr * is_valid.clone(),\n        _ => expr,\n    }\n}\n\n/// Adds an `is_valid` guard to all constraints and bus interactions, if needed.\nfn add_guards<T: FieldElement>(\n    mut machine: SymbolicMachine<T>,\n    mut column_allocator: ColumnAllocator,\n) -> (SymbolicMachine<T>, ColumnAllocator) {\n    let pre_degree = machine.degree();\n\n    let is_valid_ref = AlgebraicReference {\n        name: Arc::new(\"is_valid\".to_string()),\n        id: column_allocator.issue_next_poly_id(),\n    };\n    let is_valid = AlgebraicExpression::Reference(is_valid_ref.clone());\n\n    machine.derived_columns.push(DerivedVariable::new(\n        is_valid_ref,\n        ComputationMethod::Constant(T::one()),\n    ));\n\n    machine.constraints = machine\n        .constraints\n        .into_iter()\n        .map(|c| add_guards_constraint(c.expr, &is_valid).into())\n        .collect();\n\n    let mut is_valid_mults: Vec<SymbolicConstraint<T>> = Vec::new();\n    for b in &mut machine.bus_interactions {\n        if !satisfies_zero_witness(&b.mult) {\n            // guard the multiplicity by `is_valid`\n            b.mult = is_valid.clone() * b.mult.clone();\n            // TODO this would not have to be cloned if we had *=\n            //c.expr *= guard.clone();\n        } else {\n            // if it's zero, then we do not have to change the multiplicity, but we need to force it to be zero on non-valid rows with a constraint\n            let one = AlgebraicExpression::Number(1u64.into());\n            let e = ((one - is_valid.clone()) * b.mult.clone()).into();\n            is_valid_mults.push(e);\n        }\n    }\n\n    machine.constraints.extend(is_valid_mults);\n\n    // if pre_degree is 0, is_valid is added to the multiplicities of the bus interactions, thus the degree increases from 0 to 1\n    if pre_degree != 0 && !machine.bus_interactions.is_empty() {\n        assert_eq!(\n            pre_degree,\n            machine.degree(),\n            \"Degree should not change after 
adding guards\"\n        );\n    }\n\n    // This needs to be added after the assertion above because it's a quadratic constraint\n    // so it may increase the degree of the machine.\n    machine.constraints.push(powdr::make_bool(is_valid).into());\n\n    (machine, column_allocator)\n}\n"
  },
  {
    "path": "autoprecompiles/src/low_degree_bus_interaction_optimizer.rs",
    "content": "use itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::{\n    AlgebraicConstraint, BusInteraction, BusInteractionHandler, ConstraintSystem,\n};\nuse powdr_constraint_solver::grouped_expression::GroupedExpression;\nuse powdr_constraint_solver::inliner::DegreeBound;\nuse powdr_constraint_solver::range_constraint::RangeConstraint;\nuse powdr_constraint_solver::runtime_constant::RuntimeConstant;\nuse powdr_constraint_solver::solver::Solver;\nuse powdr_number::FieldElement;\nuse powdr_number::LargeInt;\nuse std::fmt::Display;\nuse std::hash::Hash;\nuse std::marker::PhantomData;\n\nuse crate::constraint_optimizer::IsBusStateful;\nuse crate::range_constraint_optimizer::{RangeConstraintHandler, RangeConstraints};\n\n/// An optimizer that replaces some stateless bus interactions (a.k.a. lookups)\n/// by low-degree algebraic constraints.\npub struct LowDegreeBusInteractionOptimizer<'a, T, V, S, B> {\n    solver: &'a mut S,\n    bus_interaction_handler: B,\n    degree_bound: DegreeBound,\n    _phantom: PhantomData<(T, V)>,\n}\n\nstruct LowDegreeReplacement<T: FieldElement, V> {\n    constraint: AlgebraicConstraint<GroupedExpression<T, V>>,\n    range_constraints: RangeConstraints<T, V>,\n}\n\nimpl<\n        'a,\n        T: FieldElement,\n        V: Ord + Clone + Ord + Eq + Display + Hash,\n        S: Solver<T, V>,\n        B: BusInteractionHandler<T> + IsBusStateful<T> + RangeConstraintHandler<T>,\n    > LowDegreeBusInteractionOptimizer<'a, T, V, S, B>\n{\n    pub fn new(solver: &'a mut S, bus_interaction_handler: B, degree_bound: DegreeBound) -> Self {\n        Self {\n            solver,\n            bus_interaction_handler,\n            degree_bound,\n            _phantom: PhantomData,\n        }\n    }\n\n    pub fn optimize(self, mut system: ConstraintSystem<T, V>) -> ConstraintSystem<T, V> {\n        let mut new_constraints = vec![];\n        system.bus_interactions = system\n            .bus_interactions\n            .into_iter()\n   
         .flat_map(|bus_int| {\n                if let Some(LowDegreeReplacement {\n                    constraint: replacement,\n                    range_constraints,\n                }) = self.try_replace_bus_interaction(&bus_int)\n                {\n                    // If we found a replacement, add the polynomial constraints (unless it is\n                    // trivially zero) and replace the bus interaction with interactions implementing\n                    // the range constraints.\n                    // Note that many of these may be optimized away by the range constraint optimizer.\n                    if !replacement.is_redundant() {\n                        new_constraints.push(replacement);\n                    }\n\n                    self.bus_interaction_handler\n                        .batch_make_range_constraints(range_constraints)\n                        // It can be that the VM cannot implement the precise range constraint (although this\n                        // does not really happen in practice!).\n                        // For soundness, it is essential that the constraint is not wider than the\n                        // one we used to generate all inputs. 
So if `batch_make_range_constraints`\n                        // errors out, we keep the original bus interaction.\n                        // Note that we still add the polynomial constraints, because it'll likely\n                        // lead to columns being inlined.\n                        .unwrap_or(vec![bus_int])\n                } else {\n                    // Keep the bus interaction as is if a replacement can't be found.\n                    vec![bus_int]\n                }\n            })\n            .collect();\n\n        // Knowing the low-degree functions might help the solver.\n        // The range constraints do not need to be added, because they don't carry information\n        // that is not already implied by the existing bus interactions.\n        self.solver\n            .add_algebraic_constraints(new_constraints.iter().cloned());\n\n        system.algebraic_constraints.extend(new_constraints);\n        system\n    }\n\n    /// Checks whether a bus interaction can be replaced by a low-degree constraint + range checks.\n    /// Returns None if no replacement is found.\n    fn try_replace_bus_interaction(\n        &self,\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n    ) -> Option<LowDegreeReplacement<T, V>> {\n        let bus_id = bus_interaction.bus_id.try_to_number()?;\n        if self.bus_interaction_handler.is_stateful(bus_id) {\n            return None;\n        }\n\n        self.symbolic_function_candidates_with_small_domain(bus_interaction)\n            .into_iter()\n            .find_map(|symbolic_function| {\n                let low_degree_function =\n                    self.find_low_degree_function(bus_interaction, &symbolic_function)?;\n\n                // Build polynomial constraint\n                let symbolic_inputs = symbolic_function\n                    .inputs\n                    .iter()\n                    .cloned()\n                    .map(|input| input.expression)\n                    
.collect();\n                let low_degree_function = low_degree_function(symbolic_inputs);\n                let polynomial_constraint = AlgebraicConstraint::assert_eq(\n                    symbolic_function.output.expression,\n                    low_degree_function,\n                );\n\n                // Check degree\n                let within_degree_bound =\n                    polynomial_constraint.degree() <= self.degree_bound.identities;\n                if within_degree_bound {\n                    let range_constraints = symbolic_function\n                        .inputs\n                        .into_iter()\n                        .map(|field| (field.expression, field.range_constraint))\n                        .collect();\n                    Some(LowDegreeReplacement {\n                        constraint: polynomial_constraint,\n                        range_constraints,\n                    })\n                } else {\n                    None\n                }\n            })\n    }\n\n    /// Given a bus interaction of 2 or 3 unknown fields, finds all combinations of (symbolic)\n    /// inputs and outputs where the input space is small enough.\n    fn symbolic_function_candidates_with_small_domain(\n        &self,\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n    ) -> Vec<SymbolicFunction<T, V>> {\n        let unknown_fields = bus_interaction\n            .payload\n            .iter()\n            .cloned()\n            .enumerate()\n            .filter(|(_i, expr)| expr.try_to_number().is_none())\n            .map(|(index, expression)| {\n                let range_constraint = self.solver.range_constraint_for_expression(&expression);\n                SymbolicField {\n                    index,\n                    expression,\n                    range_constraint,\n                }\n            })\n            .collect_vec();\n\n        let unknown_field_count = unknown_fields.len();\n        // Currently, we only have 
hypotheses for:\n        // - 2 unknown fields (1 input, 1 output)\n        // - 3 unknown fields (2 inputs, 1 output)\n        if !(unknown_field_count == 2 || unknown_field_count == 3) {\n            return Vec::new();\n        }\n\n        unknown_fields\n            .into_iter()\n            .permutations(unknown_field_count)\n            .map(|mut fields| {\n                let output = fields.pop().unwrap();\n                SymbolicFunction {\n                    inputs: fields,\n                    output,\n                }\n            })\n            .filter(|function| {\n                self.has_few_possible_values(\n                    function.inputs.iter().map(|f| f.range_constraint),\n                    MAX_DOMAIN_SIZE,\n                )\n            })\n            .collect_vec()\n    }\n\n    /// Given a list of range constraints, computes whether space of all possible values\n    /// is small enough.\n    fn has_few_possible_values(\n        &self,\n        range_constraints: impl Iterator<Item = RangeConstraint<T>>,\n        max_size: u64,\n    ) -> bool {\n        range_constraints\n            .map(|rc| {\n                // TODO: This should share code with `has_few_possible_assignments`,\n                // But this only currently only considers the range width which ignores the mask\n                // and might be way larger than the actual number of allowed values.\n                rc.size_estimate().try_into_u64().and_then(|size| {\n                    if size < 1 << 16 {\n                        Some(rc.allowed_values().count() as u64)\n                    } else {\n                        None\n                    }\n                })\n            })\n            .try_fold(1u64, |acc, x| acc.checked_mul(x?))\n            .is_some_and(|count| count <= max_size)\n    }\n\n    /// Given a bus interaction and a symbolic input-output pair, tries to find a low-degree function\n    /// by testing all of the hard-coded hypotheses against 
set of all concrete input-output pairs.\n    fn find_low_degree_function(\n        &self,\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n        symbolic_function: &SymbolicFunction<T, V>,\n    ) -> Option<LowDegreeFunction<T, V>> {\n        let mut hypotheses = hypotheses(symbolic_function.inputs.len());\n\n        // Generate the function graph, to match against the hypotheses.\n        let all_possible_assignments =\n            self.concrete_input_output_pairs(bus_interaction, symbolic_function);\n\n        for assignment in all_possible_assignments {\n            let Ok((inputs, output)) = assignment else {\n                // We can't enumerate all possible assignments, so the hypotheses can't be tested.\n                return None;\n            };\n            let inputs = inputs\n                .into_iter()\n                .map(|value| GroupedExpression::from_number(value))\n                .collect::<Vec<_>>();\n            hypotheses.retain(|hypothesis| {\n                let hypothesis_evaluation = hypothesis(inputs.clone());\n                hypothesis_evaluation.try_to_number().unwrap() == output\n            });\n            if hypotheses.is_empty() {\n                // No hypothesis left\n                return None;\n            }\n        }\n\n        // If we got this far, the hypothesis is correct!\n        Some(hypotheses.into_iter().exactly_one().unwrap_or_else(|_| {\n            panic!(\"Expected exactly one multilinear extension, but got multiple.\")\n        }))\n    }\n\n    /// Generate all concrete input-output pairs given a symbolic one.\n    ///\n    /// The inputs are generated as the cross product of all allowed values of the\n    /// individual inputs.\n    /// The outputs are generated by asking the bus interaction handler for each input assignment.\n    ///\n    /// If at any time (1) the inputs violate a constraint or (2) the outputs are not unique,\n    /// an error is yielded.\n    fn 
concrete_input_output_pairs<'b>(\n        &'b self,\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n        input_output_pair: &'b SymbolicFunction<T, V>,\n    ) -> impl Iterator<Item = Result<(Vec<T>, T), ()>> + 'b {\n        let bus_interaction = bus_interaction.to_range_constraints(self.solver);\n\n        // Consider all possible input assignments, which is the cross product of all allowed values.\n        let input_assignments = input_output_pair\n            .inputs\n            .iter()\n            .map(move |input| {\n                input\n                    .range_constraint\n                    .allowed_values()\n                    .map(|v| (input.index, v))\n                    .collect_vec()\n            })\n            .multi_cartesian_product();\n\n        // For each input assignment, try it and ask the bus interaction handler if there\n        // is a unique output assignment.\n        input_assignments.map(move |assignment| {\n            // Set all inputs to concrete values\n            let mut bus_interaction = bus_interaction.clone();\n            for (i, value) in assignment.iter() {\n                bus_interaction.payload[*i] = RangeConstraint::from_value(*value);\n            }\n\n            let inputs = assignment.into_iter().map(|(_i, value)| value).collect();\n\n            // Get the output from the bus interaction handler, if it exists and is unique.\n            let output = self\n                .bus_interaction_handler\n                .handle_bus_interaction_checked(bus_interaction)\n                // If the assignment violates a constraint, return an error.\n                .map_err(|_| ())?\n                .payload[input_output_pair.output.index]\n                .try_to_single_value()\n                // If the output is not unique, return an error.\n                .ok_or(())?;\n            Ok((inputs, output))\n        })\n    }\n}\n\n/// Represents a low-degree function, mapping a list of inputs to a 
single output.\ntype LowDegreeFunction<T, V> = Box<dyn Fn(Vec<GroupedExpression<T, V>>) -> GroupedExpression<T, V>>;\n\n/// The maximum size of the input domain for low-degree functions.\nconst MAX_DOMAIN_SIZE: u64 = 256;\n\n/// Represents a bus interaction field.\n#[derive(Clone, Debug)]\nstruct SymbolicField<T: FieldElement, V> {\n    /// The index into the bus interaction payload\n    index: usize,\n    /// The expression in the bus interaction payload\n    expression: GroupedExpression<T, V>,\n    /// The range constraint for the expression\n    range_constraint: RangeConstraint<T>,\n}\n\n#[derive(Clone, Debug)]\nstruct SymbolicFunction<T: FieldElement, V> {\n    inputs: Vec<SymbolicField<T, V>>,\n    output: SymbolicField<T, V>,\n}\n\n/// Some well-known low-degree functions that are tested against the input-output pairs.\nfn hypotheses<T: FieldElement, V: Ord + Clone + Hash + Eq>(\n    num_inputs: usize,\n) -> Vec<LowDegreeFunction<T, V>> {\n    match num_inputs {\n        1 => vec![\n            // Identity function\n            Box::new(|inputs| inputs[0].clone()),\n            // Logical not (1 bit)\n            Box::new(|inputs| GroupedExpression::from_number(T::from_u64(1)) - inputs[0].clone()),\n            // Logical not (8 bit)\n            Box::new(|inputs| {\n                GroupedExpression::from_number(T::from_u64(0xff)) - inputs[0].clone()\n            }),\n            // Logical not (16 bit)\n            Box::new(|inputs| {\n                GroupedExpression::from_number(T::from_u64(0xffff)) - inputs[0].clone()\n            }),\n        ],\n        2 => vec![\n            // Identity on the first input. 
Note that we don't have to add identity on the second input,\n            // because we test all possible permutations of inputs.\n            Box::new(|inputs| inputs[0].clone()),\n            // x + y\n            Box::new(|inputs| inputs[0].clone() + inputs[1].clone()),\n            // AND on bits:\n            Box::new(|inputs| inputs[0].clone() * inputs[1].clone()),\n            // OR on bits:\n            Box::new(|inputs| {\n                inputs[0].clone() + inputs[1].clone() - (inputs[0].clone() * inputs[1].clone())\n            }),\n            // XOR on bits:\n            Box::new(|inputs| {\n                inputs[0].clone() + inputs[1].clone()\n                    - GroupedExpression::from_number(T::from_u64(2))\n                        * (inputs[0].clone() * inputs[1].clone())\n            }),\n        ],\n        _ => panic!(\"Unexpected number of inputs: {num_inputs}\"),\n    }\n}\n\n#[cfg(test)]\nmod tests {\n\n    use std::array::from_fn;\n\n    use powdr_constraint_solver::solver::new_solver;\n    use powdr_number::BabyBearField;\n\n    use crate::range_constraint_optimizer::{MakeRangeConstraintsError, RangeConstraints};\n\n    use super::*;\n\n    pub type Var = &'static str;\n    pub fn var(name: Var) -> GroupedExpression<BabyBearField, Var> {\n        GroupedExpression::from_unknown_variable(name)\n    }\n\n    pub fn constant(value: u64) -> GroupedExpression<BabyBearField, Var> {\n        GroupedExpression::from_number(BabyBearField::from(value))\n    }\n\n    #[derive(Clone, Debug)]\n    struct XorBusHandler;\n    impl BusInteractionHandler<BabyBearField> for XorBusHandler {\n        fn handle_bus_interaction(\n            &self,\n            bus_interaction: BusInteraction<RangeConstraint<BabyBearField>>,\n        ) -> BusInteraction<RangeConstraint<BabyBearField>> {\n            let range_constraints = match (\n                bus_interaction.payload[0].try_to_single_value(),\n                
bus_interaction.payload[1].try_to_single_value(),\n            ) {\n                // If x and y are known, compute z\n                (Some(x), Some(y)) => {\n                    let z = BabyBearField::from(x.to_degree() ^ y.to_degree());\n                    [\n                        RangeConstraint::from_value(x),\n                        RangeConstraint::from_value(y),\n                        RangeConstraint::from_value(z),\n                    ]\n                }\n                // By default, just return byte range constraints\n                _ => from_fn(|_i| RangeConstraint::from_mask(0xffu32)),\n            };\n            BusInteraction {\n                bus_id: bus_interaction.bus_id,\n                payload: range_constraints.into_iter().collect(),\n                multiplicity: bus_interaction.multiplicity,\n            }\n        }\n    }\n    impl IsBusStateful<BabyBearField> for XorBusHandler {\n        fn is_stateful(&self, _bus_id: BabyBearField) -> bool {\n            false\n        }\n    }\n    impl RangeConstraintHandler<BabyBearField> for XorBusHandler {\n        fn pure_range_constraints<V: Ord + Clone + Eq + Display + Hash>(\n            &self,\n            _bus_interaction: &BusInteraction<GroupedExpression<BabyBearField, V>>,\n        ) -> Option<RangeConstraints<BabyBearField, V>> {\n            unreachable!()\n        }\n\n        fn batch_make_range_constraints<V: Ord + Clone + Eq + Display + Hash>(\n            &self,\n            _range_constraints: RangeConstraints<BabyBearField, V>,\n        ) -> Result<\n            Vec<BusInteraction<GroupedExpression<BabyBearField, V>>>,\n            MakeRangeConstraintsError,\n        > {\n            unreachable!()\n        }\n    }\n\n    fn compute_replacement(\n        mut solver: impl Solver<BabyBearField, Var>,\n        bus_interaction: &BusInteraction<GroupedExpression<BabyBearField, Var>>,\n    ) -> Option<AlgebraicConstraint<GroupedExpression<BabyBearField, Var>>> {\n        
let optimizer = LowDegreeBusInteractionOptimizer {\n            solver: &mut solver,\n            bus_interaction_handler: XorBusHandler,\n            degree_bound: DegreeBound {\n                identities: 2,\n                bus_interactions: 1,\n            },\n            _phantom: PhantomData,\n        };\n        optimizer\n            .try_replace_bus_interaction(bus_interaction)\n            .map(|v| v.constraint)\n    }\n\n    #[test]\n    fn test_try_replace_bus_interaction_generic_xor() {\n        let mut solver = new_solver(ConstraintSystem::default(), XorBusHandler);\n        // The input search space is small, but xor is not linear.\n        solver.add_range_constraint(&\"x\", RangeConstraint::from_mask(0xfu32));\n        solver.add_range_constraint(&\"y\", RangeConstraint::from_mask(0xfu32));\n        let bus_interaction = BusInteraction {\n            bus_id: constant(0),\n            payload: vec![var(\"x\"), var(\"y\"), var(\"z\")],\n            multiplicity: constant(1),\n        };\n        let replacement = compute_replacement(solver, &bus_interaction);\n        assert!(replacement.is_none());\n    }\n\n    #[test]\n    fn test_try_replace_bus_interaction_logical_not() {\n        let mut solver = new_solver(ConstraintSystem::default(), XorBusHandler);\n        // not(x) is a linear function (255 - x).\n        solver.add_range_constraint(&\"x\", RangeConstraint::from_mask(0xffu32));\n        let bus_interaction = BusInteraction {\n            bus_id: constant(0),\n            payload: vec![var(\"x\"), constant(0xff), var(\"z\")],\n            multiplicity: constant(1),\n        };\n        let Some(replacement) = compute_replacement(solver, &bus_interaction) else {\n            panic!(\"Expected a replacement\")\n        };\n        assert_eq!(replacement.to_string(), \"x + z - 255 = 0\");\n    }\n\n    #[test]\n    fn test_try_replace_bus_interaction_binary_inputs() {\n        let mut solver = new_solver(ConstraintSystem::default(), 
XorBusHandler);\n        // Any function on two bits has a multilinear extension.\n        solver.add_range_constraint(&\"x\", RangeConstraint::from_mask(1u32));\n        solver.add_range_constraint(&\"y\", RangeConstraint::from_mask(1u32));\n        let bus_interaction = BusInteraction {\n            bus_id: constant(0),\n            payload: vec![var(\"x\"), var(\"y\"), var(\"z\")],\n            multiplicity: constant(1),\n        };\n        let Some(replacement) = compute_replacement(solver, &bus_interaction) else {\n            panic!(\"Expected a replacement\")\n        };\n        assert_eq!(replacement.to_string(), \"(2 * x) * (y) - x - y + z = 0\");\n    }\n\n    #[test]\n    fn test_try_replace_bus_interaction_disjoint_masks() {\n        let mut solver = new_solver(ConstraintSystem::default(), XorBusHandler);\n        // Because the masks are disjoint, there is a multilinear extension: z = x + y.\n        solver.add_range_constraint(&\"x\", RangeConstraint::from_mask(0x0fu32));\n        solver.add_range_constraint(&\"y\", RangeConstraint::from_mask(0xf0u32));\n        let bus_interaction = BusInteraction {\n            bus_id: constant(0),\n            payload: vec![var(\"x\"), var(\"y\"), var(\"z\")],\n            multiplicity: constant(1),\n        };\n        let Some(replacement) = compute_replacement(solver, &bus_interaction) else {\n            panic!(\"Expected a replacement\")\n        };\n        assert_eq!(replacement.to_string(), \"-(x + y - z) = 0\");\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/memory_optimizer.rs",
    "content": "use std::collections::{HashMap, HashSet};\nuse std::fmt::Display;\nuse std::hash::Hash;\n\nuse itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::{\n    AlgebraicConstraint, BusInteraction, ConstraintSystem,\n};\nuse powdr_constraint_solver::grouped_expression::GroupedExpression;\nuse powdr_constraint_solver::solver::Solver;\nuse powdr_number::FieldElement;\n\n/// Optimizes bus sends that correspond to general-purpose memory read and write operations.\n/// It works best if all read-write-operation addresses are fixed offsets relative to some\n/// symbolic base address. If stack and heap access operations are mixed, this is usually violated.\npub fn optimize_memory<\n    T: FieldElement,\n    V: Hash + Eq + Clone + Ord + Display,\n    M: MemoryBusInteraction<T, V>,\n>(\n    mut system: ConstraintSystem<T, V>,\n    solver: &mut impl Solver<T, V>,\n    memory_bus_id: Option<u64>,\n) -> ConstraintSystem<T, V> {\n    // In the absence of memory bus, we return the system unchanged\n    let memory_bus_id = match memory_bus_id {\n        Some(id) => id,\n        None => {\n            return system;\n        }\n    };\n\n    // TODO use the solver here.\n    let (to_remove, new_constraints) =\n        redundant_memory_interactions_indices::<T, V, M>(&system, solver, memory_bus_id);\n    let to_remove = to_remove.into_iter().collect::<HashSet<_>>();\n    system.bus_interactions = system\n        .bus_interactions\n        .into_iter()\n        .enumerate()\n        .filter_map(|(i, bus)| (!to_remove.contains(&i)).then_some(bus))\n        .collect();\n    solver.add_algebraic_constraints(new_constraints.iter().cloned());\n    // TODO perform substitutions instead\n    system.algebraic_constraints.extend(new_constraints);\n\n    system\n}\n\n#[derive(Debug, Copy, Clone)]\n/// The type of the memory bus interaction.\npub enum MemoryOp {\n    /// Get the previous value from memory.\n    GetPrevious,\n    /// Set the new value in memory.\n    
SetNew,\n}\n\n/// A recoverable error when trying to convert a bus interaction to a memory bus interaction.\n/// For example, it might be that we don't know the bus ID or multiplicity yet.\npub struct MemoryBusInteractionConversionError;\n\n/// A bus interaction that corresponds to half of a memory operation,\n/// i.e. either a \"get previous\" or a \"set new\" operation.\n/// Note that the order of memory bus interactions as they appear in the constraint system\n/// is assumed to be chronological.\npub trait MemoryBusInteraction<T, V>: Sized {\n    /// The address type of the memory bus interaction.\n    /// We assume that it can be represented as a list of expressions of a *static* size, i.e.,\n    /// `addr.into_iter().count()` should always return the same value.\n    /// If there are different memories (e.g. register memory and heap memory), this type can be\n    /// a composite address.\n    type Address: IntoIterator<Item = GroupedExpression<T, V>>;\n\n    /// Tries to convert a `BusInteraction` to a `MemoryBusInteraction`.\n    ///\n    /// Returns `Ok(None)` if we know that the bus interaction is not a memory bus interaction.\n    /// Returns `Err(_)` if the bus interaction is a memory bus interaction but could not be converted properly\n    /// (usually because the multiplicity is not -1 or 1).\n    /// Otherwise returns `Ok(Some(memory_bus_interaction))`\n    fn try_from_bus_interaction(\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n        memory_bus_id: u64,\n    ) -> Result<Option<Self>, MemoryBusInteractionConversionError>;\n\n    /// Returns the address of the memory bus interaction.\n    fn addr(&self) -> Self::Address;\n\n    /// Returns the data part of the memory bus interaction.\n    fn data(&self) -> &[GroupedExpression<T, V>];\n\n    /// Returns the timestamp part of the memory bus interaction.\n    fn timestamp_limbs(&self) -> &[GroupedExpression<T, V>];\n\n    /// Returns the operation of the memory bus interaction.\n 
   fn op(&self) -> MemoryOp;\n}\n\n#[derive(Clone, Debug, Eq, PartialEq, Hash)]\n/// A memory address, represented as a list of expressions.\n/// By converting from `MemoryBusInteraction::Address` to `Address<T, V>`,\n/// we can make sure that its `Eq` implementation is the expected one: Two addresses\n/// are equal if all their parts are equal.\nstruct Address<T, V>(Vec<GroupedExpression<T, V>>);\n\nimpl<I, T, V> From<I> for Address<T, V>\nwhere\n    I: IntoIterator<Item = GroupedExpression<T, V>>,\n{\n    fn from(exprs: I) -> Self {\n        Self(exprs.into_iter().collect())\n    }\n}\n\nstruct MemoryContent<T, V> {\n    bus_index: usize,\n    data: Vec<GroupedExpression<T, V>>,\n    timestamp_limbs: Vec<GroupedExpression<T, V>>,\n}\n\nimpl<T: Clone, V: Clone> MemoryContent<T, V> {\n    fn from_bus_interaction<M: MemoryBusInteraction<T, V>>(bus_index: usize, mem_int: M) -> Self {\n        Self {\n            bus_index,\n            data: mem_int.data().to_vec(),\n            timestamp_limbs: mem_int.timestamp_limbs().to_vec(),\n        }\n    }\n}\n\n/// Tries to find indices of bus interactions that can be removed in the given machine\n/// and also returns a set of new constraints to be added.\nfn redundant_memory_interactions_indices<\n    T: FieldElement,\n    V: Ord + Clone + Hash + Display,\n    M: MemoryBusInteraction<T, V>,\n>(\n    system: &ConstraintSystem<T, V>,\n    solver: &mut impl Solver<T, V>,\n    memory_bus_id: u64,\n) -> (\n    Vec<usize>,\n    Vec<AlgebraicConstraint<GroupedExpression<T, V>>>,\n) {\n    let mut new_constraints = Vec::new();\n\n    // Track memory contents by memory type while we go through bus interactions.\n    // This maps an address to the index of the previous send on that address, the\n    // data currently stored there and the timestamp used in the last send.\n    let mut memory_contents: HashMap<Address<T, V>, MemoryContent<T, V>> = Default::default();\n    let mut to_remove: Vec<usize> = Default::default();\n\n    // 
TODO we assume that memory interactions are sorted by timestamp.\n    for (index, bus_int) in system.bus_interactions.iter().enumerate() {\n        let mem_int = match M::try_from_bus_interaction(bus_int, memory_bus_id) {\n            Ok(Some(mem_int)) => mem_int,\n            Ok(None) => continue,\n            Err(_) => {\n                // This interaction might be going to memory, but we do not know\n                // the multiplicity. Delete all knowledge.\n                // TODO If we can still clearly determine the memory type, we could\n                // only clear the knowledge for that memory type.\n                memory_contents.clear();\n                continue;\n            }\n        };\n\n        let addr = mem_int.addr().into();\n\n        match mem_int.op() {\n            MemoryOp::GetPrevious => {\n                // If there is an unconsumed send to this address, consume it.\n                // In that case, we can replace both bus interactions with equality constraints\n                // between the data that would have been sent and received.\n                if let Some(existing) = memory_contents.remove(&addr) {\n                    for (existing, new) in existing.data.iter().zip_eq(mem_int.data().iter()) {\n                        new_constraints.push(AlgebraicConstraint::assert_zero(\n                            existing.clone() - new.clone(),\n                        ));\n                    }\n                    for (existing_timestamp_limb, new_timestamp_limb) in existing\n                        .timestamp_limbs\n                        .iter()\n                        .zip_eq(mem_int.timestamp_limbs().iter())\n                    {\n                        new_constraints.push(AlgebraicConstraint::assert_zero(\n                            existing_timestamp_limb.clone() - new_timestamp_limb.clone(),\n                        ));\n                    }\n                    to_remove.extend([index, existing.bus_index]);\n           
     }\n            }\n            MemoryOp::SetNew => {\n                // We can only retain knowledge about addresses where we can prove\n                // that this send operation does not interfere with it, i.e.\n                // if we can prove that the two addresses differ by at least a word size.\n                memory_contents.retain(|other_addr, _| {\n                    addr.0\n                        .iter()\n                        .zip_eq(other_addr.0.iter())\n                        // Two addresses are different if they differ in at least one component.\n                        .any(|(a, b)| solver.are_expressions_known_to_be_different(a, b))\n                });\n                memory_contents.insert(\n                    addr.clone(),\n                    MemoryContent::from_bus_interaction(index, mem_int),\n                );\n            }\n        }\n    }\n\n    log::debug!(\n        \"Removing {} memory interactions and adding {} new constraints\",\n        to_remove.len(),\n        new_constraints.len()\n    );\n\n    (to_remove, new_constraints)\n}\n"
  },
  {
    "path": "autoprecompiles/src/optimistic/algebraic_references.rs",
    "content": "use std::collections::BTreeMap;\n\nuse crate::{empirical_constraints::BlockCell, expression::AlgebraicReference};\n\n/// Maps BlockCells to their corresponding AlgebraicReferences.\npub struct BlockCellAlgebraicReferenceMapper {\n    block_cell_to_algebraic_reference: BTreeMap<BlockCell, AlgebraicReference>,\n}\n\nimpl BlockCellAlgebraicReferenceMapper {\n    /// Creates a new BlockCellAlgebraicReferenceMapper.\n    /// Arguments:\n    /// - `subs`: A mapping from instruction index and column index to polynomial IDs.\n    ///   This would typically come from a `ColumnAllocator`.\n    /// - `columns`: An iterator over the algebraic references for the columns in the block.\n    pub fn new(subs: &[Vec<u64>], columns: impl Iterator<Item = AlgebraicReference>) -> Self {\n        let poly_id_to_block_cell = subs\n            .iter()\n            .enumerate()\n            .flat_map(|(instr_index, subs)| {\n                subs.iter().enumerate().map(move |(col_index, &poly_id)| {\n                    (poly_id, BlockCell::new(instr_index, col_index))\n                })\n            })\n            .collect::<BTreeMap<_, _>>();\n        let block_cell_to_algebraic_reference = columns\n            .map(|r| (*poly_id_to_block_cell.get(&r.id).unwrap(), r))\n            .collect::<BTreeMap<_, _>>();\n        Self {\n            block_cell_to_algebraic_reference,\n        }\n    }\n\n    pub fn get_algebraic_reference(&self, block_cell: &BlockCell) -> Option<&AlgebraicReference> {\n        self.block_cell_to_algebraic_reference.get(block_cell)\n    }\n\n    pub fn has_block_cell(&self, block_cell: &BlockCell) -> bool {\n        self.block_cell_to_algebraic_reference\n            .contains_key(block_cell)\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/optimistic/config.rs",
    "content": "const DEFAULT_EXECUTION_COUNT_THRESHOLD: u64 = 100;\nconst DEFAULT_MAX_SEGMENTS: usize = 20;\n\npub struct OptimisticPrecompileConfig {\n    /// For any program line that was not executed at least this many times in the traces,\n    /// discard any empirical constraints associated with it.\n    pub execution_count_threshold: u64,\n    /// The maximum number of segments to keep in memory while detecting empirical constraints.\n    /// A higher number here leads to more accurate percentile estimates, but uses more memory.\n    pub max_segments: usize,\n    /// Whether to restrict empirical constraints to those that are checkable at execution time.\n    pub restrict_optimistic_precompiles: bool,\n}\n\npub fn optimistic_precompile_config() -> OptimisticPrecompileConfig {\n    let execution_count_threshold = std::env::var(\"POWDR_OP_EXECUTION_COUNT_THRESHOLD\")\n        .ok()\n        .and_then(|s| s.parse().ok())\n        .unwrap_or(DEFAULT_EXECUTION_COUNT_THRESHOLD);\n    let max_segments = std::env::var(\"POWDR_EMPIRICAL_CONSTRAINTS_MAX_SEGMENTS\")\n        .ok()\n        .and_then(|s| s.parse().ok())\n        .unwrap_or(DEFAULT_MAX_SEGMENTS);\n    let restricted_optimistic_precompiles =\n        std::env::var(\"POWDR_RESTRICTED_OPTIMISTIC_PRECOMPILES\") == Ok(\"1\".to_string());\n\n    OptimisticPrecompileConfig {\n        execution_count_threshold,\n        max_segments,\n        restrict_optimistic_precompiles: restricted_optimistic_precompiles,\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/optimistic/execution_constraint_generator.rs",
    "content": "use std::collections::BTreeMap;\n\nuse powdr_number::{FieldElement, LargeInt};\n\nuse crate::{\n    empirical_constraints::{EqualityConstraint, EqualityExpression},\n    execution::{OptimisticConstraint, OptimisticExpression, OptimisticLiteral},\n    expression::AlgebraicReference,\n};\n\n/// Converts a list of equality constraints into optimistic execution constraints.\n/// Only works for constraints between numbers and algebraic references that have\n/// corresponding optimistic literal, otherwise panics.\npub fn generate_execution_constraints<T: FieldElement>(\n    equality_constraints: &[EqualityConstraint<T>],\n    optimistic_literals: &BTreeMap<AlgebraicReference, OptimisticLiteral<Vec<T>>>,\n) -> Vec<OptimisticConstraint<Vec<T>, u32>> {\n    equality_constraints\n        .iter()\n        .map(|constraint| OptimisticConstraint {\n            left: get_optimistic_expression(optimistic_literals, &constraint.left),\n            right: get_optimistic_expression(optimistic_literals, &constraint.right),\n        })\n        .collect()\n}\n\nfn get_optimistic_expression<T: FieldElement>(\n    optimistic_literals: &BTreeMap<AlgebraicReference, OptimisticLiteral<Vec<T>>>,\n    algebraic_expression: &EqualityExpression<T>,\n) -> OptimisticExpression<Vec<T>, u32> {\n    match algebraic_expression {\n        EqualityExpression::Number(n) => {\n            OptimisticExpression::Number(n.to_integer().try_into_u32().unwrap())\n        }\n        EqualityExpression::Reference(r) => {\n            let optimistic_literal = optimistic_literals.get(r).unwrap();\n            OptimisticExpression::Literal(optimistic_literal.clone())\n        }\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/optimistic/execution_literals.rs",
    "content": "use std::collections::BTreeMap;\n\nuse crate::export::ExportOptions;\nuse crate::memory_optimizer::MemoryBusInteraction;\nuse crate::symbolic_machine::{\n    symbolic_bus_interaction_to_bus_interaction, SymbolicBusInteraction,\n};\nuse crate::symbolic_machine_generator::statements_to_symbolic_machines;\nuse crate::{\n    adapter::{Adapter, AdapterVmConfig},\n    blocks::SuperBlock,\n    bus_map::BusType,\n    execution::{LocalOptimisticLiteral, OptimisticLiteral},\n    expression::AlgebraicReference,\n    memory_optimizer::MemoryOp,\n    optimizer::optimize,\n};\nuse crate::{ColumnAllocator, SymbolicMachine};\nuse powdr_constraint_solver::inliner::DegreeBound;\n\n/// Maps an algebraic reference to an execution literal, if it represents the limb of a\n/// memory access to an address known at compile time.\npub fn optimistic_literals<A: Adapter>(\n    block: &SuperBlock<A::Instruction>,\n    vm_config: &AdapterVmConfig<A>,\n    degree_bound: &DegreeBound,\n) -> BTreeMap<AlgebraicReference, OptimisticLiteral<Vec<<A as Adapter>::PowdrField>>> {\n    // 1. Generate symbolic machines for each instruction in the block\n    let (symbolic_machines, column_allocator) = statements_to_symbolic_machines::<A>(\n        block,\n        vm_config.instruction_handler,\n        &vm_config.bus_map,\n    );\n\n    symbolic_machines\n        .into_iter()\n        .enumerate()\n        // 2. Extract memory accesses with known addresses\n        .flat_map(|(instruction_index, symbolic_machine)| {\n            extract_concrete_memory_accesses::<A>(\n                symbolic_machine,\n                instruction_index,\n                vm_config,\n                degree_bound,\n            )\n        })\n        // 3. 
Map each limb reference to an optimistic literal\n        .flat_map(|memory_access| generate_limb_mapping(memory_access, &column_allocator))\n        .collect()\n}\n\n/// A memory access going to a concrete (= compile-time) address.\nstruct ConcreteMemoryAccess<T> {\n    instruction_index: usize,\n    concrete_address: Vec<T>,\n    limbs: Vec<AlgebraicReference>,\n}\n\n/// Given a symbolic machine, extracts all the concrete memory accesses\n/// This works by:\n/// - optimizing the symbolic machine to resolve as many addresses as possible\n/// - filtering for memory bus interactions with known addresses\n/// - extracting the concrete address and the references to the data limbs\nfn extract_concrete_memory_accesses<A: Adapter>(\n    symbolic_machine: SymbolicMachine<A::PowdrField>,\n    instruction_index: usize,\n    vm_config: &AdapterVmConfig<A>,\n    degree_bound: &DegreeBound,\n) -> impl Iterator<Item = ConcreteMemoryAccess<A::PowdrField>> {\n    // Optimize the dummy block, so that register addresses become known at compile time.\n    // It is important that this happens per instruction, because otherwise the memory\n    // optimizer might remove intermediate register accesses, meaning that we'd miss\n    // those optimistic literals.\n    // Note that the optimizer would still remove some memory accesses, if the instruction\n    // accesses the same register multiple times.\n    let dummy_column_allocator = ColumnAllocator::from_max_poly_id_of_machine(&symbolic_machine);\n    let (symbolic_machine, _) = optimize::<_, _, _, A::MemoryBusInteraction<_>>(\n        symbolic_machine.clone(),\n        vm_config.bus_interaction_handler.clone(),\n        *degree_bound,\n        &vm_config.bus_map,\n        // The optimizer might introduce new columns, but we'll discard later.\n        dummy_column_allocator,\n        &mut ExportOptions::default(),\n    )\n    .unwrap();\n\n    let memory_bus_id = vm_config.bus_map.get_bus_id(&BusType::Memory).unwrap();\n    
symbolic_machine\n        .bus_interactions\n        .into_iter()\n        // Filter for memory bus interactions\n        .filter_map(move |bus_interaction| {\n            try_extract_concrete_memory_access::<A>(\n                instruction_index,\n                bus_interaction,\n                memory_bus_id,\n            )\n        })\n}\n\n/// Given a bus interaction, tries to instantiate a ConcreteMemoryAccess.\n/// This will work if the bus interaction is a memory bus interaction with a known multiplicity,\n/// the address is known concretely, and value references are single columns.\nfn try_extract_concrete_memory_access<A: Adapter>(\n    instruction_index: usize,\n    bus_interaction: SymbolicBusInteraction<A::PowdrField>,\n    memory_bus_id: u64,\n) -> Option<ConcreteMemoryAccess<A::PowdrField>> {\n    let bus_interaction = symbolic_bus_interaction_to_bus_interaction(&bus_interaction);\n    let bus_interaction =\n        A::MemoryBusInteraction::try_from_bus_interaction(&bus_interaction, memory_bus_id)\n            // TODO: This filters out memory bus interactions with unknown multiplicity.\n            .ok()\n            .flatten()?;\n    let address = bus_interaction.addr();\n    let data = bus_interaction.data();\n\n    // Find concrete address\n    let concrete_address = address\n        .into_iter()\n        .map(|expr| expr.try_to_known().cloned())\n        .collect::<Option<Vec<_>>>()?;\n\n    // Find references to the limbs\n    let limbs = data\n        .iter()\n        .map(|expr| expr.try_to_simple_unknown())\n        .collect::<Option<Vec<_>>>()?;\n\n    let instruction_index = match bus_interaction.op() {\n        MemoryOp::GetPrevious => instruction_index,\n        MemoryOp::SetNew => instruction_index + 1,\n    };\n\n    Some(ConcreteMemoryAccess {\n        instruction_index,\n        concrete_address,\n        limbs,\n    })\n}\n\n/// Given a concrete memory access, generates a mapping from each limb's reference\n/// to an optimistic 
literal representing that limb.\n/// Skips limbs that refer to columns introduced by the optimizer.\nfn generate_limb_mapping<'a, T: Clone + 'a>(\n    memory_access: ConcreteMemoryAccess<T>,\n    column_allocator: &'a ColumnAllocator,\n) -> impl Iterator<Item = (AlgebraicReference, OptimisticLiteral<Vec<T>>)> + 'a {\n    memory_access\n        .limbs\n        .into_iter()\n        .enumerate()\n        .filter_map(move |(limb_index, limb_ref)| {\n            if !column_allocator.is_known_id(limb_ref.id) {\n                // Limb refers to a column introduced by the optimizer, skip it.\n                // We would never have empirical constraints on such a column anyway.\n                return None;\n            }\n\n            let local_literal = LocalOptimisticLiteral::RegisterLimb(\n                memory_access.concrete_address.clone(),\n                limb_index,\n            );\n            let optimistic_literal = OptimisticLiteral {\n                instr_idx: memory_access.instruction_index,\n                val: local_literal,\n            };\n            Some((limb_ref, optimistic_literal))\n        })\n}\n"
  },
  {
    "path": "autoprecompiles/src/optimistic/mod.rs",
    "content": "pub mod algebraic_references;\npub mod config;\npub mod execution_constraint_generator;\npub mod execution_literals;\n"
  },
  {
    "path": "autoprecompiles/src/optimizer.rs",
    "content": "use std::fmt::Debug;\nuse std::fmt::Display;\nuse std::hash::Hash;\n\nuse itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::BusInteractionHandler;\nuse powdr_constraint_solver::grouped_expression::GroupedExpression;\nuse powdr_constraint_solver::indexed_constraint_system::IndexedConstraintSystem;\nuse powdr_constraint_solver::inliner::{self, inline_everything_below_degree_bound};\nuse powdr_constraint_solver::rule_based_optimizer::rule_based_optimization;\nuse powdr_constraint_solver::solver::new_solver;\nuse powdr_number::FieldElement;\n\nuse crate::constraint_optimizer;\nuse crate::constraint_optimizer::{trivial_simplifications, IsBusStateful};\nuse crate::export::ExportOptions;\nuse crate::memory_optimizer::MemoryBusInteraction;\nuse crate::range_constraint_optimizer::{optimize_range_constraints, RangeConstraintHandler};\nuse crate::symbolic_machine::{\n    constraint_system_to_symbolic_machine, symbolic_machine_to_constraint_system,\n    SymbolicConstraint,\n};\nuse crate::ColumnAllocator;\nuse crate::{\n    constraint_optimizer::optimize_constraints,\n    expression::AlgebraicReference,\n    stats_logger::{self, StatsLogger},\n    BusMap, BusType, DegreeBound, SymbolicMachine,\n};\n\n/// Optimizes a given symbolic machine and returns an equivalent, but \"simpler\" one.\n/// All constraints in the returned machine will respect the given degree bound.\n/// New variables may be introduced in the process.\npub fn optimize<T, B, BusTypes, MemoryBus>(\n    mut machine: SymbolicMachine<T>,\n    bus_interaction_handler: B,\n    degree_bound: DegreeBound,\n    bus_map: &BusMap<BusTypes>,\n    mut column_allocator: ColumnAllocator,\n    export_options: &mut ExportOptions,\n) -> Result<(SymbolicMachine<T>, ColumnAllocator), crate::constraint_optimizer::Error>\nwhere\n    T: FieldElement,\n    B: BusInteractionHandler<T> + IsBusStateful<T> + RangeConstraintHandler<T> + Clone,\n    BusTypes: PartialEq + Eq + Clone + Display,\n    
MemoryBus: MemoryBusInteraction<T, AlgebraicReference>,\n{\n    let mut stats_logger = StatsLogger::start(&machine);\n\n    if let Some(exec_bus_id) = bus_map.get_bus_id(&BusType::ExecutionBridge) {\n        machine = optimize_exec_bus(machine, exec_bus_id);\n        stats_logger.log(\"exec bus optimization\", &machine);\n    }\n\n    export_options.export_optimizer_outer(&machine, \"exec_bus\");\n\n    let mut new_var = |name: &str| {\n        let id = column_allocator.issue_next_poly_id();\n        AlgebraicReference {\n            // TODO is it a problem that we do not check the name to be unique?\n            name: format!(\"{name}_{id}\").into(),\n            id,\n        }\n    };\n\n    let constraint_system = symbolic_machine_to_constraint_system(machine);\n    stats_logger.log(\"system construction\", &constraint_system);\n\n    let mut constraint_system: IndexedConstraintSystem<_, _> = constraint_system.into();\n    stats_logger.log(\"indexing\", &constraint_system);\n\n    // We could run the rule system before ever constructing the solver.\n    // Currently, it does not yet save time.\n    // let mut constraint_system = rule_based_optimization(\n    //     constraint_system,\n    //     NoRangeConstraints,\n    //     bus_interaction_handler.clone(),\n    //     &mut new_var,\n    //     // No degree bound given, i.e. 
only perform replacements that\n    //     // do not increase the degree.\n    //     None,\n    // )\n    // .0;\n    // export_options.register_substituted_variables(assignments);\n    // export_options.export_optimizer_outer(&machine, \"02_rule_based_optimization\");\n    stats_logger.log(\"rule-based optimization\", &constraint_system);\n\n    let mut solver = new_solver(\n        constraint_system.system().clone(),\n        bus_interaction_handler.clone(),\n    );\n    stats_logger.log(\"constructing the solver\", &constraint_system);\n    loop {\n        export_options\n            .export_optimizer_outer_constraint_system(constraint_system.system(), \"loop_iteration\");\n        let stats = stats_logger::Stats::from(&constraint_system);\n        constraint_system = optimize_constraints::<_, _, MemoryBus>(\n            constraint_system,\n            &mut solver,\n            bus_interaction_handler.clone(),\n            &mut stats_logger,\n            bus_map.get_bus_id(&BusType::Memory),\n            degree_bound,\n            &mut new_var,\n            export_options,\n        )?\n        .into();\n        if stats == stats_logger::Stats::from(&constraint_system) {\n            break;\n        }\n    }\n    let (constraint_system, substitutions) = inliner::replace_constrained_witness_columns(\n        constraint_system,\n        inline_everything_below_degree_bound(degree_bound),\n    );\n    stats_logger.log(\"inlining\", &constraint_system);\n    export_options.register_substituted_variables(substitutions);\n    export_options.export_optimizer_outer_constraint_system(constraint_system.system(), \"inlining\");\n\n    let constraint_system = constraint_optimizer::remove_disconnected_columns(\n        constraint_system,\n        &mut solver,\n        bus_interaction_handler.clone(),\n    );\n    stats_logger.log(\"removing disconnected columns\", &constraint_system);\n    export_options.export_optimizer_inner_constraint_system(\n        
constraint_system.system(),\n        \"remove_disconnected\",\n    );\n\n    let (constraint_system, _) = rule_based_optimization(\n        constraint_system,\n        &solver,\n        bus_interaction_handler.clone(),\n        &mut new_var,\n        Some(degree_bound),\n    );\n    export_options\n        .export_optimizer_outer_constraint_system(constraint_system.system(), \"rule_based\");\n    // Note that the rest of the optimization does not benefit from optimizing range constraints,\n    // so we only do it once at the end.\n    let constraint_system = optimize_range_constraints(\n        constraint_system.into(),\n        bus_interaction_handler.clone(),\n        degree_bound,\n    );\n    stats_logger.log(\"optimizing range constraints\", &constraint_system);\n    export_options\n        .export_optimizer_outer_constraint_system(&constraint_system, \"range_constraints\");\n\n    let constraint_system = trivial_simplifications(\n        constraint_system.into(),\n        bus_interaction_handler,\n        &mut stats_logger,\n    )\n    .system()\n    .clone();\n    export_options.export_optimizer_outer_constraint_system(&constraint_system, \"trivial_simp\");\n\n    stats_logger.finalize(&constraint_system);\n\n    export_options.export_substituted_variables();\n\n    // Sanity check: Degree bound should be respected:\n    for algebraic_constraint in &constraint_system.algebraic_constraints {\n        assert!(\n            algebraic_constraint.degree() <= degree_bound.identities,\n            \"Degree bound violated ({} > {}): {algebraic_constraint}\",\n            algebraic_constraint.degree(),\n            degree_bound.identities\n        );\n    }\n    for bus_interaction in &constraint_system.bus_interactions {\n        for (i, expr) in bus_interaction.fields().enumerate() {\n            assert!(\n                expr.degree() <= degree_bound.identities,\n                \"Degree bound violated in field {i} ({} > {}): {bus_interaction}\",\n                
expr.degree(),\n                degree_bound.identities\n            );\n        }\n    }\n\n    // Sanity check: All PC lookups should be removed, because we'd only have constants on the LHS.\n    let pc_lookup_bus_id = bus_map.get_bus_id(&BusType::PcLookup).unwrap();\n    assert!(\n        !constraint_system\n            .bus_interactions\n            .iter()\n            .any(|b| b.bus_id == GroupedExpression::from_number(T::from(pc_lookup_bus_id))),\n        \"Expected all PC lookups to be removed.\"\n    );\n    Ok((\n        constraint_system_to_symbolic_machine(constraint_system),\n        column_allocator,\n    ))\n}\n\npub fn optimize_exec_bus<T: FieldElement>(\n    mut machine: SymbolicMachine<T>,\n    exec_bus_id: u64,\n) -> SymbolicMachine<T> {\n    let mut first_seen = false;\n    let mut receive = true;\n    let mut latest_send = None;\n    let mut execution_bus_constraints = vec![];\n    machine.bus_interactions.retain(|bus_int| {\n        if bus_int.id != exec_bus_id {\n            return true;\n        }\n\n        if receive {\n            // TODO assert that mult matches -expr\n        }\n\n        // Keep the first receive\n        let keep = if !first_seen {\n            first_seen = true;\n            true\n        } else if !receive {\n            // Save the latest send and remove the bus interaction\n            latest_send = Some(bus_int.clone());\n            false\n        } else {\n            // Equate the latest send to the new receive and remove the bus interaction\n            for (bus_arg, send_arg) in bus_int\n                .args\n                .iter()\n                .zip_eq(latest_send.as_ref().unwrap().args.iter())\n            {\n                execution_bus_constraints\n                    .push(SymbolicConstraint::from(bus_arg.clone() - send_arg.clone()))\n            }\n            false\n        };\n\n        receive = !receive;\n\n        keep\n    });\n\n    // Re-add the last send\n    
machine.bus_interactions.push(latest_send.unwrap());\n\n    // Add the constraints which replace the execution bus interactions\n    machine.constraints.extend(execution_bus_constraints);\n\n    machine\n}\n\n/// A wrapped variable: Either a regular variable or a bus interaction field.\n#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)]\npub enum Variable<V> {\n    Variable(V),\n    BusInteractionField(usize, usize),\n}\n\nimpl<V: Display> Display for Variable<V> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Variable::Variable(v) => write!(f, \"{v}\"),\n            Variable::BusInteractionField(bus_index, field_index) => {\n                write!(f, \"BusInteractionField({bus_index}, {field_index})\")\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/optimizer_documentation.md",
    "content": "# The Autoprecompiles Optimizer\n\n## Terminology\n\n### Field Elements\n\nThroughout this document, we will be working in a finite field of prime order `p`.\nOften, we use operators or concepts that are only defined\nin the integers. In this case, we use the natural number representation\nof the field element, i.e. the unique integer `x` such that `0 <= x < p`\nwhere the field operations are defined as `x + y = (x + y) mod p`\nand `x * y = (x * y) mod p` for field elements `x` and `y`.\n\nThis way, we can also make statements about a field element being\n_less than_ another field element, even if this would not make sense\ninside the finite field. Sometimes, field elements are also interpreted\nas signed integers instead of natural numbers, but this will be clarified.\n\n### Constraint System\n\nThe optimizer is operating on an abstraction of a chip we call\n_Constraint System_, which consists of a set of _Algebraic Constraints_\nand _Bus Interactions_. Both of them contain expressions involving variables.\nA Constraint System is _satisfied_ by an assignment\nof its variables if the assignment satisfies all Algebraic Constraints\nand Bus Interactions in the system.\n\nThe purpose of the optimizer is to simplify a Constraint System\ninto a Constraint System that has the same satisfying assignments.\nThis is not the exact definition of correctness for the optimizer because\nit is also allowed to remove variables and introduce new ones, but\nit is a good guideline for now until we have all the definitions.\n\n### Algebraic Constraint\n\nAn _Algebraic Constraint_ consists of an _Algebraic Expression_, i.e.\nan expression involving the operators `+` and `*` on _Variables_ and\n_Constants_ from the implied finite field. 
The idea is that the constraint\nforces the expression to be zero and thus we write it as an equation\n`<expr> = 0` (but also `<lhs> = <rhs>` if more convenient, by which\nwe mean `<rhs> - <lhs> = 0`).\n\nAn Algebraic Constraint is _satisfied_ by an assignment of the variables\nif it evaluates to zero under this assignment.\n\nExample: `x * (x - 1) = 0` is an algebraic constraint that forces\nthe variable `x` to be either zero or one, meaning that an\nassignment satisfies this constraint if and only if it has `x = 0` or `x = 1`.\n\n### Range Constraint\n\nThe task of the optimizer is hugely simplified by the concept of\n_Range Constraints_. Range Constraints allow us to combine the effects of\ndifferent Algebraic Constraints (and Bus Interactions) on the same variable.\nIn an abstract way, a _Range Constraint_ is just\na restriction on values and we can say that a value _satisfies_ a Range Constraint\nor not. We also say that a Range Constraint _allows_ a value if that value satisfies it.\nWe can connect Range Constraints and variables (a Range Constraint _on_ a\nvariable) and say that an assignment of a variable `v` _satisfies_ a Range Constraint\n`r` on `v` if the value assigned to `v` satisfies `r`.\nA Range Constraint `r` on a variable `v` is _valid_ in a Constraint System if any\nsatisfying assignment of the Constraint System also satisfies `r`.\n\nDuring optimization, we derive Range Constraints for expressions and variables\nfrom Algebraic Constraints and Bus Interactions and use them to simplify\nthe Constraint System. We also use Range Constraints for a uniform abstraction\nof Bus Interactions as we will see in a later section.\n\nAs an example, let us consider the Constraint System consisting of the\nAlgebraic Constraint `x * (x - 1) = 0`. 
From this Algebraic Constraint the optimizer\nwill synthesize a Range Constraint `r1` on `x` that only allows the values `0` and `1`.\nThe Range Constraint is valid in the Constraint System because, as we saw at the end of\nthe previous section, any satisfying assignment for the Algebraic Constraint\nmust have `x = 0` or `x = 1`. Note that a Range Constraint that allows all values\nin the field is always valid, but not very useful.\n\nNow assume we extend the Constraint System by an additional constraint\n`(x - 2) * (x - 1) = 0`. The Range Constraint `r1` on `x` is still valid in the extended\nsystem because additional constraints can only reduce the set of satisfying assignments.\nIf we look at the second constraint in isolation, we can get a Range Constraint `r2` on `x`\nthat allows exactly the values `1` and `2`. Both `r1` and `r2` are valid in the extended\nsystem, and so is their intersection, which only allows the value `1`.\n\nFrom this simple example, one can already see the power of these Range Constraints.\nIn a later section we will talk about the various computations that can be performed\non Range Constraints including the intersection.\n\n#### Concrete Implementation of Range Constraints\n\nThe abstract concept of Range Constraints is implemented in the optimizer by a combination\nof a _Wrapping Interval_ and a _Bitmask_.\n\nA _Wrapping Interval_ is a pair of field elements `min` and `max`.\nA value `x` is allowed by the Wrapping Interval if and only if it is\npart of the sequence `min`, `min + 1`, `min + 2`, ..., `max`.\nNote that this sequence wraps around the prime `p` of the field.\n\nThe following is an equivalent definition:\nIf `min <= max` (as seen in the natural numbers), the Wrapping\nInterval allows a value `x` if and only if `min <= x <= max`\n(the non-wrapping case).\nIf `min > max`, the Wrapping Interval allows a value `x`\nif and only if `x >= min` or `x <= max` (the wrapping case).\n\nThe reason we allow these wrapping intervals is 
that we can compute\nthe Range Constraint interval of an expression `x + k` for any constant `k`\nfrom the Range Constraint interval of `x` without losing information.\n\nA _Bitmask_ is a natural number `bitmask` that is interpreted as a\nbitmask for the natural number representation of field elements.\nIt allows a value `x` if and only if `x & bitmask == x`, i.e. all\nbits that are set in `x` are also set in `bitmask`.\nNote that in particular, the bitmask can never disallow the value zero.\n\nA _Range Constraint_ allows a value if and only if both the bitmask\nand the wrapping interval allow it.\n\n### Bus Interaction\n\nThe concept of _Bus Interaction_ is a bit more complicated. The concrete\nsemantics of a bus interaction depends on the environment, i.e. the\nzkVM we are operating inside and the chips it has.\n\nA _Bus Interaction_ consists of a _Bus ID_, a _Multiplicity_ and\na _Payload_. The _Bus ID_ is an Algebraic Expression and specifies\nwhich bus to interact with. The _Multiplicity_ is an Algebraic Expression\nand in most cases it should evaluate either to 1 or -1. The _Payload_ is\nthe data that is sent to the bus or received from the bus and is a list\nof Algebraic Expressions.\n\nUsually, one can think of a Bus Interaction to constrain the items in\nthe payload as a tuple. For example, if you have an XOR bus, then\na Bus Interaction with payload `(a, b, c)` ensures that\n`c = a ^ b`. In a bus interaction, there is no intrinsic concept of\ninputs and outputs (even though some buses can be seen like that).\n\nIn the example of the XOR bus, it is perfectly fine to use\n`(a, b, 0xff)` and thus ensure that (on the lower most byte),\n`b` is the bitwise negation of `a`.\n\nBuses can only be properly described across a system of chips or\nconstraint systems. What we want to achieve is that all the buses\nare balanced:\n\nA Bus is _balanced_ if across the whole system and for all payloads,\nthe sum of the multiplicities is zero. 
Intuitively, with a multiplicity\nof 1 we can send some payload and we receive it on the other end\nwith a multiplicity of -1.\n\nThe autoprecompiles optimizer will mostly work with an abstraction\nof bus interactions that are specifically implemented for each concrete\nbus type, but these implementations also usually fall into categories,\nso it should not be difficult to implement this abstraction for a new bus\nor system.\n\n### Bus Interaction Abstraction\n\nFor the optimizer to be able to handle Bus Interactions, we need to\nimplement the following methods:\n\n- `is_stateful`: For a given Bus ID (a field element), returns if the bus\n  with the given ID is stateful or not. If a bus interaction is not stateful,\n  it only affects the payload passed to it and no other elements of the system.\n  A memory bus or the execution bridge are examples of stateful busses,\n  while range constraint busses, busses modeling arithmetic operations or\n  lookup tables are not stateful.\n\n- `handle_bus_interaction`: Takes a Bus Interaction where its items are represented\n  by Range Constraints instead of expressions. It returns\n  a Bus Interaction with Range Constraints such that all payloads that satisfy\n  the input Range Constraints and the bus semantics also satisfy the output\n  Range Constraints. An implementation that always returns its inputs\n  (or also just fully unconstrained Range Constraints) would be correct, but\n  of course you should return Range Constraints that are as tight\n  as possible such that the optimizer gets the most out of it.\n\nAs an example, let us assume we are modeling a bus that implements a byte\nconstraint, i.e. a bus that takes a single payload item and enforces that it is\nin the range `0..=255`. The bus is not stateful since it does not depend on nor affects any\nother parts of the system. 
A simple correct implementation of\n`handle_bus_interaction` would be to always return a `0xff`-mask Range Constraint\nfor the payload and ignore the input. It is correct because any assignment that\nsatisfies the bus semantics must have the payload in the range `0..=255`.\nEven though this implementation ignores the input Range Constraints, it is also the best\npossible, since even if the input Range Constraint\nis something like `200..=300`, the optimizer will not forget it but\ninstead combine it with the one returned by `handle_bus_interaction` and derive\n`200..=255` as the new Range Constraint for the payload.\n\nAnother example is an XOR-bus that takes three payload items `a, b, c`\nand ensures that all of them are bytes and `a ^ b = c`. This bus is also not stateful.\nHere, one would implement `handle_bus_interaction` by returning the three byte constraints\nfor the payload items if the input has no restrictions. If two inputs are fully\ndetermined (i.e. only a single value satisfies the Range Constraints),\nwe can compute the third and return that as a Range Constraint.\n\nWe will see later how we can fully optimize away XOR bus interactions using just this\nabstraction.\n\n### Memory Bus\n\nTODO Continue with the abstraction using Range Constraints.\n\n## Combining Range Constraints\n\n\n## Grouped Expressions\n\nThe main data structure used for algebraic expressions is the _Grouped Expression_.\nA Grouped Expression consists of a constant term, a list of linear terms (a list of pairs of a non-zero coefficient\nand a variable) and a list of quadratic terms (a list of pairs of Grouped Expressions).\n\nThe variables in the linear terms are unique and the coefficients are required\nto be non-zero. 
The uniqueness is enforced by using a map data type.\nThis makes it easy to compare, add and subtract affine expressions, which do not\nhave quadratic terms.\n\nIt also provides a normal form for affine Algebraic Constraints if we require \nthe coefficient of the first variable (according to some fixed order on the \nvariables) to be one. Note that an Algebraic Constraint can be multiplied \nby a nonzero factor without changing the semantics.\n\nAddition and subtraction of Grouped Expressions are implemented to remove linear terms that cancel each other out,\nand they perform some checks also in the quadratic terms, but this part is not complete for performance reasons.\n\n## Equivalence Notion\n\n### Introduction and Example\n\nWe start with some informal intuition and an example.\n\nWe call two Constraint Systems _equivalent_ if every satisfying assignment for one system\ncan be extended to a satisfying assignment for the other system and every such extension\nleads to the same payloads and multiplicities for all _stateful_ bus interactions in both systems.\n\nAs an example, consider the two systems\n\nSystem A:\n```\nx = 8\nx + y + z = 12\nBusInteraction { bus_id = 2, multiplicity = 1, payload = [x, y, z] }\nw * (w - 1) = 0\n```\n\nSystem B:[^variables]\n```\ny' + z' = 4\nBusInteraction { bus_id = 2, multiplicity = 1, payload = [8, y', z'] }\n```\n\n[^variables]:\n    In this pair of systems, and throughout the rest of this document, we will use\n    unprimed variables for the first system and primed ones for the second system.\n    When two variables have the same name (modulo primes), that means the variables\n    are informally *intended* to have the same value. We will formalize this idea\n    later.\n\nLet us assume that the bus with ID 2 is stateful and allows all combinations of values between 0 and 100 (inclusive).\nNote that the variables `y`/`y'` and `z`/`z'` are not uniquely determined in either system. 
The stateful bus\nacts both as input and output for the system.\n\nNote that System B is obtained from System A by substituting `x = 8`, removing `w`, and replacing `y,z` with `y',z'`.\n\nAll satisfying assignments of System A must have `x = 8` and either `w = 0` or `w = 1`.\nSuch an assignment also satisfies System B (with the variables primed)\nand it produces the same values for the stateful bus interaction.\n\nThe converse is a bit more complicated: Satisfying assignments of system B only assign the variables\n`y'` and `z'`.  We can give `y` and `z` the same values in system A, but we need\nto extend the assignment so that it assigns `x` and `w` and satisfies System A. For `x`, the only\nchoice we have is `x = 8`, but there are two ways to extend the assignment with regards to `w` such that\nit is still satisfying, `w = 0` or `w = 1`. Since both ways to extend the assignment\nproduce the same values in the stateful bus interaction, the systems are equivalent.\n\n### Abstract Equivalence Definition\n\nNow let's proceed formally.\n\nLet $S = (C, B)$ be a system, defined over a vector of variables, $w$. Let\n$C$ be the stateless constraints of the system: a formula over $w$. This includes the\nalgebraic constraints and stateless buses. Let $B$ be the stateful bus interactions.\nIt is a fixed-length sequence of interactions. Each interaction is a pair.\nThe first component, $d$, is the data, a fixed-length\nlist of algebraic expressions, so its type is $\\mathbb{F}^+$ (sequences of\npositive length of algebraic expressions over $\\mathbb{F}$). Assume the bus ID\nis represented as the first entry in $d$, for simplicity. The second component\nof an interaction is $m$, the multiplicity, which is an algebraic expression.\n\nThe bus interactions will be aggregated into a special kind of multiset. We\nrefer to a map from $\\mathbb{F}^+ \\to \\mathbb{F}$ as a “field multiset” (aka\n“multiset”). 
This name reflects an interpretation of the map as a multiset in\nwhich each key in the map appears with multiplicity equal to its\nvalue.[^fmultiset] Note\nthat these multisets can be added pointwise. That is, for multisets $m$ and\n$m'$, their sum $m + m'$ maps each key $k$ to $m(k) + m'(k)$. We interpret a bus\ninteraction as a multiset with one key and the specified multiplicity. That is,\nwe define $\\textsf{toMs}(d, m)$ to be the field multiset that maps key $d$ to\nvalue $m$ and all other keys to value $0$. Then, we define $\\Sigma(B)$ to be\n$\\sum_{(d,m) \\in B} \\textsf{toMs}(d, m)$\n\n[^fmultiset]:\n    A field multiset is slightly different than a standard multiset. In a\n    standard multiset, the multiplicities are natural numbers, not field\n    elements. Thus, in a field multiset, multiplicities can cancel out and can\n    be negative. For example, in a field multiset over $\\mathbb{F}_2$, for a key\n    $k$, containing $k$ twice is equivalent to containing $k$ zero times.\n    We use field multisets because the cryptography used to create zkVMs can\n    prove properties of field multisets, but not standard multisets.\n    While some SMT solvers, like cvc5, do have a theory of standard\n    multisets ([link][bags]), field multisets are more naturally encoded using\n    the theory of arrays, with pointwise addition.\n\n[bags]: https://cvc5.github.io/docs/cvc5-1.3.2/theories/bags.html\n\n\nNow we can define equivalence, between systems. Assume two systems $S = (C, B)$\nand $S' = (C', B')$ in variables $w$ and $w'$, respectively.\n\nEquivalence has two conditions.\n\nThe first condition is **completeness**, which says that when $S$ is satisfiable,\nso is $S'$, and with the same effects (stateful bus interactions). 
Formally,\nthere should exist an efficient $E(w) \\to w'$ such that: for all $w$ and $s$,\nif $C(w) \\wedge \\Sigma(B(w)) = s$,\nthen $C'(w') \\wedge \\Sigma(B'(w')) = s$,\nwhere $w' = E(w)$.\n\nThe second condition is **soundness**, which says that when $S'$ is satisfiable, $S$\nis too, and with the same effects. Formally, there should exist an efficient\n$I(w') \\to w$ such that: for all $w'$ and $s$,\nif $C'(w') \\wedge \\Sigma(B'(w')) = s$,\nthen $C(w) \\wedge \\Sigma(B(w)) = s$,\nwhere $w = I(w')$.\n\nIn the context of powdr, $S$ is the input to the optimization pipeline and $S'$\nis the output. The pipeline also implicitly outputs $E$, which is encoded as\nfollows. Most of the variables in $w'$ have the same name as some variable in\n$w$---each takes its value. Other variables have an entry in the \"derived\nvariables\", which explains how to compute them from $w$.\n\n### Worked example\n\nWe will give two equivalent systems, as examples.\n\nThe first system, $S = (C, B)$, is a slightly more complex version of the\ninformal example above, with $b$ in place of $w$.\n\n> $d_0 = (2, x, y, z), m_0 = 1$\n>\n> $d_1 = (2, x, y, z), m_1 = b$\n>\n> $d_2 = (2, 8, y, z), m_2 = -b$\n>\n> $C = (x = 8 \\wedge x + y + z = 12 \\wedge b(b-1) = 0)$\n\nThe second system $S' = (C', B')$ is:\n\n> $d'_0 = (2, 8, y', z'), m'_0 = 1$\n>\n> $C' = (y' + z' = 4)$\n\nAlgorithmically, one optimizes $S$ into $S'$ by the following transformations:\n\n1. Since $x = 8$, substitute $8$ for $x$.\n2. Now, we have $d_1 = d_2$, and $m_1 = -m_2$, so remove both bus\n   interactions--they have equal data and their multiplicities sum to 0.\n3. $b$ appears in no bus interactions, and in no algebraic constraints with\n   other variables. Moreover, the constraints it does appear in are satisfiable.\n   Remove them.\n\nNow, we prove that these systems are equivalent under the prior definition. 
That\nis, we prove soundness and completeness.\n\n#### Soundness\n\n$I(w') \\to w$ is defined to map $w'=(y',z')$ to $w=(x,y,z,b)$ as follows:\n$x \\gets 8, y \\gets y', z \\gets z', b \\gets 0$.\n\nRoughly, we must show:\n\n$$\\forall w', \\forall s, C'(w') \\wedge \\Sigma(B'(w')) = s \\wedge w = I(w')\n\\implies C(w) \\wedge \\Sigma(B(w)) = s$$\n\nWhich is the same as\n\n$$\\forall w', C'(w') \\wedge w = I(w')\n\\implies C(w) \\wedge \\Sigma(B(w)) = \\Sigma(B'(w'))$$\n\n\nProof:\n\n* Fix $w' = (y', z')$.\n* To show the $\\implies$, assume\n  * $w = I(w')$, that is:\n    * $x = 8$\n    * $y = y'$\n    * $z = z'$\n    * $b = 0$\n  * $y' + z' = 4$\n  * $s = \\Sigma(B'(w'))$\n* And now we need to show each of the following goals:\n  * $x = 8$, since it is part of $C(w)$\n    * we already have this\n  * $x + y + z = 12$, since it is also part of $C(w)$\n    * we have this since we have $x=8, y=y', z=z', y'+z'=4$\n  * $b(b-1) = 0$, since it is also part of $C(w)$\n    * we have this since $b=0$\n  * $\\mathsf{toMs}((2, 8, y', z'), 1) = \\mathsf{toMs}((2, x, y, z), 1) + \\mathsf{toMs}((2, x, y, z), b) + \\mathsf{toMs}((2, 8, y, z), -b)$\n    * First, let $s = \\mathsf{toMs}((2, 8, y', z'), 1)$\n    * since $y=y'$ and $z=z'$, we have\n        $s = \\mathsf{toMs}((2, 8, y, z), 1)$\n    * since $x=8$, we have\n        $s = \\mathsf{toMs}((2, x, y, z), 1)$\n    * since 0 multiplicities are an identity for $+$, we have\n        $s = \\mathsf{toMs}((2, x, y, z), 1) + \\mathsf{toMs}((2, x, y, z), 0) + \\mathsf{toMs}((2, 8, y, z), 0)$\n    * since $b=0$, we have our goal:\n        $s = \\mathsf{toMs}((2, x, y, z), 1) + \\mathsf{toMs}((2, x, y, z), b) + \\mathsf{toMs}((2, 8, y, z), -b)$\n\n#### Completeness\n\n$E$ is defined as $y' \\gets y, z' \\gets z$.\n\nRoughly, we must show:\n\n$$\\forall w, \\forall s, C(w) \\wedge \\Sigma(B(w)) = s \\wedge w' = E(w)\n\\implies C'(w') \\wedge \\Sigma(B'(w')) = s$$\n\nWhich is the same as\n\n$$\\forall w, C(w) \\wedge w' = E(w)\n\\implies C'(w') 
\\wedge \\Sigma(B(w)) = \\Sigma(B'(w'))$$\n\nProof:\n\n* Fix $w = (x, y, z, b)$.\n* Fix $w' = (y', z')$.\n* To show the $\\implies$, assume\n  * $w'=E(w)$, that is:\n    * $y' = y$\n    * $z' = z$\n  * $x = 8$\n  * $x + y + z = 12$\n* And now we need to show each of the following goals:\n  * $y' + z' = 4$\n    * we have this from $y' = y, z' = z, x = 8, x + y + z = 12$\n  * $\\mathsf{toMs}((2, x, y, z), 1) + \\mathsf{toMs}((2, x, y, z), b) + \\mathsf{toMs}((2, 8, y, z), -b) = \\mathsf{toMs}((2, 8, y', z'), 1)$\n    * let\n      $s = \\mathsf{toMs}((2, x, y, z), 1) + \\mathsf{toMs}((2, x, y, z), b) + \\mathsf{toMs}((2, 8, y, z), -b)$\n    * since $x = 8$, we have:\n      $s = \\mathsf{toMs}((2, 8, y, z), 1) + \\mathsf{toMs}((2, 8, y, z), b) + \\mathsf{toMs}((2, 8, y, z), -b)$\n    * by additive inverse for multiset multiplicities we have:\n      $s = \\mathsf{toMs}((2, 8, y, z), 1)$\n    * by $y'=y, z'=z$, we have our goal:\n      $s = \\mathsf{toMs}((2, 8, y', z'), 1)$\n\n### Connection to prior definitions from the literature\n\nOur definition is an instantiation of Ozdemir et al.'s definition of ZKP\ncompiler correctness from the paper [\"Bounded Verification for\nFinite-Field-Blasting in a Compiler for Zero Knowledge Proofs\"][1]. Start from\ntheir Definition 1. To see this, set:\n\n* their $w$ and $w'$ to our $w$ and $w'$,\n* their $x$ and $x'$ to our $s$ (both are $s$),\n* their $\\phi(x,w)$ to our $C(w) \\wedge \\Sigma(B(w)) = s$,\n* their $\\phi'(x',w')$ to our $C'(w') \\wedge \\Sigma(B'(w')) = s$,\n* their $\\mathsf{Ext}_x(x)$ to the identity function from $s$ to itself,\n* their $\\mathsf{Ext}_w(x, w)$ to our $E$, and\n* their $\\mathsf{Inv}(x', i')$ to our $I$.\n\nThis alignment bodes very well for our definition. Ozdemir et al. proved that a\nZKP compiler that is correct by their definition can securely compose with a\nzkSNARK for the compiler's output language to give a zkSNARK for the compiler's\ninput language. 
We would hope to show a similar result using our definition. But\nour result would also need to account for the zkVM's design. Our result would\nsay something like (secure zkSNARK for plonkish constraints) + (correct zkVM) +\n(correct powdr) = (secure zkSNARK for RISC-V).\n\n### Connections to Georg's definition\n\nOur definition strengthens Georg's slightly. In his soundness definition,\n$I$ and $E$ are de-skolemized (their outputs are existentially quantified). This\nis equivalent to removing the requirement that $I$ and $E$ be efficient. An\ninefficient $E$ really wouldn't work, because then you can't compute the witness\n$w'$. Fortunately, powdr outputs $E$ (encoded in the variable derivations). An\ninefficient $I$ means that powdr would compose with a zkSNARG, but not a\nzkSNARK. That is, it no longer applies to knowledge soundness, just to\nexistential soundness.\n\n### Constraints\n\nIn the foregoing, we noted that stateless bus interactions and algebraic\nconstraints are represented by $C$. Now, we discuss $C$ in more detail.\n\nIn terms of SMT theories, the algebraic constraints are just QF_FF (quantifier-free over a finite field) predicates over the variables $w$. More\nspecifically, they are $\\mathbb{F}$ equalities over terms constructed with $+$\nand $\\times$ in $\\mathbb{F}$.\n\nThere are a few different bus interactions that contribute to $C$:\n\n* TODO\n\n### Requirements that are not yet formalized.\n\nThe definition above is a living object. 
There are requirements for powdr that\nwe have not yet formalized, and there may be some that we are not yet aware of.\nMost of these are likely weird invariants that OpenVM implicitly assumes in its\nown definition of correctness.\n\nCurrently, we know of one unformalized requirement:\n\n* Under all satisfying assignments, a constraint system must ensure that the\n  difference between the execution step counter in its final execution bus send\n  and its initial execution bus receive is at most the total number of bus\n  interactions. This requirement is used to prevent overflows related to the\n  step counter and the bus multiplicities. Powdr is currently violating this\n  requirement[2]. But also, this requirement is not tight. Many looser\n  requirements could also prevent overflow. And, powdr might be able to be\n  changed to respect it.\n\n  We expect that it will be easy to verify a requirement like this one once we\n  figure out exactly what we need to verify. It is also possible that this\n  requirement will end up being something that is not the responsibility of the\n  optimizer and is instead the responsibility of a different part of the\n  pipeline.\n\n## Optimization Steps\n\nThe called functions are\n\n```\noptimize_exec_bus\nloop:\n    solver_based_optimization\n    remove_trivial_constraints\n    remove_free_variables\n    remove_disconnected_columns\n    trivial_simplifications\n    optimize_memory\n    LowDegreeBusInteractionOptimizer\ninliner::replace_constrained_witness_columns\noptimize_range_constraints\ntrivial_simplifications\n```\n\nin addition, in the solver we have to explain:\n- linearizing\n- boolean extraction\n- solving algebraic constraints\n - simple equivalence\n - splitting into multiple constraints\n - solving itself\n- handling bus interactions\n- quadratic equivalence detection\n- exhaustive search\n- equal zero check\n\n### Constraint System Solver\n\nThe Constraint System Solver is the core of the optimizer. 
It is created from a Constraint System, but\nit does not directly modify the Constraint System. Instead it acts as an information base about the\nvariables in the Constraint System. It can provide tight Range Constraints for variables or expressions,\nwhich include the special case of variables being constant. The optimizer uses the Constraint System Solver\nto substitute such constant variables. It can also determine if two Algebraic Expressions are always different,\nwhich is crucial for memory optimization to solve the aliasing problem.\n\n#### Linearizing\n\n#### Boolean Extraction\n\n#### Simple Variable Equivalence\n\n`try_to_simple_equivalence`\n\n#### Splitting Algebraic Constraints Into Multiple Constraints\n\n#### Solving Algebraic Constraints\n\n##### Affine Constraints\n\n##### Quadratic Constraints\n\n#### Handling Bus Interactions\n\n#### Quadratic Equivalence Detection\n\n#### Exhaustive Search\n\n#### Equal Zero Check\n\n[1]: https://eprint.iacr.org/2023/778.pdf\n[2]: https://github.com/powdr-labs/powdr/issues/3542\n"
  },
  {
    "path": "autoprecompiles/src/pgo/cell/mod.rs",
    "content": "use std::{collections::BTreeMap, io::BufWriter};\n\nuse itertools::Itertools;\nuse rayon::iter::{IntoParallelIterator, ParallelIterator};\nuse selection::select_blocks_greedy;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    adapter::{Adapter, AdapterApcWithStats, AdapterExecutionBlocks, AdapterVmConfig, PgoAdapter},\n    blocks::{BasicBlock, BlockAndStats, SuperBlock},\n    evaluation::{evaluate_apc, EvaluationResult},\n    execution_profile::ExecutionProfile,\n    export::{ExportLevel, ExportOptions},\n    EmpiricalConstraints, PowdrConfig,\n};\n\nmod selection;\n\n/// Trait for autoprecompile candidates.\n/// Provides ApcWithStats with logic for evaluating a candidate.\npub trait ApcCandidate<A: Adapter>: Sized {\n    fn create(apc_with_stats: AdapterApcWithStats<A>) -> Self;\n    fn inner(&self) -> &AdapterApcWithStats<A>;\n    fn into_inner(self) -> AdapterApcWithStats<A>;\n    // cost of the APC before optimization\n    fn cost_before_opt(&self) -> usize;\n    // cost of the APC after optimization\n    fn cost_after_opt(&self) -> usize;\n    // value of the APC for each time it is used\n    fn value_per_use(&self) -> usize;\n}\n\n#[derive(Serialize, Deserialize)]\n/// NOTE: When making changes to this field or any of the contained types,\n/// JSON_EXPORT_VERSION must be updated\npub struct ApcCandidateJsonExport {\n    // execution_frequency\n    pub execution_frequency: usize,\n    // original instructions (pretty printed)\n    pub original_blocks: Vec<BasicBlock<String>>,\n    // before and after optimization stats\n    pub stats: EvaluationResult,\n    // width before optimisation, used for software version cells in effectiveness plot\n    pub width_before: usize,\n    // value used in ranking of candidates\n    pub value: usize,\n    // cost before optimisation, used for effectiveness calculation\n    pub cost_before: f64,\n    // cost after optimization, used for effectiveness calculation and ranking of candidates\n    pub 
cost_after: f64,\n}\n\npub struct CellPgo<A, C> {\n    _marker: std::marker::PhantomData<(A, C)>,\n    data: ExecutionProfile,\n    max_total_apc_columns: Option<usize>,\n}\n\nimpl<A, C> CellPgo<A, C> {\n    pub fn with_pgo_data_and_max_columns(\n        data: ExecutionProfile,\n        max_total_apc_columns: Option<usize>,\n    ) -> Self {\n        Self {\n            _marker: std::marker::PhantomData,\n            data,\n            max_total_apc_columns,\n        }\n    }\n}\n\n/// This version is used by external tools to support multiple versions of the json export.\n/// Version should be incremented whenever a breaking change is made to the type (or inner types).\n/// Version Log:\n/// 0: Serialize only APCs as Vec<ApcCandidateJsonExport>\n/// 1: Add labels to the JSON export\n/// 2: Rename apcs[*].original_block.statements -> apcs[*].original_block.instructions\n/// 3. Remove apcs[*].apc_candidate_file\n/// 4. superblocks: original_blocks: Vec<BasicBlock<_>>\nconst JSON_EXPORT_VERSION: usize = 4;\n\n#[derive(Serialize, Deserialize)]\nstruct JsonExport {\n    version: usize,\n    apcs: Vec<ApcCandidateJsonExport>,\n    labels: BTreeMap<u64, Vec<String>>,\n}\n\nimpl JsonExport {\n    fn new(apcs: Vec<ApcCandidateJsonExport>, labels: BTreeMap<u64, Vec<String>>) -> Self {\n        Self {\n            version: JSON_EXPORT_VERSION,\n            apcs,\n            labels,\n        }\n    }\n}\n\nimpl<A: Adapter + Send + Sync, C: ApcCandidate<A> + Send + Sync> PgoAdapter for CellPgo<A, C> {\n    type Adapter = A;\n\n    fn create_apcs_with_pgo(\n        &self,\n        exec_blocks: AdapterExecutionBlocks<Self::Adapter>,\n        config: &PowdrConfig,\n        vm_config: AdapterVmConfig<Self::Adapter>,\n        labels: BTreeMap<u64, Vec<String>>,\n        empirical_constraints: EmpiricalConstraints,\n    ) -> Vec<AdapterApcWithStats<Self::Adapter>> {\n        if config.autoprecompiles == 0 {\n            return vec![];\n        }\n\n        let 
AdapterExecutionBlocks::<Self::Adapter> {\n            blocks,\n            execution_bb_runs,\n        } = exec_blocks;\n\n        tracing::info!(\n            \"Generating autoprecompiles for all {} blocks in parallel\",\n            blocks.len(),\n        );\n\n        // Generate apcs in parallel.\n        // Produces two matching vectors: one with the APCs and another with the corresponding originating block.\n        let (apcs, blocks): (Vec<_>, Vec<_>) = blocks\n            .into_par_iter()\n            .filter_map(|block_and_stats| {\n                let start = std::time::Instant::now();\n                let res = try_generate_candidate::<A, C>(\n                    block_and_stats.block.clone(),\n                    config,\n                    &vm_config,\n                    &empirical_constraints,\n                )?;\n                tracing::debug!(\n                    \"Generated APC for block {:?}, (took {:?})\",\n                    block_and_stats.block.start_pcs(),\n                    start.elapsed()\n                );\n                Some((res, block_and_stats))\n            })\n            .collect();\n\n        // write the APC candidates JSON to disk if the directory is specified.\n        if let Some(apc_candidates_dir_path) = &config.apc_candidates_dir_path {\n            let apcs = apcs\n                .iter()\n                .zip_eq(&blocks)\n                .map(|(apc, candidate)| apc_candidate_json_export::<A, _>(apc, candidate))\n                .collect();\n            let json = JsonExport::new(apcs, labels);\n            let json_path = apc_candidates_dir_path.join(\"apc_candidates.json\");\n            let file = std::fs::File::create(&json_path)\n                .expect(\"Failed to create file for APC candidates JSON\");\n            serde_json::to_writer(BufWriter::new(file), &json)\n                .expect(\"Failed to write APC candidates JSON to file\");\n        }\n\n        // select best candidates\n        let budget 
= self.max_total_apc_columns.unwrap_or(usize::MAX);\n        let max_selected = (config.autoprecompiles + config.skip_autoprecompiles) as usize;\n        let selection =\n            select_blocks_greedy(&apcs, &blocks, budget, max_selected, &execution_bb_runs);\n\n        // skip per config\n        let skip = (config.skip_autoprecompiles as usize).min(selection.len());\n\n        // filter and order the apcs using the selection\n        let mut apcs: Vec<_> = apcs.into_iter().map(|apc| Some(apc.into_inner())).collect();\n        selection\n            .into_iter()\n            .skip(skip)\n            .map(|position| apcs[position].take().unwrap())\n            .collect()\n    }\n\n    fn execution_profile(&self) -> Option<&ExecutionProfile> {\n        Some(&self.data)\n    }\n}\n\n// Try and build an autoprecompile candidate from a superblock.\nfn try_generate_candidate<A: Adapter, C: ApcCandidate<A>>(\n    block: SuperBlock<A::Instruction>,\n    config: &PowdrConfig,\n    vm_config: &AdapterVmConfig<A>,\n    empirical_constraints: &EmpiricalConstraints,\n) -> Option<C> {\n    let export_options = ExportOptions::new(\n        config.apc_candidates_dir_path.clone(),\n        &block.start_pcs(),\n        ExportLevel::OnlyAPC,\n    );\n    let apc = crate::build::<A>(\n        block.clone(),\n        vm_config.clone(),\n        config.degree_bound,\n        export_options,\n        empirical_constraints,\n    )\n    .ok()?;\n    let apc_with_stats = evaluate_apc::<A>(vm_config.instruction_handler, apc);\n    Some(C::create(apc_with_stats))\n}\n\nfn apc_candidate_json_export<A: Adapter, C: ApcCandidate<A>>(\n    apc: &C,\n    block: &BlockAndStats<A::Instruction>,\n) -> ApcCandidateJsonExport {\n    let original_blocks: Vec<_> = apc\n        .inner()\n        .apc()\n        .block\n        .blocks()\n        .map(|b| BasicBlock {\n            start_pc: b.start_pc,\n            instructions: b.instructions.iter().map(ToString::to_string).collect(),\n        })\n     
   .collect();\n\n    ApcCandidateJsonExport {\n        execution_frequency: block.count as usize,\n        original_blocks,\n        stats: apc.inner().evaluation_result(),\n        width_before: apc.cost_before_opt(),\n        value: apc\n            .value_per_use()\n            .checked_mul(block.count as usize)\n            .unwrap(),\n        cost_before: apc.cost_before_opt() as f64,\n        cost_after: apc.cost_after_opt() as f64,\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/pgo/cell/selection.rs",
    "content": "use itertools::Itertools;\nuse priority_queue::PriorityQueue;\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    adapter::Adapter,\n    blocks::{find_non_overlapping, BlockAndStats, ExecutionBasicBlockRun},\n};\n\nuse super::ApcCandidate;\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\n// A candidate block, used during block selection\npub struct BlockCandidate {\n    // sequence of basic blocks composing this block\n    pub start_pcs: Vec<u64>,\n    // cost of original basic blocks (before optimization)\n    pub cost_before: usize,\n    // cost after optimization\n    pub cost_after: usize,\n    // value gained each time this candidate is used\n    pub value_per_use: usize,\n    // times this block could run in the execution\n    pub execution_count: u32,\n}\n\nimpl BlockCandidate {\n    pub fn new<A: Adapter, C: ApcCandidate<A>>(\n        block: &BlockAndStats<A::Instruction>,\n        apc: &C,\n    ) -> Self {\n        Self {\n            start_pcs: block.block.start_pcs(),\n            cost_before: apc.cost_before_opt(),\n            cost_after: apc.cost_after_opt(),\n            value_per_use: apc.value_per_use(),\n            execution_count: block.count,\n        }\n    }\n\n    pub fn value(&self) -> usize {\n        (self.execution_count as usize)\n            .checked_mul(self.value_per_use)\n            .unwrap()\n    }\n\n    pub fn cost(&self) -> usize {\n        self.cost_after\n    }\n\n    pub fn density(&self) -> Density {\n        Density {\n            value: self.value(),\n            cost: self.cost(),\n            tie: self.start_pcs[0],\n        }\n    }\n}\n\n#[derive(Clone, Debug)]\npub struct Density {\n    value: usize,\n    cost: usize,\n    tie: u64,\n}\n\nimpl PartialEq for Density {\n    fn eq(&self, other: &Self) -> bool {\n        self.cmp(other) == std::cmp::Ordering::Equal\n    }\n}\n\nimpl Eq for Density {}\n\nimpl PartialOrd for Density {\n    fn partial_cmp(&self, other: &Self) -> 
Option<std::cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl Ord for Density {\n    // Avoids value/cost integer ratio by using cross-multiplication\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        let lhs = self.value.checked_mul(other.cost).unwrap();\n        let rhs = other.value.checked_mul(self.cost).unwrap();\n\n        lhs.cmp(&rhs).then_with(|| self.tie.cmp(&other.tie))\n    }\n}\n\n/// Counts the occurrences of a candidate in a basic block run.\n/// Returns the count and the sub-runs after the candidate is removed.\nfn count_and_update_run<'a>(\n    sblock: &BlockCandidate,\n    run: &'a ExecutionBasicBlockRun,\n) -> (u32, impl Iterator<Item = ExecutionBasicBlockRun> + 'a) {\n    let sblock_len = sblock.start_pcs.len();\n    let matches = find_non_overlapping(&run.0, &sblock.start_pcs);\n    let count = matches.len() as u32;\n    let match_intervals = matches.into_iter().flat_map(move |i| [i, i + sblock_len]);\n    let sub_runs = std::iter::once(0)\n        .chain(match_intervals)\n        .chain(std::iter::once(run.0.len()))\n        .tuples()\n        // skip empty sequences\n        .filter(|(start, end)| start != end)\n        .map(|(start, end)| ExecutionBasicBlockRun(run.0[start..end].to_vec()));\n    (count, sub_runs)\n}\n\n/// Count the occurences of a candidate in the execution (multiple basic block runs).\n/// Returns the count and an updated execution with the candidate removed.\nfn count_and_update_execution(\n    sblock: &BlockCandidate,\n    execution: &[(ExecutionBasicBlockRun, u32)],\n) -> (u32, Vec<(ExecutionBasicBlockRun, u32)>) {\n    let mut total_count = 0;\n    let new_execution = execution\n        .iter()\n        .flat_map(|(run, run_count)| {\n            let (count, sub_runs) = count_and_update_run(sblock, run);\n            total_count += count * *run_count;\n            sub_runs.map(|sub_run| (sub_run, *run_count))\n        })\n        .collect();\n    (total_count, new_execution)\n}\n\n/// 
Greedily select blocks based on density.\n/// Once a candidate is selected, the value of the remaining candidates are updated to reflect the new execution (with the selection removed).\n/// Returns the indices of the selected blocks, together with how many times each would run if applied over the execution in the selected order.\npub fn select_blocks_greedy<A: Adapter, C: ApcCandidate<A>>(\n    apcs: &[C],\n    blocks: &[BlockAndStats<A::Instruction>],\n    budget: usize,\n    max_selected: usize,\n    execution_bb_runs: &[(ExecutionBasicBlockRun, u32)],\n) -> Vec<usize> {\n    let mut candidates = blocks\n        .iter()\n        .zip_eq(apcs)\n        .map(|(b, apc)| BlockCandidate::new(b, apc))\n        .collect::<Vec<_>>();\n\n    // keep candidates by priority. As a candidate is selected, remaining priorities will be (lazily) updated.\n    let mut by_priority: PriorityQueue<_, _> = candidates\n        .iter()\n        .map(BlockCandidate::density)\n        .enumerate()\n        .collect();\n\n    let mut selected = vec![];\n    let mut cumulative_cost = 0;\n    let mut current_execution = execution_bb_runs.to_vec();\n\n    while let Some((idx, _prio)) = by_priority.pop() {\n        let c = &mut candidates[idx];\n\n        // ignore if too costly\n        if cumulative_cost + c.cost() > budget {\n            // The item does not fit, skip it\n            continue;\n        }\n\n        // check if the priority of this candidate has changed by re-counting it over the remaining execution.\n        let (count, new_execution) = count_and_update_execution(c, &current_execution);\n        if count == 0 {\n            // candidate no longer runs, remove it\n            continue;\n        } else if count < c.execution_count {\n            // re-insert with updated priority\n            c.execution_count = count;\n            by_priority.push(idx, c.density());\n            continue;\n        }\n\n        // the item fits, increment the cumulative cost and update the 
execution by removing its occurrences\n        cumulative_cost += c.cost();\n        current_execution = new_execution;\n        selected.push(idx);\n\n        if selected.len() >= max_selected {\n            break;\n        }\n    }\n    selected\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    fn sblock(start_pcs: Vec<u64>) -> BlockCandidate {\n        BlockCandidate {\n            start_pcs,\n            cost_before: 0,\n            cost_after: 0,\n            value_per_use: 0,\n            execution_count: 0,\n        }\n    }\n\n    fn run(pcs: Vec<u64>) -> ExecutionBasicBlockRun {\n        ExecutionBasicBlockRun(pcs)\n    }\n\n    #[test]\n    fn test_count_and_update_run() {\n        // no match: full run returned as single sub-run\n        let r = run(vec![3, 4, 5]);\n        let (count, sub_runs) = count_and_update_run(&sblock(vec![1, 2]), &r);\n        assert_eq!(count, 0);\n        assert_eq!(sub_runs.collect::<Vec<_>>(), vec![run(vec![3, 4, 5])]);\n\n        // match at start\n        let r = run(vec![1, 2, 3, 4]);\n        let (count, sub_runs) = count_and_update_run(&sblock(vec![1, 2]), &r);\n        assert_eq!(count, 1);\n        assert_eq!(sub_runs.collect::<Vec<_>>(), vec![run(vec![3, 4])]);\n\n        // match at end\n        let r = run(vec![1, 2, 3, 4]);\n        let (count, sub_runs) = count_and_update_run(&sblock(vec![3, 4]), &r);\n        assert_eq!(count, 1);\n        assert_eq!(sub_runs.collect::<Vec<_>>(), vec![run(vec![1, 2])]);\n\n        // match in middle\n        let r = run(vec![1, 2, 3, 4]);\n        let (count, sub_runs) = count_and_update_run(&sblock(vec![2, 3]), &r);\n        assert_eq!(count, 1);\n        assert_eq!(\n            sub_runs.collect::<Vec<_>>(),\n            vec![run(vec![1]), run(vec![4])]\n        );\n\n        // multiple matches\n        let r = run(vec![1, 2, 3, 1, 2, 4]);\n        let (count, sub_runs) = count_and_update_run(&sblock(vec![1, 2]), &r);\n        assert_eq!(count, 2);\n        
assert_eq!(\n            sub_runs.collect::<Vec<_>>(),\n            vec![run(vec![3]), run(vec![4])]\n        );\n\n        // full run is the match: no sub-runs\n        let r = run(vec![1, 2, 3]);\n        let (count, sub_runs) = count_and_update_run(&sblock(vec![1, 2, 3]), &r);\n        assert_eq!(count, 1);\n        assert_eq!(sub_runs.collect::<Vec<_>>(), vec![]);\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/pgo/instruction.rs",
    "content": "use std::{cmp::Reverse, collections::BTreeMap};\n\nuse itertools::Itertools;\n\nuse crate::{\n    adapter::{Adapter, AdapterApcWithStats, AdapterExecutionBlocks, AdapterVmConfig, PgoAdapter},\n    execution_profile::ExecutionProfile,\n    pgo::create_apcs_for_all_blocks,\n    EmpiricalConstraints, PowdrConfig,\n};\n\npub struct InstructionPgo<A> {\n    _marker: std::marker::PhantomData<A>,\n    data: ExecutionProfile,\n}\n\nimpl<A> InstructionPgo<A> {\n    pub fn with_pgo_data(data: ExecutionProfile) -> Self {\n        Self {\n            _marker: std::marker::PhantomData,\n            data,\n        }\n    }\n}\n\nimpl<A: Adapter> PgoAdapter for InstructionPgo<A> {\n    type Adapter = A;\n\n    fn create_apcs_with_pgo(\n        &self,\n        exec_blocks: AdapterExecutionBlocks<Self::Adapter>,\n        config: &PowdrConfig,\n        vm_config: AdapterVmConfig<Self::Adapter>,\n        _labels: BTreeMap<u64, Vec<String>>,\n        empirical_constraints: EmpiricalConstraints,\n    ) -> Vec<AdapterApcWithStats<Self::Adapter>> {\n        tracing::info!(\n            \"Generating autoprecompiles with instruction PGO for {} blocks\",\n            exec_blocks.blocks.len()\n        );\n\n        if config.autoprecompiles == 0 {\n            return vec![];\n        }\n\n        let blocks = exec_blocks\n            .blocks\n            .into_iter()\n            // sort by frequency * number of instructions in the block, descending\n            .sorted_by_key(|block_and_stats| {\n                Reverse(block_and_stats.count * block_and_stats.block.instructions().count() as u32)\n            })\n            .map(|block_and_stats| {\n                let block = block_and_stats.block;\n                assert!(block.is_basic_block(), \"Instruction PGO does not support superblocks\");\n                let frequency = block_and_stats.count;\n                let number_of_instructions = block.instructions().count();\n                let value = frequency * 
number_of_instructions as u32;\n\n                tracing::debug!(\n                    \"Basic block start_pc: {}, value: {}, frequency: {}, number_of_instructions: {}\",\n                    block.pcs().next().unwrap(),\n                    value,\n                    frequency,\n                    number_of_instructions,\n                );\n\n                block\n            })\n            .collect();\n\n        create_apcs_for_all_blocks::<Self::Adapter>(\n            blocks,\n            config,\n            vm_config,\n            empirical_constraints,\n        )\n    }\n\n    fn execution_profile(&self) -> Option<&ExecutionProfile> {\n        Some(&self.data)\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/pgo/mod.rs",
    "content": "use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};\nuse strum::{Display, EnumString};\n\nuse crate::{\n    adapter::{Adapter, AdapterApcWithStats, AdapterVmConfig},\n    blocks::SuperBlock,\n    evaluation::evaluate_apc,\n    execution_profile::ExecutionProfile,\n    export::{ExportLevel, ExportOptions},\n    EmpiricalConstraints, PowdrConfig,\n};\n\nmod cell;\nmod instruction;\nmod none;\n\npub use {\n    cell::{ApcCandidate, CellPgo},\n    instruction::InstructionPgo,\n    none::NonePgo,\n};\n\n/// Three modes for profiler guided optimization with different cost functions to sort the basic blocks by descending cost and select the most costly ones to accelerate.\n#[derive(Default)]\npub enum PgoConfig {\n    /// value = cells saved per apc * times executed\n    /// cost = number of columns in the apc\n    /// constraint of max total columns\n    Cell(ExecutionProfile, Option<usize>),\n    /// value = instruction per apc * times executed\n    Instruction(ExecutionProfile),\n    /// value = instruction per apc\n    #[default]\n    None,\n}\n\nimpl PgoConfig {\n    /// Returns the number of times a certain pc was executed in the profile.\n    pub fn pc_execution_count(&self, pc: u64) -> Option<u32> {\n        match self {\n            PgoConfig::Cell(prof, _) | PgoConfig::Instruction(prof) => {\n                prof.pc_count.get(&pc).copied()\n            }\n            PgoConfig::None => None,\n        }\n    }\n}\n\n/// CLI enum for PGO mode\n#[derive(Copy, Clone, Debug, EnumString, Display, Default)]\n#[strum(serialize_all = \"lowercase\")]\npub enum PgoType {\n    /// cost = cells saved per apc * times executed\n    #[default]\n    Cell,\n    /// cost = instruction per apc * times executed\n    Instruction,\n    /// cost = instruction per apc\n    None,\n}\n\npub fn pgo_config(\n    pgo: PgoType,\n    max_columns: Option<usize>,\n    execution_profile: ExecutionProfile,\n) -> PgoConfig {\n    match pgo {\n        
PgoType::Cell => PgoConfig::Cell(execution_profile, max_columns),\n        PgoType::Instruction => PgoConfig::Instruction(execution_profile),\n        PgoType::None => PgoConfig::None,\n    }\n}\n\n// Only used for PgoConfig::Instruction and PgoConfig::None,\n// because PgoConfig::Cell caches all APCs in sorting stage.\nfn create_apcs_for_all_blocks<A: Adapter>(\n    blocks: Vec<SuperBlock<A::Instruction>>,\n    config: &PowdrConfig,\n    vm_config: AdapterVmConfig<A>,\n    empirical_constraints: EmpiricalConstraints,\n) -> Vec<AdapterApcWithStats<A>> {\n    let n_acc = config.autoprecompiles as usize;\n    tracing::info!(\"Generating {n_acc} autoprecompiles in parallel\");\n\n    blocks\n        .into_par_iter()\n        .skip(config.skip_autoprecompiles as usize)\n        .take(n_acc)\n        .map(|superblock| {\n            tracing::debug!(\n                \"Accelerating block of length {} and start pcs {:?}\",\n                superblock.instructions().count(),\n                superblock.start_pcs(),\n            );\n\n            let export_options = ExportOptions::new(\n                config.apc_candidates_dir_path.clone(),\n                &superblock.start_pcs(),\n                ExportLevel::OnlyAPC,\n            );\n            let apc = crate::build::<A>(\n                superblock.clone(),\n                vm_config.clone(),\n                config.degree_bound,\n                export_options,\n                &empirical_constraints,\n            )\n            .unwrap();\n\n            evaluate_apc::<A>(vm_config.instruction_handler, apc)\n        })\n        .collect()\n}\n"
  },
  {
    "path": "autoprecompiles/src/pgo/none.rs",
    "content": "use std::{cmp::Reverse, collections::BTreeMap};\n\nuse derivative::Derivative;\nuse itertools::Itertools;\n\nuse crate::{\n    adapter::{Adapter, AdapterApcWithStats, AdapterExecutionBlocks, AdapterVmConfig, PgoAdapter},\n    pgo::create_apcs_for_all_blocks,\n    EmpiricalConstraints, PowdrConfig,\n};\n\n#[derive(Derivative)]\n#[derivative(Default(bound = \"\"))]\npub struct NonePgo<A> {\n    _marker: std::marker::PhantomData<A>,\n}\n\nimpl<A: Adapter> PgoAdapter for NonePgo<A> {\n    type Adapter = A;\n\n    fn create_apcs_with_pgo(\n        &self,\n        exec_blocks: AdapterExecutionBlocks<Self::Adapter>,\n        config: &PowdrConfig,\n        vm_config: AdapterVmConfig<Self::Adapter>,\n        _labels: BTreeMap<u64, Vec<String>>,\n        empirical_constraints: EmpiricalConstraints,\n    ) -> Vec<AdapterApcWithStats<Self::Adapter>> {\n        let blocks = exec_blocks\n            .blocks\n            .into_iter()\n            // sort by number of instructions in the block, descending\n            .sorted_by_key(|block_and_stats| {\n                Reverse(block_and_stats.block.instructions().count() as u32)\n            })\n            .map(|block_and_stats| {\n                let block = block_and_stats.block;\n                assert!(\n                    block.is_basic_block(),\n                    \"None PGO does not support superblocks\"\n                );\n                tracing::debug!(\n                    \"Basic block start_pc: {}, number_of_instructions: {}\",\n                    block.pcs().next().unwrap(),\n                    block.instructions().count(),\n                );\n\n                block\n            })\n            .collect();\n\n        create_apcs_for_all_blocks::<Self::Adapter>(\n            blocks,\n            config,\n            vm_config,\n            empirical_constraints,\n        )\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/powdr.rs",
    "content": "use std::collections::BTreeMap;\nuse std::sync::Arc;\n\nuse itertools::Itertools;\nuse powdr_expression::visitors::{AllChildren, ExpressionVisitable};\nuse powdr_number::FieldElement;\n\nuse crate::expression::{AlgebraicExpression, AlgebraicReference};\nuse crate::SymbolicMachine;\n\npub fn make_refs_zero<T: FieldElement>(expr: &mut AlgebraicExpression<T>) {\n    let zero = AlgebraicExpression::Number(T::zero());\n    expr.pre_visit_expressions_mut(&mut |expr| {\n        if let AlgebraicExpression::Reference(AlgebraicReference { .. }) = expr {\n            *expr = zero.clone();\n        }\n    });\n}\n\npub fn make_bool<T: FieldElement>(expr: AlgebraicExpression<T>) -> AlgebraicExpression<T> {\n    let one = AlgebraicExpression::Number(T::from(1u64));\n    expr.clone() * (expr - one)\n}\n\npub fn substitute_subexpressions<T: Clone + std::cmp::Ord>(\n    expr: &mut AlgebraicExpression<T>,\n    sub: &BTreeMap<AlgebraicExpression<T>, AlgebraicExpression<T>>,\n) {\n    expr.pre_visit_expressions_mut(&mut |expr| {\n        if let Some(sub_expr) = sub.get(expr) {\n            *expr = sub_expr.clone();\n        }\n    });\n}\n\npub trait UniqueReferences<'a, T: 'a, R> {\n    /// Returns an iterator over the unique references\n    fn unique_references(&'a self) -> impl Iterator<Item = R>;\n}\n\nimpl<'a, T: 'a, E: AllChildren<AlgebraicExpression<T>>> UniqueReferences<'a, T, AlgebraicReference>\n    for E\n{\n    // Output unique column references sorted by ascending id of original instruction columns\n    fn unique_references(&'a self) -> impl Iterator<Item = AlgebraicReference> {\n        self.all_children()\n            .filter_map(|e| {\n                if let AlgebraicExpression::Reference(r) = e {\n                    Some(r.clone())\n                } else {\n                    None\n                }\n            })\n            .map(|r| (r.id, r))\n            .collect::<BTreeMap<_, _>>()\n            .into_values()\n    }\n}\n\n/// Globalizes the 
references in the machine by appending a suffix to their names\n/// and offsetting their IDs to start from `curr_id`.\n/// Returns:\n/// - The updated `next_global_id`.\n/// - The substitutions, mapping the local reference IDs to the global ones.\n/// - The updated machine with globalized references.\npub fn globalize_references<T: FieldElement>(\n    machine: SymbolicMachine<T>,\n    mut next_global_id: u64,\n    suffix: usize,\n) -> (u64, Vec<u64>, SymbolicMachine<T>) {\n    let unique_reference_ids = machine.unique_references().map(|r| r.id).collect_vec();\n    let machine_size = unique_reference_ids.len() as u64;\n    assert_eq!(\n        *unique_reference_ids.iter().max().unwrap(),\n        machine_size - 1,\n        \"The reference ids must be contiguous\"\n    );\n\n    let machine = globalize_reference_names(machine, suffix);\n    let machine = offset_reference_ids(machine, next_global_id);\n\n    let subs = (next_global_id..(next_global_id + machine_size)).collect::<Vec<_>>();\n    next_global_id += machine_size;\n    (next_global_id, subs, machine)\n}\n\n/// Globalizes the names of references in the machine by appending a suffix.\nfn globalize_reference_names<T: FieldElement>(\n    mut machine: SymbolicMachine<T>,\n    suffix: usize,\n) -> SymbolicMachine<T> {\n    // Allocate a new string for each *unique* reference in the machine\n    let globalized_name = |name| Arc::new(format!(\"{name}_{suffix}\"));\n    let name_by_id = machine\n        .unique_references()\n        .map(|reference| (reference.id, globalized_name(reference.name)))\n        .collect::<BTreeMap<_, _>>();\n\n    // Update the names\n    machine.pre_visit_expressions_mut(&mut |e| {\n        if let AlgebraicExpression::Reference(r) = e {\n            r.name = name_by_id.get(&r.id).unwrap().clone();\n        }\n    });\n\n    machine\n}\n\nfn offset_reference_ids<T: FieldElement>(\n    mut machine: SymbolicMachine<T>,\n    offset: u64,\n) -> SymbolicMachine<T> {\n    
machine.pre_visit_expressions_mut(&mut |e| {\n        if let AlgebraicExpression::Reference(r) = e {\n            r.id += offset;\n        }\n    });\n    machine\n}\n"
  },
  {
    "path": "autoprecompiles/src/range_constraint_optimizer.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fmt::Display;\nuse std::hash::Hash;\n\nuse itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::{\n    AlgebraicConstraint, BusInteraction, BusInteractionHandler, ConstraintSystem,\n};\nuse powdr_constraint_solver::grouped_expression::GroupedExpression;\nuse powdr_constraint_solver::inliner::DegreeBound;\nuse powdr_constraint_solver::range_constraint::RangeConstraint;\nuse powdr_constraint_solver::solver::{new_solver, Solver};\nuse powdr_number::FieldElement;\n\npub type RangeConstraints<T, V> = Vec<(GroupedExpression<T, V>, RangeConstraint<T>)>;\n\n/// The requested range constraint cannot be implemented.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct MakeRangeConstraintsError(pub String);\n\npub trait RangeConstraintHandler<T: FieldElement> {\n    /// If the bus interaction *only* enforces range constraints, returns them\n    /// as a map of expressions to range constraints.\n    ///\n    /// For example:\n    /// - If a bus interaction takes two arguments `a` and `b` and enforces the\n    ///   range constraints `0 <= a < 2^b`, it is *not* a pure range constraint if\n    ///   both values are unknown (because the allowed values of `a` depend on `b`)\n    /// - On the other hand, if `b` is known, it is a pure range constraint.\n    ///\n    /// Any stateful bus interaction is not a pure range constraint.\n    /// This function will only be called with bus interactions with multiplicity 1.\n    fn pure_range_constraints<V: Ord + Clone + Eq + Display + Hash>(\n        &self,\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n    ) -> Option<RangeConstraints<T, V>>;\n\n    /// Given a set of range constraints, returns a list of bus interactions\n    /// that implements them. 
The implementation is free to implement multiple\n    /// range constraints using a single bus interaction.\n    /// As all input range constraints are unconditional, the multiplicity of\n    /// the returned bus interactions should be 1.\n    /// If one of the range constraints cannot be implemented exactly, an error\n    /// is returned. For soundness, the implementation should *never* relax the\n    /// range constraint.\n    fn batch_make_range_constraints<V: Ord + Clone + Eq + Display + Hash>(\n        &self,\n        range_constraints: RangeConstraints<T, V>,\n    ) -> Result<Vec<BusInteraction<GroupedExpression<T, V>>>, MakeRangeConstraintsError>;\n}\n\n/// Optimizes range constraints, minimizing the number of bus interactions.\n///\n/// This step:\n/// - removes range constraints that are already implied by existing constraints\n/// - batches several range constraints into one bus interaction, if possible\n/// - implements bit constraints via polynomial constraints, if the degree bound allows\npub fn optimize_range_constraints<T: FieldElement, V: Ord + Clone + Hash + Eq + Display>(\n    mut system: ConstraintSystem<T, V>,\n    bus_interaction_handler: impl BusInteractionHandler<T> + RangeConstraintHandler<T> + Clone,\n    degree_bound: DegreeBound,\n) -> ConstraintSystem<T, V> {\n    // Remove all pure range constraints, but collect what was removed.\n    // We store the expressions to constrain in a vector, so that we can keep the order of\n    // the range constraints as much as possible.\n    let mut to_constrain = Vec::new();\n    let mut range_constraints = BTreeMap::new();\n    system.bus_interactions.retain(|bus_int| {\n        if bus_int.multiplicity != GroupedExpression::from_number(T::one()) {\n            // Most range constraints are unconditional in practice, it's probably not\n            // worth dealing with the conditional ones.\n            return true;\n        }\n\n        match bus_interaction_handler.pure_range_constraints(bus_int) {\n 
           Some(new_range_constraints) => {\n                to_constrain.extend(new_range_constraints.iter().map(|(expr, _)| expr.clone()));\n                for (expr, rc) in new_range_constraints {\n                    let existing_rc = range_constraints\n                        .entry(expr)\n                        .or_insert_with(RangeConstraint::default);\n                    *existing_rc = existing_rc.conjunction(&rc);\n                }\n                false\n            }\n            None => true,\n        }\n    });\n\n    // Filter range constraints that are already implied by existing constraints.\n    // TODO: They could also be implied by each other.\n    let mut solver = new_solver(system.clone(), bus_interaction_handler.clone());\n    solver.solve().unwrap();\n    let to_constrain = to_constrain\n        .into_iter()\n        .unique()\n        .map(|expr| {\n            let rc = range_constraints.remove(&expr).unwrap();\n            (expr, rc)\n        })\n        .filter(|(expr, rc)| {\n            let current_rc = solver.range_constraint_for_expression(expr);\n            current_rc != current_rc.conjunction(rc)\n        })\n        .collect::<Vec<_>>();\n\n    // Implement bit constraints via polynomial constraints, if the degree bound allows.\n    let mut bit_constraints = Vec::new();\n    let to_constrain = to_constrain\n        .into_iter()\n        .filter(|(expr, rc)| {\n            let bit_range_constraint = AlgebraicConstraint::assert_bool(expr.clone());\n            if rc == &RangeConstraint::from_mask(1)\n                && bit_range_constraint.degree() <= degree_bound.identities\n            {\n                bit_constraints.push(bit_range_constraint);\n                false\n            } else {\n                true\n            }\n        })\n        .collect();\n\n    // Create all range constraints in batch and add them to the system.\n    // Note that unwrapping here should be fine, because we only pass range constraints\n    
// that were returned from `pure_range_constraints`, so clearly the VM is able to\n    // implement them.\n    let range_constraints = bus_interaction_handler\n        .batch_make_range_constraints(to_constrain)\n        .unwrap();\n    for bus_interaction in &range_constraints {\n        assert_eq!(bus_interaction.multiplicity.try_to_number(), Some(T::one()));\n    }\n    system.bus_interactions.extend(range_constraints);\n    system.algebraic_constraints.extend(bit_constraints);\n\n    system\n}\n\n/// Utility functions useful for implementing `batch_make_range_constraints`.\npub mod utils {\n    use itertools::Itertools;\n    use powdr_constraint_solver::{\n        grouped_expression::GroupedExpression, range_constraint::RangeConstraint,\n    };\n    use powdr_number::FieldElement;\n    use std::fmt::Display;\n    use std::hash::Hash;\n\n    use crate::range_constraint_optimizer::RangeConstraints;\n\n    /// If the range constraints is the range 0..(2^bits - 1), returns Some(bits).\n    pub fn range_constraint_to_num_bits<T: FieldElement>(\n        range_constraint: &RangeConstraint<T>,\n    ) -> Option<usize> {\n        (0..30).find(|num_bits| {\n            let mask = (1u64 << num_bits) - 1;\n            range_constraint == &RangeConstraint::from_mask(mask)\n        })\n    }\n\n    /// Given a set of range constraints, filters out the byte constraints and returns them.\n    pub fn filter_byte_constraints<T: FieldElement, V: Ord + Clone + Eq + Display + Hash>(\n        range_constraints: &mut RangeConstraints<T, V>,\n    ) -> Vec<GroupedExpression<T, V>> {\n        let mut byte_constraints = Vec::new();\n        range_constraints.retain(|(expr, rc)| match range_constraint_to_num_bits(rc) {\n            Some(8) => {\n                byte_constraints.push(expr.clone());\n                false\n            }\n            _ => true,\n        });\n        byte_constraints.into_iter().unique().collect()\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/stats_logger.rs",
    "content": "use std::hash::Hash;\nuse std::{fmt::Display, time::Instant};\n\nuse itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::ConstraintSystem;\nuse powdr_constraint_solver::indexed_constraint_system::IndexedConstraintSystem;\nuse powdr_number::FieldElement;\n\nuse crate::{powdr::UniqueReferences, SymbolicMachine};\n\npub struct StatsLogger {\n    start_time: Instant,\n    step_start_time: Instant,\n}\n\nimpl StatsLogger {\n    pub fn start(system: impl Into<Stats>) -> Self {\n        log::info!(\"Starting optimization - {}\", system.into());\n        let start_time = Instant::now();\n        let step_start_time = start_time;\n        StatsLogger {\n            start_time,\n            step_start_time,\n        }\n    }\n\n    pub fn log(&mut self, step: &str, system: impl Into<Stats>) {\n        let elapsed = self.step_start_time.elapsed().as_secs_f32();\n        log::debug!(\n            \"After {step:<32} (took {elapsed:7.4} s) - {}\",\n            system.into()\n        );\n        self.step_start_time = Instant::now();\n    }\n    pub fn finalize(self, system: impl Into<Stats>) {\n        let elapsed = self.start_time.elapsed().as_secs_f32();\n        log::info!(\n            \"Optimization took (took {elapsed:7.4} s) - {}\",\n            system.into()\n        );\n    }\n}\n\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct Stats {\n    num_constraints: usize,\n    num_bus_interactions: usize,\n    num_witness_columns: usize,\n}\n\nimpl Display for Stats {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"Constraints: {}, Bus Interactions: {}, Witness Columns: {}\",\n            self.num_constraints, self.num_bus_interactions, self.num_witness_columns\n        )\n    }\n}\n\nimpl<P: FieldElement> From<&SymbolicMachine<P>> for Stats {\n    fn from(machine: &SymbolicMachine<P>) -> Self {\n        Stats {\n            num_constraints: 
machine.constraints.len(),\n            num_bus_interactions: machine.bus_interactions.len(),\n            num_witness_columns: machine.unique_references().count(),\n        }\n    }\n}\n\nimpl<P: FieldElement, V: Ord + Clone + Hash + Eq> From<&ConstraintSystem<P, V>> for Stats {\n    fn from(constraint_system: &ConstraintSystem<P, V>) -> Self {\n        Stats {\n            num_constraints: constraint_system.algebraic_constraints.len(),\n            num_bus_interactions: constraint_system.bus_interactions.len(),\n            num_witness_columns: constraint_system\n                .referenced_unknown_variables()\n                .unique()\n                .count(),\n        }\n    }\n}\n\nimpl<P: FieldElement, V: Ord + Clone + Hash + Eq> From<&IndexedConstraintSystem<P, V>> for Stats {\n    fn from(constraint_system: &IndexedConstraintSystem<P, V>) -> Self {\n        Stats::from(constraint_system.system())\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/symbolic_machine.rs",
    "content": "use crate::bus_map::BusMap;\nuse crate::expression::{AlgebraicExpression, AlgebraicReference};\nuse crate::expression_conversion::{\n    algebraic_to_grouped_expression, grouped_expression_to_algebraic,\n};\nuse crate::powdr::UniqueReferences;\nuse itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::{\n    self, AlgebraicConstraint, BusInteraction, ConstraintSystem, DerivedVariable,\n};\nuse powdr_constraint_solver::grouped_expression::GroupedExpression;\nuse powdr_expression::AlgebraicUnaryOperator;\nuse powdr_expression::{visitors::Children, AlgebraicUnaryOperation};\nuse serde::{Deserialize, Serialize};\nuse std::fmt::Display;\nuse std::iter::once;\n\nuse powdr_number::FieldElement;\n\n#[derive(Debug, Clone, PartialEq, Hash, Eq, Serialize, Deserialize)]\npub struct SymbolicInstructionStatement<T> {\n    pub opcode: T,\n    pub args: Vec<T>,\n}\n\nimpl<T> IntoIterator for SymbolicInstructionStatement<T> {\n    type IntoIter = std::iter::Chain<std::iter::Once<T>, std::vec::IntoIter<T>>;\n    type Item = T;\n\n    fn into_iter(self) -> Self::IntoIter {\n        once(self.opcode).chain(self.args)\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(transparent)]\npub struct SymbolicConstraint<T> {\n    pub expr: AlgebraicExpression<T>,\n}\n\nimpl<T: Display> Display for SymbolicConstraint<T> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.expr)\n    }\n}\n\nimpl<T> From<AlgebraicExpression<T>> for SymbolicConstraint<T> {\n    fn from(expr: AlgebraicExpression<T>) -> Self {\n        let expr = match expr {\n            AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation {\n                op: AlgebraicUnaryOperator::Minus,\n                expr,\n            }) => *expr, // Remove the negation at the outside.\n            other => other,\n        };\n        Self { expr }\n    }\n}\n\nimpl<T> Children<AlgebraicExpression<T>> for 
SymbolicConstraint<T> {\n    fn children(&self) -> Box<dyn Iterator<Item = &AlgebraicExpression<T>> + '_> {\n        Box::new(once(&self.expr))\n    }\n\n    fn children_mut(&mut self) -> Box<dyn Iterator<Item = &mut AlgebraicExpression<T>> + '_> {\n        Box::new(once(&mut self.expr))\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]\npub struct SymbolicBusInteraction<T> {\n    pub id: u64,\n    pub mult: AlgebraicExpression<T>,\n    pub args: Vec<AlgebraicExpression<T>>,\n}\n\nimpl<T: Display> Display for SymbolicBusInteraction<T> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"(id={}, mult={}, args=[{}])\",\n            self.id,\n            self.mult,\n            self.args.iter().join(\", \")\n        )\n    }\n}\n\nimpl<T: Copy> SymbolicBusInteraction<T> {\n    pub fn try_multiplicity_to_number(&self) -> Option<T> {\n        match self.mult {\n            AlgebraicExpression::Number(n) => Some(n),\n            _ => None,\n        }\n    }\n}\n\nimpl<T> Children<AlgebraicExpression<T>> for SymbolicBusInteraction<T> {\n    fn children(&self) -> Box<dyn Iterator<Item = &AlgebraicExpression<T>> + '_> {\n        Box::new(once(&self.mult).chain(&self.args))\n    }\n\n    fn children_mut(&mut self) -> Box<dyn Iterator<Item = &mut AlgebraicExpression<T>> + '_> {\n        Box::new(once(&mut self.mult).chain(&mut self.args))\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Ord, PartialOrd, Eq, PartialEq, Hash)]\npub enum BusInteractionKind {\n    Send,\n    Receive,\n}\n\n/// A machine comprised of algebraic constraints, bus interactions and potentially derived columns.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SymbolicMachine<T> {\n    /// Constraints whose expressions have to evaluate to zero for an assignment to be satisfying.\n    pub constraints: Vec<SymbolicConstraint<T>>,\n    /// Bus interactions that model communication 
with other machines / chips or static lookups.\n    pub bus_interactions: Vec<SymbolicBusInteraction<T>>,\n    /// Columns that have been newly created during the optimization process with a method\n    /// to compute their values from other columns.\n    pub derived_columns: Vec<DerivedVariable<T, AlgebraicReference, AlgebraicExpression<T>>>,\n}\n\ntype ComputationMethod<T> =\n    powdr_constraint_solver::constraint_system::ComputationMethod<T, AlgebraicExpression<T>>;\n\nimpl<T> SymbolicMachine<T> {\n    pub fn main_columns(&self) -> impl Iterator<Item = AlgebraicReference> + use<'_, T> {\n        self.unique_references()\n    }\n\n    pub fn concatenate(mut self, other: SymbolicMachine<T>) -> Self {\n        self.constraints.extend(other.constraints);\n        self.bus_interactions.extend(other.bus_interactions);\n        self.derived_columns.extend(other.derived_columns);\n        self\n    }\n}\n\nimpl<T: Display> Display for SymbolicMachine<T> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        for bus_interaction in &self.bus_interactions {\n            writeln!(f, \"{bus_interaction}\")?;\n        }\n        for constraint in &self.constraints {\n            writeln!(f, \"{constraint} = 0\")?;\n        }\n        Ok(())\n    }\n}\n\nimpl<T: Display + Ord + Clone> SymbolicMachine<T> {\n    pub fn render<C: Display + Clone + PartialEq + Eq>(&self, bus_map: &BusMap<C>) -> String {\n        let main_columns = self.main_columns().sorted().collect_vec();\n        let mut output = format!(\n            \"Symbolic machine using {} unique main columns:\\n  {}\\n\",\n            main_columns.len(),\n            main_columns.iter().join(\"\\n  \")\n        );\n        let bus_interactions_by_bus = self\n            .bus_interactions\n            .iter()\n            .map(|bus_interaction| (bus_interaction.id, bus_interaction))\n            .into_group_map()\n            .into_iter()\n            // sorted_by_key is stable, so we'll 
keep the order within each bus\n            .sorted_by_key(|(bus_id, _)| *bus_id)\n            .collect::<Vec<_>>();\n        for (bus_id, bus_interactions) in &bus_interactions_by_bus {\n            let bus_type = bus_map.bus_type(*bus_id);\n            output.push_str(&format!(\"\\n// Bus {bus_id} ({bus_type}):\\n\",));\n            for bus_interaction in bus_interactions {\n                output.push_str(&format!(\n                    \"mult={}, args=[{}]\\n\",\n                    bus_interaction.mult,\n                    bus_interaction.args.iter().join(\", \")\n                ));\n            }\n        }\n\n        if !self.constraints.is_empty() {\n            output.push_str(\"\\n// Algebraic constraints:\\n\");\n        }\n\n        for constraint in &self.constraints {\n            output.push_str(&format!(\"{constraint} = 0\\n\"));\n        }\n\n        output.trim().to_string()\n    }\n}\n\nimpl<T> SymbolicMachine<T> {\n    pub fn degree(&self) -> usize {\n        self.children().map(|e| e.degree()).max().unwrap_or(0)\n    }\n}\n\nimpl<T> Children<AlgebraicExpression<T>> for SymbolicMachine<T> {\n    fn children(&self) -> Box<dyn Iterator<Item = &AlgebraicExpression<T>> + '_> {\n        Box::new(\n            self.constraints\n                .iter()\n                .flat_map(|c| c.children())\n                .chain(self.bus_interactions.iter().flat_map(|i| i.children())),\n        )\n    }\n\n    fn children_mut(&mut self) -> Box<dyn Iterator<Item = &mut AlgebraicExpression<T>> + '_> {\n        Box::new(\n            self.constraints\n                .iter_mut()\n                .flat_map(|c| c.children_mut())\n                .chain(\n                    self.bus_interactions\n                        .iter_mut()\n                        .flat_map(|i| i.children_mut()),\n                ),\n        )\n    }\n}\n\npub fn symbolic_machine_to_constraint_system<P: FieldElement>(\n    symbolic_machine: SymbolicMachine<P>,\n) -> ConstraintSystem<P, 
AlgebraicReference> {\n    ConstraintSystem {\n        algebraic_constraints: symbolic_machine\n            .constraints\n            .iter()\n            .map(|constraint| {\n                AlgebraicConstraint::assert_zero(algebraic_to_grouped_expression(&constraint.expr))\n            })\n            .collect(),\n        bus_interactions: symbolic_machine\n            .bus_interactions\n            .iter()\n            .map(symbolic_bus_interaction_to_bus_interaction)\n            .collect(),\n        derived_variables: symbolic_machine\n            .derived_columns\n            .iter()\n            .map(|derived_variable| {\n                let method = match &derived_variable.computation_method {\n                    ComputationMethod::Constant(c) => {\n                        constraint_system::ComputationMethod::Constant(*c)\n                    }\n                    ComputationMethod::QuotientOrZero(e1, e2) => {\n                        constraint_system::ComputationMethod::QuotientOrZero(\n                            algebraic_to_grouped_expression(e1),\n                            algebraic_to_grouped_expression(e2),\n                        )\n                    }\n                };\n                DerivedVariable::new(derived_variable.variable.clone(), method)\n            })\n            .collect(),\n    }\n}\n\npub fn constraint_system_to_symbolic_machine<P: FieldElement>(\n    constraint_system: ConstraintSystem<P, AlgebraicReference>,\n) -> SymbolicMachine<P> {\n    SymbolicMachine {\n        constraints: constraint_system\n            .algebraic_constraints\n            .into_iter()\n            .map(|constraint| grouped_expression_to_algebraic(constraint.expression).into())\n            .collect(),\n        bus_interactions: constraint_system\n            .bus_interactions\n            .into_iter()\n            .map(bus_interaction_to_symbolic_bus_interaction)\n            .collect(),\n        derived_columns: constraint_system\n            
.derived_variables\n            .into_iter()\n            .map(|derived_var| {\n                let method = match derived_var.computation_method {\n                    constraint_system::ComputationMethod::Constant(c) => {\n                        constraint_system::ComputationMethod::Constant(c)\n                    }\n                    constraint_system::ComputationMethod::QuotientOrZero(e1, e2) => {\n                        constraint_system::ComputationMethod::QuotientOrZero(\n                            grouped_expression_to_algebraic(e1),\n                            grouped_expression_to_algebraic(e2),\n                        )\n                    }\n                };\n                DerivedVariable::new(derived_var.variable, method)\n            })\n            .collect(),\n    }\n}\n\npub fn symbolic_bus_interaction_to_bus_interaction<P: FieldElement>(\n    bus_interaction: &SymbolicBusInteraction<P>,\n) -> BusInteraction<GroupedExpression<P, AlgebraicReference>> {\n    BusInteraction {\n        bus_id: GroupedExpression::from_number(P::from(bus_interaction.id)),\n        payload: bus_interaction\n            .args\n            .iter()\n            .map(|arg| algebraic_to_grouped_expression(arg))\n            .collect(),\n        multiplicity: algebraic_to_grouped_expression(&bus_interaction.mult),\n    }\n}\n\nfn bus_interaction_to_symbolic_bus_interaction<P: FieldElement>(\n    bus_interaction: BusInteraction<GroupedExpression<P, AlgebraicReference>>,\n) -> SymbolicBusInteraction<P> {\n    // We set the bus_id to a constant in `bus_interaction_to_symbolic_bus_interaction`,\n    // so this should always succeed.\n    let id = bus_interaction\n        .bus_id\n        .try_to_number()\n        .unwrap()\n        .to_arbitrary_integer()\n        .try_into()\n        .unwrap();\n    SymbolicBusInteraction {\n        id,\n        args: bus_interaction\n            .payload\n            .into_iter()\n            .map(|arg| 
grouped_expression_to_algebraic(arg))\n            .collect(),\n        mult: grouped_expression_to_algebraic(bus_interaction.multiplicity),\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/src/symbolic_machine_generator.rs",
    "content": "use itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::{ComputationMethod, DerivedVariable};\nuse powdr_expression::AlgebraicBinaryOperation;\nuse powdr_number::FieldElement;\n\nuse crate::{\n    adapter::Adapter,\n    blocks::{Instruction, SuperBlock},\n    expression::AlgebraicExpression,\n    powdr,\n    symbolic_machine::{SymbolicBusInteraction, SymbolicConstraint, SymbolicMachine},\n    Apc, BusMap, BusType, ColumnAllocator, InstructionHandler,\n};\n\n/// Converts the field type of a symbolic machine.\npub fn convert_apc_field_type<T, I, A, V, U>(\n    apc: Apc<T, I, A, V>,\n    convert_field_element: &impl Fn(T) -> U,\n) -> Apc<U, I, A, V> {\n    Apc {\n        block: apc.block,\n        machine: convert_machine_field_type(apc.machine, convert_field_element),\n        subs: apc.subs,\n        optimistic_constraints: apc.optimistic_constraints,\n    }\n}\n\n/// Converts the field type of a symbolic machine.\npub fn convert_machine_field_type<T, U>(\n    machine: SymbolicMachine<T>,\n    convert_field_element: &impl Fn(T) -> U,\n) -> SymbolicMachine<U> {\n    SymbolicMachine {\n        constraints: machine\n            .constraints\n            .into_iter()\n            .map(|c| convert_symbolic_constraint(c, convert_field_element))\n            .collect(),\n        bus_interactions: machine\n            .bus_interactions\n            .into_iter()\n            .map(|i| convert_bus_interaction(i, convert_field_element))\n            .collect(),\n        derived_columns: machine\n            .derived_columns\n            .into_iter()\n            .map(|derived_variable| {\n                let method = match derived_variable.computation_method {\n                    ComputationMethod::Constant(c) => {\n                        ComputationMethod::Constant(convert_field_element(c))\n                    }\n                    ComputationMethod::QuotientOrZero(e1, e2) => ComputationMethod::QuotientOrZero(\n                        
convert_expression(e1, convert_field_element),\n                        convert_expression(e2, convert_field_element),\n                    ),\n                };\n                DerivedVariable::new(derived_variable.variable, method)\n            })\n            .collect(),\n    }\n}\n\nfn convert_symbolic_constraint<T, U>(\n    constraint: SymbolicConstraint<T>,\n    convert: &impl Fn(T) -> U,\n) -> SymbolicConstraint<U> {\n    SymbolicConstraint {\n        expr: convert_expression(constraint.expr, convert),\n    }\n}\n\nfn convert_bus_interaction<T, U>(\n    constraint: SymbolicBusInteraction<T>,\n    convert: &impl Fn(T) -> U,\n) -> SymbolicBusInteraction<U> {\n    SymbolicBusInteraction {\n        id: constraint.id,\n        mult: convert_expression(constraint.mult, convert),\n        args: constraint\n            .args\n            .into_iter()\n            .map(|e| convert_expression(e, convert))\n            .collect(),\n    }\n}\n\nfn convert_expression<T, U>(\n    expr: AlgebraicExpression<T>,\n    convert: &impl Fn(T) -> U,\n) -> AlgebraicExpression<U> {\n    match expr {\n        AlgebraicExpression::Number(n) => AlgebraicExpression::Number(convert(n)),\n        AlgebraicExpression::Reference(r) => AlgebraicExpression::Reference(r),\n        AlgebraicExpression::BinaryOperation(algebraic_binary_operation) => {\n            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {\n                op: algebraic_binary_operation.op,\n                left: Box::new(convert_expression(\n                    *algebraic_binary_operation.left,\n                    convert,\n                )),\n                right: Box::new(convert_expression(\n                    *algebraic_binary_operation.right,\n                    convert,\n                )),\n            })\n        }\n        AlgebraicExpression::UnaryOperation(algebraic_unary_operation) => {\n            AlgebraicExpression::UnaryOperation(powdr_expression::AlgebraicUnaryOperation {\n          
      op: algebraic_unary_operation.op,\n                expr: Box::new(convert_expression(*algebraic_unary_operation.expr, convert)),\n            })\n        }\n    }\n}\n\n/// Converts a basic block into a symbolic machines (all instruction circuits\n/// concatenated) and a column allocator.\npub(crate) fn statements_to_symbolic_machine<A: Adapter>(\n    block: &SuperBlock<A::Instruction>,\n    instruction_handler: &A::InstructionHandler,\n    bus_map: &BusMap<A::CustomBusTypes>,\n) -> (SymbolicMachine<A::PowdrField>, ColumnAllocator) {\n    let (machines, column_allocator) =\n        statements_to_symbolic_machines::<A>(block, instruction_handler, bus_map);\n    let machine = machines\n        .into_iter()\n        .reduce(SymbolicMachine::concatenate)\n        .unwrap();\n    (machine, column_allocator)\n}\n\n/// Converts a basic block into a list of symbolic machines (one per instruction)\n/// and a column allocator. All columns are globally unique across all instructions.\npub(crate) fn statements_to_symbolic_machines<A: Adapter>(\n    block: &SuperBlock<A::Instruction>,\n    instruction_handler: &A::InstructionHandler,\n    bus_map: &BusMap<A::CustomBusTypes>,\n) -> (Vec<SymbolicMachine<A::PowdrField>>, ColumnAllocator) {\n    let mut col_subs: Vec<Vec<u64>> = Vec::new();\n    let mut global_idx = 0;\n    let mut machines: Vec<SymbolicMachine<A::PowdrField>> = Vec::new();\n\n    for (i, (pc, instr)) in block.instructions().enumerate() {\n        let machine = instruction_handler\n            .get_instruction_air_and_id(instr)\n            .1\n            .clone();\n\n        let machine: SymbolicMachine<<A as Adapter>::PowdrField> =\n            convert_machine_field_type(machine, &|x| A::from_field(x));\n\n        let pc_lookup_row = instr\n            .pc_lookup_row(pc)\n            .into_iter()\n            .map(|x| A::from_field(x))\n            .collect::<Vec<_>>();\n\n        let (next_global_idx, subs, machine) = powdr::globalize_references(machine, 
global_idx, i);\n        global_idx = next_global_idx;\n\n        // Make machine mutable, to add local constraints\n        let mut machine = machine;\n\n        let pc_lookup = machine\n            .bus_interactions\n            .iter()\n            .filter(|bus_int| bus_int.id == bus_map.get_bus_id(&BusType::PcLookup).unwrap())\n            .exactly_one()\n            .expect(\"Expected single pc lookup\");\n\n        // To simplify constraint solving, we constrain `is_valid` to be 1, which effectively\n        // removes the column. The optimized precompile will then have to be guarded by a new\n        // `is_valid` column.\n        let minus_is_valid: AlgebraicExpression<_> = exec_receive(\n            &machine,\n            bus_map.get_bus_id(&BusType::ExecutionBridge).unwrap(),\n        )\n        .mult\n        .clone();\n        let one = AlgebraicExpression::Number(1u64.into());\n        machine\n            .constraints\n            .push((minus_is_valid.clone() + one).into());\n\n        // Constrain the pc lookup to the current instruction.\n        machine.constraints.extend(\n            pc_lookup\n                .args\n                .iter()\n                .zip_eq(pc_lookup_row)\n                .map(|(l, r)| (l.clone() - r.into()).into()),\n        );\n\n        col_subs.push(subs);\n        machines.push(machine);\n    }\n\n    (\n        machines,\n        ColumnAllocator {\n            subs: col_subs,\n            next_poly_id: global_idx,\n        },\n    )\n}\n\nfn exec_receive<T: FieldElement>(\n    machine: &SymbolicMachine<T>,\n    exec_bus_id: u64,\n) -> SymbolicBusInteraction<T> {\n    let [r, _s] = machine\n        .bus_interactions\n        .iter()\n        .filter(|bus_int| bus_int.id == exec_bus_id)\n        .collect::<Vec<_>>()\n        .try_into()\n        .unwrap();\n    // TODO assert that r.mult matches -expr\n    r.clone()\n}\n"
  },
  {
    "path": "autoprecompiles/src/trace_handler.rs",
    "content": "use itertools::Itertools;\nuse powdr_constraint_solver::constraint_system::DerivedVariable;\nuse rayon::prelude::*;\nuse std::collections::{BTreeMap, HashMap};\nuse std::fmt::Display;\nuse std::{cmp::Eq, hash::Hash};\n\nuse crate::blocks::PcStep;\nuse crate::expression::{AlgebraicExpression, AlgebraicReference};\nuse crate::{Apc, InstructionHandler};\n\npub struct OriginalRowReference<'a, D> {\n    pub data: &'a D,\n    pub start: usize,\n    pub length: usize,\n}\n\npub struct TraceData<'a, F, D> {\n    /// For each call of the apc, the values of each original instruction's dummy trace.\n    pub dummy_values: Vec<Vec<OriginalRowReference<'a, D>>>,\n    /// The mapping from dummy trace index to APC index for each instruction.\n    pub dummy_trace_index_to_apc_index_by_instruction: Vec<Vec<(usize, usize)>>,\n    /// The mapping from poly_id to the index in the list of apc columns.\n    /// The values are always unique and contiguous.\n    pub apc_poly_id_to_index: BTreeMap<u64, usize>,\n    /// Indices of columns to compute and the way to compute them\n    /// (from other values).\n    pub columns_to_compute: &'a [DerivedVariable<F, AlgebraicReference, AlgebraicExpression<F>>],\n}\n\npub trait TraceTrait<F>: Send + Sync {\n    type Values: Send + Sync;\n\n    fn width(&self) -> usize;\n\n    fn values(&self) -> &Self::Values;\n}\n\n// TODO: refactor `Apc` so we don't have to pass A, V here\npub fn generate_trace<'a, IH, M: TraceTrait<IH::Field>, A, V>(\n    air_id_to_dummy_trace: &'a HashMap<IH::AirId, M>,\n    instruction_handler: &'a IH,\n    apc_call_count: usize,\n    apc: &'a Apc<IH::Field, IH::Instruction, A, V>,\n) -> TraceData<'a, IH::Field, M::Values>\nwhere\n    IH: InstructionHandler,\n    IH::Field: Display + Clone + Send + Sync,\n    IH::AirId: Eq + Hash + Send + Sync,\n    IH::Instruction: PcStep,\n{\n    // Keep only instructions that produce dummy records\n    let instructions_with_subs = apc\n        .instructions()\n        
.zip_eq(apc.subs.iter())\n        .filter(|(_, subs)| !subs.is_empty());\n    let instructions_with_subs = instructions_with_subs.collect::<Vec<_>>();\n\n    let original_instruction_air_ids = instructions_with_subs\n        .iter()\n        .map(|(instruction, _)| {\n            instruction_handler\n                .get_instruction_air_and_id(instruction)\n                .0\n        })\n        .collect::<Vec<_>>();\n\n    let air_id_occurrences = original_instruction_air_ids.iter().counts();\n\n    let apc_poly_id_to_index: BTreeMap<u64, usize> = apc\n        .machine\n        .main_columns()\n        .enumerate()\n        .map(|(index, c)| (c.id, index))\n        .collect();\n\n    let original_instruction_table_offsets = original_instruction_air_ids\n        .iter()\n        .scan(\n            HashMap::default(),\n            |counts: &mut HashMap<&IH::AirId, usize>, air_id| {\n                let count = counts.entry(air_id).or_default();\n                let current_count = *count;\n                *count += 1;\n                Some(current_count)\n            },\n        )\n        .collect::<Vec<_>>();\n\n    let dummy_trace_index_to_apc_index_by_instruction = instructions_with_subs\n        .iter()\n        .map(|(_, subs)| {\n            subs.iter()\n                .map(|substitution| {\n                    (\n                        substitution.original_poly_index,\n                        apc_poly_id_to_index[&substitution.apc_poly_id],\n                    )\n                })\n                .collect_vec()\n        })\n        .collect();\n\n    let dummy_values = (0..apc_call_count)\n        .into_par_iter()\n        .map(|trace_row| {\n            original_instruction_air_ids\n                .iter()\n                .zip_eq(original_instruction_table_offsets.iter())\n                .map(|(air_id, dummy_table_offset)| {\n                    let trace = air_id_to_dummy_trace.get(air_id).unwrap();\n                    let values = 
trace.values();\n                    let width = trace.width();\n                    let occurrences_per_record = air_id_occurrences.get(air_id).unwrap();\n                    let start = (trace_row * occurrences_per_record + dummy_table_offset) * width;\n                    OriginalRowReference {\n                        data: values,\n                        start,\n                        length: width,\n                    }\n                })\n                .collect_vec()\n        })\n        .collect();\n\n    let columns_to_compute = &apc.machine.derived_columns;\n\n    TraceData {\n        dummy_values,\n        dummy_trace_index_to_apc_index_by_instruction,\n        apc_poly_id_to_index,\n        columns_to_compute,\n    }\n}\n"
  },
  {
    "path": "autoprecompiles/tests/optimizer.rs",
    "content": "use expect_test::expect;\nuse itertools::Itertools;\nuse powdr_autoprecompiles::bus_map::BusMap;\nuse powdr_autoprecompiles::export::{ApcWithBusMap, SimpleInstruction};\nuse powdr_autoprecompiles::optimizer::optimize;\nuse powdr_autoprecompiles::symbolic_machine::SymbolicMachine;\nuse powdr_autoprecompiles::{Apc, ColumnAllocator, DegreeBound};\nuse powdr_number::BabyBearField;\nuse powdr_openvm_bus_interaction_handler::memory_bus_interaction::OpenVmMemoryBusInteraction;\nuse powdr_openvm_bus_interaction_handler::{\n    bus_map::{default_openvm_bus_map, OpenVmBusType},\n    OpenVmBusInteractionHandler,\n};\nuse test_log::test;\n\nconst DEFAULT_DEGREE_BOUND: DegreeBound = DegreeBound {\n    identities: 3,\n    bus_interactions: 2,\n};\n\ntype TestApc = Apc<BabyBearField, SimpleInstruction<BabyBearField>, (), ()>;\n\nfn import_apc_from_gzipped_json(file: &str) -> ApcWithBusMap<TestApc, BusMap<OpenVmBusType>> {\n    let file = std::fs::File::open(file).unwrap();\n    let reader = flate2::read::GzDecoder::new(file);\n    serde_json::from_reader(reader).unwrap()\n}\n\n#[test]\nfn load_machine_json() {\n    let apc = import_apc_from_gzipped_json(\"tests/keccak_apc_pre_opt.json.gz\");\n    let machine: SymbolicMachine<BabyBearField> = apc.apc.machine;\n    assert!(machine.derived_columns.is_empty());\n\n    expect![[r#\"\n        27521\n    \"#]]\n    .assert_debug_eq(&machine.main_columns().count());\n    expect![[r#\"\n        13262\n    \"#]]\n    .assert_debug_eq(&machine.bus_interactions.len());\n    expect![[r#\"\n        28627\n    \"#]]\n    .assert_debug_eq(&machine.constraints.len());\n}\n\n#[test]\nfn test_optimize() {\n    let apc = import_apc_from_gzipped_json(\"tests/keccak_apc_pre_opt.json.gz\");\n\n    let machine: SymbolicMachine<BabyBearField> = apc.apc.machine;\n    assert!(machine.derived_columns.is_empty());\n\n    let column_allocator = ColumnAllocator::from_max_poly_id_of_machine(&machine);\n    let machine = optimize::<_, _, _, 
OpenVmMemoryBusInteraction<_, _>>(\n        machine,\n        OpenVmBusInteractionHandler::default(),\n        DEFAULT_DEGREE_BOUND,\n        &apc.bus_map,\n        column_allocator,\n        &mut Default::default(),\n    )\n    .unwrap()\n    .0;\n\n    // This cbor file above has the `is_valid` column removed, this is why the number below\n    // might be one less than in other tests.\n    expect![[r#\"\n        2021\n    \"#]]\n    .assert_debug_eq(&machine.main_columns().count());\n    expect![[r#\"\n        1734\n    \"#]]\n    .assert_debug_eq(&machine.bus_interactions.len());\n    expect![[r#\"\n        186\n    \"#]]\n    .assert_debug_eq(&machine.constraints.len());\n}\n\n#[test]\nfn test_ecrecover() {\n    let apc = import_apc_from_gzipped_json(\"tests/ecrecover_apc_pre_opt.json.gz\");\n\n    let machine: SymbolicMachine<BabyBearField> = apc.apc.machine;\n    assert!(machine.derived_columns.is_empty());\n\n    let column_allocator = ColumnAllocator::from_max_poly_id_of_machine(&machine);\n    let machine = optimize::<_, _, _, OpenVmMemoryBusInteraction<_, _>>(\n        machine,\n        OpenVmBusInteractionHandler::default(),\n        DEFAULT_DEGREE_BOUND,\n        &default_openvm_bus_map(),\n        column_allocator,\n        &mut Default::default(),\n    )\n    .unwrap()\n    .0;\n\n    // This cbor file above has the `is_valid` column removed, this is why the number below\n    // might be one less than in other tests.\n    expect![[r#\"\n        3730\n    \"#]]\n    .assert_debug_eq(&machine.main_columns().count());\n    expect![[r#\"\n        2314\n    \"#]]\n    .assert_debug_eq(&machine.bus_interactions.len());\n    expect![[r#\"\n        3114\n    \"#]]\n    .assert_debug_eq(&machine.constraints.len());\n}\n\n#[test]\nfn test_sha256() {\n    let apc = import_apc_from_gzipped_json(\"tests/sha256_apc_pre_opt.json.gz\");\n\n    let machine: SymbolicMachine<BabyBearField> = apc.apc.machine;\n    assert!(machine.derived_columns.is_empty());\n    let 
column_allocator = ColumnAllocator::from_max_poly_id_of_machine(&machine);\n\n    let machine = optimize::<_, _, _, OpenVmMemoryBusInteraction<_, _>>(\n        machine,\n        OpenVmBusInteractionHandler::default(),\n        DEFAULT_DEGREE_BOUND,\n        &default_openvm_bus_map(),\n        column_allocator,\n        &mut Default::default(),\n    )\n    .unwrap()\n    .0;\n\n    // This cbor file above has the `is_valid` column removed, this is why the number below\n    // might be one less than in other tests.\n    expect![[r#\"\n        12034\n    \"#]]\n    .assert_debug_eq(&machine.main_columns().count());\n    expect![[r#\"\n        9539\n    \"#]]\n    .assert_debug_eq(&machine.bus_interactions.len());\n    expect![[r#\"\n        3770\n    \"#]]\n    .assert_debug_eq(&machine.constraints.len());\n}\n\n#[test]\nfn test_single_div_nondet() {\n    let apc = import_apc_from_gzipped_json(\"tests/single_div_nondet.json.gz\");\n\n    let machine: SymbolicMachine<BabyBearField> = apc.apc.machine;\n    assert!(machine.derived_columns.is_empty());\n    let column_allocator = ColumnAllocator::from_max_poly_id_of_machine(&machine);\n\n    let machine = optimize::<_, _, _, OpenVmMemoryBusInteraction<_, _>>(\n        machine,\n        OpenVmBusInteractionHandler::default(),\n        DEFAULT_DEGREE_BOUND,\n        &default_openvm_bus_map(),\n        column_allocator,\n        &mut Default::default(),\n    )\n    .unwrap()\n    .0;\n\n    let algebraic_constraints_with_zero = machine\n        .constraints\n        .iter()\n        .map(|c| c.to_string())\n        .filter(|s| s.contains(\"zero\"))\n        .join(\"\\n\");\n    expect![[r#\"\n        (zero_divisor_0 + r_zero_0) * (zero_divisor_0 + r_zero_0 - 1)\n        zero_divisor_0 * (zero_divisor_0 - 1)\n        zero_divisor_0 * (q__0_0 - 255)\n        zero_divisor_0 * (q__1_0 - 255)\n        zero_divisor_0 * (q__2_0 - 255)\n        zero_divisor_0 * (q__3_0 - 255)\n        (1 - zero_divisor_0) * ((c__0_0 + c__1_0 + 
c__2_0 + c__3_0) * c_sum_inv_0 - 1)\n        r_zero_0 * (r_zero_0 - 1)\n        (1 - (zero_divisor_0 + r_zero_0)) * ((r__0_0 + r__1_0 + r__2_0 + r__3_0) * r_sum_inv_0 - 1)\n        (q__0_0 + q__1_0 + q__2_0 + q__3_0) * ((1 - zero_divisor_0) * (q_sign_0 - sign_xor_0))\n        (q_sign_0 - sign_xor_0) * ((1 - zero_divisor_0) * q_sign_0)\n        (1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)\n        (1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (lt_diff_0 - (r_prime__3_0 * (2 * c_sign_0 - 1) + c__3_0 * (1 - 2 * c_sign_0)))\n        zero_divisor_0 * (c__0_0 + c__1_0 + c__2_0 + c__3_0)\n        r_zero_0 * (r__0_0 + r__1_0 + r__2_0 + r__3_0)\"#]]\n    .assert_eq(&algebraic_constraints_with_zero);\n\n    expect![[r#\"\n        47\n    \"#]]\n    .assert_debug_eq(&machine.main_columns().count());\n    expect![[r#\"\n        24\n    \"#]]\n    .assert_debug_eq(&machine.bus_interactions.len());\n    expect![[r#\"\n        44\n    \"#]]\n    .assert_debug_eq(&machine.constraints.len());\n}\n\n#[test]\nfn test_optimize_reth_op() {\n    let apc = import_apc_from_gzipped_json(\"tests/apc_reth_op_bug.json.gz\");\n    let machine: SymbolicMachine<BabyBearField> = apc.apc.machine;\n    assert!(machine.derived_columns.is_empty());\n\n    let bus_map = &apc.bus_map;\n    let bus_int_handler = OpenVmBusInteractionHandler::new(bus_map.clone());\n\n    let column_allocator = ColumnAllocator::from_max_poly_id_of_machine(&machine);\n    let machine = optimize::<_, _, _, OpenVmMemoryBusInteraction<_, _>>(\n        machine,\n        bus_int_handler,\n        DEFAULT_DEGREE_BOUND,\n        bus_map,\n        column_allocator,\n        &mut Default::default(),\n    )\n    .unwrap()\n    .0;\n\n    expect![[r#\"\n        446\n    \"#]]\n    .assert_debug_eq(&machine.main_columns().count());\n    expect![[r#\"\n        356\n    \"#]]\n  
  .assert_debug_eq(&machine.bus_interactions.len());\n    expect![[r#\"\n        313\n    \"#]]\n    .assert_debug_eq(&machine.constraints.len());\n}\n"
  },
  {
    "path": "cli-openvm-riscv/Cargo.toml",
    "content": "[package]\nname = \"cli-openvm-riscv\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[features]\ndefault = [\"metrics\"]\nmetrics = [\"powdr-openvm/metrics\", \"openvm-sdk/metrics\", \"openvm-stark-backend/metrics\", \"openvm-stark-sdk/metrics\"]\n\n[[bin]]\nname = \"powdr_openvm_riscv\"\npath = \"src/main.rs\"\nbench = false         # See https://github.com/bheisler/criterion.rs/issues/458\n\n[dependencies]\nopenvm-sdk.workspace = true\nopenvm-stark-sdk.workspace = true\nopenvm-stark-backend.workspace = true\npowdr-autoprecompiles.workspace = true\n\npowdr-openvm.workspace = true\npowdr-openvm-riscv.workspace = true\n\neyre.workspace = true\n\nclap = { version = \"^4.3\", features = [\"derive\"] }\n\nserde_cbor.workspace = true\n\ntracing.workspace = true\ntracing-subscriber = { version = \"0.3.17\", features = [\"std\", \"env-filter\"] }\ntracing-forest = \"0.1\"\nmetrics.workspace = true\nmetrics-tracing-context = \"0.16.0\"\nmetrics-util = \"0.17.0\"\nserde_json.workspace = true\n\n[lints]\nworkspace = true\n"
  },
  {
    "path": "cli-openvm-riscv/README.md",
    "content": "# cli-openvm\n\nUse command `execute` to run the program only, and `prove` to prove.\nThe `prove` command has a `mock` option to only check the constraints.\n\nExamples:\n\n```sh\n# Run the original program\nRUSTFLAGS='-C target-cpu=native' cargo run -r execute guest\n# Prove the original program\nRUSTFLAGS='-C target-cpu=native' cargo run -r prove guest\n# Check the constraints and witness of the original program\nRUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --mock\n# Run the program with autoprecompiles\nRUSTFLAGS='-C target-cpu=native' cargo run -r execute guest --skip 37 --autoprecompiles 1\n# Run the program with optimized autoprecompiles\nRUSTFLAGS='-C target-cpu=native' cargo run -r execute guest --skip 37 --autoprecompiles 1 --optimize\n# Prove the program with autoprecompiles\nRUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1\n# Prove the program with optimized autoprecompiles\nRUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1 --optimize\n# Check the constraints and witness of the program with autoprecompiles\nRUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1 --mock\n# Check the constraints and witness of the program with optimized autoprecompiles\nRUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1 --mock --optimize\n```\n\nIt is recommended to use at least `RUST_LOG=info` for information, and `RUST_LOG=debug` for benchmarks.\n\n"
  },
  {
    "path": "cli-openvm-riscv/src/main.rs",
    "content": "use eyre::Result;\nuse metrics_tracing_context::{MetricsLayer, TracingContextLayer};\nuse metrics_util::{debugging::DebuggingRecorder, layers::Layer};\nuse openvm_sdk::StdIn;\nuse openvm_stark_sdk::bench::serialize_metric_snapshot;\nuse powdr_autoprecompiles::empirical_constraints::EmpiricalConstraints;\nuse powdr_autoprecompiles::pgo::{pgo_config, PgoType};\nuse powdr_autoprecompiles::PowdrConfig;\nuse powdr_openvm_riscv::{\n    compile_openvm, detect_empirical_constraints, CompiledProgram, GuestOptions,\n    OriginalCompiledProgram, RiscvISA,\n};\n\n#[cfg(feature = \"metrics\")]\nuse openvm_stark_sdk::metrics_tracing::TimingMetricsLayer;\n\nuse clap::{Args, CommandFactory, Parser, Subcommand};\nuse powdr_openvm::default_powdr_openvm_config;\nuse std::{io, path::PathBuf};\nuse tracing::Level;\nuse tracing_forest::ForestLayer;\nuse tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry};\n\n#[derive(Parser)]\n#[command(name = \"powdr-openvm\", author, version, about, long_about = None)]\nstruct Cli {\n    #[command(subcommand)]\n    command: Option<Commands>,\n}\n\n#[derive(Args)]\nstruct SharedArgs {\n    #[arg(long, default_value_t = 0)]\n    autoprecompiles: usize,\n\n    #[arg(long, default_value_t = 0)]\n    skip: usize,\n\n    #[arg(long)]\n    input: Option<u32>,\n\n    #[arg(long, default_value_t = PgoType::default())]\n    pgo: PgoType,\n\n    /// When `--pgo-mode cell`, the optional max columns\n    #[clap(long)]\n    max_columns: Option<usize>,\n\n    /// When `--pgo-mode cell`, the directory to persist all APC candidates + a metrics summary\n    #[arg(long)]\n    apc_candidates_dir: Option<PathBuf>,\n\n    /// Maximum number of instructions in an APC\n    #[arg(long)]\n    apc_max_instructions: Option<u32>,\n\n    /// Ignore APCs executed less times than the cutoff\n    #[arg(long)]\n    apc_exec_count_cutoff: Option<u32>,\n\n    /// If active, generates \"optimistic\" precompiles. 
Optimistic precompiles are smaller in size\n    /// but may fail at runtime if the assumptions they make are violated.\n    #[arg(long)]\n    #[arg(default_value_t = false)]\n    optimistic_precompiles: bool,\n\n    /// When larger than 1, enables superblocks with up to the given number of basic blocks.\n    #[arg(long, default_value_t = 1, value_parser = clap::value_parser!(u8).range(1..))]\n    superblocks: u8,\n}\n\n#[derive(Subcommand)]\nenum Commands {\n    Compile {\n        guest: String,\n\n        #[command(flatten)]\n        shared: SharedArgs,\n    },\n\n    Execute {\n        guest: String,\n\n        #[command(flatten)]\n        shared: SharedArgs,\n\n        #[arg(long)]\n        metrics: Option<PathBuf>,\n    },\n\n    Prove {\n        guest: String,\n\n        #[command(flatten)]\n        shared: SharedArgs,\n\n        #[arg(long)]\n        #[arg(default_value_t = false)]\n        mock: bool,\n\n        #[arg(long)]\n        #[arg(default_value_t = false)]\n        recursion: bool,\n\n        #[arg(long)]\n        metrics: Option<PathBuf>,\n    },\n}\n\nfn main() -> Result<(), io::Error> {\n    let args = Cli::parse();\n\n    setup_tracing_with_log_level(Level::INFO);\n\n    if let Some(command) = args.command {\n        run_command(command);\n        Ok(())\n    } else {\n        Cli::command().print_help()\n    }\n}\n\nfn build_powdr_config(shared: &SharedArgs) -> PowdrConfig {\n    let mut powdr_config =\n        default_powdr_openvm_config(shared.autoprecompiles as u64, shared.skip as u64);\n    if let Some(apc_candidates_dir) = &shared.apc_candidates_dir {\n        powdr_config = powdr_config.with_apc_candidates_dir(apc_candidates_dir);\n    }\n    powdr_config\n        .with_optimistic_precompiles(shared.optimistic_precompiles)\n        .with_superblocks(\n            shared.superblocks,\n            shared.apc_max_instructions,\n            shared.apc_exec_count_cutoff,\n        )\n}\n\nfn run_command(command: Commands) {\n    let guest_opts 
= GuestOptions::default();\n    match command {\n        Commands::Compile { guest, shared } => {\n            validate_shared_args(&shared);\n            let powdr_config = build_powdr_config(&shared);\n            let guest_program = compile_openvm(&guest, guest_opts.clone()).unwrap();\n            let execution_profile = powdr_openvm::execution_profile_from_guest(\n                &guest_program,\n                stdin_from(shared.input),\n            );\n\n            let empirical_constraints = maybe_compute_empirical_constraints(\n                &guest_program,\n                &powdr_config,\n                stdin_from(shared.input),\n            );\n            let pgo_config = pgo_config(shared.pgo, shared.max_columns, execution_profile);\n            let program = powdr_openvm_riscv::compile_exe(\n                guest_program,\n                powdr_config,\n                pgo_config,\n                empirical_constraints,\n            )\n            .unwrap();\n            write_program_to_file(program, &format!(\"{guest}_compiled.cbor\")).unwrap();\n        }\n\n        Commands::Execute {\n            guest,\n            shared,\n            metrics,\n        } => {\n            validate_shared_args(&shared);\n            if shared.superblocks > 1 {\n                Cli::command()\n                    .error(\n                        clap::error::ErrorKind::ArgumentConflict,\n                        \"OpenVM execution with superblocks not yet supported.\",\n                    )\n                    .exit();\n            }\n            let powdr_config = build_powdr_config(&shared);\n            let guest_program = compile_openvm(&guest, guest_opts.clone()).unwrap();\n            let empirical_constraints = maybe_compute_empirical_constraints(\n                &guest_program,\n                &powdr_config,\n                stdin_from(shared.input),\n            );\n            let execution_profile = powdr_openvm::execution_profile_from_guest(\n   
             &guest_program,\n                stdin_from(shared.input),\n            );\n            let pgo_config = pgo_config(shared.pgo, shared.max_columns, execution_profile);\n            let compile_and_exec = || {\n                let program = powdr_openvm_riscv::compile_exe(\n                    guest_program,\n                    powdr_config,\n                    pgo_config,\n                    empirical_constraints,\n                )\n                .unwrap();\n                powdr_openvm::execute(program, stdin_from(shared.input)).unwrap();\n            };\n            if let Some(metrics_path) = metrics {\n                run_with_metric_collection_to_file(\n                    std::fs::File::create(metrics_path).expect(\"Failed to create metrics file\"),\n                    compile_and_exec,\n                );\n            } else {\n                compile_and_exec()\n            }\n        }\n\n        Commands::Prove {\n            guest,\n            shared,\n            mock,\n            recursion,\n            metrics,\n        } => {\n            validate_shared_args(&shared);\n            if shared.superblocks > 1 {\n                Cli::command()\n                    .error(\n                        clap::error::ErrorKind::ArgumentConflict,\n                        \"OpenVM execution with superblocks not yet supported.\",\n                    )\n                    .exit();\n            }\n            let powdr_config = build_powdr_config(&shared);\n            let guest_program = compile_openvm(&guest, guest_opts).unwrap();\n            let empirical_constraints = maybe_compute_empirical_constraints(\n                &guest_program,\n                &powdr_config,\n                stdin_from(shared.input),\n            );\n\n            let execution_profile = powdr_openvm::execution_profile_from_guest(\n                &guest_program,\n                stdin_from(shared.input),\n            );\n            let pgo_config = 
pgo_config(shared.pgo, shared.max_columns, execution_profile);\n            let compile_and_prove = || {\n                let program = powdr_openvm_riscv::compile_exe(\n                    guest_program,\n                    powdr_config,\n                    pgo_config,\n                    empirical_constraints,\n                )\n                .unwrap();\n                powdr_openvm_riscv::prove(&program, mock, recursion, stdin_from(shared.input), None)\n                    .unwrap()\n            };\n            if let Some(metrics_path) = metrics {\n                run_with_metric_collection_to_file(\n                    std::fs::File::create(metrics_path).expect(\"Failed to create metrics file\"),\n                    compile_and_prove,\n                );\n            } else {\n                compile_and_prove()\n            }\n        }\n    }\n}\n\nfn write_program_to_file(\n    program: CompiledProgram<RiscvISA>,\n    filename: &str,\n) -> Result<(), io::Error> {\n    use std::fs::File;\n\n    let mut file = File::create(filename)?;\n    serde_cbor::to_writer(&mut file, &program).map_err(io::Error::other)?;\n    Ok(())\n}\n\nfn validate_shared_args(args: &SharedArgs) {\n    if args.superblocks > 1 && !matches!(args.pgo, PgoType::Cell) {\n        Cli::command()\n            .error(\n                clap::error::ErrorKind::ArgumentConflict,\n                \"superblocks are only supported with `--pgo cell`\",\n            )\n            .exit();\n    }\n}\n\nfn stdin_from(input: Option<u32>) -> StdIn {\n    let mut s = StdIn::default();\n    if let Some(i) = input {\n        s.write(&i)\n    }\n    s\n}\n\nfn setup_tracing_with_log_level(level: Level) {\n    let env_filter = EnvFilter::try_from_default_env()\n        .unwrap_or_else(|_| EnvFilter::new(format!(\"{level},p3_=warn\")));\n    let subscriber = Registry::default()\n        .with(env_filter)\n        .with(ForestLayer::default())\n        .with(MetricsLayer::new());\n    #[cfg(feature = 
\"metrics\")]\n    let subscriber = subscriber.with(TimingMetricsLayer::new());\n    tracing::subscriber::set_global_default(subscriber).unwrap();\n}\n\n/// export stark-backend metrics to the given file\npub fn run_with_metric_collection_to_file<R>(file: std::fs::File, f: impl FnOnce() -> R) -> R {\n    let recorder = DebuggingRecorder::new();\n    let snapshotter = recorder.snapshotter();\n    let recorder = TracingContextLayer::all().layer(recorder);\n    metrics::set_global_recorder(recorder).unwrap();\n    let res = f();\n\n    serde_json::to_writer_pretty(&file, &serialize_metric_snapshot(snapshotter.snapshot()))\n        .unwrap();\n    res\n}\n\n/// If optimistic precompiles are enabled, compute empirical constraints from the execution\n/// of the guest program on the given stdin, and save them to disk.\nfn maybe_compute_empirical_constraints(\n    guest_program: &OriginalCompiledProgram<RiscvISA>,\n    powdr_config: &PowdrConfig,\n    stdin: StdIn,\n) -> EmpiricalConstraints {\n    if !powdr_config.should_use_optimistic_precompiles {\n        return EmpiricalConstraints::default();\n    }\n\n    tracing::warn!(\n        \"Optimistic precompiles are not implemented yet. Computing empirical constraints...\"\n    );\n\n    let empirical_constraints =\n        detect_empirical_constraints(guest_program, powdr_config.degree_bound, vec![stdin]);\n\n    if let Some(path) = &powdr_config.apc_candidates_dir_path {\n        std::fs::create_dir_all(path).expect(\"Failed to create apc candidates directory\");\n        tracing::info!(\n            \"Saving empirical constraints debug info to {}/empirical_constraints.json\",\n            path.display()\n        );\n        let json = serde_json::to_string_pretty(&empirical_constraints).unwrap();\n        std::fs::write(path.join(\"empirical_constraints.json\"), json).unwrap();\n    }\n    empirical_constraints\n}\n"
  },
  {
    "path": "constraint-solver/Cargo.toml",
    "content": "[package]\nname = \"powdr-constraint-solver\"\ndescription = \"powdr tools to analyze and solve algebraic constraints\"\nversion = { workspace = true }\nedition = { workspace = true }\nlicense = { workspace = true }\nhomepage = { workspace = true }\nrepository = { workspace = true }\n\n[dependencies]\npowdr-number.workspace = true\n\nitertools.workspace = true\nnum-traits.workspace = true\nderive_more.workspace = true\nauto_enums = \"0.8.5\"\nlog.workspace = true\nbitvec = \"1.0.1\"\nserde.workspace = true\n\ncrepe = { git = \"https://github.com/powdr-labs/crepe\", rev = \"powdr-0.1.11\" }\nderivative.workspace = true\n\n[dev-dependencies]\npretty_assertions.workspace = true\nenv_logger.workspace = true\ntest-log.workspace = true\nexpect-test = \"1.5.1\"\nserde_json.workspace = true\n\n[package.metadata.cargo-udeps.ignore]\ndevelopment = [\"env_logger\"]\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "constraint-solver/src/algebraic_constraint/mod.rs",
    "content": "use std::fmt::Display;\n\nuse crate::{\n    grouped_expression::GroupedExpression,\n    runtime_constant::{RuntimeConstant, Substitutable},\n};\n\nuse num_traits::{One, Zero};\nuse serde::Serialize;\n\npub mod solve;\n\n/// An algebraic constraint\n#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Serialize)]\n#[serde(transparent)]\npub struct AlgebraicConstraint<V> {\n    /// The expression representing the constraint, which must evaluate to 0 for the constraint to be satisfied.\n    pub expression: V,\n}\n\n// We implement `From` to make writing tests easier. However, we recommend using `AlgebraicConstraint::assert_zero` for clarity\nimpl<V> From<V> for AlgebraicConstraint<V> {\n    fn from(expression: V) -> Self {\n        AlgebraicConstraint::assert_zero(expression)\n    }\n}\n\nimpl<V> AlgebraicConstraint<V> {\n    /// Create a constraint which asserts that the expression evaluates to 0.\n    pub fn assert_zero(expression: V) -> Self {\n        AlgebraicConstraint { expression }\n    }\n\n    /// Returns a constraint over a reference to the expression. 
This is useful to interact with the solver.\n    pub fn as_ref(&self) -> AlgebraicConstraint<&V> {\n        AlgebraicConstraint {\n            expression: &self.expression,\n        }\n    }\n}\n\nimpl<V: Clone> AlgebraicConstraint<&V> {\n    pub(crate) fn cloned(&self) -> AlgebraicConstraint<V> {\n        AlgebraicConstraint {\n            expression: self.expression.clone(),\n        }\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord> AlgebraicConstraint<GroupedExpression<T, V>> {\n    /// Returns a constraint which asserts that the two expressions are equal.\n    pub fn assert_eq(expression: GroupedExpression<T, V>, other: GroupedExpression<T, V>) -> Self {\n        Self::assert_zero(expression - other)\n    }\n\n    /// Returns a constraint which asserts that the expression is a boolean.\n    pub fn assert_bool(expression: GroupedExpression<T, V>) -> Self {\n        Self::assert_zero(expression.clone() * (expression - GroupedExpression::one()))\n    }\n}\n\nimpl<V: Zero> AlgebraicConstraint<V> {\n    pub fn is_redundant(&self) -> bool {\n        self.expression.is_zero()\n    }\n}\n\nimpl<T: RuntimeConstant + Substitutable<V>, V: Clone + Eq + Ord>\n    AlgebraicConstraint<GroupedExpression<T, V>>\n{\n    /// Substitute a variable by a symbolically known expression. The variable can be known or unknown.\n    /// If it was already known, it will be substituted in the known expressions.\n    pub fn substitute_by_known(&mut self, variable: &V, substitution: &T) {\n        self.expression.substitute_by_known(variable, substitution);\n    }\n\n    pub fn degree(&self) -> usize {\n        self.expression.degree()\n    }\n}\n\nimpl<V: Display> Display for AlgebraicConstraint<V> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{} = 0\", self.expression)\n    }\n}\n\nimpl<T, V> AlgebraicConstraint<GroupedExpression<T, V>> {\n    /// Returns the referenced unknown variables. 
Might contain repetitions.\n    pub fn referenced_unknown_variables(&self) -> Box<dyn Iterator<Item = &V> + '_> {\n        self.expression.referenced_unknown_variables()\n    }\n}\n\nimpl<T, V> AlgebraicConstraint<&GroupedExpression<T, V>> {\n    /// Returns the referenced unknown variables. Might contain repetitions.\n    pub fn referenced_unknown_variables(&self) -> Box<dyn Iterator<Item = &V> + '_> {\n        self.expression.referenced_unknown_variables()\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/algebraic_constraint/solve.rs",
    "content": "use std::{collections::HashSet, fmt::Display, hash::Hash};\n\nuse itertools::Itertools;\nuse num_traits::Zero;\nuse powdr_number::FieldElement;\n\nuse crate::{\n    algebraic_constraint::AlgebraicConstraint,\n    effect::{Assertion, Condition, Effect},\n    grouped_expression::{GroupedExpression, RangeConstraintProvider},\n    range_constraint::RangeConstraint,\n    runtime_constant::RuntimeConstant,\n};\n\n#[derive(Default)]\npub struct ProcessResult<T: FieldElement, V> {\n    pub effects: Vec<Effect<T, V>>,\n    pub complete: bool,\n}\n\nimpl<T: FieldElement, V> ProcessResult<T, V> {\n    pub fn empty() -> Self {\n        Self {\n            effects: vec![],\n            complete: false,\n        }\n    }\n    pub fn complete(effects: Vec<Effect<T, V>>) -> Self {\n        Self {\n            effects,\n            complete: true,\n        }\n    }\n}\n\n#[derive(Debug, PartialEq, Eq)]\npub enum Error {\n    /// The range constraints of the parts do not cover the full constant sum.\n    ConflictingRangeConstraints,\n    /// An equality constraint evaluates to a known-nonzero value.\n    ConstraintUnsatisfiable(String),\n}\n\nimpl<T, V> AlgebraicConstraint<&GroupedExpression<T, V>>\nwhere\n    T: FieldElement,\n    V: Ord + Clone + Eq + Hash + Display,\n{\n    /// Solves the equation `self = 0` and returns how to compute the solution.\n    /// The solution can contain assignments to multiple variables.\n    /// If no way to solve the equation (and no way to derive new range\n    /// constraints) has been found, but it still contains\n    /// unknown variables, returns an empty, incomplete result.\n    /// If the equation is known to be unsolvable, returns an error.\n    pub fn solve(\n        &self,\n        range_constraints: &impl RangeConstraintProvider<T, V>,\n    ) -> Result<ProcessResult<T, V>, Error> {\n        let expression = self.expression;\n\n        if !expression\n            .range_constraint(range_constraints)\n            
.allows_value(Zero::zero())\n        {\n            return Err(Error::ConstraintUnsatisfiable(self.to_string()));\n        }\n\n        if expression.is_quadratic() {\n            self.solve_quadratic(range_constraints)\n        } else if let Some(k) = expression.try_to_known() {\n            // If we know `expression` to be nonzero, we should have returned\n            // Err already in the range constraint check above.\n            assert!(k.is_zero());\n            // TODO we could still process more information\n            // and reach \"unsatisfiable\" here.\n            Ok(ProcessResult::complete(vec![]))\n        } else {\n            self.solve_affine(range_constraints)\n        }\n    }\n\n    /// Solves the constraint for `variable`. This is only possible if\n    /// `variable` does not appear in the quadratic component and\n    /// has a coefficient which is known to be not zero.\n    ///\n    /// If the constraint has the form `A + k * x = 0` where `A` does not\n    /// contain the variable `x` and `k` is a non-zero runtime constant,\n    /// it returns `A * (-k^(-1))`.\n    ///\n    /// Returns the resulting solved grouped expression.\n    pub fn try_solve_for(&self, variable: &V) -> Option<GroupedExpression<T, V>> {\n        let coefficient = self\n            .expression\n            .coefficient_of_variable_in_affine_part(variable)?;\n        assert!(!coefficient.is_zero());\n\n        let subtracted = self.expression.clone()\n            - GroupedExpression::from_unknown_variable(variable.clone()) * *coefficient;\n        if subtracted.referenced_unknown_variables().contains(variable) {\n            // There is another occurrence of the variable in the quadratic component,\n            // we cannot solve for it.\n            return None;\n        }\n        Some(subtracted * (-coefficient.field_inverse()))\n    }\n\n    /// Algebraically transforms the constraint such that `self = 0` is equivalent\n    /// to `expr = result` and returns 
`result`.\n    ///\n    /// Returns `None` if it cannot solve (this happens for example if self is quadratic).\n    /// Panics if `expr` is quadratic.\n    pub fn try_solve_for_expr(\n        &self,\n        expr: &GroupedExpression<T, V>,\n    ) -> Option<GroupedExpression<T, V>> {\n        let expression = self.expression;\n\n        assert!(\n            expr.is_affine(),\n            \"Tried to solve for quadratic expression {expr}\"\n        );\n        if expression.is_quadratic() {\n            return None;\n        }\n\n        // Find a normalization factor by iterating over the variables.\n        let normalization_factor = expr\n            .referenced_unknown_variables()\n            .find_map(|var| {\n                let coeff = expression.coefficient_of_variable_in_affine_part(var)?;\n                // We can only divide if we know the coefficient is non-zero.\n                if coeff.is_known_nonzero() {\n                    Some(\n                        expr.coefficient_of_variable_in_affine_part(var)\n                            .unwrap()\n                            .field_div(coeff),\n                    )\n                } else {\n                    None\n                }\n            })\n            .unwrap_or(T::one());\n        let result = expr - &(self.expression.clone() * normalization_factor);\n\n        // Check that the operations removed all variables in `expr` from `self`.\n        if !expr\n            .referenced_unknown_variables()\n            .collect::<HashSet<_>>()\n            .is_disjoint(\n                &result\n                    .referenced_unknown_variables()\n                    .collect::<HashSet<_>>(),\n            )\n        {\n            // The variables did not fully cancel out\n            return None;\n        }\n        Some(result)\n    }\n\n    fn solve_affine(\n        &self,\n        range_constraints: &impl RangeConstraintProvider<T, V>,\n    ) -> Result<ProcessResult<T, V>, Error> {\n        Ok(\n 
// In this case, we could have an equation of the form
   /// - The expression is linear\n    fn transfer_constraints(\n        &self,\n        range_constraints: &impl RangeConstraintProvider<T, V>,\n    ) -> Vec<Effect<T, V>> {\n        // Solve for each of the variables in the linear component and\n        // compute the range constraints.\n        assert!(!self.expression.is_quadratic());\n        self.expression\n            .linear_components()\n            .filter_map(|(var, _)| {\n                let rc = self.try_solve_for(var)?.range_constraint(range_constraints);\n                Some((var, rc))\n            })\n            .filter(|(_, constraint)| !constraint.is_unconstrained())\n            .map(|(var, constraint)| Effect::RangeConstraint(var.clone(), constraint))\n            .collect()\n    }\n\n    fn solve_quadratic(\n        &self,\n        range_constraints: &impl RangeConstraintProvider<T, V>,\n    ) -> Result<ProcessResult<T, V>, Error> {\n        let expression = self.expression;\n        let Some((left, right)) = expression.try_as_single_product() else {\n            return Ok(ProcessResult::empty());\n        };\n        // Now we have `left * right = 0`, i.e. 
one (or both) of them has to be zero.\n        let (left_solution, right_solution) = match (\n            AlgebraicConstraint::assert_zero(left).solve(range_constraints),\n            AlgebraicConstraint::assert_zero(right).solve(range_constraints),\n        ) {\n            // If one of them is always unsatisfiable, it is equivalent to just solving the other one for zero.\n            (Err(_), o) | (o, Err(_)) => {\n                return o;\n            }\n            (Ok(left), Ok(right)) => (left, right),\n        };\n\n        if let Some(result) =\n            combine_to_conditional_assignment(&left_solution, &right_solution, range_constraints)\n        {\n            return Ok(result);\n        }\n\n        // Now at least combine new range constraints on the same variable.\n        // TODO: This will correctly find a bit range constraint on\n        // `(X - 1) * X = 0`, but it fails to detect the case of\n        // `X * X - X`.\n        // This could be fixed by finding a canonical form for the quadratic\n        // expression, and normalizing the constants.\n        Ok(combine_range_constraints(&left_solution, &right_solution))\n    }\n}\n\n/// Tries to combine two process results from alternative branches into a\n/// conditional assignment.\nfn combine_to_conditional_assignment<T: FieldElement, V: Ord + Clone + Eq + Display>(\n    left: &ProcessResult<T, V>,\n    right: &ProcessResult<T, V>,\n    range_constraints: &impl RangeConstraintProvider<T, V>,\n) -> Option<ProcessResult<T, V>> {\n    let [Effect::Assignment(first_var, first_assignment)] = left.effects.as_slice() else {\n        return None;\n    };\n    let [Effect::Assignment(second_var, second_assignment)] = right.effects.as_slice() else {\n        return None;\n    };\n\n    if first_var != second_var {\n        return None;\n    }\n\n    // At this point, we have two assignments to the same variable, i.e.\n    // \"`X = A` or `X = B`\". 
If the two alternatives can never be satisfied at\n    // the same time (i.e. the \"or\" is exclusive), we can turn this into a\n    // conditional assignment.\n\n    let diff = *first_assignment + -*second_assignment;\n\n    // Now if `rc + diff` is disjoint from `rc`, it means\n    // that if the value that `A` evaluates to falls into the allowed range for `X`,\n    // then `B = A + diff` is not a possible value for `X` and vice-versa.\n    // This means the two alternatives are disjoint and we can use a conditional assignment.\n    let rc = range_constraints.get(first_var);\n    if !rc\n        .combine_sum(&RangeConstraint::from_value(diff))\n        .is_disjoint(&rc)\n    {\n        return None;\n    }\n\n    Some(ProcessResult {\n        effects: vec![Effect::ConditionalAssignment {\n            variable: first_var.clone(),\n            condition: Condition {\n                value: *first_assignment,\n                condition: rc,\n            },\n            in_range_value: *first_assignment,\n            out_of_range_value: *second_assignment,\n        }],\n        complete: left.complete && right.complete,\n    })\n}\n\n/// Turns an effect into a range constraint on a variable.\nfn effect_to_range_constraint<T: FieldElement, V: Ord + Clone + Eq>(\n    effect: &Effect<T, V>,\n) -> Option<(V, RangeConstraint<T>)> {\n    match effect {\n        Effect::RangeConstraint(var, rc) => Some((var.clone(), *rc)),\n        Effect::Assignment(var, value) => Some((var.clone(), value.range_constraint())),\n        _ => None,\n    }\n}\n\n/// Tries to combine range constraint results from two alternative branches.\n/// In some cases, if both branches produce a complete range constraint for the same variable,\n/// and those range constraints can be combined without loss, the result is complete as well.\nfn combine_range_constraints<T: FieldElement, V: Ord + Clone + Eq + Hash + Display>(\n    left: &ProcessResult<T, V>,\n    right: &ProcessResult<T, V>,\n) -> 
ProcessResult<T, V> {\n    let left_constraints = left\n        .effects\n        .iter()\n        .filter_map(|e| effect_to_range_constraint(e))\n        .into_grouping_map()\n        .reduce(|rc1, _, rc2| rc1.conjunction(&rc2));\n    let right_constraints = right\n        .effects\n        .iter()\n        .filter_map(|e| effect_to_range_constraint(e))\n        .into_grouping_map()\n        .reduce(|rc1, _, rc2| rc1.conjunction(&rc2));\n\n    let effects = left_constraints\n        .iter()\n        .filter_map(|(v, rc1)| {\n            let rc2 = right_constraints.get(v)?;\n            let rc = rc1.disjunction(rc2);\n            // This does not capture all cases where the disjunction does not lose information,\n            // but we want this to be an indicator of whether we can remove the original\n            // constraint, and thus we want it to only hit the \"single value\" case.\n            let complete = rc1.try_to_single_value().is_some()\n                && rc2.try_to_single_value().is_some()\n                && rc.size_estimate() <= 2.into();\n            Some((v, rc, complete))\n        })\n        .collect_vec();\n    // The completeness is tricky, but if there is just a single left effect\n    // and a single right effect and the final range constraint is complete,\n    // it means that both branches have a concrete assignment for the variable\n    // and thus the range constraint is exactly what the original constraint captures.\n    let complete = left.effects.len() == 1\n        && right.effects.len() == 1\n        && effects.len() == 1\n        && effects.iter().all(|(_, _, complete)| *complete);\n    ProcessResult {\n        effects: effects\n            .into_iter()\n            .map(|(v, rc, _)| Effect::RangeConstraint(v.clone(), rc))\n            .collect(),\n        complete,\n    }\n}\n\nfn assignment_if_satisfies_range_constraints<T: FieldElement, V: Ord + Clone + Eq>(\n    var: V,\n    value: T,\n    range_constraints: &impl 
RangeConstraintProvider<T, V>,\n) -> Result<Effect<T, V>, Error> {\n    let rc = range_constraints.get(&var);\n    if rc.is_disjoint(&value.range_constraint()) {\n        return Err(Error::ConflictingRangeConstraints);\n    }\n    Ok(Effect::Assignment(var, value))\n}\n\n#[cfg(test)]\nmod tests {\n    use std::collections::HashMap;\n\n    use crate::grouped_expression::NoRangeConstraints;\n\n    use super::*;\n    use powdr_number::GoldilocksField;\n\n    use pretty_assertions::assert_eq;\n\n    type Qse = GroupedExpression<GoldilocksField, &'static str>;\n\n    fn var(name: &'static str) -> Qse {\n        Qse::from_unknown_variable(name)\n    }\n\n    fn constant(value: u64) -> Qse {\n        Qse::from_number(GoldilocksField::from(value))\n    }\n\n    #[test]\n    fn unsolvable() {\n        let r = AlgebraicConstraint::assert_zero(&Qse::from_number(GoldilocksField::from(10)))\n            .solve(&NoRangeConstraints);\n        assert!(r.is_err());\n    }\n\n    #[test]\n    fn solvable_without_vars() {\n        let constr = constant(0);\n        let result = AlgebraicConstraint::assert_zero(&constr)\n            .solve(&NoRangeConstraints)\n            .unwrap();\n        assert!(result.complete && result.effects.is_empty());\n    }\n\n    #[test]\n    fn solve_simple_eq() {\n        let y = Qse::from_unknown_variable(\"y\");\n        let x = Qse::from_unknown_variable(\"X\");\n        // 2 * X + 7 * y - 10 = 0\n        let two = constant(2);\n        let seven = constant(7);\n        let ten = constant(10);\n        let mut constr = two * x + seven * y - ten;\n        constr.substitute_by_known(&\"y\", &GoldilocksField::from(13));\n        let result = AlgebraicConstraint::assert_zero(&constr)\n            .solve(&NoRangeConstraints)\n            .unwrap();\n        assert!(result.complete);\n        assert_eq!(result.effects.len(), 1);\n        let Effect::Assignment(var, expr) = &result.effects[0] else {\n            panic!(\"Expected assignment\");\n        
};\n        assert_eq!(var.to_string(), \"X\");\n        assert_eq!(\n            expr.to_string(),\n            ((GoldilocksField::from(7) * GoldilocksField::from(13) - GoldilocksField::from(10))\n                / GoldilocksField::from(-2))\n            .to_string()\n        );\n    }\n\n    #[test]\n    fn solve_constraint_transfer() {\n        let rc = RangeConstraint::from_mask(0xffu32);\n        let a = Qse::from_unknown_variable(\"a\");\n        let b = Qse::from_unknown_variable(\"b\");\n        let c = Qse::from_unknown_variable(\"c\");\n        let z = Qse::from_unknown_variable(\"Z\");\n        let range_constraints = HashMap::from([(\"a\", rc), (\"b\", rc), (\"c\", rc)]);\n        // a * 0x100 + b * 0x10000 + c * 0x1000000 + 10 - Z = 0\n        let ten = constant(10);\n        let constr =\n            a * constant(0x100) + b * constant(0x10000) + c * constant(0x1000000) + ten.clone()\n                - z.clone();\n        let result = AlgebraicConstraint::assert_zero(&constr)\n            .solve(&range_constraints)\n            .unwrap();\n        assert!(!result.complete);\n        let effects = result\n            .effects\n            .into_iter()\n            .map(|effect| match effect {\n                Effect::RangeConstraint(v, rc) => format!(\"{v}: {rc};\\n\"),\n                _ => panic!(),\n            })\n            .format(\"\")\n            .to_string();\n        // It appears twice because we solve the positive and the negated equation.\n        // Note that the negated version has a different bit mask.\n        assert_eq!(\n            effects,\n            \"Z: [10, 4294967050] & 0xffffff0a;\n\"\n        );\n    }\n\n    fn unpack_range_constraint(\n        process_result: &ProcessResult<GoldilocksField, &'static str>,\n    ) -> (&'static str, RangeConstraint<GoldilocksField>) {\n        let [effect] = &process_result.effects[..] 
// `a` can be 3 or 4, which can be completely represented by
= Qse::from_unknown_variable(\"a\");\n        let three = constant(3);\n        let five = constant(5);\n\n        // `a` can be 3 or 5, so there is a range constraint\n        // RangeConstraint::from_range(3, 5) on `a`.\n        // However, the identity is not complete, because the\n        // range constraint allows for a value of 4, so removing\n        // the identity would loose information.\n        let constraint = (a.clone() - three) * (a - five);\n\n        let result = AlgebraicConstraint::assert_zero(&constraint)\n            .solve(&NoRangeConstraints)\n            .unwrap();\n        assert!(!result.complete);\n        let (var, rc) = unpack_range_constraint(&result);\n        assert_eq!(var.to_string(), \"a\");\n        assert_eq!(\n            rc,\n            RangeConstraint::from_range(GoldilocksField::from(3), GoldilocksField::from(5))\n        );\n    }\n\n    #[test]\n    fn bool_plus_one_cant_be_zero() {\n        let expr = var(\"a\") + constant(1);\n        let rc = RangeConstraint::from_mask(0x1u64);\n        let range_constraints = HashMap::from([(\"a\", rc)]);\n        assert!(AlgebraicConstraint::assert_zero(&expr)\n            .solve(&range_constraints)\n            .is_err());\n    }\n\n    #[test]\n    fn solve_for() {\n        let expr = var(\"w\") + var(\"x\") + constant(3) * var(\"y\") + constant(5);\n        let constr = AlgebraicConstraint::assert_zero(&expr);\n        assert_eq!(expr.to_string(), \"w + x + 3 * y + 5\");\n        assert_eq!(\n            constr.try_solve_for(&\"x\").unwrap().to_string(),\n            \"-(w + 3 * y + 5)\"\n        );\n        assert_eq!(\n            constr.try_solve_for(&\"y\").unwrap().to_string(),\n            \"6148914689804861440 * w + 6148914689804861440 * x - 6148914689804861442\"\n        );\n        assert!(constr.try_solve_for(&\"t\").is_none());\n    }\n\n    #[test]\n    fn solve_for_expr() {\n        let expr = var(\"w\") + var(\"x\") + constant(3) * var(\"y\") + constant(5);\n        
let constr = AlgebraicConstraint::assert_zero(&expr);\n        assert_eq!(expr.to_string(), \"w + x + 3 * y + 5\");\n        assert_eq!(\n            constr.try_solve_for_expr(&var(\"x\")).unwrap().to_string(),\n            \"-(w + 3 * y + 5)\"\n        );\n        assert_eq!(\n            constr.try_solve_for_expr(&var(\"y\")).unwrap().to_string(),\n            \"6148914689804861440 * w + 6148914689804861440 * x - 6148914689804861442\"\n        );\n        assert_eq!(\n            constr\n                .try_solve_for_expr(&-(constant(3) * var(\"y\")))\n                .unwrap()\n                .to_string(),\n            \"w + x + 5\"\n        );\n        assert_eq!(\n            constr\n                .try_solve_for_expr(&-(constant(3) * var(\"y\") + constant(2)))\n                .unwrap()\n                .to_string(),\n            \"w + x + 3\"\n        );\n        assert_eq!(\n            constr\n                .try_solve_for_expr(&(var(\"x\") + constant(3) * var(\"y\") + constant(2)))\n                .unwrap()\n                .to_string(),\n            \"-(w + 3)\"\n        );\n        // We cannot solve these because the constraint does not contain a linear multiple\n        // of the expression.\n        assert!(constr\n            .try_solve_for_expr(&(var(\"x\") + constant(2) * var(\"y\")))\n            .is_none());\n        assert!(constr.try_solve_for_expr(&(var(\"x\") + var(\"y\"))).is_none());\n        assert!(constr\n            .try_solve_for_expr(&(constant(2) * var(\"x\") + var(\"y\")))\n            .is_none());\n    }\n\n    #[test]\n    fn solve_for_expr_normalization() {\n        // Test normalization\n        let t = GoldilocksField::from(3);\n        let r = GoldilocksField::from(7);\n        let expr = var(\"x\") * r + var(\"y\") * t;\n        let constr = AlgebraicConstraint::assert_zero(&expr);\n        assert_eq!(constr.to_string(), \"7 * x + 3 * y = 0\");\n        assert_eq!(\n            constr\n                
.try_solve_for_expr(&(var(\"x\") * r))\n                .unwrap()\n                .to_string(),\n            \"-(3 * y)\"\n        );\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/bus_interaction_handler.rs",
    "content": "use itertools::Itertools;\nuse powdr_number::FieldElement;\n\nuse crate::{constraint_system::BusInteraction, range_constraint::RangeConstraint};\n\n/// The sent / received data could not be received / sent.\n#[derive(Debug)]\npub struct ViolatesBusRules {}\n\n/// A trait for handling bus interactions.\npub trait BusInteractionHandler<T: FieldElement> {\n    /// Handles a bus interaction, by transforming taking a bus interaction\n    /// (with the fields represented by range constraints) and returning\n    /// updated range constraints.\n    /// The idea is that a certain combination of range constraints on elements\n    /// can be further restricted given internal knowledge about the specific\n    /// bus interaction, in particular if some elements are restricted to just\n    /// a few or even concrete values.\n    /// The range constraints are intersected with the previous ones by the\n    /// caller, so there is no need to do that in the implementation of this\n    /// trait.\n    fn handle_bus_interaction(\n        &self,\n        bus_interaction: BusInteraction<RangeConstraint<T>>,\n    ) -> BusInteraction<RangeConstraint<T>>;\n\n    /// Like handle_bus_interaction, but returns an error if the current bus\n    /// interaction violates the rules of the bus (e.g. [1234] in [BYTES]).\n    fn handle_bus_interaction_checked(\n        &self,\n        bus_interaction: BusInteraction<RangeConstraint<T>>,\n    ) -> Result<BusInteraction<RangeConstraint<T>>, ViolatesBusRules> {\n        let previous_constraints = bus_interaction.clone();\n        let new_constraints = self.handle_bus_interaction(bus_interaction);\n\n        // Intersect the old and new range constraints. 
If they don't overlap,\n        // there is a contradiction.\n        for (previous_rc, new_rc) in previous_constraints\n            .fields()\n            .zip_eq(new_constraints.fields())\n        {\n            if previous_rc.is_disjoint(new_rc) {\n                return Err(ViolatesBusRules {});\n            }\n        }\n        Ok(new_constraints)\n    }\n}\n\n/// A default bus interaction handler that does nothing. Using it is\n/// equivalent to ignoring bus interactions.\n#[derive(Default, Clone)]\npub struct DefaultBusInteractionHandler<T: FieldElement> {\n    _marker: std::marker::PhantomData<T>,\n}\n\nimpl<T: FieldElement> BusInteractionHandler<T> for DefaultBusInteractionHandler<T> {\n    fn handle_bus_interaction(\n        &self,\n        bus_interaction: BusInteraction<RangeConstraint<T>>,\n    ) -> BusInteraction<RangeConstraint<T>> {\n        bus_interaction\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/constraint_system.rs",
    "content": "use crate::{\n    bus_interaction_handler::ViolatesBusRules,\n    effect::Effect,\n    grouped_expression::{GroupedExpression, RangeConstraintProvider},\n    range_constraint::RangeConstraint,\n    runtime_constant::{RuntimeConstant, Substitutable},\n};\nuse derivative::Derivative;\nuse itertools::Itertools;\nuse powdr_number::FieldElement;\nuse serde::{Deserialize, Deserializer, Serialize, Serializer};\nuse std::{fmt::Display, hash::Hash};\n\npub use crate::algebraic_constraint::AlgebraicConstraint;\npub use crate::bus_interaction_handler::BusInteractionHandler;\n\n/// Description of a constraint system.\n#[derive(Derivative, Serialize)]\n#[derivative(Default(bound = \"\"), Clone)]\n#[serde(bound(serialize = \"V: Clone + Ord + Eq + Serialize, T: RuntimeConstant + Serialize\"))]\npub struct ConstraintSystem<T, V> {\n    /// The algebraic expressions which have to evaluate to zero.\n    #[serde(rename = \"constraints\")]\n    pub algebraic_constraints: Vec<AlgebraicConstraint<GroupedExpression<T, V>>>,\n    /// Bus interactions, which can further restrict variables.\n    /// Exact semantics are up to the implementation of BusInteractionHandler\n    pub bus_interactions: Vec<BusInteraction<GroupedExpression<T, V>>>,\n    /// Newly added variables whose values are derived from existing variables.\n    #[serde(rename = \"derived_columns\")]\n    pub derived_variables: Vec<DerivedVariable<T, V, GroupedExpression<T, V>>>,\n}\n\nimpl<T: RuntimeConstant + Display, V: Clone + Ord + Display> Display for ConstraintSystem<T, V> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"{}\",\n            self.algebraic_constraints\n                .iter()\n                .map(|constraint| format!(\"{constraint}\"))\n                .chain(\n                    self.bus_interactions\n                        .iter()\n                        .map(|bus_inter| format!(\"{bus_inter}\"))\n               
 )\n                .chain(self.derived_variables.iter().map(\n                    |DerivedVariable {\n                         variable,\n                         computation_method,\n                     }| { format!(\"{variable} := {computation_method}\") }\n                ))\n                .format(\"\\n\")\n        )\n    }\n}\n\nimpl<T: RuntimeConstant, V> ConstraintSystem<T, V> {\n    /// Returns all referenced unknown variables in the system. Might contain repetitions.\n    ///\n    /// Variables referenced in derived variables are not included, as they are not part of the constraints.\n    pub fn referenced_unknown_variables(&self) -> impl Iterator<Item = &V> {\n        self.algebraic_constraints\n            .iter()\n            .flat_map(|c| c.referenced_unknown_variables())\n            .chain(\n                self.bus_interactions\n                    .iter()\n                    .flat_map(|b| b.referenced_unknown_variables()),\n            )\n    }\n\n    /// Extends the constraint system by the constraints of another system.\n    /// No de-duplication of constraints or disambiguation of variables is performed.\n    pub fn extend(&mut self, system: ConstraintSystem<T, V>) {\n        self.algebraic_constraints\n            .extend(system.algebraic_constraints);\n        self.bus_interactions.extend(system.bus_interactions);\n        self.derived_variables.extend(system.derived_variables);\n    }\n}\n\n#[derive(Clone, Debug)]\npub struct DerivedVariable<T, V, E> {\n    pub variable: V,\n    pub computation_method: ComputationMethod<T, E>,\n}\n\nimpl<T, V, E> DerivedVariable<T, V, E> {\n    pub fn new(variable: V, computation_method: ComputationMethod<T, E>) -> Self {\n        Self {\n            variable,\n            computation_method,\n        }\n    }\n}\n\nimpl<T, V, E> Serialize for DerivedVariable<T, V, E>\nwhere\n    V: Serialize,\n    ComputationMethod<T, E>: Serialize,\n{\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, 
/// The quotient (using inversion in the field) of the first argument
Might contain repetitions.\n    pub fn referenced_unknown_variables(&self) -> Box<dyn Iterator<Item = &F> + '_> {\n        match self {\n            ComputationMethod::Constant(_) => Box::new(std::iter::empty()),\n            ComputationMethod::QuotientOrZero(e1, e2) => Box::new(\n                e1.referenced_unknown_variables()\n                    .chain(e2.referenced_unknown_variables()),\n            ),\n        }\n    }\n}\n\nimpl<T: RuntimeConstant + Substitutable<V>, V: Ord + Clone + Eq>\n    ComputationMethod<T, GroupedExpression<T, V>>\n{\n    /// Substitute a variable by a symbolically known expression. The variable can be known or unknown.\n    /// If it was already known, it will be substituted in the known expressions.\n    pub fn substitute_by_known(&mut self, variable: &V, substitution: &T) {\n        match self {\n            ComputationMethod::Constant(_) => {}\n            ComputationMethod::QuotientOrZero(e1, e2) => {\n                e1.substitute_by_known(variable, substitution);\n                e2.substitute_by_known(variable, substitution);\n            }\n        }\n    }\n\n    /// Substitute an unknown variable by a GroupedExpression.\n    ///\n    /// Note this does NOT work properly if the variable is used inside a\n    /// known SymbolicExpression.\n    pub fn substitute_by_unknown(&mut self, variable: &V, substitution: &GroupedExpression<T, V>) {\n        match self {\n            ComputationMethod::Constant(_) => {}\n            ComputationMethod::QuotientOrZero(e1, e2) => {\n                e1.substitute_by_unknown(variable, substitution);\n                e2.substitute_by_unknown(variable, substitution);\n            }\n        }\n    }\n}\n\n/// A bus interaction.\n#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize)]\npub struct BusInteraction<V> {\n    /// The ID of the bus.\n    #[serde(rename = \"id\")]\n    pub bus_id: V,\n    /// The multiplicity of the bus interaction. 
In most cases,\n    /// this should evaluate to 1 or -1.\n    #[serde(rename = \"mult\")]\n    pub multiplicity: V,\n    /// The payload of the bus interaction.\n    #[serde(rename = \"args\")]\n    pub payload: Vec<V>,\n}\n\nimpl<V> BusInteraction<V> {\n    pub fn fields(&self) -> impl Iterator<Item = &V> {\n        Box::new(\n            [&self.bus_id, &self.multiplicity]\n                .into_iter()\n                .chain(self.payload.iter()),\n        )\n    }\n\n    pub fn fields_mut(&mut self) -> impl Iterator<Item = &mut V> {\n        Box::new(\n            [&mut self.bus_id, &mut self.multiplicity]\n                .into_iter()\n                .chain(self.payload.iter_mut()),\n        )\n    }\n}\n\nimpl<V: Display> Display for BusInteraction<V> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"BusInteraction {{ bus_id: {}, multiplicity: {}, payload: {} }}\",\n            self.bus_id,\n            self.multiplicity,\n            self.payload.iter().format(\", \")\n        )\n    }\n}\n\nimpl<V> FromIterator<V> for BusInteraction<V> {\n    fn from_iter<T: IntoIterator<Item = V>>(iter: T) -> Self {\n        let mut iter = iter.into_iter();\n        let bus_id = iter.next().unwrap();\n        let multiplicity = iter.next().unwrap();\n        let payload = iter.collect();\n        BusInteraction {\n            bus_id,\n            payload,\n            multiplicity,\n        }\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> BusInteraction<GroupedExpression<T, V>> {\n    /// Converts a bus interactions with fields represented by expressions\n    /// to a bus interaction with fields represented by range constraints.\n    pub fn to_range_constraints(\n        &self,\n        range_constraints: &impl RangeConstraintProvider<T::FieldType, V>,\n    ) -> BusInteraction<RangeConstraint<T::FieldType>> {\n        BusInteraction::from_iter(\n            self.fields()\n                
.map(|expr| expr.range_constraint(range_constraints)),\n        )\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Hash + Ord + Eq + Display>\n    BusInteraction<GroupedExpression<T, V>>\n{\n    /// Refines range constraints of the bus interaction's fields\n    /// using the provided `BusInteractionHandler`.\n    /// Returns a list of updates to be executed by the caller.\n    /// Forwards and error by the bus interaction handler.\n    pub fn solve(\n        &self,\n        bus_interaction_handler: &dyn BusInteractionHandler<T>,\n        range_constraint_provider: &impl RangeConstraintProvider<T, V>,\n    ) -> Result<Vec<Effect<T, V>>, ViolatesBusRules> {\n        let range_constraints = self.to_range_constraints(range_constraint_provider);\n        let range_constraints =\n            bus_interaction_handler.handle_bus_interaction_checked(range_constraints)?;\n        Ok(self\n            .fields()\n            .zip_eq(range_constraints.fields())\n            .filter(|(expr, _)| expr.is_affine())\n            .flat_map(|(expr, rc)| {\n                expr.referenced_unknown_variables().filter_map(move |var| {\n                    // `k * var + e` is in range rc <=>\n                    // `var` is in range `(rc - RC[e]) / k` = `rc / k + RC[-e / k]`\n                    // If we solve `expr` for `var`, we get `-e / k`.\n                    let k = expr\n                        .coefficient_of_variable_in_affine_part(var)\n                        .unwrap()\n                        .try_to_number()?;\n                    let expr = AlgebraicConstraint::assert_zero(expr).try_solve_for(var)?;\n                    let rc = rc\n                        .multiple(T::from(1) / k)\n                        .combine_sum(&expr.range_constraint(range_constraint_provider));\n                    (!rc.is_unconstrained()).then(|| Effect::RangeConstraint(var.clone(), rc))\n                })\n            })\n            .collect())\n    }\n}\n\nimpl<T, V> 
BusInteraction<GroupedExpression<T, V>> {\n    /// Returns the set of referenced unknown variables. Might contain repetitions.\n    pub fn referenced_unknown_variables(&self) -> Box<dyn Iterator<Item = &V> + '_> {\n        Box::new(\n            self.fields()\n                .flat_map(|expr| expr.referenced_unknown_variables()),\n        )\n    }\n}\n\n#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)]\npub enum ConstraintRef<'a, T, V> {\n    AlgebraicConstraint(AlgebraicConstraint<&'a GroupedExpression<T, V>>),\n    BusInteraction(&'a BusInteraction<GroupedExpression<T, V>>),\n}\n\nimpl<'a, T, V> ConstraintRef<'a, T, V> {\n    pub fn referenced_unknown_variables(&self) -> Box<dyn Iterator<Item = &V> + '_> {\n        match self {\n            ConstraintRef::AlgebraicConstraint(expr) => expr.referenced_unknown_variables(),\n            ConstraintRef::BusInteraction(bus_interaction) => {\n                bus_interaction.referenced_unknown_variables()\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/effect.rs",
    "content": "use crate::{range_constraint::RangeConstraint, runtime_constant::RuntimeConstant};\n\n/// The effect of solving a symbolic equation.\n#[derive(Clone, PartialEq, Eq)]\npub enum Effect<T: RuntimeConstant, V> {\n    /// Variable can be assigned a value.\n    Assignment(V, T),\n    /// We learnt a new range constraint on variable.\n    RangeConstraint(V, RangeConstraint<T::FieldType>),\n    /// A run-time assertion. If this fails, we have conflicting constraints.\n    Assertion(Assertion<T>),\n    /// A variable is assigned one of two alternative expressions, depending on a condition.\n    ConditionalAssignment {\n        variable: V,\n        condition: Condition<T>,\n        in_range_value: T,\n        out_of_range_value: T,\n    },\n}\n\n/// A run-time assertion. If this fails, we have conflicting constraints.\n#[derive(Clone, PartialEq, Eq)]\npub struct Assertion<T: RuntimeConstant> {\n    pub lhs: T,\n    pub rhs: T,\n    /// If this is true, we assert that both sides are equal.\n    /// Otherwise, we assert that they are different.\n    pub expected_equal: bool,\n}\n\nimpl<T: RuntimeConstant> Assertion<T> {\n    pub fn assert_is_zero<V>(condition: T) -> Effect<T, V> {\n        Self::assert_eq(condition, T::from_u64(0))\n    }\n    pub fn assert_is_nonzero<V>(condition: T) -> Effect<T, V> {\n        Self::assert_neq(condition, T::from_u64(0))\n    }\n    pub fn assert_eq<V>(lhs: T, rhs: T) -> Effect<T, V> {\n        Effect::Assertion(Assertion {\n            lhs,\n            rhs,\n            expected_equal: true,\n        })\n    }\n    pub fn assert_neq<V>(lhs: T, rhs: T) -> Effect<T, V> {\n        Effect::Assertion(Assertion {\n            lhs,\n            rhs,\n            expected_equal: false,\n        })\n    }\n}\n\n#[derive(Clone, PartialEq, Eq)]\npub struct Condition<T: RuntimeConstant> {\n    pub value: T,\n    pub condition: RangeConstraint<T::FieldType>,\n}\n"
  },
  {
    "path": "constraint-solver/src/grouped_expression.rs",
    "content": "use std::{\n    collections::{BTreeMap, HashMap, HashSet},\n    fmt::Display,\n    hash::Hash,\n    iter::{once, Sum},\n    ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub},\n};\n\nuse crate::runtime_constant::{RuntimeConstant, Substitutable, VarTransformable};\nuse itertools::Itertools;\nuse num_traits::One;\nuse num_traits::Zero;\nuse powdr_number::FieldElement;\nuse serde::{Serialize, Serializer};\n\nuse super::range_constraint::RangeConstraint;\nuse super::symbolic_expression::SymbolicExpression;\n\n/// Terms with more than `MAX_SUM_SIZE_FOR_QUADRATIC_ANALYSIS` quadratic terms\n/// are not analyzed for pairs that sum to zero.\nconst MAX_SUM_SIZE_FOR_QUADRATIC_ANALYSIS: usize = 20;\n\n/// A symbolic expression in unknown variables of type `V` and (symbolically)\n/// known terms, representing a sum of (super-)quadratic, linear and constant parts.\n/// The quadratic terms are of the form `X * Y`, where `X` and `Y` are\n/// `GroupedExpression`s that have at least one unknown.\n/// The linear terms are of the form `a * X`, where `a` is a (symbolically) known\n/// value and `X` is an unknown variable.\n/// The constant term is a (symbolically) known value.\n///\n/// It also provides ways to quickly update the expression when the value of\n/// an unknown variable gets known and provides functions to solve\n/// (some kinds of) equations.\n///\n/// The name is derived from the fact that it groups linear terms by variable.\n#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]\npub struct GroupedExpression<T, V> {\n    /// Quadratic terms of the form `a * X * Y`, where `a` is a (symbolically)\n    /// known value and `X` and `Y` are grouped expressions that\n    /// have at least one unknown.\n    quadratic: Vec<(Self, Self)>,\n    /// Linear terms of the form `a * X`, where `a` is a (symbolically) known\n    /// value and `X` is an unknown variable.\n    linear: BTreeMap<V, T>,\n    /// Constant term, a (symbolically) known value.\n    constant: 
T,\n}\n\n/// A component of a grouped expression.\npub enum GroupedExpressionComponent<T, V> {\n    /// A quadratic component `(c1, c1)`, representing `c1 * c2`.\n    Quadratic(GroupedExpression<T, V>, GroupedExpression<T, V>),\n    /// A linear component `(v, c)`, representing `c * v`.\n    Linear(V, T),\n    /// A constant component `c`.\n    Constant(T),\n}\n\nimpl<F, T, V> From<GroupedExpressionComponent<T, V>> for GroupedExpression<T, V>\nwhere\n    F: FieldElement,\n    T: RuntimeConstant<FieldType = F>,\n    V: Clone + Ord + Eq,\n{\n    fn from(s: GroupedExpressionComponent<T, V>) -> Self {\n        match s {\n            GroupedExpressionComponent::Quadratic(l, r) => Self {\n                quadratic: vec![(l, r)],\n                linear: Default::default(),\n                constant: T::zero(),\n            },\n            GroupedExpressionComponent::Linear(v, c) => Self {\n                quadratic: Default::default(),\n                linear: [(v, c)].into_iter().collect(),\n                constant: T::zero(),\n            },\n            GroupedExpressionComponent::Constant(c) => Self {\n                quadratic: Default::default(),\n                linear: Default::default(),\n                constant: c,\n            },\n        }\n    }\n}\n\nimpl<F: FieldElement, T: RuntimeConstant<FieldType = F>, V> GroupedExpression<T, V> {\n    pub fn from_number(k: F) -> Self {\n        Self {\n            quadratic: Default::default(),\n            linear: Default::default(),\n            constant: T::from(k),\n        }\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Zero for GroupedExpression<T, V> {\n    fn zero() -> Self {\n        Self {\n            quadratic: Default::default(),\n            linear: Default::default(),\n            constant: T::zero(),\n        }\n    }\n\n    fn is_zero(&self) -> bool {\n        self.try_to_known().is_some_and(|k| k.is_known_zero())\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> One for 
GroupedExpression<T, V> {\n    fn one() -> Self {\n        Self {\n            quadratic: Default::default(),\n            linear: Default::default(),\n            constant: T::one(),\n        }\n    }\n\n    fn is_one(&self) -> bool {\n        self.try_to_known().is_some_and(|k| k.is_known_one())\n    }\n}\n\nimpl<F: FieldElement, V: Ord + Clone + Eq> GroupedExpression<SymbolicExpression<F, V>, V> {\n    pub fn from_known_symbol(symbol: V, rc: RangeConstraint<F>) -> Self {\n        Self::from_runtime_constant(SymbolicExpression::from_symbol(symbol, rc))\n    }\n}\n\nimpl<T: RuntimeConstant, V: Ord + Clone + Eq> GroupedExpression<T, V> {\n    pub fn from_runtime_constant(constant: T) -> Self {\n        Self {\n            quadratic: Default::default(),\n            linear: Default::default(),\n            constant,\n        }\n    }\n\n    pub fn from_unknown_variable(var: V) -> Self {\n        Self {\n            quadratic: Default::default(),\n            linear: [(var.clone(), T::one())].into_iter().collect(),\n            constant: T::zero(),\n        }\n    }\n\n    /// If this expression does not contain unknown variables, returns the symbolic expression.\n    pub fn try_to_known(&self) -> Option<&T> {\n        if self.quadratic.is_empty() && self.linear.is_empty() {\n            Some(&self.constant)\n        } else {\n            None\n        }\n    }\n\n    /// Returns true if this expression does not contain any quadratic terms.\n    pub fn is_affine(&self) -> bool {\n        !self.is_quadratic()\n    }\n\n    /// If the expression is a known number, returns it.\n    pub fn try_to_number(&self) -> Option<T::FieldType> {\n        self.try_to_known()?.try_to_number()\n    }\n\n    /// If the expression is equal to `GroupedExpression::from_unknown_variable(v)`, returns `v`.\n    pub fn try_to_simple_unknown(&self) -> Option<V> {\n        if self.is_quadratic() || !self.constant.is_known_zero() {\n            return None;\n        }\n        let Ok((var, 
coeff)) = self.linear.iter().exactly_one() else {\n            return None;\n        };\n        if !coeff.is_known_one() {\n            return None;\n        }\n        Some(var.clone())\n    }\n\n    /// Returns true if this expression contains at least one quadratic term.\n    pub fn is_quadratic(&self) -> bool {\n        !self.quadratic.is_empty()\n    }\n\n    /// Returns `(l, r)` if `self == l * r`.\n    pub fn try_as_single_product(&self) -> Option<(&Self, &Self)> {\n        if self.linear.is_empty() && self.constant.is_known_zero() {\n            match self.quadratic.as_slice() {\n                [(l, r)] => Some((l, r)),\n                _ => None,\n            }\n        } else {\n            None\n        }\n    }\n\n    /// Returns `vec![f1, f2, ..., fn]` such that `self` is equivalent to\n    /// `c * f1 * f2 * ... * fn` for some constant `c`.\n    /// Tries to find as many factors as possible and also tries to normalize\n    /// the factors as much as possible.\n    pub fn to_factors(&self) -> Vec<Self> {\n        let summands = self.quadratic.len()\n            + self.linear.len()\n            + if self.constant.is_known_zero() { 0 } else { 1 };\n        if summands == 0 {\n            vec![Self::zero()]\n        } else if summands == 1 {\n            if let [(l, r)] = self.quadratic.as_slice() {\n                l.to_factors().into_iter().chain(r.to_factors()).collect()\n            } else if let Some((var, _)) = self.linear.iter().next() {\n                vec![Self::from_unknown_variable(var.clone())]\n            } else {\n                vec![]\n            }\n        } else {\n            // Try to normalize\n            let divide_by = if !self.constant.is_known_zero() {\n                // If the constant is not zero, we divide by the constant.\n                if self.constant.is_known_nonzero() {\n                    self.constant.clone()\n                } else {\n                    T::one()\n                }\n            } else if 
!self.linear.is_empty() {\n                // Otherwise, we divide by the coefficient of the smallest variable.\n                self.linear.iter().next().unwrap().1.clone()\n            } else {\n                // This is a sum of quadratic expressions, we cannot really normalize this part.\n                T::one()\n            };\n            vec![self.clone() * T::one().field_div(&divide_by)]\n        }\n    }\n\n    /// Splits this expression into head and tail, i.e., `self = head + tail`\n    /// head is the first summand, i.e., either the first quadratic term or the first linear term.\n    pub fn try_split_head_tail(mut self) -> Option<(Self, Self)> {\n        if !self.quadratic.is_empty() {\n            let mut quadratic = self.quadratic.into_iter();\n            let (hl, hr) = quadratic.next().unwrap();\n            self.quadratic = quadratic.collect();\n            Some(((hl * hr), self))\n        } else if !self.linear.is_empty() {\n            let (hv, hc) = self.linear.pop_first()?;\n            Some((GroupedExpressionComponent::Linear(hv, hc).into(), self))\n        } else {\n            None\n        }\n    }\n\n    /// Returns the linear components of this expression, i.e. 
summands that we were\n    /// able to determine to be only a runtime constant times a single variable.\n    /// If `is_affine()` returns true, this returns all summands except the constant offset.\n    /// Otherwise, the variables returned here might also appear inside the higher order terms\n    /// and this the dependency on these variables might be more complicated than just a\n    /// runtime constant factor.\n    pub fn linear_components(\n        &self,\n    ) -> impl DoubleEndedIterator<Item = (&V, &T)> + ExactSizeIterator<Item = (&V, &T)> + Clone\n    {\n        self.linear.iter()\n    }\n\n    /// Returns the constant offset in this expression.\n    pub fn constant_offset(&self) -> &T {\n        &self.constant\n    }\n\n    /// Returns a slice of the quadratic components of this expression.\n    pub fn quadratic_components(&self) -> &[(Self, Self)] {\n        &self.quadratic\n    }\n\n    /// Turns this expression into an iterator over its summands.\n    pub fn into_summands(self) -> impl Iterator<Item = GroupedExpressionComponent<T, V>> {\n        self.quadratic\n            .into_iter()\n            .map(|(l, r)| GroupedExpressionComponent::Quadratic(l, r))\n            .chain(\n                self.linear\n                    .into_iter()\n                    .map(|(v, c)| GroupedExpressionComponent::Linear(v, c)),\n            )\n            .chain(\n                (!self.constant.is_zero())\n                    .then_some(GroupedExpressionComponent::Constant(self.constant)),\n            )\n    }\n\n    /// Computes the degree of a GroupedExpression in the unknown variables.\n    /// Note that it might overestimate the degree if the expression contains\n    /// terms that cancel each other out, e.g. 
`a * (b + 1) - a * b - a`.\n    /// Variables inside runtime constants are ignored.\n    pub fn degree(&self) -> usize {\n        self.quadratic\n            .iter()\n            .map(|(l, r)| l.degree() + r.degree())\n            .chain((!self.linear.is_empty()).then_some(1))\n            .max()\n            .unwrap_or(0)\n    }\n\n    /// Computes the degree of a variable in this expression.\n    /// Variables inside runtime constants are ignored.\n    pub fn degree_of_variable(&self, var: &V) -> usize {\n        let linear_degree = if self.linear.contains_key(var) { 1 } else { 0 };\n        self.quadratic\n            .iter()\n            .map(|(l, r)| l.degree_of_variable(var) + r.degree_of_variable(var))\n            .chain(once(linear_degree))\n            .max()\n            .unwrap()\n    }\n\n    /// Returns the coefficient of the variable `variable` in the affine part of this\n    /// expression.\n    /// If the expression is affine, this is the actual coefficient of the variable\n    /// in the expression. 
Otherwise, the quadratic part of the expression could\n    /// also contain the variable and thus the actual coefficient might be different\n    /// (even zero).\n    pub fn coefficient_of_variable_in_affine_part<'a>(&'a self, var: &V) -> Option<&'a T> {\n        self.linear.get(var)\n    }\n\n    /// If `self` contains `var` exactly once in an affine way,\n    /// returns `Some((coeff, rest))` where `self = coeff * var + rest`.\n    ///\n    /// This is relatively expensive because it needs to construct a new\n    /// GroupedExpression.\n    pub fn try_extract_affine_var(&self, var: V) -> Option<(T, Self)> {\n        if self\n            .referenced_unknown_variables()\n            .filter(|v| *v == &var)\n            .count()\n            != 1\n        {\n            return None;\n        }\n        let coeff = self.linear.get(&var)?.clone();\n        let mut rest = self.clone();\n        rest.linear.remove(&var);\n        Some((coeff, rest))\n    }\n\n    /// Returns the range constraint of the full expression.\n    pub fn range_constraint(\n        &self,\n        range_constraints: &impl RangeConstraintProvider<T::FieldType, V>,\n    ) -> RangeConstraint<T::FieldType> {\n        self.quadratic\n            .iter()\n            .map(|(l, r)| {\n                if l == r {\n                    l.range_constraint(range_constraints).square()\n                } else {\n                    l.range_constraint(range_constraints)\n                        .combine_product(&r.range_constraint(range_constraints))\n                }\n            })\n            .chain(self.linear.iter().map(|(var, coeff)| {\n                range_constraints\n                    .get(var)\n                    .combine_product(&coeff.range_constraint())\n            }))\n            .chain(std::iter::once(self.constant.range_constraint()))\n            .reduce(|rc1, rc2| rc1.combine_sum(&rc2))\n            .unwrap_or_else(|| RangeConstraint::from_value(0.into()))\n    }\n}\n\nimpl<T: 
FieldElement, V: Ord + Clone + Eq> GroupedExpression<T, V> {\n    pub fn substitute_simple(&mut self, variable: &V, substitution: T) {\n        if self.linear.contains_key(variable) {\n            let coeff = self.linear.remove(variable).unwrap();\n            self.constant += coeff * substitution;\n        }\n\n        let mut to_add = GroupedExpression::zero();\n        self.quadratic.retain_mut(|(l, r)| {\n            l.substitute_simple(variable, substitution);\n            r.substitute_simple(variable, substitution);\n            match (l.try_to_known(), r.try_to_known()) {\n                (Some(l), Some(r)) => {\n                    self.constant += *l * *r;\n                    false\n                }\n                (Some(l), None) => {\n                    if !l.is_zero() {\n                        to_add += r.clone() * l;\n                    }\n                    false\n                }\n                (None, Some(r)) => {\n                    if !r.is_zero() {\n                        to_add += l.clone() * r;\n                    }\n                    false\n                }\n                _ => true,\n            }\n        });\n        // remove_quadratic_terms_adding_to_zero(&mut self.quadratic);\n\n        if !to_add.is_zero() {\n            *self += to_add;\n        }\n    }\n}\nimpl<T: RuntimeConstant + Substitutable<V>, V: Ord + Clone + Eq> GroupedExpression<T, V> {\n    /// Substitute a variable by a symbolically known expression. The variable can be known or unknown.\n    /// If it was already known, it will be substituted in the known expressions.\n    pub fn substitute_by_known(&mut self, variable: &V, substitution: &T) {\n        self.constant.substitute(variable, substitution);\n\n        if self.linear.contains_key(variable) {\n            // If the variable is a key in `linear`, it must be unknown\n            // and thus can only occur there. 
Otherwise, it can be in\n            // any symbolic expression.\n            // We replace the variable by a symbolic expression, so it goes into the constant part.\n            let coeff = self.linear.remove(variable).unwrap();\n            self.constant += coeff * substitution.clone();\n        } else {\n            for coeff in self.linear.values_mut() {\n                coeff.substitute(variable, substitution);\n            }\n            self.linear.retain(|_, f| !f.is_known_zero());\n        }\n\n        // TODO can we do that without moving everything?\n        // In the end, the order does not matter much.\n\n        let mut to_add = GroupedExpression::zero();\n        self.quadratic.retain_mut(|(l, r)| {\n            l.substitute_by_known(variable, substitution);\n            r.substitute_by_known(variable, substitution);\n            match (l.try_to_known(), r.try_to_known()) {\n                (Some(l), Some(r)) => {\n                    to_add += GroupedExpression::from_runtime_constant(l.clone() * r.clone());\n                    false\n                }\n                (Some(l), None) => {\n                    to_add += r.clone() * l;\n                    false\n                }\n                (None, Some(r)) => {\n                    to_add += l.clone() * r;\n                    false\n                }\n                _ => true,\n            }\n        });\n        remove_quadratic_terms_adding_to_zero(&mut self.quadratic);\n\n        if to_add.try_to_known().map(|ta| ta.is_known_zero()) != Some(true) {\n            *self += to_add;\n        }\n    }\n\n    /// Substitute an unknown variable by a GroupedExpression.\n    ///\n    /// Note this does NOT work properly if the variable is used inside a\n    /// known SymbolicExpression.\n    pub fn substitute_by_unknown(&mut self, variable: &V, substitution: &GroupedExpression<T, V>) {\n        if !self.referenced_unknown_variables().any(|v| v == variable) {\n            return;\n        }\n\n      
  let mut to_add = GroupedExpression::zero();\n        for (var, coeff) in std::mem::take(&mut self.linear) {\n            if var == *variable {\n                to_add += substitution.clone() * coeff;\n            } else {\n                self.linear.insert(var, coeff);\n            }\n        }\n\n        self.quadratic = std::mem::take(&mut self.quadratic)\n            .into_iter()\n            .filter_map(|(mut l, mut r)| {\n                l.substitute_by_unknown(variable, substitution);\n                r.substitute_by_unknown(variable, substitution);\n                match (l.try_to_known(), r.try_to_known()) {\n                    (Some(lval), Some(rval)) => {\n                        to_add += Self::from_runtime_constant(lval.clone() * rval.clone());\n                        None\n                    }\n                    (Some(lval), None) => {\n                        to_add += r * lval;\n                        None\n                    }\n                    (None, Some(rval)) => {\n                        to_add += l * rval;\n                        None\n                    }\n                    _ => Some((l, r)),\n                }\n            })\n            .collect();\n        remove_quadratic_terms_adding_to_zero(&mut self.quadratic);\n\n        *self += to_add;\n    }\n}\n\nimpl<T, V> GroupedExpression<T, V> {\n    /// Returns the referenced unknown variables. 
Might contain repetitions.\n    pub fn referenced_unknown_variables(&self) -> Box<dyn Iterator<Item = &V> + '_> {\n        let quadratic = self.quadratic.iter().flat_map(|(a, b)| {\n            a.referenced_unknown_variables()\n                .chain(b.referenced_unknown_variables())\n        });\n        Box::new(quadratic.chain(self.linear.keys()))\n    }\n}\n\nimpl<T: RuntimeConstant + VarTransformable<V1, V2>, V1: Ord + Clone, V2: Ord + Clone>\n    VarTransformable<V1, V2> for GroupedExpression<T, V1>\n{\n    type Transformed = GroupedExpression<T::Transformed, V2>;\n\n    fn try_transform_var_type(\n        &self,\n        var_transform: &mut impl FnMut(&V1) -> Option<V2>,\n    ) -> Option<Self::Transformed> {\n        Some(GroupedExpression {\n            quadratic: self\n                .quadratic\n                .iter()\n                .map(|(l, r)| {\n                    Some((\n                        l.try_transform_var_type(var_transform)?,\n                        r.try_transform_var_type(var_transform)?,\n                    ))\n                })\n                .collect::<Option<Vec<_>>>()?,\n            linear: self\n                .linear\n                .iter()\n                .map(|(var, coeff)| {\n                    let new_var = var_transform(var)?;\n                    Some((new_var, coeff.try_transform_var_type(var_transform)?))\n                })\n                .collect::<Option<BTreeMap<_, _>>>()?,\n            constant: self.constant.try_transform_var_type(var_transform)?,\n        })\n    }\n}\n\npub trait RangeConstraintProvider<T: FieldElement, V> {\n    fn get(&self, var: &V) -> RangeConstraint<T>;\n}\n\nimpl<R: RangeConstraintProvider<T, V>, T: FieldElement, V> RangeConstraintProvider<T, V> for &R {\n    fn get(&self, var: &V) -> RangeConstraint<T> {\n        R::get(self, var)\n    }\n}\n\nimpl<T: FieldElement, V: Eq + Hash> RangeConstraintProvider<T, V>\n    for HashMap<V, RangeConstraint<T>>\n{\n    fn get(&self, var: &V) 
-> RangeConstraint<T> {\n        HashMap::get(self, var).cloned().unwrap_or_default()\n    }\n}\n\n#[derive(Clone, Copy)]\npub struct NoRangeConstraints;\nimpl<T: FieldElement, V> RangeConstraintProvider<T, V> for NoRangeConstraints {\n    fn get(&self, _var: &V) -> RangeConstraint<T> {\n        RangeConstraint::default()\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Add for GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn add(mut self, rhs: Self) -> Self {\n        self += rhs;\n        self\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Add for &GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn add(self, rhs: Self) -> Self::Output {\n        self.clone() + rhs.clone()\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> AddAssign<GroupedExpression<T, V>>\n    for GroupedExpression<T, V>\n{\n    fn add_assign(&mut self, rhs: Self) {\n        self.quadratic = combine_removing_zeros(std::mem::take(&mut self.quadratic), rhs.quadratic);\n        for (var, coeff) in rhs.linear {\n            self.linear\n                .entry(var.clone())\n                .and_modify(|f| *f += coeff.clone())\n                .or_insert_with(|| coeff);\n        }\n        self.constant += rhs.constant.clone();\n        self.linear.retain(|_, f| !f.is_known_zero());\n    }\n}\n\n/// Returns the sum of these quadratic terms while removing terms that\n/// cancel each other out.\nfn combine_removing_zeros<E: PartialEq>(first: Vec<(E, E)>, mut second: Vec<(E, E)>) -> Vec<(E, E)>\nwhere\n    for<'a> &'a E: Neg<Output = E>,\n{\n    if first.len() + second.len() > MAX_SUM_SIZE_FOR_QUADRATIC_ANALYSIS {\n        // If there are too many terms, we cannot do this efficiently.\n        return first.into_iter().chain(second).collect();\n    }\n\n    let mut result = first\n        .into_iter()\n        .filter(|first| {\n            // Try to find l1 * r1 inside `second`.\n            if let Some((j, _)) = 
second\n                .iter()\n                .find_position(|second| quadratic_terms_add_to_zero(first, second))\n            {\n                // We found a match, so they cancel each other out, we remove both.\n                second.remove(j);\n                false\n            } else {\n                true\n            }\n        })\n        .collect_vec();\n    result.extend(second);\n    result\n}\n\n/// Removes pairs of items from `terms` whose products add to zero.\nfn remove_quadratic_terms_adding_to_zero<E: PartialEq>(terms: &mut Vec<(E, E)>)\nwhere\n    for<'a> &'a E: Neg<Output = E>,\n{\n    if terms.len() > MAX_SUM_SIZE_FOR_QUADRATIC_ANALYSIS {\n        // If there are too many terms, we cannot do this efficiently.\n        return;\n    }\n\n    let mut to_remove = HashSet::new();\n    for ((i, first), (j, second)) in terms.iter().enumerate().tuple_combinations() {\n        if to_remove.contains(&i) || to_remove.contains(&j) {\n            // We already removed this term.\n            continue;\n        }\n        if quadratic_terms_add_to_zero(first, second) {\n            // We found a match, so they cancel each other out, we remove both.\n            to_remove.insert(i);\n            to_remove.insert(j);\n        }\n    }\n    if !to_remove.is_empty() {\n        *terms = terms\n            .drain(..)\n            .enumerate()\n            .filter(|(i, _)| !to_remove.contains(i))\n            .map(|(_, term)| term)\n            .collect();\n    }\n}\n\n/// Returns true if `first.0 * first.1 = -second.0 * second.1`,\n/// but does not catch all cases.\nfn quadratic_terms_add_to_zero<E: PartialEq>(first: &(E, E), second: &(E, E)) -> bool\nwhere\n    for<'a> &'a E: Neg<Output = E>,\n{\n    let (s0, s1) = second;\n    // Check if `first.0 * first.1 == -(second.0 * second.1)`, but we can swap left and right\n    // and we can put the negation either left or right.\n    let n1 = (&-s0, s1);\n    let n2 = (s0, &-s1);\n    [n1, n2].contains(&(&first.0, 
&first.1)) || [n1, n2].contains(&(&first.1, &first.0))\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Sub for &GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn sub(self, rhs: Self) -> Self::Output {\n        self + &-rhs\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Sub for GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn sub(self, rhs: Self) -> Self::Output {\n        &self - &rhs\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord> GroupedExpression<T, V> {\n    fn negate(&mut self) {\n        for (first, _) in &mut self.quadratic {\n            first.negate()\n        }\n        for coeff in self.linear.values_mut() {\n            *coeff = -coeff.clone();\n        }\n        self.constant = -self.constant.clone();\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord> Neg for GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn neg(mut self) -> Self {\n        self.negate();\n        self\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord> Neg for &GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn neg(self) -> Self::Output {\n        -((*self).clone())\n    }\n}\n\n/// Multiply by known symbolic expression.\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Mul<&T> for GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn mul(mut self, rhs: &T) -> Self {\n        self *= rhs;\n        self\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Mul<T> for GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn mul(self, rhs: T) -> Self {\n        self * &rhs\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> MulAssign<&T> for GroupedExpression<T, V> {\n    fn mul_assign(&mut self, rhs: &T) {\n        if rhs.is_known_zero() {\n            *self = Self::zero();\n        } else {\n            for (first, _) in &mut self.quadratic {\n                *first *= rhs;\n         
   }\n            for coeff in self.linear.values_mut() {\n                *coeff *= rhs.clone();\n            }\n            self.constant *= rhs.clone();\n        }\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Sum for GroupedExpression<T, V> {\n    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {\n        iter.fold(Self::zero(), |mut acc, item| {\n            acc += item;\n            acc\n        })\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Ord + Eq> Mul for GroupedExpression<T, V> {\n    type Output = GroupedExpression<T, V>;\n\n    fn mul(self, rhs: GroupedExpression<T, V>) -> Self {\n        if let Some(k) = rhs.try_to_known() {\n            self * k\n        } else if let Some(k) = self.try_to_known() {\n            rhs * k\n        } else {\n            Self {\n                quadratic: vec![(self, rhs)],\n                linear: Default::default(),\n                constant: T::zero(),\n            }\n        }\n    }\n}\n\nimpl<T: RuntimeConstant + Display, V: Clone + Ord + Display> Display for GroupedExpression<T, V> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        let (sign, s) = self.to_signed_string();\n        if sign {\n            write!(f, \"-({s})\")\n        } else {\n            write!(f, \"{s}\")\n        }\n    }\n}\n\nimpl<T: RuntimeConstant + Display, V: Clone + Ord + Display> GroupedExpression<T, V> {\n    fn to_signed_string(&self) -> (bool, String) {\n        self.quadratic\n            .iter()\n            .map(|(a, b)| {\n                let (a_sign, a) = a.to_signed_string();\n                let (b_sign, b) = b.to_signed_string();\n                (a_sign ^ b_sign, format!(\"({a}) * ({b})\"))\n            })\n            .chain(\n                self.linear\n                    .iter()\n                    .map(|(var, coeff)| match coeff.try_to_number() {\n                        Some(k) if k == T::FieldType::one() => (false, format!(\"{var}\")),\n                        
Some(k) if k == -T::FieldType::one() => (true, format!(\"{var}\")),\n                        _ => {\n                            let (sign, coeff) = Self::symbolic_expression_to_signed_string(coeff);\n                            (sign, format!(\"{coeff} * {var}\"))\n                        }\n                    }),\n            )\n            .chain(match self.constant.try_to_number() {\n                Some(k) if k == T::FieldType::zero() => None,\n                _ => Some(Self::symbolic_expression_to_signed_string(&self.constant)),\n            })\n            .reduce(|(n1, p1), (n2, p2)| {\n                (\n                    n1,\n                    if n1 == n2 {\n                        format!(\"{p1} + {p2}\")\n                    } else {\n                        format!(\"{p1} - {p2}\")\n                    },\n                )\n            })\n            .unwrap_or((false, \"0\".to_string()))\n    }\n\n    fn symbolic_expression_to_signed_string(value: &T) -> (bool, String) {\n        match value.try_to_number() {\n            Some(k) => {\n                if k.is_in_lower_half() {\n                    (false, format!(\"{k}\"))\n                } else {\n                    (true, format!(\"{}\", -k))\n                }\n            }\n            _ => (false, value.to_string()),\n        }\n    }\n}\n\nimpl<T: RuntimeConstant + Serialize, V: Ord + Clone + Eq + Serialize> Serialize\n    for GroupedExpression<T, V>\n{\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        let summands = self.clone().into_summands().collect::<Vec<_>>();\n        if summands.is_empty() {\n            T::zero().serialize(serializer)\n        } else {\n            SumSerializer::new(&summands).serialize(serializer)\n        }\n    }\n}\n\n/// Serializes [1, 2, 3] into ((1, \"+\", 2), \"+\", 3),\nstruct SumSerializer<'a, I> {\n    items: &'a [I],\n}\n\nimpl<'a, I> SumSerializer<'a, I> {\n    pub fn new(items: &'a [I]) -> Self {\n      
  assert!(!items.is_empty());\n        Self { items }\n    }\n}\n\nimpl<'a, I: Serialize> Serialize for SumSerializer<'a, I> {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        let (last, beginning) = self.items.split_last().unwrap();\n        if beginning.is_empty() {\n            last.serialize(serializer)\n        } else {\n            (&SumSerializer { items: beginning }, \"+\", last).serialize(serializer)\n        }\n    }\n}\n\nimpl<T: RuntimeConstant + Serialize, V: Ord + Clone + Eq + Serialize> Serialize\n    for GroupedExpressionComponent<T, V>\n{\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n        match self {\n            GroupedExpressionComponent::Quadratic(l, r) => (l, \"*\", r).serialize(serializer),\n            GroupedExpressionComponent::Linear(v, c) => {\n                if c.is_one() {\n                    v.serialize(serializer)\n                } else {\n                    (c, \"*\", v).serialize(serializer)\n                }\n            }\n            GroupedExpressionComponent::Constant(c) => c.serialize(serializer),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n\n    use std::collections::HashMap;\n\n    use crate::{\n        symbolic_expression::SymbolicExpression,\n        test_utils::{constant, var},\n    };\n\n    use super::*;\n    use expect_test::expect;\n    use powdr_number::GoldilocksField;\n\n    use pretty_assertions::assert_eq;\n\n    type Qse = GroupedExpression<SymbolicExpression<GoldilocksField, &'static str>, &'static str>;\n\n    #[test]\n    fn test_mul() {\n        let x = Qse::from_unknown_variable(\"X\");\n        let y = Qse::from_unknown_variable(\"Y\");\n        let a = Qse::from_known_symbol(\"A\", RangeConstraint::default());\n        let t = x * y + a;\n        assert_eq!(t.to_string(), \"(X) * (Y) + A\");\n    }\n\n    #[test]\n    fn test_add() {\n        let x = Qse::from_unknown_variable(\"X\");\n        let y = 
Qse::from_unknown_variable(\"Y\");\n        let a = Qse::from_unknown_variable(\"A\");\n        let b = Qse::from_known_symbol(\"B\", RangeConstraint::default());\n        let t: Qse = x * y - a + b;\n        assert_eq!(t.to_string(), \"(X) * (Y) - A + B\");\n        assert_eq!(\n            (t.clone() + t).to_string(),\n            \"(X) * (Y) + (X) * (Y) - 2 * A + (B + B)\"\n        );\n    }\n\n    #[test]\n    fn test_mul_by_known() {\n        let x = Qse::from_unknown_variable(\"X\");\n        let y = Qse::from_unknown_variable(\"Y\");\n        let a = Qse::from_known_symbol(\"A\", RangeConstraint::default());\n        let b = Qse::from_known_symbol(\"B\", RangeConstraint::default());\n        let t: Qse = (x * y + a) * b;\n        assert_eq!(t.to_string(), \"(B * X) * (Y) + (A * B)\");\n    }\n\n    #[test]\n    fn test_mul_by_zero() {\n        let x = Qse::from_unknown_variable(\"X\");\n        let y = Qse::from_unknown_variable(\"Y\");\n        let a = Qse::from_known_symbol(\"A\", RangeConstraint::default());\n        let zero = Qse::zero();\n        let t: Qse = x * y + a;\n        assert_eq!(t.to_string(), \"(X) * (Y) + A\");\n        assert_eq!((t.clone() * zero).to_string(), \"0\");\n    }\n\n    #[test]\n    fn test_apply_update() {\n        let x = Qse::from_unknown_variable(\"X\");\n        let y = Qse::from_unknown_variable(\"Y\");\n        let a = Qse::from_known_symbol(\"A\", RangeConstraint::default());\n        let b = Qse::from_known_symbol(\"B\", RangeConstraint::default());\n        let mut t: Qse = (x * y + a) * b;\n        assert_eq!(t.to_string(), \"(B * X) * (Y) + (A * B)\");\n        t.substitute_by_known(\n            &\"B\",\n            &SymbolicExpression::from_symbol(\"B\", RangeConstraint::from_value(7.into())),\n        );\n        assert!(t.is_quadratic());\n        assert_eq!(t.to_string(), \"(7 * X) * (Y) + (A * 7)\");\n        t.substitute_by_known(\n            &\"X\",\n            &SymbolicExpression::from_symbol(\"X\", 
RangeConstraint::from_range(1.into(), 2.into())),\n        );\n        assert!(!t.is_quadratic());\n        assert_eq!(t.to_string(), \"(7 * X) * Y + (A * 7)\");\n        t.substitute_by_known(\n            &\"Y\",\n            &SymbolicExpression::from_symbol(\"Y\", RangeConstraint::from_value(3.into())),\n        );\n        assert!(t.try_to_known().is_some());\n        assert_eq!(t.to_string(), \"((A * 7) + ((7 * X) * 3))\");\n    }\n\n    #[test]\n    fn test_apply_update_inner_zero() {\n        let x = Qse::from_unknown_variable(\"X\");\n        let y = Qse::from_unknown_variable(\"Y\");\n        let a = Qse::from_known_symbol(\"A\", RangeConstraint::default());\n        let b = Qse::from_known_symbol(\"B\", RangeConstraint::default());\n        let mut t: Qse = (x * a + y) * b;\n        assert_eq!(t.to_string(), \"(A * B) * X + B * Y\");\n        t.substitute_by_known(\n            &\"B\",\n            &SymbolicExpression::from_symbol(\"B\", RangeConstraint::from_value(7.into())),\n        );\n        assert_eq!(t.to_string(), \"(A * 7) * X + 7 * Y\");\n        t.substitute_by_known(\n            &\"A\",\n            &SymbolicExpression::from_symbol(\"A\", RangeConstraint::from_value(0.into())),\n        );\n        assert_eq!(t.to_string(), \"7 * Y\");\n    }\n\n    #[test]\n    fn substitute_known() {\n        let x = Qse::from_unknown_variable(\"X\");\n        let y = Qse::from_unknown_variable(\"Y\");\n        let a = Qse::from_known_symbol(\"A\", RangeConstraint::default());\n        let b = Qse::from_known_symbol(\"B\", RangeConstraint::default());\n        let mut t: Qse = (x * a + y) * b.clone() + b;\n        assert_eq!(t.to_string(), \"(A * B) * X + B * Y + B\");\n        // We substitute B by an expression containing B on purpose.\n        t.substitute_by_known(\n            &\"B\",\n            &(SymbolicExpression::from_symbol(\"B\", Default::default())\n                + SymbolicExpression::from(GoldilocksField::from(1))),\n        );\n        
assert_eq!(t.to_string(), \"(A * (B + 1)) * X + (B + 1) * Y + (B + 1)\");\n        t.substitute_by_known(\n            &\"B\",\n            &SymbolicExpression::from_symbol(\"B\", RangeConstraint::from_value(10.into())),\n        );\n        assert_eq!(t.to_string(), \"(A * 11) * X + 11 * Y + 11\");\n    }\n\n    #[test]\n    fn test_substitute_by_unknown_basic_replacement() {\n        let mut expr = var(\"a\");\n        let subst = var(\"x\");\n\n        expr.substitute_by_unknown(&\"a\", &subst);\n        assert_eq!(expr.to_string(), \"x\");\n    }\n\n    #[test]\n    fn test_substitute_by_unknown_linear_to_quadratic() {\n        let mut expr = var(\"x\");\n        let subst = var(\"y\") * var(\"z\") + constant(3);\n        expr.substitute_by_unknown(&\"x\", &subst);\n\n        assert!(expr.is_quadratic());\n        assert_eq!(expr.to_string(), \"(y) * (z) + 3\");\n    }\n\n    #[test]\n    fn test_substitute_by_unknown_inside_quadratic() {\n        let mut expr = var(\"x\") * var(\"y\");\n        let subst = var(\"a\") + constant(1);\n\n        expr.substitute_by_unknown(&\"x\", &subst);\n        assert!(expr.is_quadratic());\n        assert_eq!(expr.to_string(), \"(a + 1) * (y)\");\n    }\n\n    #[test]\n    fn test_substitute_by_unknown_linear() {\n        let mut expr = var(\"x\") + var(\"y\");\n        let subst = var(\"a\") + var(\"b\");\n\n        expr.substitute_by_unknown(&\"x\", &subst);\n        assert!(!expr.is_quadratic());\n        assert_eq!(expr.linear_components().count(), 3);\n        assert_eq!(expr.to_string(), \"a + b + y\");\n    }\n\n    #[test]\n    fn test_complex_expression_multiple_substitution() {\n        let mut expr = (var(\"x\") * var(\"w\")) + var(\"x\") + constant(3) * var(\"y\") + constant(5);\n        assert_eq!(expr.to_string(), \"(x) * (w) + x + 3 * y + 5\");\n\n        let subst = var(\"a\") * var(\"b\") + constant(1);\n\n        expr.substitute_by_unknown(&\"x\", &subst);\n\n        assert_eq!(\n            
expr.to_string(),\n            \"((a) * (b) + 1) * (w) + (a) * (b) + 3 * y + 6\"\n        );\n\n        // Structural validation\n        let [first_quadratic, second_quadratic] = expr\n            .quadratic_components()\n            .iter()\n            .cloned()\n            .collect_vec()\n            .try_into()\n            .unwrap();\n\n        assert_eq!(first_quadratic.0.to_string(), \"(a) * (b) + 1\");\n        let inner_quadratic = first_quadratic.0.quadratic_components();\n        assert_eq!(inner_quadratic[0].0.to_string(), \"a\");\n        assert_eq!(inner_quadratic[0].1.to_string(), \"b\");\n        assert!(first_quadratic.0.linear_components().count() == 0);\n        assert_eq!(\n            first_quadratic.0.constant_offset().try_to_number(),\n            Some(GoldilocksField::from(1)),\n        );\n        assert_eq!(first_quadratic.1.to_string(), \"w\");\n\n        assert_eq!(second_quadratic.0.to_string(), \"a\");\n        assert_eq!(second_quadratic.1.to_string(), \"b\");\n\n        let [linear] = expr.linear_components().collect_vec().try_into().unwrap();\n        assert_eq!(linear.0.to_string(), \"y\");\n        assert_eq!(\n            expr.constant_offset().try_to_number(),\n            Some(GoldilocksField::from(6)),\n        );\n    }\n\n    #[test]\n    fn test_substitute_by_unknown_coeff_distribution() {\n        let mut expr = constant(2) * var(\"a\") + constant(7);\n        assert_eq!(expr.to_string(), \"2 * a + 7\");\n\n        let subst = var(\"x\") * var(\"y\");\n\n        expr.substitute_by_unknown(&\"a\", &subst);\n\n        assert_eq!(expr.to_string(), \"(2 * x) * (y) + 7\");\n\n        let quadratic = expr.quadratic_components();\n        assert_eq!(quadratic.len(), 1);\n        assert_eq!(quadratic[0].0.to_string(), \"2 * x\");\n        assert_eq!(quadratic[0].1.to_string(), \"y\");\n        assert!(expr.linear_components().next().is_none());\n        assert_eq!(\n            expr.constant_offset().try_to_number(),\n           
 Some(GoldilocksField::from(7))\n        );\n    }\n\n    #[test]\n    fn combine_removing_zeros() {\n        let a = var(\"x\") * var(\"y\") + var(\"z\") * constant(3);\n        let b = var(\"t\") * var(\"u\") + constant(5) + var(\"y\") * var(\"x\");\n        assert_eq!(\n            (a.clone() - b.clone()).to_string(),\n            \"-((t) * (u) - 3 * z + 5)\"\n        );\n        assert_eq!((b - a).to_string(), \"(t) * (u) - 3 * z + 5\");\n    }\n\n    #[test]\n    fn remove_quadratic_zeros_after_substitution() {\n        let a = var(\"x\") * var(\"r\") + var(\"z\") * constant(3);\n        let b = var(\"t\") * var(\"u\") + constant(5) + var(\"y\") * var(\"x\");\n        let mut t = b - a;\n        // Cannot simplify yet, because the terms are different\n        assert_eq!(\n            t.to_string(),\n            \"(t) * (u) + (y) * (x) - (x) * (r) - 3 * z + 5\"\n        );\n        t.substitute_by_unknown(&\"r\", &var(\"y\"));\n        // Now the first term in `a` is equal to the last in `b`.\n        assert_eq!(t.to_string(), \"(t) * (u) - 3 * z + 5\");\n    }\n\n    #[test]\n    fn to_factors() {\n        let expr = (constant(3) * var(\"x\"))\n            * -var(\"y\")\n            * constant(3)\n            * (constant(5) * var(\"z\") + constant(5))\n            * (constant(2) * var(\"t\") + constant(4) * var(\"z\"))\n            * (var(\"t\") * constant(2));\n        assert_eq!(\n            expr.to_string(),\n            \"-(((((9 * x) * (y)) * (5 * z + 5)) * (2 * t + 4 * z)) * (2 * t))\"\n        );\n        let factors = expr.to_factors().into_iter().format(\", \").to_string();\n        assert_eq!(factors, \"x, y, z + 1, t + 2 * z, t\");\n    }\n\n    #[test]\n    fn rc_of_square() {\n        let expr = (var(\"x\") * var(\"x\")) + constant(3);\n        let rc1 = HashMap::from([(\"x\", RangeConstraint::from_range(1.into(), 2.into()))]);\n        expect!(\"[4, 7] & 0x7\").assert_eq(&expr.range_constraint(&rc1).to_string());\n        let rc2 = 
HashMap::from([(\n            \"x\",\n            RangeConstraint::from_range(-GoldilocksField::from(5), 3.into()),\n        )]);\n        expect!(\"[3, 28] & 0x1f\").assert_eq(&expr.range_constraint(&rc2).to_string());\n        let rc3 = HashMap::from([(\n            \"x\",\n            RangeConstraint::from_range(-GoldilocksField::from(3), 5.into()),\n        )]);\n        expect!(\"[3, 28] & 0x1f\").assert_eq(&expr.range_constraint(&rc3).to_string());\n    }\n\n    #[test]\n    fn serialize_sum() {\n        let expr = [1, 2, 3];\n        let serialized = serde_json::to_string(&SumSerializer::new(&expr)).unwrap();\n        expect!(r#\"[[1,\"+\",2],\"+\",3]\"#).assert_eq(&serialized);\n\n        let expr = [1];\n        let serialized = serde_json::to_string(&SumSerializer::new(&expr)).unwrap();\n        expect!(\"1\").assert_eq(&serialized);\n    }\n\n    #[test]\n    fn serialize_grouped_expression() {\n        let x: GroupedExpression<GoldilocksField, &str> =\n            GroupedExpression::from_unknown_variable(\"X\");\n        let four = GroupedExpression::from_runtime_constant(GoldilocksField::from(4));\n        let expr = four.clone() * (x.clone() * x.clone()) + four.clone() * x.clone() + four;\n        let serialized = serde_json::to_string(&expr).unwrap();\n        expect!([r#\"[[[[4,\"*\",\"X\"],\"*\",\"X\"],\"+\",[4,\"*\",\"X\"]],\"+\",4]\"#]).assert_eq(&serialized);\n    }\n\n    #[test]\n    fn serialize_zero() {\n        let expr: GroupedExpression<GoldilocksField, &str> = GroupedExpression::zero();\n        let serialized = serde_json::to_string(&expr).unwrap();\n        expect!(\"0\").assert_eq(&serialized);\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/indexed_constraint_system.rs",
    "content": "use std::{\n    cmp,\n    collections::{BTreeSet, HashMap, VecDeque},\n    fmt::Display,\n    hash::Hash,\n};\n\nuse bitvec::vec::BitVec;\nuse derivative::Derivative;\nuse itertools::Itertools;\n\nuse crate::{\n    constraint_system::{\n        AlgebraicConstraint, BusInteraction, ConstraintRef, ConstraintSystem, DerivedVariable,\n    },\n    grouped_expression::GroupedExpression,\n    runtime_constant::{RuntimeConstant, Substitutable},\n};\n\n/// Applies multiple substitutions to a ConstraintSystem in an efficient manner.\npub fn apply_substitutions<T: RuntimeConstant + Substitutable<V>, V: Hash + Eq + Clone + Ord>(\n    constraint_system: ConstraintSystem<T, V>,\n    substitutions: impl IntoIterator<Item = (V, GroupedExpression<T, V>)>,\n) -> ConstraintSystem<T, V> {\n    let mut indexed_constraint_system = IndexedConstraintSystem::from(constraint_system);\n    indexed_constraint_system.apply_substitutions(substitutions);\n    indexed_constraint_system.into()\n}\n\n/// Applies multiple substitutions to all expressions in a sequence of expressions.\npub fn apply_substitutions_to_expressions<\n    T: RuntimeConstant + Substitutable<V>,\n    V: Hash + Eq + Clone + Ord,\n>(\n    expressions: impl IntoIterator<Item = GroupedExpression<T, V>>,\n    substitutions: impl IntoIterator<Item = (V, GroupedExpression<T, V>)>,\n) -> Vec<GroupedExpression<T, V>> {\n    apply_substitutions(\n        ConstraintSystem {\n            algebraic_constraints: expressions\n                .into_iter()\n                .map(AlgebraicConstraint::assert_zero)\n                .collect(),\n            bus_interactions: Vec::new(),\n            derived_variables: Vec::new(),\n        },\n        substitutions,\n    )\n    .algebraic_constraints\n    .into_iter()\n    .map(|constraint| constraint.expression)\n    .collect()\n}\n\n/// Structure on top of a [`ConstraintSystem`] that stores indices\n/// to more efficiently update the 
constraints.\n#[derive(Derivative)]\n#[derivative(Default(bound = \"\"), Clone)]\npub struct IndexedConstraintSystem<T, V> {\n    /// The constraint system.\n    constraint_system: ConstraintSystem<T, V>,\n    /// Stores where each unknown variable appears.\n    variable_occurrences: HashMap<V, BTreeSet<ConstraintSystemItem>>,\n}\n\n/// Structure on top of [`IndexedConstraintSystem`] that\n/// tracks changes to variables and how they may affect constraints.\n///\n/// In particular, the assumption is that items in the constraint system\n/// need to be \"handled\". Initially, all items need to be \"handled\"\n/// and are put in a queue. Handling an item can cause an update to a variable,\n/// which causes all constraints referencing that variable to be put back into the\n/// queue.\n#[derive(Derivative)]\n#[derivative(Default(bound = \"\"), Clone)]\npub struct IndexedConstraintSystemWithQueue<T, V> {\n    constraint_system: IndexedConstraintSystem<T, V>,\n    queue: ConstraintSystemQueue,\n}\n\n/// A reference to an item in the constraint system, based on the index.\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]\nenum ConstraintSystemItem {\n    /// A reference to an algebraic constraint.\n    AlgebraicConstraint(usize),\n    /// A reference to a bus interaction.\n    BusInteraction(usize),\n    /// A reference to a derived variable. This is only used internally to the\n    /// IndexedConstraintSystem.\n    DerivedVariable(usize),\n}\n\nimpl ConstraintSystemItem {\n    /// Returns an index that is unique across both algebraic constraints and bus interactions.\n    /// Panics for derived variables.\n    fn flat_constraint_id(&self) -> usize {\n        match self {\n            ConstraintSystemItem::AlgebraicConstraint(i) => 2 * i,\n            ConstraintSystemItem::BusInteraction(i) => 2 * i + 1,\n            ConstraintSystemItem::DerivedVariable(_) => panic!(),\n        }\n    }\n\n    /// Returns the index of the item. 
Note that the indices are not disjoint between different kinds\n    /// of items.\n    fn index(&self) -> usize {\n        match self {\n            ConstraintSystemItem::AlgebraicConstraint(index)\n            | ConstraintSystemItem::BusInteraction(index)\n            | ConstraintSystemItem::DerivedVariable(index) => *index,\n        }\n    }\n\n    /// Returns true if this constraint system item is a derived variable instead of an actual constraint.\n    fn is_derived_variable(&self) -> bool {\n        matches!(self, ConstraintSystemItem::DerivedVariable(_))\n    }\n\n    /// Turns this indexed-based item into a reference to the actual constraint.\n    /// Fails for derived variables.\n    fn try_to_constraint_ref<'a, T, V>(\n        self,\n        constraint_system: &'a ConstraintSystem<T, V>,\n    ) -> Option<ConstraintRef<'a, T, V>> {\n        match self {\n            ConstraintSystemItem::AlgebraicConstraint(i) => {\n                Some(ConstraintRef::AlgebraicConstraint(\n                    constraint_system.algebraic_constraints[i].as_ref(),\n                ))\n            }\n            ConstraintSystemItem::BusInteraction(i) => Some(ConstraintRef::BusInteraction(\n                &constraint_system.bus_interactions[i],\n            )),\n            ConstraintSystemItem::DerivedVariable(_) => None,\n        }\n    }\n}\n\nimpl<T: RuntimeConstant, V: Hash + Eq + Clone + Ord> From<ConstraintSystem<T, V>>\n    for IndexedConstraintSystem<T, V>\n{\n    fn from(constraint_system: ConstraintSystem<T, V>) -> Self {\n        let variable_occurrences = variable_occurrences(&constraint_system);\n        IndexedConstraintSystem {\n            constraint_system,\n            variable_occurrences,\n        }\n    }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Eq> From<IndexedConstraintSystem<T, V>>\n    for ConstraintSystem<T, V>\n{\n    fn from(indexed_constraint_system: IndexedConstraintSystem<T, V>) -> Self {\n        indexed_constraint_system.constraint_system\n   
 }\n}\n\nimpl<T: RuntimeConstant, V: Clone + Eq> IndexedConstraintSystem<T, V> {\n    pub fn system(&self) -> &ConstraintSystem<T, V> {\n        &self.constraint_system\n    }\n\n    pub fn algebraic_constraints(&self) -> &[AlgebraicConstraint<GroupedExpression<T, V>>] {\n        &self.constraint_system.algebraic_constraints\n    }\n\n    pub fn bus_interactions(&self) -> &[BusInteraction<GroupedExpression<T, V>>] {\n        &self.constraint_system.bus_interactions\n    }\n\n    /// Returns all (unknown) variables in the system. Might contain variables\n    /// that do not appear in the system any more (because the constraints were deleted).\n    /// Does not contain repetitions and is very efficient but returns the variables in a\n    /// non-deterministic order.\n    pub fn variables(&self) -> impl Iterator<Item = &V> {\n        self.variable_occurrences.keys()\n    }\n\n    /// Returns all (unknown) variables that occur in the system in a deterministic order\n    /// but might contain repetitions.\n    pub fn referenced_unknown_variables(&self) -> impl Iterator<Item = &V> {\n        self.constraint_system.referenced_unknown_variables()\n    }\n\n    /// Removes all constraints that do not fulfill the predicate.\n    pub fn retain_algebraic_constraints(\n        &mut self,\n        mut f: impl FnMut(&AlgebraicConstraint<GroupedExpression<T, V>>) -> bool,\n    ) {\n        retain(\n            &mut self.constraint_system.algebraic_constraints,\n            &mut self.variable_occurrences,\n            &mut f,\n            ConstraintSystemItem::AlgebraicConstraint,\n        );\n    }\n\n    /// Removes all bus interactions that do not fulfill the predicate.\n    pub fn retain_bus_interactions(\n        &mut self,\n        mut f: impl FnMut(&BusInteraction<GroupedExpression<T, V>>) -> bool,\n    ) {\n        retain(\n            &mut self.constraint_system.bus_interactions,\n            &mut self.variable_occurrences,\n            &mut f,\n            
ConstraintSystemItem::BusInteraction,\n        );\n    }\n\n    /// Removes all derived variables that do not fulfill the predicate.\n    pub fn retain_derived_variables(\n        &mut self,\n        mut f: impl FnMut(&DerivedVariable<T, V, GroupedExpression<T, V>>) -> bool,\n    ) {\n        retain(\n            &mut self.constraint_system.derived_variables,\n            &mut self.variable_occurrences,\n            &mut f,\n            ConstraintSystemItem::DerivedVariable,\n        );\n    }\n}\n\n/// Behaves like `list.retain(f)` but also updates the variable occurrences\n/// in `occurrences`. Note that `constraint_kind_constructor` is used to\n/// create the `ConstraintSystemItem` for the occurrences, so it should\n/// match the type of the items in `list`.\nfn retain<V, Item>(\n    list: &mut Vec<Item>,\n    occurrences: &mut HashMap<V, BTreeSet<ConstraintSystemItem>>,\n    mut f: impl FnMut(&Item) -> bool,\n    constraint_kind_constructor: impl Fn(usize) -> ConstraintSystemItem + Copy,\n) {\n    let mut counter = 0usize;\n    // `replacement_map[i]` = `Some(j)` if item at index `i` is kept and is now at index `j`\n    let mut replacement_map = vec![];\n    list.retain(|c| {\n        let retain = f(c);\n        if retain {\n            replacement_map.push(Some(counter));\n            counter += 1;\n        } else {\n            replacement_map.push(None);\n        }\n        retain\n    });\n    assert_eq!(counter, list.len());\n    // We call it once on zero just to find out which enum variant it returns,\n    // so we can compare the discriminants below.\n    let discriminant = std::mem::discriminant(&constraint_kind_constructor(0));\n    occurrences.values_mut().for_each(|occurrences| {\n        *occurrences = occurrences\n            .iter()\n            .filter_map(|item| {\n                if std::mem::discriminant(item) == discriminant {\n                    // We have an item of the kind we are modifying, so apply\n                    // the 
replacement map\n                    replacement_map[item.index()].map(constraint_kind_constructor)\n                } else {\n                    // This is a constraint of the wrong kind, do not modify it.\n                    Some(*item)\n                }\n            })\n            .collect();\n    });\n    occurrences.retain(|_, occurrences| !occurrences.is_empty());\n}\n\nimpl<T: RuntimeConstant, V: Clone + Eq + Hash> IndexedConstraintSystem<T, V> {\n    /// Adds new algebraic constraints to the system.\n    pub fn add_algebraic_constraints(\n        &mut self,\n        constraints: impl IntoIterator<Item = AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) {\n        self.extend(ConstraintSystem {\n            algebraic_constraints: constraints.into_iter().collect(),\n            bus_interactions: Vec::new(),\n            derived_variables: Vec::new(),\n        });\n    }\n\n    /// Adds new bus interactions to the system.\n    pub fn add_bus_interactions(\n        &mut self,\n        bus_interactions: impl IntoIterator<Item = BusInteraction<GroupedExpression<T, V>>>,\n    ) {\n        self.extend(ConstraintSystem {\n            algebraic_constraints: Vec::new(),\n            bus_interactions: bus_interactions.into_iter().collect(),\n            derived_variables: Vec::new(),\n        });\n    }\n\n    /// Extends the constraint system by the constraints of another system.\n    pub fn extend(&mut self, system: ConstraintSystem<T, V>) {\n        let algebraic_constraint_count = self.constraint_system.algebraic_constraints.len();\n        let bus_interactions_count = self.constraint_system.bus_interactions.len();\n        let derived_variables_count = self.constraint_system.derived_variables.len();\n        // Compute the occurrences of the variables in the new constraints,\n        // but update their indices.\n        // Iterating over hash map here is fine because we are just extending another hash map.\n        #[allow(clippy::iter_over_hash_type)]\n  
      for (variable, occurrences) in variable_occurrences(&system) {\n            let occurrences = occurrences.into_iter().map(|item| match item {\n                ConstraintSystemItem::AlgebraicConstraint(i) => {\n                    ConstraintSystemItem::AlgebraicConstraint(i + algebraic_constraint_count)\n                }\n                ConstraintSystemItem::BusInteraction(i) => {\n                    ConstraintSystemItem::BusInteraction(i + bus_interactions_count)\n                }\n                ConstraintSystemItem::DerivedVariable(i) => {\n                    ConstraintSystemItem::DerivedVariable(i + derived_variables_count)\n                }\n            });\n            self.variable_occurrences\n                .entry(variable)\n                .or_default()\n                .extend(occurrences);\n        }\n        self.constraint_system.extend(system)\n    }\n}\n\nimpl<T: RuntimeConstant, V: Hash + Ord + Eq> IndexedConstraintSystem<T, V> {\n    /// Returns a list of all constraints that contain at least one of the given variables.\n    pub fn constraints_referencing_variables<'a>(\n        &'a self,\n        variables: impl IntoIterator<Item = &'a V> + 'a,\n    ) -> impl Iterator<Item = ConstraintRef<'a, T, V>> + 'a {\n        variables\n            .into_iter()\n            .filter_map(|v| self.variable_occurrences.get(v))\n            .flatten()\n            .unique()\n            .flat_map(|&item| item.try_to_constraint_ref(&self.constraint_system))\n    }\n}\n\nimpl<T: RuntimeConstant + Substitutable<V>, V: Clone + Hash + Ord + Eq>\n    IndexedConstraintSystem<T, V>\n{\n    /// Substitutes a variable with a symbolic expression in the whole system\n    pub fn substitute_by_known(&mut self, variable: &V, substitution: &T) {\n        // Since we substitute by a known value, we do not need to update variable_occurrences.\n        for item in self\n            .variable_occurrences\n            .get(variable)\n            
.unwrap_or(&BTreeSet::new())\n        {\n            substitute_by_known_in_item(&mut self.constraint_system, *item, variable, substitution);\n        }\n    }\n\n    /// Substitute an unknown variable by a GroupedExpression in the whole system.\n    ///\n    /// Note this does NOT work properly if the variable is used inside a\n    /// known SymbolicExpression.\n    ///\n    /// It does not delete the occurrence of `variable` so that it can be used to check\n    /// which constraints it used to occur in.\n    pub fn substitute_by_unknown(&mut self, variable: &V, substitution: &GroupedExpression<T, V>) {\n        let items = self\n            .variable_occurrences\n            .get(variable)\n            .cloned()\n            .unwrap_or(BTreeSet::new());\n        for item in &items {\n            substitute_by_unknown_in_item(\n                &mut self.constraint_system,\n                *item,\n                variable,\n                substitution,\n            );\n        }\n\n        // We just add all variables in the substitution to the items.\n        // It might be that cancellations occur, but we assume it is not worth the overhead.\n        for var in substitution.referenced_unknown_variables().unique() {\n            self.variable_occurrences\n                .entry(var.clone())\n                .or_default()\n                .extend(items.iter().cloned());\n        }\n    }\n\n    /// Applies multiple substitutions to the constraint system in an efficient manner.\n    pub fn apply_substitutions(\n        &mut self,\n        substitutions: impl IntoIterator<Item = (V, GroupedExpression<T, V>)>,\n    ) {\n        // We do not track substitutions yet, but we could.\n        for (variable, substitution) in substitutions {\n            self.substitute_by_unknown(&variable, &substitution);\n        }\n    }\n}\n\n/// Returns a hash map mapping all unknown variables in the constraint system\n/// to the items they occur in.\nfn variable_occurrences<T: 
RuntimeConstant, V: Hash + Eq + Clone>(\n    constraint_system: &ConstraintSystem<T, V>,\n) -> HashMap<V, BTreeSet<ConstraintSystemItem>> {\n    let occurrences_in_algebraic_constraints = constraint_system\n        .algebraic_constraints\n        .iter()\n        .enumerate()\n        .flat_map(|(i, constraint)| {\n            constraint\n                .referenced_unknown_variables()\n                .unique()\n                .map(move |v| (v.clone(), ConstraintSystemItem::AlgebraicConstraint(i)))\n        });\n    let occurrences_in_bus_interactions = constraint_system\n        .bus_interactions\n        .iter()\n        .enumerate()\n        .flat_map(|(i, bus_interaction)| {\n            bus_interaction\n                .fields()\n                .flat_map(|c| c.referenced_unknown_variables())\n                .unique()\n                .map(move |v| (v.clone(), ConstraintSystemItem::BusInteraction(i)))\n        });\n    let occurrences_in_derived_variables = constraint_system\n        .derived_variables\n        .iter()\n        .enumerate()\n        // We ignore the derived variable itself because it is not a constraint\n        // and does not matter in substitutions (if we substitute the derived\n        // variable it is deleted in a later step).\n        .flat_map(\n            |(\n                i,\n                DerivedVariable {\n                    computation_method, ..\n                },\n            )| {\n                computation_method\n                    .referenced_unknown_variables()\n                    .unique()\n                    .map(move |v| (v.clone(), ConstraintSystemItem::DerivedVariable(i)))\n            },\n        );\n    occurrences_in_algebraic_constraints\n        .chain(occurrences_in_bus_interactions)\n        .chain(occurrences_in_derived_variables)\n        .into_grouping_map()\n        .collect()\n}\n\nfn substitute_by_known_in_item<T: RuntimeConstant + Substitutable<V>, V: Ord + Clone + Eq>(\n    
constraint_system: &mut ConstraintSystem<T, V>,\n    item: ConstraintSystemItem,\n    variable: &V,\n    substitution: &T,\n) {\n    match item {\n        ConstraintSystemItem::AlgebraicConstraint(i) => {\n            constraint_system.algebraic_constraints[i]\n                .expression\n                .substitute_by_known(variable, substitution);\n        }\n        ConstraintSystemItem::BusInteraction(i) => {\n            constraint_system.bus_interactions[i]\n                .fields_mut()\n                .for_each(|expr| expr.substitute_by_known(variable, substitution));\n        }\n        ConstraintSystemItem::DerivedVariable(i) => constraint_system.derived_variables[i]\n            .computation_method\n            .substitute_by_known(variable, substitution),\n    }\n}\n\nfn substitute_by_unknown_in_item<T: RuntimeConstant + Substitutable<V>, V: Ord + Clone + Eq>(\n    constraint_system: &mut ConstraintSystem<T, V>,\n    item: ConstraintSystemItem,\n    variable: &V,\n    substitution: &GroupedExpression<T, V>,\n) {\n    match item {\n        ConstraintSystemItem::AlgebraicConstraint(i) => {\n            constraint_system.algebraic_constraints[i]\n                .expression\n                .substitute_by_unknown(variable, substitution);\n        }\n        ConstraintSystemItem::BusInteraction(i) => {\n            constraint_system.bus_interactions[i]\n                .fields_mut()\n                .for_each(|expr| expr.substitute_by_unknown(variable, substitution));\n        }\n        ConstraintSystemItem::DerivedVariable(i) => constraint_system.derived_variables[i]\n            .computation_method\n            .substitute_by_unknown(variable, substitution),\n    }\n}\n\nimpl<T: RuntimeConstant + Display, V: Clone + Ord + Display + Hash> Display\n    for IndexedConstraintSystem<T, V>\n{\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.constraint_system)\n    }\n}\n\nimpl<T: RuntimeConstant, V: 
Hash + Eq + Clone + Ord, C: Into<IndexedConstraintSystem<T, V>>> From<C>\n    for IndexedConstraintSystemWithQueue<T, V>\n{\n    fn from(constraint_system: C) -> Self {\n        let constraint_system = constraint_system.into();\n        let queue = ConstraintSystemQueue::new(constraint_system.system());\n        Self {\n            constraint_system,\n            queue,\n        }\n    }\n}\n\nimpl<T, V> IndexedConstraintSystemWithQueue<T, V>\nwhere\n    T: RuntimeConstant + Substitutable<V>,\n    V: Clone + Ord + Hash,\n{\n    /// Returns a reference to the underlying indexed constraint system.\n    pub fn system(&self) -> &IndexedConstraintSystem<T, V> {\n        &self.constraint_system\n    }\n\n    /// Removes the next item from the queue and returns it.\n    pub fn pop_front<'a>(&'a mut self) -> Option<ConstraintRef<'a, T, V>> {\n        self.queue.pop_front().map(|item| {\n            item.try_to_constraint_ref(&self.constraint_system.constraint_system)\n                // Derived variables should never be in the queue.\n                .unwrap()\n        })\n    }\n\n    /// Notifies the system that a variable has been updated and causes all constraints\n    /// referencing that variable to be put back into the queue.\n    ///\n    /// Note that this function does not have to be called if the system is modified directly.\n    pub fn variable_updated(&mut self, variable: &V) {\n        if let Some(items) = self.constraint_system.variable_occurrences.get(variable) {\n            for item in items {\n                if !item.is_derived_variable() {\n                    self.queue.push(*item);\n                }\n            }\n        }\n    }\n\n    /// Substitutes a variable with a known value in the whole system.\n    /// This function also updates the queue accordingly.\n    ///\n    /// It does not delete the occurrence of `variable` so that it can be used to check\n    /// which constraints it used to occur in.\n    pub fn substitute_by_unknown(&mut self, 
variable: &V, substitution: &GroupedExpression<T, V>) {\n        self.constraint_system\n            .substitute_by_unknown(variable, substitution);\n        self.variable_updated(variable);\n    }\n\n    pub fn add_algebraic_constraints(\n        &mut self,\n        constraints: impl IntoIterator<Item = AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) {\n        let initial_len = self\n            .constraint_system\n            .constraint_system\n            .algebraic_constraints\n            .len();\n        self.constraint_system\n            .add_algebraic_constraints(constraints.into_iter().enumerate().map(|(i, c)| {\n                self.queue\n                    .push(ConstraintSystemItem::AlgebraicConstraint(initial_len + i));\n                c\n            }));\n    }\n\n    pub fn add_bus_interactions(\n        &mut self,\n        bus_interactions: impl IntoIterator<Item = BusInteraction<GroupedExpression<T, V>>>,\n    ) {\n        let initial_len = self\n            .constraint_system\n            .constraint_system\n            .bus_interactions\n            .len();\n        self.constraint_system\n            .add_bus_interactions(bus_interactions.into_iter().enumerate().map(|(i, c)| {\n                self.queue\n                    .push(ConstraintSystemItem::BusInteraction(initial_len + i));\n                c\n            }));\n    }\n\n    pub fn retain_algebraic_constraints(\n        &mut self,\n        mut f: impl FnMut(&AlgebraicConstraint<GroupedExpression<T, V>>) -> bool,\n    ) {\n        self.constraint_system.retain_algebraic_constraints(&mut f);\n        if !self.queue.queue.is_empty() {\n            // Removing items will destroy the indices, which is only safe if\n            // the queue is empty. 
Otherwise, we just put all items back into the queue.\n            self.queue = ConstraintSystemQueue::new(self.constraint_system.system());\n        }\n    }\n\n    pub fn retain_bus_interactions(\n        &mut self,\n        mut f: impl FnMut(&BusInteraction<GroupedExpression<T, V>>) -> bool,\n    ) {\n        self.constraint_system.retain_bus_interactions(&mut f);\n        if !self.queue.queue.is_empty() {\n            // Removing items will destroy the indices, which is only safe if\n            // the queue is empty. Otherwise, we just put all items back into the queue.\n            self.queue = ConstraintSystemQueue::new(self.constraint_system.system());\n        }\n    }\n}\n\nimpl<T: RuntimeConstant + Display, V: Clone + Ord + Display + Hash> Display\n    for IndexedConstraintSystemWithQueue<T, V>\n{\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.constraint_system)\n    }\n}\n\n/// The actual queue used in `IndexedConstraintSystemWithQueue`.\n///\n/// It keeps track that there are no duplicates in the queue by maintaining\n/// a flat bitvector of items in the queue.\n#[derive(Default, Clone)]\nstruct ConstraintSystemQueue {\n    queue: VecDeque<ConstraintSystemItem>,\n    in_queue: BitVec,\n}\n\nimpl ConstraintSystemQueue {\n    fn new<T, V>(constraint_system: &ConstraintSystem<T, V>) -> Self {\n        let num_algebraic = constraint_system.algebraic_constraints.len();\n        let num_bus = constraint_system.bus_interactions.len();\n        let queue = (0..num_algebraic)\n            .map(ConstraintSystemItem::AlgebraicConstraint)\n            .chain((0..num_bus).map(ConstraintSystemItem::BusInteraction))\n            .collect::<Vec<_>>()\n            .into();\n        // The maximum value of `item.flat_id()` is `2 * max(num_algebraic, num_bus) + 1`\n        let mut in_queue = BitVec::repeat(false, 2 * cmp::max(num_algebraic, num_bus) + 2);\n        for item in &queue {\n            let item: 
&ConstraintSystemItem = item;\n            in_queue.set(item.flat_constraint_id(), true);\n        }\n        Self { queue, in_queue }\n    }\n\n    fn push(&mut self, item: ConstraintSystemItem) {\n        assert!(!item.is_derived_variable());\n        if self.in_queue.len() <= item.flat_constraint_id() {\n            self.in_queue.resize(item.flat_constraint_id() + 1, false);\n        }\n        if !self.in_queue[item.flat_constraint_id()] {\n            self.queue.push_back(item);\n            self.in_queue.set(item.flat_constraint_id(), true);\n        }\n    }\n\n    fn pop_front(&mut self) -> Option<ConstraintSystemItem> {\n        let item = self.queue.pop_front();\n        if let Some(item) = &item {\n            self.in_queue.set(item.flat_constraint_id(), false);\n        }\n        item\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use powdr_number::GoldilocksField;\n\n    use crate::constraint_system::ComputationMethod;\n\n    use super::*;\n\n    fn format_system(s: &IndexedConstraintSystem<GoldilocksField, &'static str>) -> String {\n        format!(\n            \"{}  |  {}\",\n            s.algebraic_constraints().iter().format(\"  |  \"),\n            s.bus_interactions()\n                .iter()\n                .map(\n                    |BusInteraction {\n                         bus_id,\n                         payload,\n                         multiplicity,\n                     }| format!(\n                        \"{bus_id}: {multiplicity} * [{}]\",\n                        payload.iter().format(\", \")\n                    )\n                )\n                .format(\"  |  \")\n        )\n    }\n\n    #[test]\n    fn substitute_by_unknown() {\n        type Ge = GroupedExpression<GoldilocksField, &'static str>;\n        let x = Ge::from_unknown_variable(\"x\");\n        let y = Ge::from_unknown_variable(\"y\");\n        let z = Ge::from_unknown_variable(\"z\");\n        let mut s: IndexedConstraintSystem<_, _> = 
ConstraintSystem::default()\n            .with_constraints(vec![\n                x.clone() + y.clone(),\n                x.clone() - z.clone(),\n                y.clone() - z.clone(),\n            ])\n            .with_bus_interactions(vec![BusInteraction {\n                bus_id: x,\n                payload: vec![y.clone(), z],\n                multiplicity: y,\n            }])\n            .into();\n\n        s.substitute_by_unknown(&\"x\", &Ge::from_unknown_variable(\"z\"));\n\n        assert_eq!(\n            format_system(&s),\n            \"y + z = 0  |  0 = 0  |  y - z = 0  |  z: y * [y, z]\"\n        );\n\n        s.substitute_by_unknown(\n            &\"z\",\n            &(Ge::from_unknown_variable(\"x\") + Ge::from_number(GoldilocksField::from(7))),\n        );\n\n        assert_eq!(\n            format_system(&s),\n            \"x + y + 7 = 0  |  0 = 0  |  -(x - y + 7) = 0  |  x + 7: y * [y, x + 7]\"\n        );\n    }\n\n    #[test]\n    fn retain_update_index() {\n        type Ge = GroupedExpression<GoldilocksField, &'static str>;\n        let x = Ge::from_unknown_variable(\"x\");\n        let y = Ge::from_unknown_variable(\"y\");\n        let z = Ge::from_unknown_variable(\"z\");\n        let mut s: IndexedConstraintSystem<_, _> = ConstraintSystem::default()\n            .with_constraints(vec![\n                x.clone() + y.clone(),\n                x.clone() - z.clone(),\n                y.clone() - z.clone(),\n            ])\n            .with_bus_interactions(vec![\n                BusInteraction {\n                    bus_id: x.clone(),\n                    payload: vec![y.clone(), z],\n                    multiplicity: y,\n                },\n                BusInteraction {\n                    bus_id: x.clone(),\n                    payload: vec![x.clone(), x.clone()],\n                    multiplicity: x,\n                },\n            ])\n            .into();\n\n        s.retain_algebraic_constraints(|c| 
!c.referenced_unknown_variables().any(|v| *v == \"y\"));\n        s.retain_bus_interactions(|b| {\n            !b.fields()\n                .any(|e| e.referenced_unknown_variables().any(|v| *v == \"y\"))\n        });\n\n        assert_eq!(s.constraints_referencing_variables(&[\"y\"]).count(), 0);\n        let items_with_x = s\n            .constraints_referencing_variables(&[\"x\"])\n            .map(|c| match c {\n                ConstraintRef::AlgebraicConstraint(expr) => expr.to_string(),\n                ConstraintRef::BusInteraction(bus_interaction) => {\n                    format!(\n                        \"{}: {} * [{}]\",\n                        bus_interaction.bus_id,\n                        bus_interaction.multiplicity,\n                        bus_interaction.payload.iter().format(\", \")\n                    )\n                }\n            })\n            .format(\", \")\n            .to_string();\n        assert_eq!(items_with_x, \"x - z = 0, x: x * [x, x]\");\n\n        let items_with_z = s\n            .constraints_referencing_variables(&[\"z\"])\n            .map(|c| match c {\n                ConstraintRef::AlgebraicConstraint(expr) => expr.to_string(),\n                ConstraintRef::BusInteraction(bus_interaction) => {\n                    format!(\n                        \"{}: {} * [{}]\",\n                        bus_interaction.bus_id,\n                        bus_interaction.multiplicity,\n                        bus_interaction.payload.iter().format(\", \")\n                    )\n                }\n            })\n            .format(\", \")\n            .to_string();\n        assert_eq!(items_with_z, \"x - z = 0\");\n    }\n\n    #[test]\n    fn substitute_in_derived_columns() {\n        let mut system: IndexedConstraintSystem<_, _> = ConstraintSystem::<GoldilocksField, _> {\n            algebraic_constraints: vec![],\n            bus_interactions: vec![],\n            derived_variables: vec![\n                
DerivedVariable::new(\n                    \"d1\",\n                    ComputationMethod::QuotientOrZero(\n                        GroupedExpression::from_unknown_variable(\"x1\"),\n                        GroupedExpression::from_unknown_variable(\"x2\"),\n                    ),\n                ),\n                DerivedVariable::new(\n                    \"d2\",\n                    ComputationMethod::QuotientOrZero(\n                        GroupedExpression::from_unknown_variable(\"y1\"),\n                        GroupedExpression::from_unknown_variable(\"y2\"),\n                    ),\n                ),\n            ],\n        }\n        .into();\n        // We first substitute `y2` by an expression that contains `x1` such that when we\n        // substitute `x1` in the next step, `d2` has to be updated again.\n        system.substitute_by_unknown(\n            &\"y2\",\n            &(GroupedExpression::from_unknown_variable(\"x1\")\n                + GroupedExpression::from_number(7.into())),\n        );\n        system.substitute_by_known(&\"x1\", &1.into());\n        assert_eq!(\n            format!(\"{system}\"),\n            \"d1 := QuotientOrZero(1, x2)\\nd2 := QuotientOrZero(y1, 8)\"\n        );\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/inliner.rs",
    "content": "use crate::constraint_system::{AlgebraicConstraint, ConstraintRef};\nuse crate::grouped_expression::GroupedExpression;\nuse crate::indexed_constraint_system::IndexedConstraintSystem;\n\nuse itertools::Itertools;\nuse powdr_number::FieldElement;\nuse serde::{Deserialize, Serialize};\n\nuse std::collections::{BTreeMap, HashSet};\nuse std::fmt::Display;\nuse std::hash::Hash;\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub struct DegreeBound {\n    pub identities: usize,\n    pub bus_interactions: usize,\n}\n\n/// Reduce variables in the constraint system by inlining them,\n/// if the callback `should_inline` returns true.\n/// Returns the modified system and a list of inlined variables and their expressions.\npub fn replace_constrained_witness_columns<\n    T: FieldElement,\n    V: Ord + Clone + Hash + Eq + Display,\n>(\n    mut constraint_system: IndexedConstraintSystem<T, V>,\n    should_inline: impl Fn(&V, &GroupedExpression<T, V>, &IndexedConstraintSystem<T, V>) -> bool,\n) -> (\n    IndexedConstraintSystem<T, V>,\n    BTreeMap<V, GroupedExpression<T, V>>,\n) {\n    let mut to_remove_idx = HashSet::new();\n    let mut substitutions = BTreeMap::new();\n    let constraint_count = constraint_system.algebraic_constraints().len();\n    loop {\n        let inlined_vars_count = substitutions.len();\n        for curr_idx in (0..constraint_count).rev() {\n            let constraint = &constraint_system.algebraic_constraints()[curr_idx];\n\n            for (var, expr) in find_inlinable_variables(constraint) {\n                if should_inline(&var, &expr, &constraint_system) {\n                    log::trace!(\"Substituting {var} = {expr}\");\n                    log::trace!(\"  (from identity {constraint})\");\n\n                    constraint_system.substitute_by_unknown(&var, &expr);\n                    to_remove_idx.insert(curr_idx);\n                    substitutions.insert(var, expr);\n\n                    break;\n                }\n    
        }\n        }\n        if substitutions.len() == inlined_vars_count {\n            // No more variables to inline\n            break;\n        }\n    }\n\n    // remove inlined constraints from system\n    let mut counter = 0;\n    constraint_system.retain_algebraic_constraints(|_| {\n        let retain = !to_remove_idx.contains(&(counter));\n        counter += 1;\n        retain\n    });\n\n    // sanity check\n    assert!(constraint_system\n        .referenced_unknown_variables()\n        .all(|var| { !substitutions.contains_key(var) }));\n\n    (constraint_system, substitutions)\n}\n\n/// Returns an inlining discriminator that allows everything to be inlined as long as\n/// the given degree bound is not violated.\npub fn inline_everything_below_degree_bound<T: FieldElement, V: Ord + Clone + Hash + Eq>(\n    degree_bound: DegreeBound,\n) -> impl Fn(&V, &GroupedExpression<T, V>, &IndexedConstraintSystem<T, V>) -> bool {\n    move |var, expr, constraint_system| {\n        substitution_would_not_violate_degree_bound(var, expr, constraint_system, degree_bound)\n    }\n}\n\n/// Returns true if substituting `var` by `expr` inside `constraint_system` would\n/// not create new constraints with a degree larger than `degree_bound`\npub fn substitution_would_not_violate_degree_bound<T: FieldElement, V: Ord + Clone + Hash + Eq>(\n    var: &V,\n    expr: &GroupedExpression<T, V>,\n    constraint_system: &IndexedConstraintSystem<T, V>,\n    degree_bound: DegreeBound,\n) -> bool {\n    let replacement_deg = expr.degree();\n\n    constraint_system\n        .constraints_referencing_variables(std::iter::once(var))\n        .all(|cref| match cref {\n            ConstraintRef::AlgebraicConstraint(identity) => {\n                let degree = expression_degree_with_virtual_substitution(\n                    identity.expression,\n                    var,\n                    replacement_deg,\n                );\n                degree <= degree_bound.identities\n            }\n  
          ConstraintRef::BusInteraction(interaction) => interaction.fields().all(|expr| {\n                let degree =\n                    expression_degree_with_virtual_substitution(expr, var, replacement_deg);\n                degree <= degree_bound.bus_interactions\n            }),\n        })\n}\n\n/// Returns substitutions of variables that appear linearly and do not depend on themselves.\nfn find_inlinable_variables<T: FieldElement, V: Ord + Clone + Hash + Eq + Display>(\n    constraint: &AlgebraicConstraint<GroupedExpression<T, V>>,\n) -> Vec<(V, GroupedExpression<T, V>)> {\n    constraint\n        .expression\n        .linear_components()\n        .rev()\n        .filter_map(|(target_var, _)| {\n            let rhs_expr = constraint.as_ref().try_solve_for(target_var)?;\n            assert!(!rhs_expr.referenced_unknown_variables().contains(target_var));\n            Some((target_var.clone(), rhs_expr))\n        })\n        .collect()\n}\n\n/// Calculate the degree of a GroupedExpression assuming a variable is\n/// replaced by an expression of known degree.\nfn expression_degree_with_virtual_substitution<T: FieldElement, V: Ord + Clone + Eq>(\n    expr: &GroupedExpression<T, V>,\n    var: &V,\n    replacement_deg: usize,\n) -> usize {\n    let quadratic = expr.quadratic_components();\n    let linear = expr.linear_components();\n    quadratic\n        .iter()\n        .map(|(l, r)| {\n            expression_degree_with_virtual_substitution(l, var, replacement_deg)\n                + expression_degree_with_virtual_substitution(r, var, replacement_deg)\n        })\n        .chain(linear.map(|(v, _)| if v == var { replacement_deg } else { 1 }))\n        .max()\n        .unwrap_or(0)\n}\n\n#[cfg(test)]\nmod test {\n    use crate::constraint_system::{BusInteraction, ConstraintSystem};\n\n    use super::*;\n\n    use powdr_number::GoldilocksField;\n    use test_log::test;\n\n    pub fn var(name: &'static str) -> GroupedExpression<GoldilocksField, &'static str> {\n 
       GroupedExpression::from_unknown_variable(name)\n    }\n\n    pub fn constant(value: u64) -> GroupedExpression<GoldilocksField, &'static str> {\n        GroupedExpression::from_number(GoldilocksField::from(value))\n    }\n\n    fn bounds<T: FieldElement, V: Ord + Clone + Hash + Eq>(\n        identities: usize,\n        bus_interactions: usize,\n    ) -> impl Fn(&V, &GroupedExpression<T, V>, &IndexedConstraintSystem<T, V>) -> bool {\n        inline_everything_below_degree_bound(DegreeBound {\n            identities,\n            bus_interactions,\n        })\n    }\n\n    #[test]\n    fn test_no_substitution() {\n        let constraint_system = ConstraintSystem::default()\n            .with_constraints(vec![\n                var(\"a\") * var(\"b\") + var(\"c\") * var(\"d\"),\n                var(\"e\") * var(\"e\") - constant(2),\n            ])\n            .into();\n\n        let (constraint_system, _) =\n            replace_constrained_witness_columns(constraint_system, bounds(3, 3));\n        assert_eq!(constraint_system.algebraic_constraints().len(), 2);\n    }\n\n    #[test]\n    fn test_replace_witness_columns() {\n        // keep column result\n        let bus_interactions = vec![BusInteraction {\n            bus_id: constant(1),\n            payload: vec![var(\"0result\"), var(\"b\")],\n            multiplicity: constant(1),\n        }];\n\n        let constraint_system = ConstraintSystem::default()\n            .with_constraints(vec![\n                var(\"a\") + var(\"b\") + var(\"c\"),\n                var(\"b\") + var(\"d\") - constant(1),\n                var(\"c\") + var(\"b\") + var(\"a\") + var(\"d\") - var(\"0result\"),\n            ])\n            .with_bus_interactions(bus_interactions)\n            .into();\n\n        let (constraint_system, _) =\n            replace_constrained_witness_columns(constraint_system, bounds(3, 3));\n        // 1) a + b + c = 0        => a = -b - c\n        // 2) b + d - 1 = 0        => d = -b + 1\n        // 
3) c + b + a + d = result\n        //    =(1)=> c + b + (-b - c) + d\n        //         = (c - c) + (b - b) + d\n        //         = 0 + 0 + d\n        //    => result = d = -b + 1\n        //    => b = -result + 1\n        assert_eq!(constraint_system.algebraic_constraints().len(), 0);\n\n        let bus_interactions = constraint_system.bus_interactions();\n        let [BusInteraction { payload, .. }] = bus_interactions else {\n            panic!();\n        };\n        let [result, b] = payload.as_slice() else {\n            panic!();\n        };\n        assert_eq!(result.to_string(), \"0result\");\n        assert_eq!(b.to_string(), \"-(0result - 1)\");\n    }\n\n    #[test]\n    fn test_replace_witness_columns_with_multiplication() {\n        let mut identities = Vec::new();\n\n        // a * b = c\n        let constraint1 = var(\"c\") - var(\"a\") * var(\"b\");\n        identities.push(constraint1);\n\n        // b + d = 0\n        let constraint2 = var(\"b\") + var(\"d\");\n        identities.push(constraint2);\n\n        // a + b + c + d - result = 0\n        let expr = var(\"a\") + var(\"b\") + var(\"c\") + var(\"d\");\n        let expr_constraint = expr.clone() - var(\"result\");\n        identities.push(expr_constraint);\n\n        // keep column `result`\n        let bus_interactions = vec![BusInteraction {\n            bus_id: constant(1),\n            payload: vec![var(\"result\")],\n            multiplicity: constant(1),\n        }];\n\n        let constraint_system = ConstraintSystem::default()\n            .with_constraints(identities)\n            .with_bus_interactions(bus_interactions)\n            .into();\n\n        let (constraint_system, _) =\n            replace_constrained_witness_columns(constraint_system, bounds(3, 3));\n\n        let constraints = constraint_system.algebraic_constraints();\n        assert_eq!(constraints.len(), 0);\n    }\n\n    #[test]\n    fn test_replace_witness_columns_no_keep() {\n        let mut identities = 
Vec::new();\n\n        // a * b = c\n        let constraint1 = var(\"c\") - var(\"a\") * var(\"b\");\n        identities.push(constraint1);\n\n        // b + d = 0\n        let constraint2 = var(\"b\") + var(\"d\");\n        identities.push(constraint2);\n\n        // c * d = e\n        let constraint3 = var(\"e\") - var(\"c\") * var(\"d\");\n        identities.push(constraint3);\n\n        // a + b + c + d + e - result = 0\n        let expr = var(\"a\") + var(\"b\") + var(\"c\") + var(\"d\") + var(\"e\");\n        let expr_constraint = expr.clone() - var(\"result\");\n        identities.push(expr_constraint);\n\n        // no columns to keep\n        let constraint_system = ConstraintSystem::default()\n            .with_constraints(identities)\n            .into();\n\n        let (constraint_system, _) =\n            replace_constrained_witness_columns(constraint_system, bounds(3, 3));\n\n        let constraints = constraint_system.algebraic_constraints();\n        assert_eq!(constraints.len(), 0);\n    }\n\n    #[test]\n    fn test_replace_constrained_witness_suboptimal() {\n        // Keep x and result\n        let bus_interactions = vec![BusInteraction {\n            bus_id: constant(1),\n            payload: vec![var(\"result\"), var(\"x\")],\n            multiplicity: constant(1),\n        }];\n\n        let constraint_system = ConstraintSystem::default()\n            .with_constraints(vec![\n                var(\"y\") - (var(\"x\") + constant(3)),\n                var(\"z\") - (var(\"y\") + constant(2)),\n                var(\"result\") - (var(\"z\") + constant(1)),\n            ])\n            .with_bus_interactions(bus_interactions)\n            .into();\n\n        let (constraint_system, _) =\n            replace_constrained_witness_columns(constraint_system, bounds(3, 3));\n        // 1) y = x + 3\n        // 2) z = y + 2 ⇒ z = (x + 3) + 2 = x + 5\n        // 3) result = z + 1 ⇒ result = (x + 5) + 1 = x + 6\n        let bus_interactions = 
constraint_system.bus_interactions();\n        let [BusInteraction { payload, .. }] = bus_interactions else {\n            panic!();\n        };\n        let [result, x] = payload.as_slice() else {\n            panic!();\n        };\n        assert_eq!(result.to_string(), \"result\");\n        assert_eq!(x.to_string(), \"result - 6\");\n    }\n\n    #[test]\n    fn test_replace_constrained_witness_columns_max_degree_limit() {\n        let constraint_system = ConstraintSystem::default()\n            .with_constraints(vec![\n                var(\"a\") - (var(\"b\") + constant(1)),\n                var(\"c\") - (var(\"a\") * var(\"a\")),\n                var(\"d\") - (var(\"c\") * var(\"a\")),\n                var(\"e\") - (var(\"d\") * var(\"a\")),\n                var(\"f\") - (var(\"e\") + constant(5)),\n                var(\"result\") - (var(\"f\") * constant(2)),\n            ])\n            .with_bus_interactions(\n                // Get all variables\n                vec![BusInteraction {\n                    bus_id: constant(1),\n                    payload: vec![\n                        var(\"a\"),\n                        var(\"b\"),\n                        var(\"c\"),\n                        var(\"d\"),\n                        var(\"e\"),\n                        var(\"f\"),\n                        var(\"result\"),\n                    ],\n                    multiplicity: constant(1),\n                }],\n            )\n            .into();\n\n        let (constraint_system, _) =\n            replace_constrained_witness_columns(constraint_system, bounds(3, 3));\n\n        let constraints = constraint_system.algebraic_constraints();\n        let [identity] = constraints else {\n            panic!();\n        };\n        let bus_interactions = constraint_system.bus_interactions();\n        let [BusInteraction { payload, .. 
}] = bus_interactions else {\n            panic!();\n        };\n        let [a, b, c, d, e, f, result] = payload.as_slice() else {\n            panic!();\n        };\n        assert_eq!(a.to_string(), \"a\");\n        assert_eq!(b.to_string(), \"a - 1\");\n        // From second identity: c = a * a\n        // In-lining c would violate the degree bound, so it is kept as a symbol\n        // with a constraint to enforce the equality.\n        assert_eq!(c.to_string(), \"c\");\n        assert_eq!(identity.to_string(), \"-((a) * (a) - c) = 0\");\n        // From third identity: d = c * a\n        assert_eq!(d.to_string(), \"(c) * (a)\");\n        // From fourth identity: e = d * a\n        assert_eq!(e.to_string(), \"((c) * (a)) * (a)\");\n        // From fifth identity: f = e + 5\n        assert_eq!(f.to_string(), \"((c) * (a)) * (a) + 5\");\n        // From sixth identity: result = f * 2\n        assert_eq!(result.to_string(), \"((2 * c) * (a)) * (a) + 10\");\n    }\n\n    #[test]\n    fn test_inline_max_degree_suboptimal_greedy() {\n        // Show how constraint order affects optimization results\n\n        // Define the constraints in both orders\n        let mut optimal_order_identities = Vec::new();\n        let mut suboptimal_order_identities = Vec::new();\n\n        // a = b * b * b\n        let constraint1 = var(\"a\") - var(\"b\") * var(\"b\") * var(\"b\");\n        // b = c + d\n        let constraint2 = var(\"b\") - (var(\"c\") + var(\"d\"));\n        // a * c * c = 10\n        let constraint3 = var(\"a\") * var(\"c\") * var(\"c\") - constant(10);\n        // c = d * d\n        let constraint4 = var(\"c\") - var(\"d\") * var(\"d\");\n        // a + b + c + d = 100\n        let constraint5 = var(\"a\") + var(\"b\") + var(\"c\") + var(\"d\") - constant(100);\n\n        // Optimal order\n        optimal_order_identities.push(constraint1.clone()); // a = b * b * b\n        optimal_order_identities.push(constraint2.clone()); // b = c + d\n        
optimal_order_identities.push(constraint3.clone()); // a * c * c = 10\n        optimal_order_identities.push(constraint4.clone()); // c = d * d\n        optimal_order_identities.push(constraint5.clone()); // a + b + c + d = 100\n\n        // Suboptimal order\n        suboptimal_order_identities.push(constraint5.clone()); // a + b + c + d = 100\n        suboptimal_order_identities.push(constraint3.clone()); // a * c * c = 10\n        suboptimal_order_identities.push(constraint1.clone()); // a = b * b * b\n        suboptimal_order_identities.push(constraint2.clone()); // b = c + d\n        suboptimal_order_identities.push(constraint4.clone()); // c = d * d\n\n        let optimal_system = ConstraintSystem::default()\n            .with_constraints(optimal_order_identities)\n            .into();\n\n        let suboptimal_system = ConstraintSystem::default()\n            .with_constraints(suboptimal_order_identities)\n            .into();\n\n        // Apply the same optimization to both systems\n        let (optimal_system, _) = replace_constrained_witness_columns(optimal_system, bounds(5, 5));\n\n        let (suboptimal_system, _) =\n            replace_constrained_witness_columns(suboptimal_system, bounds(5, 5));\n\n        // Assert the difference in optimization results\n        assert_eq!(optimal_system.algebraic_constraints().len(), 3);\n        assert_eq!(suboptimal_system.algebraic_constraints().len(), 4);\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/lib.rs",
    "content": "//! Tooling used for analysis and solving of constraints.\n\npub mod algebraic_constraint;\npub mod bus_interaction_handler;\npub mod constraint_system;\npub mod effect;\npub mod grouped_expression;\npub mod indexed_constraint_system;\npub mod inliner;\npub mod range_constraint;\npub mod reachability;\npub mod rule_based_optimizer;\npub mod runtime_constant;\npub mod solver;\npub mod symbolic_expression;\npub mod system_splitter;\npub mod test_utils;\npub mod utils;\npub mod variable_update;\n"
  },
  {
    "path": "constraint-solver/src/range_constraint.rs",
    "content": "use std::fmt::{Debug, Display, Formatter};\nuse std::{cmp, ops};\n\nuse num_traits::Zero;\n\nuse powdr_number::{log2_exact, FieldElement, LargeInt};\n\n/// In an abstract way, a RangeConstraint is just a set of values. It is mainly used to\n/// combine the effects of multiple AlgebraicConstraints on the same variable.\n///\n/// Currently, we can represent interval ranges (both \"wrapping\" and \"non-wrapping\" ones)\n/// and bit masks. The actual constraint is the conjunction of the two.\n///\n/// The idea behind wrapping intervals is that we want to represent both signed and\n/// unsigned numbers. Furthermore, by supporting wrapping intervals we do not lose\n/// any information when adding or subtracting constants.\n///\n/// The semantics and correctness of RangeConstraints is mainly defined by the following notion:\n///\n/// We say a RangeConstraint `r` on an expression `e` is `valid` in a ConstraintSystem\n/// if for every satisfying assignment of the ConstraintSystem, the value of `e`\n/// under this assignment is allowed by `r`.\n///\n/// All the operations on RangeConstraints (like combine_sum, conjunction, ...) preserve\n/// validity, i.e. if we have an expression `e1 + e2` and we know that `r1` is a valid\n/// RangeConstraint for `e1` and `r2` is a valid RangeConstraint for `e2`, then\n/// the result of `r1.combine_sum(r2)` is a valid RangeConstraint for `e1 + e2`.\n///\n/// In particular, a fully unconstrained RangeConstraint is always valid for every expression.\n/// In this way, range constraints are an over-approximation, i.e. they can be less strict\n/// than the expressions they model. They might allow a value that is actually not\n/// possible, but if the range constraint disallows a value, this value is definitely\n/// not possible. This is consistent because e.g. 
an algebraic constraint in isolation\n/// also over-approximates in contrast to this constraint being in the context\n/// of the full system.\n///\n/// Finally, please be aware that the same constraint can have multiple representations.\n#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]\npub struct RangeConstraint<T: FieldElement> {\n    /// Bit-mask. A value `x` is allowed only if `x & mask == x` (when seen as unsigned integer).\n    mask: T::Integer,\n    /// Min-max inclusive range. Note that `max` can be smaller than `min`. In this case the range wraps.\n    /// If min <= max (seen as unsigned integers), then the constraint on `x` is `min <= x && x <= max`.\n    /// If min > max, then the constraint is `min <= x || x <= max`.\n    min: T,\n    max: T,\n}\n\nimpl<T: FieldElement> RangeConstraint<T> {\n    /// Constraint that allows no higher bits set than the one given\n    /// (counting from zero).\n    pub fn from_max_bit(max_bit: usize) -> Self {\n        Self::from_mask(mask_from_bits::<T>(max_bit + 1))\n    }\n\n    /// Constraint that forces several bits to be set to zero.\n    /// Semantics: x & mask == x\n    pub fn from_mask<M: Into<T::Integer>>(mask: M) -> Self {\n        let mask = mask.into();\n        let max = T::from(cmp::min(mask, (T::from(-1)).to_integer()));\n        Self {\n            mask,\n            min: T::zero(),\n            max,\n        }\n    }\n\n    /// Constraint that only allows this exact value.\n    pub fn from_value(value: T) -> Self {\n        Self {\n            mask: value.to_integer(),\n            min: value,\n            max: value,\n        }\n    }\n\n    /// Constraint that allows the values `min`, `min + 1`, ..., `max`.\n    /// Since this sequence can wrap around the field modulus, it means that\n    /// `x` is allowed if and only if:\n    /// - min <= x && x <= max  if min <= max\n    /// - min <= x || x <= max  if min > max,\n    #[inline]\n    pub fn from_range(min: T, max: T) -> Self {\n        
let mask = if min <= max {\n            mask_from_bits::<T>(max.to_integer().num_bits())\n        } else {\n            Self::unconstrained().mask\n        };\n        Self { mask, min, max }\n    }\n\n    /// Returns a constraint that allows any value.\n    pub fn unconstrained() -> Self {\n        Self::from_range(T::from(0), T::from(-1))\n    }\n\n    /// Returns true if the range constraint does not impose any\n    /// restrictions on the values.\n    pub fn is_unconstrained(&self) -> bool {\n        let un = Self::unconstrained();\n        self.range_width() == un.range_width() && (self.mask & un.mask) == un.mask\n    }\n\n    /// Returns a bit mask. This might be drastically under-fitted in case\n    /// the constraint is more resembling an interval.\n    /// Semantics: X & mask == X holds for all possible values of X.\n    pub fn mask(&self) -> &T::Integer {\n        &self.mask\n    }\n\n    /// Returns the interval part [min..=max] of the Range Constraint.\n    /// Note that `max` can be smaller than `min`. 
In this case the range wraps.\n    /// Semantics, with (min, max) = range():\n    /// If min <= max, this means min <= x && x <= max.\n    /// If min > max, this means min <= x || x <= max.\n    pub fn range(&self) -> (T, T) {\n        (self.min, self.max)\n    }\n\n    /// Returns the number of elements between the min and the max value, disregarding the mask and\n    /// potentially other constraints.\n    pub fn range_width(&self) -> T::Integer {\n        range_width(self.min, self.max)\n    }\n\n    /// Returns (an upper bound for) the number of field elements included in the constraint.\n    pub fn size_estimate(&self) -> T::Integer {\n        self.range_width()\n    }\n\n    pub fn allows_value(&self, v: T) -> bool {\n        let in_range = if self.min <= self.max {\n            self.min <= v && v <= self.max\n        } else {\n            self.min <= v || v <= self.max\n        };\n        let in_mask = v.to_integer() & self.mask == v.to_integer();\n        in_range && in_mask\n    }\n\n    /// The range constraint of the sum of two expressions:\n    /// If `r1` is a valid RangeConstraint for `e1` and `r2` is a valid RangeConstraint for `e2`,\n    /// then `r1.combine_sum(r2)` is a valid RangeConstraint for `e1 + e2`.\n    pub fn combine_sum(&self, other: &Self) -> Self {\n        let unconstrained = Self::unconstrained();\n        // TODO we could use \"add_with_carry\" to see if this created an overflow.\n        // it might even be enough to check if certain bits are set in the masks.\n        let mut mask = if self.mask.to_arbitrary_integer() + other.mask.to_arbitrary_integer()\n            >= T::modulus().to_arbitrary_integer()\n        {\n            unconstrained.mask\n        } else {\n            // This could be made stricter.\n            (self.mask + other.mask) | self.mask | other.mask\n        };\n\n        let (min, max) = if self.range_width().to_arbitrary_integer()\n            + other.range_width().to_arbitrary_integer()\n            <= 
unconstrained.range_width().to_arbitrary_integer()\n        {\n            (self.min + other.min, self.max + other.max)\n        } else {\n            unconstrained.range()\n        };\n        if min <= max {\n            mask &= Self::from_range(min, max).mask;\n        }\n        Self { min, max, mask }\n    }\n\n    /// The range constraint of the product of two expressions:\n    /// If `r1` is a valid RangeConstraint for `e1` and `r2` is a valid RangeConstraint for `e2`,\n    /// then `r1.combine_product(r2)` is a valid RangeConstraint for `e1 * e2`.\n    pub fn combine_product(&self, other: &Self) -> Self {\n        if let Some(v) = other.try_to_single_value() {\n            self.multiple(v)\n        } else if let Some(v) = self.try_to_single_value() {\n            other.multiple(v)\n        } else if self.min <= self.max\n            && other.min <= other.max\n            && self.max.to_arbitrary_integer() * other.max.to_arbitrary_integer()\n                < T::modulus().to_arbitrary_integer()\n        {\n            Self::from_range(self.min * other.min, self.max * other.max)\n        } else {\n            Self::unconstrained()\n        }\n    }\n\n    /// If `Self` is a valid range constraint on an expression `e`, returns\n    /// a valid range constraint for `e * e`.\n    pub fn square(&self) -> Self {\n        if self.min > self.max {\n            // If we have \"negative\" values, make sure that the square\n            // is non-negative.\n            let max_abs = std::cmp::max(-self.min, self.max);\n            if max_abs.to_arbitrary_integer() * max_abs.to_arbitrary_integer()\n                < T::modulus().to_arbitrary_integer()\n            {\n                return Self::from_range(T::zero(), max_abs * max_abs);\n            }\n        }\n\n        self.combine_product(self)\n    }\n\n    /// Returns the conjunction of this constraint and the other.\n    /// This operation is not lossless, but if `r1` and `r2` allow\n    /// a value `x`, then 
`r1.conjunction(r2)` also allows `x`.\n    /// Furthermore, if `r1` and `r2` are valid RangeConstraints for\n    /// the same expression `e`, then `r1.conjunction(r2)` is also a valid\n    /// RangeConstraint for `e`.\n    pub fn conjunction(&self, other: &Self) -> Self {\n        let mut mask = self.mask & other.mask;\n        // We might lose information because the intersection of two potentially wrapping\n        // intervals can be more than one (potentially wrapping) intervals.\n        let (mut min, mut max) =\n            interval_intersection((self.min, self.max), (other.min, other.max))\n                .unwrap_or((0.into(), 0.into()));\n\n        // Now try to derive better values for the mask from the new range\n        // and vice-versa.\n        if mask < T::modulus() {\n            if min <= max {\n                // If we adjust both min and max, the right way could be\n                // to have an empty range. On the other hand, this should not\n                // be incorrect.\n                min = cmp::min(mask.into(), min);\n                max = cmp::min(mask.into(), max);\n            } else if min.to_integer() > mask {\n                min = T::zero();\n                max = cmp::min(mask.into(), max);\n            } else {\n                // max < min <= mask\n                // the proper intersection here cannot always be represented by\n                // a single interval. 
Let's just leave it as it is.\n            }\n        }\n        if min <= max {\n            mask &= Self::from_range(min, max).mask;\n        }\n\n        Self { min, max, mask }\n    }\n\n    /// Returns the disjunction of this constraint and the other.\n    /// This operation is not lossless, but if `r1` or `r2` allow\n    /// a value `x`, then `r1.disjunction(r2)` also allows `x`.\n    /// Furthermore, if `r1` OR `r2` is a valid RangeConstraint for\n    /// the same expression `e`, then `r1.disjunction(r2)` is a valid\n    /// RangeConstraint for `e`.\n    pub fn disjunction(&self, other: &Self) -> Self {\n        let mask = self.mask | other.mask;\n        match (self.min <= self.max, other.min <= other.max) {\n            (true, true) => Self {\n                min: cmp::min(self.min, other.min),\n                max: cmp::max(self.max, other.max),\n                mask,\n            },\n            (true, false) | (false, true) => {\n                // These cases are too complicated - we could refine them in the future.\n                Self::from_mask(mask)\n            }\n            (false, false) => {\n                let min = cmp::min(self.min, other.min);\n                let max = cmp::max(self.max, other.max);\n                if min <= max {\n                    // The ranges cover the full field.\n                    Self::from_mask(mask)\n                } else {\n                    Self { min, max, mask }\n                }\n            }\n        }\n    }\n\n    /// The constraint of an integer multiple of an expression.\n    /// If `r` is a valid RangeConstraint for `e`, then `r.multiple(factor)`\n    /// is a valid RangeConstraint for `factor * e`.\n    pub fn multiple(&self, factor: T) -> Self {\n        let mask = log2_exact(factor.to_arbitrary_integer()).and_then(|exponent| {\n            (self.mask.to_arbitrary_integer() << exponent < T::modulus().to_arbitrary_integer())\n                .then(|| self.mask << exponent)\n        });\n   
     let (min, max) = if factor.is_in_lower_half() {\n            range_multiple(self.min, self.max, factor)\n        } else {\n            range_multiple(-self.max, -self.min, -factor)\n        };\n        Self {\n            min,\n            max,\n            mask: mask.unwrap_or_else(|| Self::from_range(min, max).mask),\n        }\n    }\n\n    /// If only a single value satisfies this condition, returns this value.\n    pub fn try_to_single_value(&self) -> Option<T> {\n        if self.min == self.max && self.min.to_integer() & self.mask == self.min.to_integer() {\n            Some(self.min)\n        } else {\n            None\n        }\n    }\n\n    /// If this function returns true, then no value can satisfy both range constraints at the same time.\n    /// If it returns false, this might also be the case, but we cannot be sure.\n    pub fn is_disjoint(&self, other: &RangeConstraint<T>) -> bool {\n        // True if the intersection allows zero.\n        let zero_allowed = self.allows_value(T::zero()) && other.allows_value(T::zero());\n        // True if the intersection is empty when looking at the masks (and zero) only.\n        let masks_disjoint = !zero_allowed && (self.mask & other.mask).is_zero();\n        // True if the intersection is empty when looking at ranges only.\n        let intervals_disjoint =\n            interval_intersection((self.min, self.max), (other.min, other.max)).is_none();\n        masks_disjoint || intervals_disjoint\n    }\n\n    /// Returns the allowed values of this range constraint.\n    /// Panics if the range width is larger than 2^32 (in which case you\n    /// probably don't want to call this function).\n    pub fn allowed_values(&self) -> impl Iterator<Item = T> + '_ {\n        (0..self.range_width().try_into_u32().unwrap())\n            .map(move |offset| self.min + T::from(offset))\n            .filter(|value| self.allows_value(*value))\n    }\n}\n\nimpl<T: FieldElement> Default for RangeConstraint<T> {\n    fn 
default() -> Self {\n        Self::unconstrained()\n    }\n}\n\n/// The number of elements in an (inclusive) min/max range.\n/// Works both if min is smaller than max and if it is larger (the inverted interval).\nfn range_width<T: FieldElement>(min: T, max: T) -> T::Integer {\n    if max + T::one() == min {\n        T::modulus()\n    } else {\n        (max - min + T::one()).to_integer()\n    }\n}\n\n#[inline]\nfn mask_from_bits<T: FieldElement>(bits: usize) -> T::Integer {\n    if bits == 0 {\n        T::Integer::zero()\n    } else {\n        let max = !T::Integer::zero();\n        let max_bits = T::Integer::NUM_BITS;\n        assert!(bits <= max_bits);\n        max >> (max_bits - bits)\n    }\n}\n\n/// If an expression `x` is in the range `[min, max]`, returns\n/// a range `[min', max']` such that `factor * x` is in that range.\n///\n/// Inverted ranges are possible for both the input and the output.\nfn range_multiple<T: FieldElement>(min: T, max: T, factor: T) -> (T, T) {\n    // This is correct by iterated addition.\n    if range_width(min, max).to_arbitrary_integer() * factor.to_arbitrary_integer()\n        <= T::modulus().to_arbitrary_integer()\n    {\n        (min * factor, max * factor)\n    } else {\n        // The range that allows all values\n        (T::one(), T::zero())\n    }\n}\n\n/// Computes the intersection of two intervals.\n/// There are cases where the intersection cannot be represented as a single interval.\n/// In that case, it returns the smaller of the two inputs (which is a correct\n/// range constraint in the sense that they can always be over-approximations,\n/// but it loses some information).\n/// If the intersection is empty, returns None.\nfn interval_intersection<T: FieldElement>(a: (T, T), b: (T, T)) -> Option<(T, T)> {\n    // We shift both intervals until they are both non-wrapping intervals.\n    // If we do not succeed after shifting both of them by the smallest amount,\n    // it means that the intersection cannot be 
expressed as a single interval.\n    // In that case we just choose the smaller of the two inputs.\n    match [a.0, b.0].into_iter().find_map(|shift| {\n        let a_shifted = shifted_interval(a, -shift);\n        let b_shifted = shifted_interval(b, -shift);\n        (a_shifted.0 <= a_shifted.1 && b_shifted.0 <= b_shifted.1)\n            .then_some((shift, (a_shifted, b_shifted)))\n    }) {\n        Some((shift, (a_shifted, b_shifted))) => {\n            let intersection = (\n                cmp::max(a_shifted.0, b_shifted.0),\n                cmp::min(a_shifted.1, b_shifted.1),\n            );\n            // If min is larger than max, the intersection is empty.\n            (intersection.0 <= intersection.1).then_some(shifted_interval(intersection, shift))\n        }\n        None => {\n            // The intersection consists of two intervals. We cannot represent that,\n            // so we return the smaller of the input intervals.\n            if range_width(a.0, a.1) <= range_width(b.0, b.1) {\n                Some(a)\n            } else {\n                Some(b)\n            }\n        }\n    }\n}\n\nfn shifted_interval<T: FieldElement>((min, max): (T, T), shift: T) -> (T, T) {\n    (min + shift, max + shift)\n}\n\nimpl<T: FieldElement> ops::Neg for RangeConstraint<T> {\n    type Output = Self;\n\n    fn neg(self) -> Self::Output {\n        let (min, max) = self.range();\n        Self::from_range(-max, -min)\n    }\n}\n\nimpl<T: FieldElement> Display for RangeConstraint<T> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"[{}, {}] & 0x{:x}\",\n            format_negated(self.min),\n            format_negated(self.max),\n            self.mask()\n        )\n    }\n}\n\nfn format_negated<T: FieldElement>(value: T) -> String {\n    if value.is_in_lower_half() {\n        value.to_string()\n    } else {\n        format!(\"-{}\", -value)\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use 
itertools::Itertools;\n    use powdr_number::{BabyBearField, GoldilocksField};\n    use pretty_assertions::assert_eq;\n\n    use super::*;\n\n    type RCg = RangeConstraint<GoldilocksField>;\n\n    #[test]\n    fn from_max_bit() {\n        assert_eq!(*RCg::from_max_bit(0).mask(), 1u64.into());\n        assert_eq!(*RCg::from_max_bit(1).mask(), 3u64.into());\n        assert_eq!(*RCg::from_max_bit(63).mask(), (u64::MAX).into());\n    }\n\n    #[test]\n    fn from_value() {\n        assert_eq!(\n            RCg::from_value(9.into()),\n            RCg {\n                min: 9.into(),\n                max: 9.into(),\n                mask: 9u32.into()\n            }\n        );\n    }\n\n    #[test]\n    fn from_range() {\n        assert_eq!(\n            RCg::from_range(3.into(), 9.into()),\n            RCg {\n                min: 3.into(),\n                max: 9.into(),\n                mask: 15u32.into()\n            }\n        );\n        assert_eq!(\n            RCg::from_range(9.into(), 3.into()),\n            RCg {\n                min: 9.into(),\n                max: 3.into(),\n                mask: u64::MAX.into()\n            }\n        );\n    }\n\n    #[test]\n    fn range_width() {\n        assert_eq!(RCg::from_value(7.into()).range_width(), 1u32.into());\n        assert_eq!(\n            RCg::from_range(3.into(), 7.into()).range_width(),\n            5u32.into()\n        );\n        assert_eq!(\n            RCg::from_range(8.into(), 2.into()).range_width(),\n            // This is the range above, just inverted.\n            // So we should have the whole field minus five.\n            GoldilocksField::from(-5).to_integer()\n        );\n        assert_eq!(\n            RCg::from_mask(0xf00fu32).range_width(),\n            (0xf00fu32 + 1).into()\n        );\n    }\n\n    #[test]\n    fn combine_sum() {\n        assert_eq!(\n            RCg::from_range(3.into(), 7.into())\n                .combine_sum(&RCg::from_range(15.into(), 300.into())),\n            
RCg {\n                min: 18.into(),\n                max: 307.into(),\n                mask: 511u32.into()\n            }\n        );\n        assert_eq!(\n            RCg::from_mask(0x1100u32).combine_sum(&RCg::from_mask(0xffu32)),\n            RCg {\n                min: 0.into(),\n                max: 0x11ffu32.into(),\n                mask: 0x11ffu32.into()\n            }\n        );\n        assert_eq!(\n            RCg::from_mask(0x1110u32).combine_sum(&RCg::from_mask(0xffu32)),\n            RCg {\n                min: 0.into(),\n                max: 0x120fu32.into(),\n                mask: 0x13ffu32.into()\n            }\n        );\n\n        // Test overflow of masks. Modulus is: 0xffffffff00000001\n        assert!(RCg::from_mask(0xefffffff00000001u64)\n            .combine_sum(&RCg::from_mask(0x7ffffffff0000000u64))\n            .is_unconstrained());\n    }\n\n    #[test]\n    fn combine_sum_around_modulus() {\n        let modulus = 0xffffffff00000001u64;\n        // Test min-max range width around modulus\n        let half_modulus_range = RCg::from_range(7.into(), (modulus / 2 + 6).into());\n        assert_eq!(\n            half_modulus_range.range_width() + half_modulus_range.range_width() + 1u32.into(),\n            modulus.into(),\n        );\n\n        // Sum of range widths is one less than modulus.\n        assert_eq!(\n            half_modulus_range.combine_sum(&half_modulus_range),\n            RCg {\n                min: 14.into(),\n                max: 11.into(), // (modulus - 1) / 2 * 2 + 12 - modulus = 11\n                mask: u64::MAX.into(),\n            }\n        );\n\n        // Sum of range widths is equal to modulus.\n        let two_range = RCg::from_range(50.into(), 51.into());\n        let half_modulus_plus_one_range = half_modulus_range.combine_sum(&two_range);\n        assert_eq!(\n            half_modulus_range.range_width() + half_modulus_plus_one_range.range_width(),\n            modulus.into(),\n        );\n        
assert_eq!(\n            half_modulus_range.combine_sum(&half_modulus_plus_one_range),\n            RCg {\n                min: 64.into(),\n                max: 62.into(),\n                mask: u64::MAX.into(),\n            }\n        );\n\n        // Sum of range widths is larger than modulus.\n        let two_range = RCg::from_range(50.into(), 51.into());\n        let half_modulus_plus_one_range = half_modulus_range.combine_sum(&two_range);\n        assert!(half_modulus_range\n            .combine_sum(&half_modulus_plus_one_range.combine_sum(&two_range))\n            .is_unconstrained());\n    }\n\n    #[test]\n    fn mul_add() {\n        let a = RangeConstraint::<GoldilocksField>::from_mask(0x1u32);\n        let b = RangeConstraint::from_mask(0xffu32);\n        let c = a.multiple(512.into()).combine_sum(&b);\n        assert_eq!(c, RangeConstraint::from_mask(0x2ff_u32));\n        let d = a.multiple(-GoldilocksField::from(1)).combine_sum(&b);\n        assert_eq!(\n            d,\n            RangeConstraint::from_range(-GoldilocksField::from(1), 0xff.into())\n        );\n    }\n\n    #[test]\n    fn multiple_negative() {\n        let a: RangeConstraint<GoldilocksField> = RangeConstraint::from_range(0.into(), 12.into());\n        assert_eq!(*a.mask(), 0xfu32.into());\n        let b = a.multiple((-3).into());\n        assert_eq!(*b.mask(), u64::MAX.into());\n        assert_eq!(b.range(), (-GoldilocksField::from(36), 0.into()));\n    }\n\n    #[test]\n    fn multiple_overflow() {\n        let modulus = 0xffffffff00000001u64;\n        // Test min-max range width around modulus\n        let max_value = (modulus / 4 + 6).into();\n        let a = RCg::from_range(7.into(), max_value);\n        assert!(\n            a.range_width().to_arbitrary_integer()\n                * GoldilocksField::from(4u32).to_arbitrary_integer()\n                <= GoldilocksField::modulus().to_arbitrary_integer()\n        );\n        assert!(\n            
a.range_width().to_arbitrary_integer()\n                * GoldilocksField::from(5u32).to_arbitrary_integer()\n                > GoldilocksField::modulus().to_arbitrary_integer()\n        );\n        assert_eq!(\n            a.multiple(4.into()),\n            RangeConstraint {\n                min: 28.into(),\n                max: max_value * GoldilocksField::from(4),\n                mask: u64::MAX.into()\n            }\n        );\n        assert_eq!(\n            a.multiple(5.into()),\n            RangeConstraint {\n                min: 1.into(),\n                max: 0.into(),\n                mask: u64::MAX.into()\n            }\n        );\n    }\n\n    #[test]\n    fn combinations() {\n        let a: RangeConstraint<GoldilocksField> = RangeConstraint::from_max_bit(7);\n        assert_eq!(a, RangeConstraint::from_mask(0xff_u32));\n        let b = a.multiple(256.into());\n        assert_eq!(b, RangeConstraint::from_mask(0xff00_u32));\n        assert_eq!(b.combine_sum(&a), RangeConstraint::from_mask(0xffff_u32));\n    }\n\n    #[test]\n    fn weird_combinations() {\n        let a: RangeConstraint<GoldilocksField> = RangeConstraint::from_mask(0xf00f_u32);\n        let b = a.multiple(256.into());\n        assert_eq!(b, RangeConstraint::from_mask(0xf00f00_u32));\n        assert_eq!(b.combine_sum(&a), RangeConstraint::from_mask(0xf0ff0f_u32));\n    }\n\n    #[test]\n    fn interval_intersections() {\n        type F = GoldilocksField;\n        fn commutativity_test(a: (F, F), b: (F, F)) -> Option<(F, F)> {\n            let direct = interval_intersection(a, b);\n            let inverse = interval_intersection(b, a);\n            assert_eq!(direct, inverse);\n\n            direct\n        }\n\n        // Plain, no wrapping:\n\n        // a is contained in b\n        {\n            let a = (50.into(), 60.into());\n            assert_eq!(commutativity_test(a, (10.into(), 100.into())), Some(a));\n        }\n\n        // a has an intersection with b\n        assert_eq!(\n  
          commutativity_test((10.into(), 60.into()), (40.into(), 100.into())),\n            Some((40.into(), 60.into()))\n        );\n\n        // a and b does not intersect\n        assert_eq!(\n            commutativity_test((10.into(), 40.into()), (60.into(), 100.into())),\n            None\n        );\n\n        // Wrapping intervals:\n\n        // a intersects with b both at the beginning and at the end\n        // (should return the smallest of the two ranges)\n        {\n            let a = (10.into(), 100.into());\n            assert_eq!(commutativity_test(a, (90.into(), 20.into())), Some(a));\n        }\n\n        // a intersects with the beginning of b, and almost intersects with the end\n        assert_eq!(\n            commutativity_test((21.into(), 100.into()), (90.into(), 20.into())),\n            Some((90.into(), 100.into()))\n        );\n\n        // a intersects with the end of b, and almost intersects with the beginning\n        assert_eq!(\n            commutativity_test((10.into(), 89.into()), (90.into(), 20.into())),\n            Some((10.into(), 20.into()))\n        );\n\n        // an intersection that contains zero\n        assert_eq!(\n            commutativity_test((F::from(-50), 10.into()), (F::from(-10), 50.into())),\n            Some((F::from(-10), 10.into()))\n        );\n\n        // a intersects with b right before zero\n        assert_eq!(\n            commutativity_test((F::from(-50), F::from(-10)), (F::from(-20), 20.into())),\n            Some((F::from(-20), F::from(-10)))\n        );\n\n        // a intersects with b right after zero\n        assert_eq!(\n            commutativity_test((10.into(), 50.into()), (F::from(-20), 20.into())),\n            Some((10.into(), 20.into()))\n        );\n\n        // a is contained in b, both contains 0\n        {\n            let a = (F::from(-20), 20.into());\n            assert_eq!(commutativity_test(a, (F::from(-50), 90.into())), Some(a));\n        }\n\n        // a is contained in b 
before 0\n        {\n            let a = (F::from(-20), F::from(-10));\n            assert_eq!(commutativity_test(a, (F::from(-50), 90.into())), Some(a));\n        }\n\n        // a is contained in b after 0\n        {\n            let a = (10.into(), 20.into());\n            assert_eq!(commutativity_test(a, (F::from(-50), 90.into())), Some(a));\n        }\n    }\n\n    #[test]\n    fn allows_value() {\n        type F = GoldilocksField;\n        let a = RangeConstraint::<F>::from_range(20.into(), 10.into());\n        assert!(a.allows_value(5.into()));\n        assert!(a.allows_value(10.into()));\n        assert!(!a.allows_value(15.into()));\n        assert!(a.allows_value(20.into()));\n        assert!(a.allows_value(25.into()));\n        let b = RangeConstraint::<F>::from_range(10.into(), 20.into());\n        assert!(!b.allows_value(5.into()));\n        assert!(b.allows_value(10.into()));\n        assert!(b.allows_value(15.into()));\n        assert!(b.allows_value(20.into()));\n        assert!(!b.allows_value(25.into()));\n    }\n\n    #[test]\n    fn conjunction() {\n        // This mostly tests the refinement of the bounds from min-max to mask and vice-versa.\n\n        type F = GoldilocksField;\n        let x = RangeConstraint::<F>::from_range(100000.into(), 70.into())\n            .conjunction(&RangeConstraint::from_mask(0xfffu32));\n        assert_eq!(\n            x,\n            RangeConstraint {\n                min: 0.into(),\n                max: 70.into(),\n                mask: 127u32.into(), // This mask is refined from the max value\n            },\n        );\n\n        let y = RangeConstraint::<F>::from_mask(0xfff000u32)\n            .conjunction(&RangeConstraint::from_mask(0xff00u32));\n        assert_eq!(\n            y,\n            RangeConstraint {\n                min: 0.into(),\n                max: 0xf000u32.into(), // this max value is derived from the mask.\n                mask: 0xf000u32.into(),\n            },\n        );\n    }\n\n    
#[test]\n    fn disjunction() {\n        type F = GoldilocksField;\n        let a = RangeConstraint::<F>::from_range(20.into(), 10.into());\n        let b = RangeConstraint::<F>::from_range(30.into(), 15.into());\n        let d = a.disjunction(&b);\n        assert!(d.allows_value(5.into()));\n        assert!(d.allows_value(10.into()));\n        assert!(d.allows_value(15.into()));\n        assert!(!d.allows_value(18.into()));\n        assert!(d.allows_value(20.into()));\n        assert!(d.allows_value(25.into()));\n    }\n\n    #[test]\n    fn disjunction_combinations() {\n        type F = GoldilocksField;\n        let lower = [10, 10000, 100060];\n        let upper = [20, 10006, 100070];\n        let test = [\n            5, 10, 15, 20, 900, 10000, 10004, 10006, 10010, 100055, 100060, 100065, 100070, 100075,\n        ]\n        .iter()\n        .map(|t| F::from(*t))\n        .collect_vec();\n        for (l1, u1) in lower.iter().cartesian_product(upper.iter()) {\n            for (l2, u2) in lower.iter().cartesian_product(upper.iter()) {\n                let a = RangeConstraint::<F>::from_range((*l1).into(), (*u1).into());\n                let b = RangeConstraint::<F>::from_range((*l2).into(), (*u2).into());\n                let c = a.disjunction(&b);\n                for t in &test {\n                    // Range constraints are allowed to be less strict, so we can only test one direction.\n                    if !c.allows_value(*t) {\n                        assert!(!a.allows_value(*t) || !b.allows_value(*t));\n                    }\n                }\n            }\n        }\n    }\n\n    #[test]\n    fn is_disjoint() {\n        type F = GoldilocksField;\n        let a = RangeConstraint::<F>::from_range(10.into(), 20.into());\n        let b = RangeConstraint::<F>::from_range(20.into(), 30.into());\n        assert!(!a.is_disjoint(&b));\n        let b = RangeConstraint::<F>::from_range(21.into(), 30.into());\n        assert!(a.is_disjoint(&b));\n        let b = 
RangeConstraint::<F>::from_range(21.into(), 9.into());\n        assert!(a.is_disjoint(&b));\n        let b = RangeConstraint::<F>::from_range(21.into(), 10.into());\n        assert!(!a.is_disjoint(&b));\n\n        let b = RangeConstraint::<F>::from_mask(0x100u32);\n        assert!(b.range() == (0.into(), 0x100u32.into()));\n        assert!(a.is_disjoint(&b));\n\n        let c = RangeConstraint::<F>::from_mask(0xffu32);\n        // They are not disjoint, because they both allow zero.\n        assert!(!c.is_disjoint(&b));\n        let d = c.conjunction(&RangeConstraint::from_range(1.into(), 5000.into()));\n        assert!(d.is_disjoint(&b));\n    }\n\n    #[test]\n    fn is_unconstrained() {\n        type F = BabyBearField;\n        assert!(RangeConstraint::<F>::unconstrained().is_unconstrained());\n        let a = RangeConstraint::<F>::from_range(0.into(), F::from(0) - F::from(1));\n        assert!(a.is_unconstrained());\n        let b = RangeConstraint::<F>::from_range(5.into(), 4.into());\n        assert!(b.is_unconstrained());\n        let c = RangeConstraint::<F>::from_mask(!F::from(0).to_integer());\n        assert!(c.is_unconstrained());\n        let x = RangeConstraint::<F>::from_range(0.into(), F::from(10));\n        assert!(!x.is_unconstrained());\n        let y = RangeConstraint::<F>::from_range(F::from(-1), F::from(0));\n        assert!(!y.is_unconstrained());\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/reachability.rs",
    "content": "use std::collections::HashSet;\nuse std::fmt::Display;\nuse std::hash::Hash;\n\nuse itertools::Itertools;\n\nuse crate::indexed_constraint_system::IndexedConstraintSystem;\nuse crate::runtime_constant::RuntimeConstant;\n\n/// Returns the set of all variables reachable from an initial set via shared constraints\n/// (algebraic constraints and bus interactions).\n/// The returned set also contains the initial variables.\npub fn reachable_variables<T, V>(\n    initial_variables: impl IntoIterator<Item = V>,\n    constraint_system: &IndexedConstraintSystem<T, V>,\n) -> HashSet<V>\nwhere\n    T: RuntimeConstant,\n    V: Clone + Ord + Hash + Display,\n{\n    reachable_variables_except_blocked(initial_variables, std::iter::empty(), constraint_system)\n}\n\n/// Returns the set of all variables reachable from an initial set via shared constraints\n/// (algebraic constraints and bus interactions).\n/// The set of blocking variables is a barrier that stops the reachability search, in the\n/// sense that we consider constraints that can also contain blocking variables, but we\n/// only continue the search from the non-blocking variables in constraints.\n/// The returned set contains reachable blocking variables and the initial variables.\npub fn reachable_variables_except_blocked<T, V>(\n    initial_variables: impl IntoIterator<Item = V>,\n    blocking_variables: impl IntoIterator<Item = V>,\n    constraint_system: &IndexedConstraintSystem<T, V>,\n) -> HashSet<V>\nwhere\n    T: RuntimeConstant,\n    V: Clone + Ord + Hash + Display,\n{\n    let mut reachable_variables = initial_variables.into_iter().collect::<HashSet<_>>();\n    let blocking_variables = blocking_variables.into_iter().collect::<HashSet<_>>();\n\n    loop {\n        let size_before = reachable_variables.len();\n        let reachable_variables_vec = reachable_variables.iter().cloned().collect_vec();\n        for constraint in\n            
constraint_system.constraints_referencing_variables(&reachable_variables_vec)\n        {\n            if constraint\n                .referenced_unknown_variables()\n                .any(|var| reachable_variables.contains(var) && !blocking_variables.contains(var))\n            {\n                // This constraint is connected to a reachable variable,\n                // add all variables of this constraint.\n                reachable_variables.extend(constraint.referenced_unknown_variables().cloned());\n            }\n        }\n        if reachable_variables.len() == size_before {\n            break;\n        }\n    }\n    reachable_variables\n}\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/driver.rs",
    "content": "use std::collections::{HashMap, HashSet};\nuse std::fmt::Display;\nuse std::hash::Hash;\n\nuse itertools::Itertools;\nuse powdr_number::FieldElement;\n\nuse crate::range_constraint::RangeConstraint;\nuse crate::rule_based_optimizer::new_var_generator::NewVarRequest;\nuse crate::{\n    algebraic_constraint::AlgebraicConstraint,\n    constraint_system::{\n        BusInteraction, BusInteractionHandler, ComputationMethod, ConstraintSystem, DerivedVariable,\n    },\n    grouped_expression::{GroupedExpression, RangeConstraintProvider},\n    indexed_constraint_system::IndexedConstraintSystem,\n    inliner::DegreeBound,\n    rule_based_optimizer::{\n        environment::Environment,\n        item_db::ItemDB,\n        new_var_generator::NewVarGenerator,\n        rules,\n        types::{Action, Expr, Var},\n    },\n    runtime_constant::VarTransformable,\n};\n\npub type VariableAssignment<T, V> = (V, GroupedExpression<T, V>);\n\n/// Perform rule-based optimization on the given constraint system. 
Returns the modified\n/// system and a list of variable assignments that were made during the optimization.\n/// The rules can also alter algebraic constraints and bus interactions, those alterations\n/// will not be visible in the list of substitutions.\n///\n/// If a degree bound is NOT given, then the degrees of the returned system will not increase.\n/// If it is given, then the degrees may increase, but will stay within the bound.\n///\n/// The function `new_var` can be used to generate a fresh variable, each call should\n/// return a fresh variable and the parameter can be used as a name suggestion.\npub fn rule_based_optimization<T: FieldElement, V: Hash + Eq + Ord + Clone + Display>(\n    mut system: IndexedConstraintSystem<T, V>,\n    range_constraints: impl RangeConstraintProvider<T, V>,\n    bus_interaction_handler: impl BusInteractionHandler<T> + Clone,\n    new_var: &mut impl FnMut(&str) -> V,\n    degree_bound: Option<DegreeBound>,\n) -> (IndexedConstraintSystem<T, V>, Vec<VariableAssignment<T, V>>) {\n    let mut assignments = vec![];\n    let mut var_mapper = system\n        .referenced_unknown_variables()\n        .cloned()\n        // Sorting is important here so that the order for V translates\n        // to the same order on Var.\n        .sorted()\n        .collect::<ItemDB<V, Var>>();\n\n    // The expression database will be used to map expressions and their IDs.\n    // New expressions are created during rule execution and thus new IDs need\n    // to be allocated. 
Because of lifetime issues, we pass it into\n    // `env` and extract it again after the rules have run.\n    let mut expr_db = Some(ItemDB::<GroupedExpression<T, Var>, Expr>::default());\n\n    let mut range_constraints_on_vars: HashMap<Var, RangeConstraint<T>> = system\n        .referenced_unknown_variables()\n        .map(|v| (var_mapper.id(v), range_constraints.get(v)))\n        .filter(|(_, rc)| !rc.is_unconstrained())\n        .collect();\n\n    loop {\n        // Transform the constraint system into a simpler representation\n        // using IDs for variables and expressions.\n        let (algebraic_constraints, bus_interactions) =\n            transform_constraint_system(&system, &var_mapper, expr_db.as_mut().unwrap());\n\n        let duplicate_vars = system\n            .referenced_unknown_variables()\n            .map(|v| var_mapper.id(v))\n            .duplicates()\n            .collect::<HashSet<_>>();\n        let single_occurrence_vars = system\n            .referenced_unknown_variables()\n            .map(|v| var_mapper.id(v))\n            .collect::<HashSet<_>>()\n            .difference(&duplicate_vars)\n            .copied()\n            .collect::<HashSet<_>>();\n\n        // Create the \"environment\" singleton that can be used by the rules\n        // to query information from the outside world.\n        let env = Environment::<T>::new(\n            expr_db.take().unwrap(),\n            var_mapper\n                .iter()\n                .map(|(id, var)| (id, var.to_string()))\n                .collect(),\n            single_occurrence_vars,\n            // The NewVarGenerator will be used to generate fresh variables.\n            // because of lifetime and determinism issues, we pass the next ID that\n            // the var_mapper would use here and then re-create the\n            // variables in a deterministic sequence further down.\n            NewVarGenerator::new(var_mapper.next_free_id()),\n        );\n\n        // Create the rule 
system and populate it with the initial facts.\n        let mut rt = rules::Crepe::default();\n\n        // It would be better to handle bus interactions inside the rule system,\n        // but it is difficult because of the vector and the combinatorial\n        // explosion of the range constraints, so we just determine the range constraints\n        // on the bus interaction fields now.\n        rt.extend(\n            system\n                .bus_interactions()\n                .iter()\n                .zip(bus_interactions)\n                .flat_map(|(bus_inter, bus_inter_transformed)| {\n                    let updated_rcs = bus_interaction_handler\n                        .handle_bus_interaction(bus_inter.to_range_constraints(&range_constraints))\n                        .fields()\n                        .cloned()\n                        .collect_vec();\n                    bus_inter_transformed\n                        .fields()\n                        .cloned()\n                        .zip(updated_rcs)\n                        .collect_vec()\n                })\n                .filter(|(_, rc)| !rc.is_unconstrained())\n                .into_grouping_map()\n                .reduce(|rc1, _, rc2| rc1.conjunction(&rc2))\n                .into_iter()\n                .map(|(e, rc)| rules::InitialRangeConstraintOnExpression(e, rc)),\n        );\n        rt.extend(\n            range_constraints_on_vars\n                .iter()\n                .map(|(var, rc)| rules::RangeConstraintOnVar(*var, *rc)),\n        );\n        rt.extend(\n            algebraic_constraints\n                .iter()\n                .copied()\n                .map(rules::InitialAlgebraicConstraint),\n        );\n        rt.extend(std::iter::once(rules::Env(&env)));\n\n        // Uncomment this to get a runtime profile of the individual\n        // rules.\n        // let ((actions, large_actions), profile) = rt.run_with_profiling();\n        // profile.report();\n        let 
(actions, large_actions) = rt.run();\n        let (expr_db_, new_var_generator) = env.terminate();\n\n        let mut progress = false;\n        // Try to execute the actions that were determined by the rules.\n        // Since the rules are \"non-deterministic\", some actions might conflict\n        // (imagine x := 7, x := y and y := 7, they are all consistent but\n        // some will fail depending on the order in which they are applied).\n        // We try to ensure that at least the outcome is deterministic by\n        // sorting the actions.\n\n        // Collect replacement actions to process them in batch\n        let mut replacement_actions = Vec::new();\n\n        // Data structure to determine and record the final deterministic IDs of new variables\n        let mut new_vars = new_var_generator.requests();\n\n        for action in actions.into_iter().map(|a| a.0).sorted() {\n            match action {\n                Action::UpdateRangeConstraintOnVar(var, rc) => {\n                    let existing_rc = range_constraints_on_vars\n                        .get(&var)\n                        .cloned()\n                        .unwrap_or_default();\n                    let new_rc = existing_rc.conjunction(&rc);\n                    if new_rc != existing_rc {\n                        if let Some(val) = new_rc.try_to_single_value() {\n                            system.substitute_by_known(&var_mapper[var], &val);\n                            assignments.push((\n                                var_mapper[var].clone(),\n                                GroupedExpression::from_number(val),\n                            ));\n                        } else {\n                            range_constraints_on_vars.insert(var, new_rc);\n                        }\n                        progress = true;\n                    }\n                }\n                Action::SubstituteVariableByConstant(var, val) => {\n                    
system.substitute_by_known(&var_mapper[var], &val);\n                    assignments\n                        .push((var_mapper[var].clone(), GroupedExpression::from_number(val)));\n                    progress = true;\n                }\n                Action::SubstituteVariableByVariable(v1, v2) => {\n                    assignments.push((\n                        var_mapper[v1].clone(),\n                        GroupedExpression::from_unknown_variable(var_mapper[v2].clone()),\n                    ));\n                    system.substitute_by_unknown(\n                        &var_mapper[v1],\n                        &GroupedExpression::from_unknown_variable(var_mapper[v2].clone()),\n                    );\n                    progress = true;\n                }\n                Action::ReplaceAlgebraicConstraintBy(e1, replacement) => {\n                    replacement_actions.push(ReplacementAction::new(\n                        [e1],\n                        [replacement],\n                        &mut |e| {\n                            undo_variable_transform_and_recreate_new_variables(\n                                &expr_db_[e],\n                                &mut var_mapper,\n                                &mut new_vars,\n                                &mut system,\n                                new_var,\n                            )\n                        },\n                    ));\n                }\n            }\n        }\n        for action in large_actions.into_iter().map(|a| a.0).sorted() {\n            replacement_actions.push(ReplacementAction::new(\n                action.to_replace.iter().flatten().copied(),\n                action.replace_by.iter().flatten().copied(),\n                &mut |e| {\n                    undo_variable_transform_and_recreate_new_variables(\n                        &expr_db_[e],\n                        &mut var_mapper,\n                        &mut new_vars,\n                        &mut system,\n        
                new_var,\n                    )\n                },\n            ));\n        }\n\n        replacement_actions.sort();\n        progress |=\n            batch_replace_algebraic_constraints(&mut system, replacement_actions, degree_bound);\n\n        if !progress {\n            break;\n        }\n        expr_db = Some(expr_db_);\n    }\n    system.retain_algebraic_constraints(|c| !c.is_redundant());\n    (system, assignments)\n}\n\n/// Mainly transforms a `GroupedExpression<T, Var>` back into a `GroupedExpression<T, V>`, but also re-creates\n/// any variables that were newly generated inside the expression and adds potential computation methods\n/// to the constraint system.\n/// This is needed in order to ensure a deterministic creation order for new variables.\nfn undo_variable_transform_and_recreate_new_variables<\n    T: FieldElement,\n    V: Hash + Eq + Ord + Clone + Display,\n>(\n    expr: &GroupedExpression<T, Var>,\n    var_mapper: &mut ItemDB<V, Var>,\n    new_vars: &mut HashMap<Var, NewVarRequest<T>>,\n    system: &mut IndexedConstraintSystem<T, V>,\n    new_var_callback: &mut impl FnMut(&str) -> V,\n) -> GroupedExpression<T, V> {\n    expr.transform_var_type(&mut |v| {\n        let v = if let Some(request) = &mut new_vars.get_mut(v) {\n            if request.final_id.is_none() {\n                // We have not assigned a final ID yet, request a new variable from the global\n                // callback and insert it into the variable ID database to get a new ID.\n                let v = new_var_callback(&request.prefix);\n                request.final_id = Some(var_mapper.insert(&v));\n                let computation_method = undo_variable_transform_in_computation_method(\n                    &request.computation_method,\n                    var_mapper,\n                );\n                system.extend(ConstraintSystem {\n                    derived_variables: vec![DerivedVariable {\n                        variable: v.clone(),\n           
             computation_method,\n                    }],\n                    ..Default::default()\n                });\n            }\n            request.final_id.unwrap()\n        } else {\n            *v\n        };\n        var_mapper[v].clone()\n    })\n}\n\n/// A single replacement operation: replace `replace` constraints with `replace_by` constraints.\n#[derive(PartialEq, Eq, PartialOrd, Ord)]\npub(crate) struct ReplacementAction<T, V> {\n    /// Constraints to be replaced.\n    pub(crate) replace: Vec<GroupedExpression<T, V>>,\n    /// Replacement constraints.\n    pub(crate) replace_by: Vec<GroupedExpression<T, V>>,\n}\n\nimpl<T: FieldElement, V: Hash + Eq + Ord + Clone + Display> ReplacementAction<T, V> {\n    /// Creates a new ReplacementAction from expression IDs, performing variable transformation.\n    fn new(\n        replace: impl IntoIterator<Item = Expr>,\n        replace_by: impl IntoIterator<Item = Expr>,\n        mut transform: &mut impl FnMut(Expr) -> GroupedExpression<T, V>,\n    ) -> Self {\n        let replace = replace.into_iter().map(&mut transform).collect_vec();\n        let replace_by = replace_by.into_iter().map(&mut transform).collect_vec();\n        Self {\n            replace,\n            replace_by,\n        }\n    }\n}\n\n/// Checks if a replacement action satisfies the degree bound constraints.\n/// Returns true if the replacement is allowed, false otherwise.\n///\n/// If degree_bound is None, the replacement is only allowed if the degree does not increase.\n/// If degree_bound is Some(bound), the replacement is allowed if the new degree stays within the bound.\nfn is_replacement_within_degree_bound<T: FieldElement, V: Hash + Eq + Ord + Clone + Display>(\n    replacement: &ReplacementAction<T, V>,\n    degree_bound: Option<DegreeBound>,\n) -> bool {\n    let max_old_degree = replacement\n        .replace\n        .iter()\n        .map(|e| e.degree())\n        .max()\n        .unwrap_or(0);\n    let max_new_degree = 
replacement\n        .replace_by\n        .iter()\n        .map(|e| e.degree())\n        .max()\n        .unwrap_or(0);\n\n    // Check if the degree increase is acceptable\n    let degree_increase = max_new_degree > max_old_degree;\n    match degree_bound {\n        None => !degree_increase,\n        Some(bound) => max_new_degree <= bound.identities,\n    }\n}\n\n/// Batch replaces multiple sets of algebraic constraints in a single pass through the constraint system.\n/// Returns true if at least one replacement was successful.\n///\n/// If degree_bound is None, replacements are only done if the degree does not increase.\n/// If degree_bound is Some(bound), replacements are only done if the degree stays within the bound.\n///\n/// Consults the `new_var_generator` and re-assigns the IDs of all generated variables such that they\n/// are deterministically generated.\npub(crate) fn batch_replace_algebraic_constraints<\n    T: FieldElement,\n    V: Hash + Eq + Ord + Clone + Display,\n>(\n    system: &mut IndexedConstraintSystem<T, V>,\n    replacements: Vec<ReplacementAction<T, V>>,\n    degree_bound: Option<DegreeBound>,\n) -> bool {\n    // Filter out replacements that violate degree bounds\n    // and also filter out duplicate left hand sides.\n    let valid_replacements: Vec<_> = replacements\n        .into_iter()\n        .filter(|replacement| {\n            let within_bound = is_replacement_within_degree_bound(replacement, degree_bound);\n            if !within_bound {\n                log::debug!(\n                    \"Skipping replacement of {} by {} due to degree constraints.\",\n                    replacement.replace.iter().format(\", \"),\n                    replacement.replace_by.iter().format(\", \")\n                );\n            }\n            within_bound\n        })\n        .map(|replacement| ReplacementAction {\n            replace: replacement.replace.into_iter().unique().collect(),\n            replace_by: replacement.replace_by,\n        
})\n        .collect();\n\n    // Build a map from constraints to search for to their index in the replacement list.\n    // Note that the same expression can be present in multiple lists!\n    let replace_to_index: HashMap<&GroupedExpression<T, V>, Vec<usize>> = valid_replacements\n        .iter()\n        .enumerate()\n        .flat_map(|(i, r)| r.replace.iter().map(move |e| (e, i)))\n        .into_group_map();\n\n    // Compute which of the expressions to search for have been found for each replacement action.\n    let mut replacement_found: Vec<HashSet<&GroupedExpression<T, V>>> =\n        vec![Default::default(); valid_replacements.len()];\n\n    for constraint in system.algebraic_constraints() {\n        if let Some(replacement_indices) = replace_to_index.get(&constraint.expression) {\n            for &i in replacement_indices {\n                replacement_found[i].insert(&constraint.expression);\n            }\n        }\n    }\n\n    let mut constraints_to_remove: HashSet<&GroupedExpression<T, V>> = HashSet::new();\n    let mut replacement_constraints = Vec::new();\n\n    for (index, replacement) in valid_replacements.iter().enumerate() {\n        if replacement_found[index].len() != replacement.replace.len() {\n            log::debug!(\n                \"Incomplete replacement: wanted to replace {} but found only {}/{} constraints in the system.\",\n                replacement.replace.iter().format(\", \"),\n                replacement_found[index].len(),\n                replacement.replace.len()\n            );\n            continue;\n        }\n\n        // Check if any of this replacement's constraints to replace have already been claimed\n        let has_conflict = replacement\n            .replace\n            .iter()\n            .any(|replace_expr| constraints_to_remove.contains(replace_expr));\n\n        if has_conflict {\n            log::debug!(\n                \"Skipping replacement of {} due to conflict with earlier replacement.\",\n         
       replacement.replace.iter().format(\", \")\n            );\n        } else {\n            // No conflict, this replacement can proceed\n            constraints_to_remove.extend(replacement.replace.iter());\n            replacement_constraints.extend(replacement.replace_by.iter().cloned());\n        }\n    }\n\n    if constraints_to_remove.is_empty() {\n        // All replacements were skipped due to conflicts\n        return false;\n    }\n\n    // Remove old constraints and add new ones\n    system.retain_algebraic_constraints(|c| !constraints_to_remove.contains(&c.expression));\n    system.add_algebraic_constraints(\n        replacement_constraints\n            .into_iter()\n            .map(AlgebraicConstraint::assert_zero),\n    );\n\n    true\n}\n\n/// Transform the constraint system such that variables and expressions are\n/// assigned IDs.\nfn transform_constraint_system<T: FieldElement, V: Hash + Eq + Ord + Clone + Display>(\n    system: &IndexedConstraintSystem<T, V>,\n    var_mapper: &ItemDB<V, Var>,\n    expression_db: &mut ItemDB<GroupedExpression<T, Var>, Expr>,\n) -> (Vec<Expr>, Vec<BusInteraction<Expr>>) {\n    let algebraic_constraints = system\n        .system()\n        .algebraic_constraints\n        .iter()\n        .map(|c| transform_variables(&c.expression, var_mapper))\n        .map(|e| expression_db.insert_owned(e))\n        .collect_vec();\n    let bus_interactions: Vec<BusInteraction<Expr>> = system\n        .system()\n        .bus_interactions\n        .iter()\n        .map(|bus_inter| {\n            bus_inter\n                .fields()\n                .map(|f| transform_variables(f, var_mapper))\n                .map(|e| expression_db.insert_owned(e))\n                .collect()\n        })\n        .collect_vec();\n    (algebraic_constraints, bus_interactions)\n}\n\n/// Transform the variable type in the expression to use `Var` instead of `V`.\nfn transform_variables<T: FieldElement, V: Hash + Eq + Ord + Clone + Display>(\n    
expr: &GroupedExpression<T, V>,\n    var_mapper: &ItemDB<V, Var>,\n) -> GroupedExpression<T, Var> {\n    expr.transform_var_type(&mut |v| var_mapper.id(v))\n}\n\n/// Undo the effect of `transform_variables`, transforming from `Var` back to `V`.\nfn undo_variable_transform<T: FieldElement, V: Hash + Eq + Ord + Clone + Display>(\n    expr: &GroupedExpression<T, Var>,\n    var_mapper: &ItemDB<V, Var>,\n) -> GroupedExpression<T, V> {\n    expr.transform_var_type(&mut |v| var_mapper[*v].clone())\n}\n\n/// Undo the effect of `transform_variables` on a computation method.\nfn undo_variable_transform_in_computation_method<\n    T: FieldElement,\n    V: Hash + Eq + Ord + Clone + Display,\n>(\n    method: &ComputationMethod<T, GroupedExpression<T, Var>>,\n    var_mapper: &ItemDB<V, Var>,\n) -> ComputationMethod<T, GroupedExpression<T, V>> {\n    match method {\n        ComputationMethod::Constant(c) => ComputationMethod::Constant(*c),\n        ComputationMethod::QuotientOrZero(numerator, denominator) => {\n            ComputationMethod::QuotientOrZero(\n                undo_variable_transform(numerator, var_mapper),\n                undo_variable_transform(denominator, var_mapper),\n            )\n        }\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/environment.rs",
    "content": "use std::{\n    cell::RefCell,\n    collections::{HashMap, HashSet},\n    hash::Hash,\n};\n\nuse itertools::{EitherOrBoth, Itertools};\nuse powdr_number::FieldElement;\n\nuse crate::{\n    constraint_system::ComputationMethod,\n    grouped_expression::GroupedExpression,\n    rule_based_optimizer::{\n        item_db::ItemDB,\n        new_var_generator::NewVarGenerator,\n        types::{Expr, Var},\n    },\n    runtime_constant::VarTransformable,\n};\n\n/// The Environment in the main method to access information about\n/// the constraint system. It allows rules to translate\n/// the opaque Expr identifiers into GroupedExpressions and perform\n/// actions on them.\n/// It is available to the rules as a singleton with interior mutability.\npub struct Environment<T: FieldElement> {\n    expressions: RefCell<ItemDB<GroupedExpression<T, Var>, Expr>>,\n    var_to_string: HashMap<Var, String>,\n\n    /// Variables that only occurr once in the system\n    /// (also only once in the constraint they occur in).\n    single_occurrence_variables: HashSet<Var>,\n    new_var_generator: RefCell<NewVarGenerator<T>>,\n}\n\nimpl<T: FieldElement> PartialEq for Environment<T> {\n    fn eq(&self, _other: &Self) -> bool {\n        // Environment is a singleton.\n        true\n    }\n}\n\nimpl<T: FieldElement> Eq for Environment<T> {}\n\nimpl<T: FieldElement> PartialOrd for Environment<T> {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        // Environment is a singleton.\n        Some(self.cmp(other))\n    }\n}\n\nimpl<T: FieldElement> Ord for Environment<T> {\n    fn cmp(&self, _other: &Self) -> std::cmp::Ordering {\n        // Environment is a singleton.\n        std::cmp::Ordering::Equal\n    }\n}\n\nimpl<T: FieldElement> Hash for Environment<T> {\n    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {\n        // Environment is a singleton.\n        0.hash(state);\n    }\n}\n\nimpl<T: FieldElement> Environment<T> {\n    pub fn new(\n    
    expressions: ItemDB<GroupedExpression<T, Var>, Expr>,\n        var_to_string: HashMap<Var, String>,\n        single_occurrence_variables: HashSet<Var>,\n        new_var_generator: NewVarGenerator<T>,\n    ) -> Self {\n        Self {\n            expressions: RefCell::new(expressions),\n            var_to_string,\n            single_occurrence_variables,\n            new_var_generator: RefCell::new(new_var_generator),\n        }\n    }\n\n    /// Re-extract re-usable components after the rules have run.\n    pub fn terminate(self) -> (ItemDB<GroupedExpression<T, Var>, Expr>, NewVarGenerator<T>) {\n        (\n            self.expressions.into_inner(),\n            self.new_var_generator.into_inner(),\n        )\n    }\n\n    /// Turns a GroupedExpression into the corresponding Expr,\n    /// allocating a new ID if it is not yet present.\n    /// Use this function when you only have a reference to the expression.\n    pub fn insert(&self, expr: &GroupedExpression<T, Var>) -> Expr {\n        self.expressions.borrow_mut().insert(expr)\n    }\n\n    /// Turns a GroupedExpression into the corresponding Expr,\n    /// allocating a new ID if it is not yet present.\n    /// Use this function instead of `insert` when you own the expression.    
\n    #[allow(dead_code)]\n    pub fn insert_owned(&self, expr: GroupedExpression<T, Var>) -> Expr {\n        self.expressions.borrow_mut().insert_owned(expr)\n    }\n\n    /// Turns an Expr into an owned GroupedExpression.\n    /// This is expensive since it clones the expression.\n    pub fn extract(&self, expr: Expr) -> GroupedExpression<T, Var> {\n        self.expressions.borrow()[expr].clone()\n    }\n\n    pub fn new_var(\n        &self,\n        prefix: &str,\n        method: ComputationMethod<T, GroupedExpression<T, Var>>,\n    ) -> Var {\n        self.new_var_generator.borrow_mut().generate(prefix, method)\n    }\n\n    pub fn single_occurrence_variables(&self) -> impl Iterator<Item = &Var> {\n        self.single_occurrence_variables.iter()\n    }\n\n    /// Split Expr into head and tail, i.e., expr = head + tail\n    pub fn try_split_into_head_tail(&self, expr: Expr) -> Option<(Expr, Expr)> {\n        let db = self.expressions.borrow();\n        let expr = db[expr].clone();\n        drop(db);\n        let (head, tail) = expr.try_split_head_tail()?;\n        Some((self.insert_owned(head), self.insert_owned(tail)))\n    }\n\n    #[allow(dead_code)]\n    /// If this returns Some(n) then the expression is affine\n    /// and contains n variables.\n    pub fn affine_var_count(&self, expr: Expr) -> Option<usize> {\n        let db = self.expressions.borrow();\n        let expr = &db[expr];\n        expr.is_affine().then(|| expr.linear_components().len())\n    }\n\n    /// If this returns Some((coeff, var, offset)) then the expression is affine\n    /// and equals `coeff * var + offset`.\n    pub fn try_to_affine(&self, expr: Expr) -> Option<(T, Var, T)> {\n        let db = self.expressions.borrow();\n        let expr = &db[expr];\n        if !expr.is_affine() {\n            return None;\n        }\n        let (var, coeff) = expr.linear_components().exactly_one().ok()?;\n        Some((*coeff, *var, *expr.constant_offset()))\n    }\n\n    pub fn 
try_to_number(&self, expr: Expr) -> Option<T> {\n        let db = self.expressions.borrow();\n        let expr = &db[expr];\n        expr.try_to_number()\n    }\n\n    /// Runs the function `f` on the expression identified by `expr`,\n    /// passing `args` as additional arguments.\n    /// This function is needed because we cannot return\n    /// references to GroupedExpression due to the interior mutability.\n    pub fn on_expr<Args, Ret>(\n        &self,\n        expr: Expr,\n        args: Args,\n        f: impl Fn(&GroupedExpression<T, Var>, Args) -> Ret,\n    ) -> Ret {\n        let db = self.expressions.borrow();\n        let expr = &db[expr];\n        f(expr, args)\n    }\n\n    /// If this returns Some(e1, e2) then the expression equals e1 * e2.\n    pub fn try_as_single_product(&self, expr: Expr) -> Option<(Expr, Expr)> {\n        let (l, r) = {\n            let db = self.expressions.borrow();\n            let (l, r) = db[expr].try_as_single_product()?;\n            (l.clone(), r.clone())\n        };\n        // TODO eventually, l and r are cloned.\n        // if we change GroupedExpression to use `Expr` for the recursion, we do not\n        // have to insert everything multiple times.\n        Some((self.insert(&l), self.insert(&r)))\n    }\n\n    /// If this returns Some((v1, v2, factor)), then\n    /// a is obtained from b * factor by substituting v2 by v1.\n    pub fn differ_in_exactly_one_variable(&self, a_id: Expr, b_id: Expr) -> Option<(Var, Var, T)> {\n        let db = self.expressions.borrow();\n        let a = &db[a_id];\n        let b = &db[b_id];\n        if !a.is_affine()\n            || !b.is_affine()\n            || a.linear_components().len() != b.linear_components().len()\n            || a.linear_components().len() < 2\n        {\n            return None;\n        }\n        // First find the variables, ignoring the coefficients.\n        let (v1, v2) = a\n            .linear_components()\n            .merge_join_by(b.linear_components(), 
|(v1, _), (v2, _)| v1.cmp(v2))\n            .filter(|either| !matches!(either, EitherOrBoth::Both(_, _)))\n            .collect_tuple()?;\n        let (left_var, right_var, factor) = match (v1, v2) {\n            (EitherOrBoth::Left((lv, lc)), EitherOrBoth::Right((rv, rc)))\n            | (EitherOrBoth::Right((rv, rc)), EitherOrBoth::Left((lv, lc))) => {\n                (*lv, *rv, *lc / *rc)\n            }\n            _ => return None,\n        };\n        // Now verify that the other coefficients agree with the factor\n        if *a.constant_offset() != *b.constant_offset() * factor {\n            return None;\n        }\n        if !a\n            .linear_components()\n            .filter(|(v, _)| **v != left_var)\n            .map(|(_, c)| *c)\n            .eq(b\n                .linear_components()\n                .filter(|(v, _)| **v != right_var)\n                .map(|(_, bc)| *bc * factor))\n        {\n            return None;\n        }\n\n        Some((left_var, right_var, factor))\n    }\n\n    /// Substitutes the variable `var` by the constant `value` in the expression `e`\n    /// and returns the resulting expression.\n    #[allow(dead_code)]\n    pub fn substitute_by_known(&self, e: Expr, var: Var, value: T) -> Expr {\n        let expr = {\n            let db = self.expressions.borrow();\n            let mut expr = db[e].clone();\n            // expr.substitute_by_known(&var, &value);\n            expr.substitute_simple(&var, value);\n            expr\n        };\n        self.insert_owned(expr)\n    }\n\n    /// Substitutes the variable `var` by the variable `replacement` in the expression `e`\n    /// and returns the resulting expression.\n    #[allow(dead_code)]\n    pub fn substitute_by_var(&self, e: Expr, var: Var, replacement: Var) -> Expr {\n        let expr = {\n            let db = self.expressions.borrow();\n            let mut expr = db[e].clone();\n            expr.substitute_by_unknown(\n                &var,\n                
&GroupedExpression::from_unknown_variable(replacement),\n            );\n            expr\n        };\n        self.insert_owned(expr)\n    }\n\n    #[allow(dead_code)]\n    pub fn format_expr(&self, expr: Expr) -> String {\n        let db = self.expressions.borrow();\n        db[expr]\n            .transform_var_type(&mut |v| self.format_var(*v))\n            .to_string()\n    }\n\n    #[allow(dead_code)]\n    pub fn format_var(&self, var: Var) -> String {\n        self.var_to_string\n            .get(&var)\n            .cloned()\n            .unwrap_or_else(|| var.to_string())\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/item_db.rs",
    "content": "use std::collections::HashMap;\nuse std::hash::Hash;\nuse std::ops::Index;\n\nuse derivative::Derivative;\n\n/// A database of items that are assigned consecutive identifiers\n/// and which can translate back and forth between identifiers\n/// and items.\n\n#[derive(Derivative)]\n#[derivative(Default(bound = \"\"))]\npub struct ItemDB<Item, Ident> {\n    items: Vec<Item>,\n    reverse: HashMap<Item, usize>,\n    _phantom: std::marker::PhantomData<Ident>,\n}\n\nimpl<Item, Ident> FromIterator<Item> for ItemDB<Item, Ident>\nwhere\n    Item: Clone + Hash + Eq,\n{\n    fn from_iter<T: IntoIterator<Item = Item>>(iter: T) -> Self {\n        let items = iter.into_iter().collect::<Vec<_>>();\n        let reverse = items\n            .iter()\n            .enumerate()\n            .map(|(i, v)| (v.clone(), i))\n            .collect();\n        Self {\n            items,\n            reverse,\n            _phantom: std::marker::PhantomData,\n        }\n    }\n}\n\nimpl<Item, Ident> Index<Ident> for ItemDB<Item, Ident>\nwhere\n    Ident: Into<usize>,\n{\n    type Output = Item;\n    fn index(&self, index: Ident) -> &Self::Output {\n        &self.items[index.into()]\n    }\n}\n\nimpl<Item, Ident> ItemDB<Item, Ident>\nwhere\n    Item: Clone + Hash + Eq,\n    Ident: From<usize> + Copy,\n{\n    fn insert_owned_new(&mut self, item: Item) -> Ident {\n        let id = self.items.len();\n        self.items.push(item.clone());\n        self.reverse.insert(item, id);\n        Ident::from(id)\n    }\n\n    /// Inserts the item if not already present, returning its identifier.\n    /// Use this function over `insert_owned` when you only have a\n    /// reference to the item.\n    pub fn insert(&mut self, item: &Item) -> Ident {\n        if let Some(&id) = self.reverse.get(item) {\n            Ident::from(id)\n        } else {\n            self.insert_owned_new(item.clone())\n        }\n    }\n\n    /// Inserts the item if not already present, returning its identifier.\n    
/// Use this function over `insert` when you have ownership of the item.\n    pub fn insert_owned(&mut self, item: Item) -> Ident {\n        if let Some(&id) = self.reverse.get(&item) {\n            Ident::from(id)\n        } else {\n            self.insert_owned_new(item)\n        }\n    }\n\n    pub fn id(&self, item: &Item) -> Ident {\n        self.reverse.get(item).map(|&id| Ident::from(id)).unwrap()\n    }\n\n    pub fn iter(&self) -> impl Iterator<Item = (Ident, &Item)> {\n        self.items\n            .iter()\n            .enumerate()\n            .map(|(i, item)| (Ident::from(i), item))\n    }\n\n    // TODO avoid using this (as pub)\n    pub fn next_free_id(&self) -> usize {\n        self.items.len()\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/mod.rs",
    "content": "mod driver;\nmod environment;\nmod item_db;\nmod new_var_generator;\nmod rules;\nmod types;\n\n#[cfg(test)]\nmod tests;\n\npub use driver::rule_based_optimization;\npub use driver::VariableAssignment;\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/new_var_generator.rs",
    "content": "use std::collections::HashMap;\n\nuse crate::{\n    constraint_system::ComputationMethod, grouped_expression::GroupedExpression,\n    rule_based_optimizer::types::Var,\n};\n\n/// A request for a new variable from the rule system. The variable will be assigned a tentative ID and name\n/// generated from the prefix. Both the ID and the name will be re-generated when the replacements are processed.\npub struct NewVarRequest<T> {\n    /// The final ID computed when the replacements are processed.\n    pub final_id: Option<Var>,\n    /// A prefix to be used for generating a descriptive name.\n    pub prefix: String,\n    /// The way to compute the variable during witness generation.\n    pub computation_method: ComputationMethod<T, GroupedExpression<T, Var>>,\n}\n\npub struct NewVarGenerator<T> {\n    counter: usize,\n    requests: HashMap<Var, NewVarRequest<T>>,\n}\n\nimpl<T> NewVarGenerator<T> {\n    pub fn new(initial_counter: usize) -> Self {\n        Self {\n            counter: initial_counter,\n            requests: Default::default(),\n        }\n    }\n\n    pub fn generate(\n        &mut self,\n        prefix: &str,\n        computation_method: ComputationMethod<T, GroupedExpression<T, Var>>,\n    ) -> Var {\n        let var = Var::from(self.counter);\n        self.requests.insert(\n            var,\n            NewVarRequest {\n                final_id: None,\n                prefix: prefix.to_string(),\n                computation_method,\n            },\n        );\n        self.counter += 1;\n        var\n    }\n\n    pub fn requests(self) -> HashMap<Var, NewVarRequest<T>> {\n        self.requests\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/rules.rs",
    "content": "#![allow(clippy::iter_over_hash_type)]\n// This is about a warning about interior mutability for the key\n// `Env`. We need it and it is probably fine.\n#![allow(clippy::mutable_key_type)]\n\nuse crepe::crepe;\nuse itertools::Itertools;\nuse num_traits::One;\nuse powdr_number::FieldElement;\n\nuse crate::{\n    constraint_system::ComputationMethod,\n    grouped_expression::{GroupedExpression, GroupedExpressionComponent},\n    range_constraint::RangeConstraint,\n    rule_based_optimizer::{\n        environment::Environment,\n        types::{Action, Expr, ReplaceConstraintsAction, Var},\n    },\n};\n\n// This file contains the set of datalog rules executed on the constraint system.\n// Facts/relations will be produced according to the rules from existing\n// facts until a fixed point is reached.\n// Facts marked by `@input` are provided as input to the rule engine,\n// and cannot be derived/extended by the rules.\n// Facts marked by `@output` are collected as output from the rules engine.\n// The only output is a set of Action rules to be applied to the constraint system.\n// Substitutions performed on constraints inside the rule system are not\n// automatically reflected in the constraint system to be optimized.\n//\n// The conditions of the rules are looped over / checked in the order in which they are\n// written. If all of them match, the \"head\" of the rule is executed and a new\n// fact is inserted into the database.\n// If non-trivial rust code is used as a condition, it is advisable to end the rule\n// after that condition and create a new \"intermediate\" fact for performance reasons.\n//\n// Since all rules are executed as long as they match, it is not possible to restrict\n// or somehow direct the fact derivation process. For example, if a variable replacement\n// is derived, new algebraic constraints will be created, but this does not mean that\n// the old constraints are removed. 
If we have a constraint that has many variables\n// and all of them are determined to be constant by other constraints, then the\n// derivation process will create all possible combinations of substitutions.\n// The same is true for range constraints: If we have a rule that requires a\n// range constraint for a variable, it will iterate over all range constraints\n// that have been derived for that variable over the course of executing the rules,\n// not just the most strict one.\n\ncrepe! {\n    @input\n    pub struct Env<'a, T: FieldElement>(pub &'a Environment<T>);\n\n    @input\n    pub struct InitialAlgebraicConstraint(pub Expr);\n\n    @input\n    pub struct InitialRangeConstraintOnExpression<T: FieldElement>(pub Expr, pub RangeConstraint<T>);\n\n    @input\n    pub struct RangeConstraintOnVar<T: FieldElement>(pub Var, pub RangeConstraint<T>);\n\n    struct AlgebraicConstraint(Expr);\n    AlgebraicConstraint(e) <- InitialAlgebraicConstraint(e);\n\n    // This rule is important: Just because a rule \"generates\" an Expr it does not\n    // mean that it automatically is an Expression. 
If we want to say something\n    // about all Exprs, we have to make sure to \"obtain\" them from Expression.\n    struct Expression(Expr);\n    Expression(e) <- AlgebraicConstraint(e);\n    Expression(e) <- InitialRangeConstraintOnExpression(e, _);\n\n    // ReplaceAlgebraicConstraintBy(old_expr, new_expr) => old_expr can equivalently\n    // be replaced by new_expr (and new_expression is in some way \"simpler\").\n    struct ReplaceAlgebraicConstraintBy(Expr, Expr);\n\n    // ReplaceAlgebraicConstraintsBy(e1, e2) =>\n    // the system that does not have the constraints in `e1` but has\n    // the new constraints in `e2` is equivalent.\n    struct ReplaceAlgebraicConstraintsBy([Option<Expr>; 10], [Option<Expr>; 5]);\n\n    //////////////////// BASIC SEMANTIC PROPERTIES OF EXPRESSIONS //////////////////////\n\n\n    // EqualZero(e) => e = 0 for all satisfying assignments.\n    struct EqualZero(Expr);\n    EqualZero(e) <- AlgebraicConstraint(e);\n\n    //////////////////// STRUCTURAL PROPERTIES OF EXPRESSIONS //////////////////////\n\n    // ContainsVariable(e, v) => v appears inside e.\n    struct ContainsVariable(Expr, Var);\n    ContainsVariable(e, v) <-\n      Env(env),\n      Expression(e),\n      for v in env.on_expr(e, (), |e, _| e.referenced_unknown_variables().cloned().collect_vec());\n\n    struct Product(Expr, Expr, Expr);\n    Product(e, l, r) <-\n      Expression(e),\n      Env(env),\n      let Some((l, r)) = env.try_as_single_product(e);\n    Product(e, r, l) <- Product(e, l, r);\n    Expression(e) <- Product(_, e, _);\n    Expression(e) <- Product(_, _, e);\n\n    // AffineExpression(e, coeff, var, offset) => e = coeff * var + offset\n    struct AffineExpression<T: FieldElement>(Expr, T, Var, T);\n    AffineExpression(e, coeff, var, offset) <-\n      Expression(e),\n      Env(env),\n      let Some((coeff, var, offset)) = env.try_to_affine(e);\n\n    struct LinearExpression<T: FieldElement>(Expr, Var, T);\n    LinearExpression(e, var, coeff) <-\n      
AffineExpression(e, coeff, var, T::zero());\n\n    struct Constant<T: FieldElement>(Expr, T);\n    Constant(e, value) <-\n      Expression(e),\n      Env(env),\n      let Some(value) = env.try_to_number(e);\n\n    // Split the expression into head and tail\n    // ExpressionSumHeadTail(e, h, t) => e = h + t\n    struct ExpressionSumHeadTail(Expr, Expr, Expr);\n    ExpressionSumHeadTail(e, head, tail) <-\n      Env(env),\n      Expression(e),\n      let Some((head, tail)) = env.try_split_into_head_tail(e);\n    Expression(head) <- ExpressionSumHeadTail(_, head, _);\n    Expression(tail) <- ExpressionSumHeadTail(_, _, tail);\n\n    // SimpleSum(e, f, c) => e is of the form f * v_1 + f * v_2 + ... + f * v_n + c,\n    //                      n >= 1\n    struct SimpleSum<T: FieldElement>(Expr, T, T);\n    SimpleSum(e, f, c) <-\n      ExpressionSumHeadTail(e, head, tail),\n      SimpleSum(tail, f, c),\n      LinearExpression(head, _, f);\n    SimpleSum(e, f, c) <- AffineExpression(e, f, _, c);\n\n    // IsAffine(e) => e is an affine expression, i.e. 
does not have super-linear parts.\n    struct IsAffine(Expr);\n    IsAffine(e) <-\n      Constant(e, _);\n    IsAffine(e) <-\n      ExpressionSumHeadTail(e, head, tail),\n      LinearExpression(head, _, _),\n      IsAffine(tail);\n\n    // HasSummand(e, summand) => summand is one of the summands of e.\n    struct HasSummand(Expr, Expr);\n    HasSummand(e, summand) <- ExpressionSumHeadTail(e, summand, _);\n    HasSummand(e, summand) <-\n      ExpressionSumHeadTail(e, _, tail),\n      HasSummand(tail, summand);\n\n    // DifferBySummand(e1, e2, s) => e1 = e2 + s and `s` is not a sum\n    // and not a constant.\n    // Note that `e1` and `e2` must \"pre-exist\" as expressions, i.e.\n    // this rule cannot be used to split out a linear summand\n    // from an expression but only to \"compare\" two expressions.\n    struct DifferBySummand(Expr, Expr, Expr);\n    DifferBySummand(e1, e2, s) <-\n      ExpressionSumHeadTail(e1, s, e2);\n    DifferBySummand(e1, e2, s) <-\n      DifferBySummand(tail1, tail2, s),\n      ExpressionSumHeadTail(e1, head, tail1),\n      ExpressionSumHeadTail(e2, head, tail2);\n\n    // AffinelyRelated(e1, f, e2, c) => e1 = f * e2 + c\n    // Note this is currently only implemented for affine e1 and e2.\n    // This only works if e1 and e2 have at least one variable\n    // and both e1 and e2 have to \"pre-exist\" as expressions.\n    // This means this rule cannot be used to subtract constants\n    // or multiply/divide by constants alone.\n    struct AffinelyRelated<T: FieldElement>(Expr, T, Expr, T);\n    AffinelyRelated(e1, f, e2, o1 - o2 * f) <-\n      AffineExpression(e1, f1, v, o1), // e1 = f1 * v + o1\n      AffineExpression(e2, f2, v, o2),\n      // Optimization: Compute f1 / f2 only once.\n      let f = f1 / f2;\n      // e2 = f2 * v + o2\n      // e1 = f1 * (e2 - o2) / f2 + o1 = e2 * (f1 / f2) + (o1 - o2 * f1 / f2)\n\n    AffinelyRelated(e1, f, e2, o) <-\n      AffinelyRelated(tail1, f, tail2, o),\n      // The swapped case and the 
equal will be computed by other rules.\n      ExpressionSumHeadTail(e1, head1, tail1),\n      LinearExpression(head1, v, f1),\n      ExpressionSumHeadTail(e2, head2, tail2),\n      LinearExpression(head2, v, f1 / f);\n\n    // HasProductSummand(e, l, r) => e contains a summand of the form l * r\n    struct HasProductSummand(Expr, Expr, Expr);\n    HasProductSummand(e, l, r) <-\n      HasSummand(e, summand),\n      Product(summand, l, r);\n    HasProductSummand(e, r, l) <- HasProductSummand(e, l, r);\n\n    // ProductConstraint(e, l, r) => e is an algebraic constraint of the form l * r = 0\n    struct ProductConstraint(Expr, Expr, Expr);\n    ProductConstraint(e, l, r) <-\n      AlgebraicConstraint(e),\n      Product(e, l, r);\n\n    // BooleanAndSubsetOfVars(e1, e2) => e1 and e2 are affine expressions only containing boolean variables and\n    //   all variables in e1 also appear in e2\n    struct BooleanAndSubsetOfVars(Expr, Expr);\n    BooleanAndSubsetOfVars(e1, e2) <-\n      AffineExpression(e1, _, v, _),\n      ContainsVariable(e2, v),\n      AffineAndAllVarsBoolean(e1),\n      AffineAndAllVarsBoolean(e2);\n    BooleanAndSubsetOfVars(e1, e2) <-\n      BooleanAndSubsetOfVars(tail1, tail2),\n      ExpressionSumHeadTail(e1, head1, tail1),\n      LinearExpression(head1, v, _),\n      BooleanVar(v),\n      ExpressionSumHeadTail(e2, head2, tail2),\n      LinearExpression(head2, v, _);\n    BooleanAndSubsetOfVars(e1, e2) <-\n      BooleanAndSubsetOfVars(e1, tail2),\n      ExpressionSumHeadTail(e2, _, tail2),\n      AffineAndAllVarsBoolean(e2);\n\n    // AffineAndAllVarsBoolean(e) => e is an affine expression and all variables in e are boolean variables\n    struct AffineAndAllVarsBoolean(Expr);\n    AffineAndAllVarsBoolean(e) <-\n      AffineExpression(e, _, v, _),\n      BooleanVar(v);\n    AffineAndAllVarsBoolean(e) <-\n      ExpressionSumHeadTail(e, head, tail),\n      AffineAndAllVarsBoolean(head),\n      AffineAndAllVarsBoolean(tail);\n\n    
//////////////////////// RANGE CONSTRAINTS //////////////////////////\n\n    // Range constraints are tricky because they can easily lead to exponential behaviour.\n    // Because of that, we should never update a range constraint on a variable\n    // and only compute range constraints on expressions from smaller expressions.\n\n    struct RangeConstraintOnExpression<T: FieldElement>(Expr, RangeConstraint<T>);\n    RangeConstraintOnExpression(e, rc) <-\n      InitialRangeConstraintOnExpression(e, rc);\n    RangeConstraintOnExpression(e, rc.square()) <-\n      Product(e, l, r),\n      (l == r),\n      RangeConstraintOnExpression(l, rc);\n    RangeConstraintOnExpression(e, l_rc.combine_product(&r_rc)) <-\n      Product(e, l, r),\n      (l < r),\n      RangeConstraintOnExpression(l, l_rc),\n      RangeConstraintOnExpression(r, r_rc);\n    RangeConstraintOnExpression(e, v_rc.multiple(coeff)) <-\n      LinearExpression(e, v, coeff),\n      RangeConstraintOnVar(v, v_rc);\n    RangeConstraintOnExpression(e, head_rc.combine_sum(&tail_rc)) <-\n      ExpressionSumHeadTail(e, head, tail),\n      RangeConstraintOnExpression(head, head_rc),\n      RangeConstraintOnExpression(tail, tail_rc);\n    RangeConstraintOnExpression(e, RangeConstraint::from_value(value)) <-\n      Constant(e, value);\n\n    // UpdateRangeConstraintOnVar(v, rc) => rc is a valid range constraint for variable v\n    // This is an output predicate and might cause the rule system to re-run if\n    // the range constraint is better than the currently best known.\n    // Please avoid deriving new range constraints directly since this can easily\n    // lead to exponential behaviour.\n    struct UpdateRangeConstraintOnVar<T: FieldElement>(Var, RangeConstraint<T>);\n    // RC(coeff * var + offset) = rc <=>\n    // coeff * RC(var) + offset = rc <=>\n    // RC(var) = (rc - offset) / coeff\n    UpdateRangeConstraintOnVar(v, rc.combine_sum(&RangeConstraint::from_value(-offset)).multiple(T::one() / coeff)) <-\n      
RangeConstraintOnExpression(e, rc),\n      AffineExpression(e, coeff, v, offset),\n      (coeff != T::zero());\n\n    // This derives boolean constraints on variables from `v * (v - 1) = 0`,\n    // but also works with `v * (v - 8) = 0` or similar.\n    UpdateRangeConstraintOnVar(v, RangeConstraint::from_value(c1).disjunction(&RangeConstraint::from_value(c2))) <-\n      ProductConstraint(_, l, r),\n      (l < r),\n      Solvable(l, v, c1),\n      Solvable(r, v, c2);\n\n    // BooleanVar(v) => v is 0 or 1\n    struct BooleanVar(Var);\n    BooleanVar(v) <- RangeConstraintOnVar(v, RangeConstraint::from_mask(1));\n\n    // BooleanExpressionConstraint(constr, e) => if constr is satisfied then e = 1 or e = 0\n    struct BooleanExpressionConstraint(Expr, Expr);\n    BooleanExpressionConstraint(constr, r) <-\n      ProductConstraint(constr, l, r),\n      // l = f * r + c, i.e. constr = (f * r + c) * r = 0\n      // <=> (r + c / f) * r = 0\n      // i.e. c / f = -1 <=> c = -f\n      AffinelyRelated(l, f, r, c),\n      (c == -f);\n\n    //////////////////////// SINGLE-OCCURRENCE VARIABLES //////////////////////////\n\n    // Combine multiple variables that only occur in the same algebraic constraint.\n    //\n    // The use-case here is for \"diff_inv_marker_...\" variables that each are the\n    // inverse of certain variables only if those variables are non-zero\n    // (and arbitrary otherwise).\n    // If the \"diff_inv_marker_...\" variables only occur once, they are essentially\n    // \"free\" variables and under some conditions, we can combine them into a single\n    // free variable and thus reduce the number of variables.\n    //\n    // Assume we have an algebraic constraint of the form `X * V1 + Y * V2 = R`,\n    // where `V1` and `V2` only occur in this constraint and only once.\n    // The only combination of values for `X`, `Y` and `R` where this is _not_ satisfiable\n    // is `X = 0`, `Y = 0`, `R != 0`. 
So the constraint is equivalent to the statement\n    // `(X = 0 and Y = 0) -> R = 0`.\n    //\n    // Consider the simpler case where both `X` and `Y` are non-negative such that\n    // `X + Y` does not wrap.\n    // Then `X = 0 and Y = 0` is equivalent to `X + Y = 0`. So we can replace the constraint\n    // by `(X + Y) * V3 = C`, where `V3` is a new variable that only occurs here.\n    //\n    // For the general case, where e.g. `X` can be negative, we replace it by `X * X`,\n    // if that value is still small enough.\n    struct SingleOccurrenceVariable(Var);\n    SingleOccurrenceVariable(v) <-\n      Env(env),\n      for v in env.single_occurrence_variables().cloned();\n    // SingleOccurrenceVariableInExpr(e, v) => v occurs only once in e and e is the\n    // only constraint it appears in.\n    struct SingleOccurrenceVariableInExpr(Expr, Var);\n    SingleOccurrenceVariableInExpr(e, v) <-\n      SingleOccurrenceVariable(v),\n      ContainsVariable(e, v),\n      AlgebraicConstraint(e);\n\n    // LargestSingleOccurrenceVariablePairInExpr(e, v1, v2) =>\n    // v1 and v2 are different variables that only occur in e and only once,\n    // and are the two largest variables with that property in e.\n    struct LargestSingleOccurrenceVariablePairInExpr(Expr, Var, Var);\n    LargestSingleOccurrenceVariablePairInExpr(e, v1, v2) <-\n      Env(env),\n      SingleOccurrenceVariableInExpr(e, v1),\n      SingleOccurrenceVariableInExpr(e, v2),\n      (v1 < v2),\n      (env\n        .single_occurrence_variables()\n        .filter(|v3| env.on_expr(e, (), |e, _| {\n            e.referenced_unknown_variables().any(|v| v == *v3)\n        }))\n        .all(|&v3| v3 == v1 || v3 == v2 || v3 < v1));\n\n    // FreeVariableCombinationCandidate(e, coeff1, v1, x1, coeff2, v2, x2)\n    // => e is the expression of an algebraic constraint and\n    // e = coeff1 * v1 * x1 + coeff2 * v2 * x2 + ...\n    // where v1 and v2 are different variables that only occur here and only once.\n    struct 
FreeVariableCombinationCandidate<T: FieldElement>(Expr, T, Var, Expr, T, Var, Expr);\n    FreeVariableCombinationCandidate(e, coeff1, v1, x1, coeff2, v2, x2) <-\n      // If we only consider the largest variable pair we could miss optimization opportunities,\n      // but at least the replacement becomes deterministic.\n      LargestSingleOccurrenceVariablePairInExpr(e, v1, v2),\n      AlgebraicConstraint(e),\n      HasProductSummand(e, x1, v1_e),\n      LinearExpression(v1_e, v1, coeff1),\n      HasProductSummand(e, x2, v2_e),\n      (x2 != v1_e),\n      (x1 != v2_e),\n      LinearExpression(v2_e, v2, coeff2);\n\n    ReplaceAlgebraicConstraintBy(e, replacement) <-\n      Env(env),\n      FreeVariableCombinationCandidate(e, coeff1, v1, x1, coeff2, v2, x2),\n      // Here, we have e = coeff1 * v1 * x1 + coeff2 * v2 * x2 + ...\n      RangeConstraintOnExpression(x1, rc1),\n      RangeConstraintOnExpression(x2, rc2),\n      let Some(replacement) = (|| {\n        // If the expression is not known to be non-negative, we square it.\n        let square_if_needed = |expr: Expr, rc: RangeConstraint<T>| {\n            let expr = env.extract(expr);\n            if rc.range().0 == T::zero() {\n                (expr, rc)\n            } else {\n                (expr.clone() * expr, rc.square())\n            }\n        };\n        let (x1, rc1) = square_if_needed(x1, rc1);\n        let (x2, rc2) = square_if_needed(x2, rc2);\n        if !rc1.range().0.is_zero() || !rc2.range().0.is_zero() {\n            return None;\n        }\n        let sum_rc = rc1.multiple(coeff1).combine_sum(&rc2.multiple(coeff2));\n        if !(sum_rc.range().0.is_zero() && sum_rc.range().1 < T::from(-1)) {\n            return None;\n        }\n        // Remove the summands with v1 and v2 from the expression.\n        let r = env.extract(e).into_summands().filter(|s|{\n            if let GroupedExpressionComponent::Quadratic(l, r) = s {\n                let mut vars = 
l.referenced_unknown_variables().chain(r.referenced_unknown_variables());\n                if vars.any(|v| v == &v1 || v == &v2) {\n                    return false;\n                }\n            };\n            true\n        }).map(GroupedExpression::from).sum::<GroupedExpression<T, Var>>();\n        let factor = x1.clone() * coeff1 + x2.clone() * coeff2;\n        let combined_var = env.new_var(\"free_var\", ComputationMethod::QuotientOrZero(-r.clone(), factor.clone()));\n        let replacement = r + GroupedExpression::from_unknown_variable(combined_var) * factor;\n        Some(env.insert_owned(replacement))\n      })();\n\n    //////////////////// EQUAL ZERO TEST ////////////////////////\n\n    // PlusMinusResult(e, e1, v2) =>\n    //   e = e1 * (2 * v2 - 1)\n    struct PlusMinusResult(Expr, Expr, Var);\n    PlusMinusResult(e, e1, v2) <-\n      Product(e, e1, r),\n      AffineExpression(r, coeff, v2, offset),\n        (coeff == T::from(2)),\n        (offset == T::from(-1));\n\n    // DiffMarkerConstraint(e, diff_marker, e2, cmp_result, diff_val) =>\n    //   e = diff_marker * (e2 * (2 * cmp_result - 1) + diff_val)\n    // (up to a factor)\n    struct DiffMarkerConstraint(Expr, Var, Expr, Var, Var);\n    DiffMarkerConstraint(e, diff_marker, e2, cmp_result, diff_val) <-\n      ProductConstraint(e, l, r),\n      LinearExpression(l, diff_marker, _),\n      // Note: the quadratic part has to be the head\n      ExpressionSumHeadTail(r, r1, r2),\n        PlusMinusResult(r1, e2, cmp_result),\n        LinearExpression(r2, diff_val, _);\n\n    // NegatedDiffMarkerConstraint(e, diff_marker, diff_expr, v, result, n) =>\n    //   e is the constraint diff_marker_expr * (v * (2 * result - 1)) = 0\n    //   and diff_marker_expr is of the form `1 - diff_marker1 - diff_marker2 - ...`\n    //   such that we have n variables and there is another\n    //   NegatedDiffMarkerConstraint with n-1 variables used to derive this one.\n    struct NegatedDiffMarkerConstraint(Expr, Var, 
Expr, Var, Var, u32);\n    NegatedDiffMarkerConstraint(e, diff_marker, l, v, result, 0) <-\n      ProductConstraint(e, l, r),\n      AffineExpression(l, T::from(-1), diff_marker, T::from(1)),\n      PlusMinusResult(r, r2, result),\n      LinearExpression(r2, v, T::from(-1));\n    NegatedDiffMarkerConstraint(e, diff_marker, l, v, result, n + 1) <-\n      ProductConstraint(e, l, r),\n        NegatedDiffMarkerConstraint(_, _, diff_marker_expr2, _, result, n),\n        DifferBySummand(l, diff_marker_expr2, diff_marker_e),\n          LinearExpression(diff_marker_e, diff_marker, T::from(-1)),\n      PlusMinusResult(r, r2, result),\n      LinearExpression(r2, v, T::from(-1));\n\n    // NegatedDiffMarkerConstraintFinal(e, diff_marker, l, result, n) =>\n    //   e is the constraint diff_marker_expr * (result) = 0\n    //   and diff_marker_expr is of the form `1 - diff_marker1 - diff_marker2 - ...`\n    //   such that we have n variables and there is another\n    //   NegatedDiffMarkerConstraint with n-1 variables used to derive this one.\n    struct NegatedDiffMarkerConstraintFinal(Expr, Var, Expr, Var, u32);\n    NegatedDiffMarkerConstraintFinal(e, diff_marker, l, result, n + 1) <-\n      ProductConstraint(e, l, r),\n        NegatedDiffMarkerConstraint(_, _, diff_marker_expr2, _, result, n),\n        DifferBySummand(l, diff_marker_expr2, diff_marker_e),\n          LinearExpression(diff_marker_e, diff_marker, T::from(-1)),\n      LinearExpression(r, result, T::from(1));\n\n    struct NegatedDiffMarkerConstraintFinalNegated(Expr, Var, Var, Var, u32);\n    NegatedDiffMarkerConstraintFinalNegated(e, diff_marker, v, result, n + 1) <-\n      ProductConstraint(e, l, r),\n        NegatedDiffMarkerConstraint(_, _, diff_marker_expr2, _, result, n),\n        DifferBySummand(l, diff_marker_expr2, diff_marker_e),\n          LinearExpression(diff_marker_e, diff_marker, T::from(-1)),\n      PlusMinusResult(r, r2, result),\n      AffineExpression(r2, T::from(-1), v, T::from(1));\n\n    // 
EqualZeroCheck(constrs, result, vars) =>\n    //   constrs can be equivalently replaced by a constraint that models\n    //   result = 1 if all vars are zero, and result = 0 otherwise.\n    struct EqualZeroCheck([Expr; 10], Var, [Var; 4]);\n    EqualZeroCheck(constrs, result, vars) <-\n      // (1 - diff_marker__3_0) * (a__3_0 * (2 * cmp_result_0 - 1)) = 0\n      NegatedDiffMarkerConstraint(constr_0, diff_marker_3, _, a_3, result, 0),\n      // (1 - (diff_marker__2_0 + diff_marker__3_0)) * (a__2_0 * (2 * cmp_result_0 - 1)) = 0\n      NegatedDiffMarkerConstraint(constr_1, diff_marker_2, _, a_2, result, 1),\n      // (1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (a__1_0 * (2 * cmp_result_0 - 1)) = 0\n      NegatedDiffMarkerConstraint(constr_2, diff_marker_1, _, a_1, result, 2),\n      // (1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n      NegatedDiffMarkerConstraintFinal(constr_3, diff_marker_0, one_minus_diff_marker_sum, result, 3),\n      // (1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((1 - a__0_0) * (2 * cmp_result_0 - 1)) = 0\n      NegatedDiffMarkerConstraintFinalNegated(constr_4, diff_marker_0, a_0, result, 3),\n      // diff_marker__0_0 * ((a__0_0 - 1) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n      DiffMarkerConstraint(constr_5, diff_marker_0, a_0_e, result, diff_val),\n        AffineExpression(a_0_e, a_0_e_coeff, a_0, a_0_e_offset), (a_0_e_coeff == T::from(1)), (a_0_e_offset == T::from(-1)),\n      // diff_marker__1_0 * (a__1_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n      DiffMarkerConstraint(constr_6, diff_marker_1, a_1_e, result, diff_val),\n        LinearExpression(a_1_e, a_1, T::from(1)),\n      // diff_marker__2_0 * (a__2_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n      DiffMarkerConstraint(constr_7, diff_marker_2, a_2_e, result, diff_val),\n        LinearExpression(a_2_e, a_2, T::from(1)),\n      // diff_marker__3_0 * 
(a__3_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n      DiffMarkerConstraint(constr_8, diff_marker_3, a_3_e, result, diff_val),\n        LinearExpression(a_3_e, a_3, T::from(1)),\n      BooleanVar(result),\n      BooleanVar(diff_marker_0),\n      BooleanVar(diff_marker_1),\n      BooleanVar(diff_marker_2),\n      BooleanVar(diff_marker_3),\n      RangeConstraintOnVar(a_0, rc_a0),\n      RangeConstraintOnVar(a_1, rc_a1),\n      RangeConstraintOnVar(a_2, rc_a2),\n      RangeConstraintOnVar(a_3, rc_a3),\n      // The next is needed so that the constraint `result + sum_inv_var * sum_of_vars - 1 = 0`\n      // works. If there is a way to get the sum to be zero but not all variables are zero,\n      // then this constraint cannot be satisfied.\n      ( rc_a0.range().0 == T::zero() && rc_a1.range().0 == T::zero()\n        && rc_a2.range().0 == T::zero() && rc_a3.range().0 == T::zero()\n        && rc_a0.combine_sum(&rc_a1).combine_sum(&rc_a2).combine_sum(&rc_a3).range().1 < T::from(-1)),\n      // (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n      BooleanExpressionConstraint(constr_9, diff_marker_sum),\n      AffinelyRelated(diff_marker_sum, T::from(-1), one_minus_diff_marker_sum, T::from(1)),\n      let constrs = [constr_0, constr_1, constr_2, constr_3, constr_4, constr_5, constr_6, constr_7, constr_8, constr_9],\n      let vars = [a_0, a_1, a_2, a_3];\n\n    ReplaceAlgebraicConstraintsBy(extend_by_none(constrs), extend_by_none(replacement)) <-\n      Env(env),\n      EqualZeroCheck(constrs, result, vars),\n      let replacement = {\n        let result = GroupedExpression::from_unknown_variable(result);\n        assert!(vars.len() == 4);\n        let vars = vars.into_iter().map(|v| GroupedExpression::from_unknown_variable(v)).collect_vec();\n        let sum_of_vars = vars.iter().cloned().sum::<GroupedExpression<_, _>>();\n        let sum_inv_var = 
GroupedExpression::from_unknown_variable(\n          env.new_var(\"inv_of_sum\", ComputationMethod::QuotientOrZero(One::one(), sum_of_vars.clone()))\n        );\n        [\n          env.insert_owned(result.clone() * sum_of_vars.clone()),\n          env.insert_owned(result + sum_inv_var * sum_of_vars - One::one()),\n        ]\n      };\n\n    //////////////// COMBINE CONSTRAINTS WITH NON-NEGATIVE FACTORS /////////////////////\n\n    // If we have `x * a = 0` and `x * b = 0` and `a` and `b` are\n    // both non-negative and their sum is constrained, then we can replace\n    // both constraints by `x * (a + b) = 0`.\n    ReplaceAlgebraicConstraintsBy(extend_by_none([e1, e2]), replacement) <-\n      Env(env),\n      ProductConstraint(e1, x, a),\n      ProductConstraint(e2, x, b),\n      (e1 < e2),\n      RangeConstraintOnExpression(a, rc_a),\n      RangeConstraintOnExpression(b, rc_b),\n      (rc_a.range().0 == T::zero()\n        && rc_b.range().0 == T::zero() && !rc_a.combine_sum(&rc_b).is_unconstrained()),\n      let replacement = extend_by_none([env.insert_owned(env.extract(x) * (env.extract(a) + env.extract(b)))]);\n\n    //////////////////////// AFFINE SOLVING //////////////////////////\n\n    // Solvable(e, var, value) => (e = 0 => var = value)\n    // Note that e is not required to be a constraint here.\n    struct Solvable<T: FieldElement>(Expr, Var, T);\n    Solvable(e, var, -offset / coeff) <-\n      AffineExpression(e, coeff, var, offset);\n\n    // Assignment(var, v) => any satisfying assignment has var = v.\n    struct Assignment<T: FieldElement>(Var, T);\n    Assignment(var, v) <-\n      EqualZero(e),\n      Solvable(e, var, v);\n\n    ///////////////////////////////// NO-WRAP ZERO SUM //////////////////////////\n\n    // If an algebraic constraint head + tail = 0 has the following properties:\n    // 1. the range constraint of head is [0, a] with a < P - 1,\n    // 2. the range constraint of tail is [0, b] with b < P - 1,\n    // 3. 
a + b (as integers) < P - 1,\n    // then both head and tail must be zero.\n\n    // EntailsZeroHeadAndTail(e1, e2) => e1 = 0 and e2 = 0\n    struct EntailsZeroHeadAndTail(Expr, Expr);\n    EntailsZeroHeadAndTail(head, tail) <-\n      EqualZero(e),\n      ExpressionSumHeadTail(e, head, tail),\n      RangeConstraintOnExpression(head, rc_head),\n      RangeConstraintOnExpression(tail, rc_tail),\n      (rc_head.range().0 == T::from(0)),\n      (rc_tail.range().0 == T::from(0)),\n      (rc_head.range().1.to_integer() + rc_tail.range().1.to_integer() < T::from(-1).to_integer());\n\n    EqualZero(head) <- EntailsZeroHeadAndTail(head,_);\n    EqualZero(tail) <- EntailsZeroHeadAndTail(_, tail);\n\n\n    ///////////////////////////////// ONE-HOT FLAG ///////////////////////////\n\n    // ExactlyOneSet(e) => exactly one variable in e is one, all others are zero.\n    struct ExactlyOneSet(Expr);\n    ExactlyOneSet(e) <-\n      AlgebraicConstraint(e),\n      SimpleSum(e, f, c),\n      AffineAndAllVarsBoolean(e),\n      ((f + c).is_zero());\n\n    // We want to match expressions of the form f_1 * v_1 + f_2 * v_2 + ... 
+ f_n * v_n + c = 0\n    // where all v_i are boolean and exactly one of the f_i equals -c.\n\n    // AffineSumCountCoeffs(e, None, f) => e is an affine expression where\n    //   the constant term is -f no variable has the coefficient f.\n    // AffineSumCountCoeffs(e, Some(v), f) => e is an affine expression where\n    //   the constant term is -f and exactly one variable has the coefficient f and\n    //   that variable is v.\n    struct AffineSumCountCoeffs<T: FieldElement>(Expr, Option<Var>, T);\n    AffineSumCountCoeffs(e, None, -c) <- Constant(e, c);\n    AffineSumCountCoeffs(e, Some(v), f) <-\n      AffineSumCountCoeffs(tail, None, f),\n      ExpressionSumHeadTail(e, head, tail),\n      LinearExpression(head, v, f);\n    AffineSumCountCoeffs(e, v1, f) <-\n      AffineSumCountCoeffs(tail, v1, f),\n      ExpressionSumHeadTail(e, head, tail),\n      LinearExpression(head, _, coeff),\n      (coeff != f);\n\n    Assignment(v, T::from((Some(v) == v2) as u32)) <-\n      ExactlyOneSet(e1),\n      AlgebraicConstraint(e1),\n      BooleanAndSubsetOfVars(e2, e1),\n      // At this point, we know that at most one of the variables in e2 is one,\n      // the rest is zero.\n      AlgebraicConstraint(e2),\n      AffineSumCountCoeffs(e2, v2, _),\n      // At this point, either no variable in e2 has coefficient -c (v2 == None)\n      // or exactly one variable (v2.unwrap()) has coefficient -c.\n      // In any case, the variable equal to v2.unwrap() is one, the rest zero.\n      HasSummand(e2, summand),\n      LinearExpression(summand, v, _);\n\n\n    ///////////////////////////////// OUTPUT ACTIONS //////////////////////////\n\n    struct Equivalence(Var, Var);\n\n    //------- quadratic equivalence -----\n\n    // QuadraticEquivalenceCandidate(E, expr, offset) =>\n    //   E = (expr * (expr + offset) = 0) is a constraint and\n    //   expr is affine with at least 2 variables.\n    struct QuadraticEquivalenceCandidate<T: FieldElement>(Expr, Expr, T);\n    
QuadraticEquivalenceCandidate(e, r, o / f) <-\n       Env(env),\n       ProductConstraint(e, l, r),\n       AffinelyRelated(l, f, r, o), // l = f * r + o\n       IsAffine(l),\n       ({env.affine_var_count(l).unwrap_or(0) > 1});\n\n    // QuadraticEquivalenceCandidatePair(expr1, expr2, offset1 / coeff, v1, v2) =>\n    //  (expr1) * (expr1 + offset1) = 0 and (expr2) * (expr2 + offset2) = 0 are constraints,\n    //  expr1 is affine with at least 2 variables and is obtained from\n    //  expr2 * factor by substituting v2 by v1 (factor != 0),\n    //  offset1 == offset2 * factor and coeff is the coefficient of v1 in expr1.\n    //\n    //  This means that v1 is always equal to (-expr1 / coeff) or equal to\n    //  (-(expr1 + offset1) / coeff) = (-expr1 / coeff - offset1 / coeff).\n    //  Because of the above, also v2 is equal to\n    //  (-expr1 / coeff) or equal to (-(expr1 + offset1) / coeff) [Yes, expr1!].\n    struct QuadraticEquivalenceCandidatePair<T: FieldElement>(Expr, Expr, T, Var, Var);\n    QuadraticEquivalenceCandidatePair(expr1, expr2, offset1 / coeff, v1, v2) <-\n      Env(env),\n      QuadraticEquivalenceCandidate(_, expr1, offset1),\n      QuadraticEquivalenceCandidate(_, expr2, offset2),\n      (expr1 < expr2),\n      let Some((v1, v2, factor)) = env.differ_in_exactly_one_variable(expr1, expr2),\n      (offset1 == offset2 * factor),\n      let coeff = env.on_expr(expr1, (), |e, _| *e.coefficient_of_variable_in_affine_part(&v1).unwrap());\n\n    // QuadraticEquivalence(v1, v2) => v1 and v2 are equal in all satisfying assignments.\n    // Because of QuadraticEquivalenceCandidatePair, v1 is equal to X or X + offset,\n    // where X is some value that depends on other variables. 
Similarly, v2 is equal to X or X + offset.\n    // Because of the range constraints of v1 and v2, these two \"or\"s are exclusive ors.\n    // This means depending on the value of X, it is either X or X + offset.\n    // Since this \"decision\" only depens on X, both v1 and v2 are either X or X + offset at the same time\n    // and thus equal.\n    struct QuadraticEquivalence(Var, Var);\n    QuadraticEquivalence(v1, v2) <-\n      QuadraticEquivalenceCandidatePair(_, _, offset, v1, v2),\n      RangeConstraintOnVar(v1, rc),\n      RangeConstraintOnVar(v2, rc),\n      (rc.is_disjoint(&rc.combine_sum(&RangeConstraint::from_value(offset))));\n\n    Equivalence(v1, v2) <- QuadraticEquivalence(v1, v2);\n\n    @output\n    pub struct ActionRule<T: FieldElement>(pub Action<T>);\n    ActionRule(Action::UpdateRangeConstraintOnVar(v, rc)) <-\n      UpdateRangeConstraintOnVar(v, rc);\n    ActionRule(Action::SubstituteVariableByConstant(v, val)) <-\n      Assignment(v, val);\n    // Substitute the larger variable by the smaller.\n    ActionRule(Action::SubstituteVariableByVariable(v1, v2)) <-\n      Equivalence(v1, v2), (v1 > v2);\n    ActionRule(Action::SubstituteVariableByVariable(v2, v1)) <-\n      Equivalence(v1, v2), (v2 > v1);\n    ActionRule(Action::ReplaceAlgebraicConstraintBy(e1, e2)) <-\n      ReplaceAlgebraicConstraintBy(e1, e2);\n\n    @output\n    pub struct ReplaceConstraintsActionRule(pub ReplaceConstraintsAction);\n    ReplaceConstraintsActionRule(ReplaceConstraintsAction{ to_replace, replace_by }) <-\n      ReplaceAlgebraicConstraintsBy(to_replace, replace_by);\n}\n\nfn extend_by_none<const N1: usize, const N2: usize>(items: [Expr; N1]) -> [Option<Expr>; N2] {\n    let mut output = [None; N2];\n    for (i, item) in items.iter().enumerate() {\n        output[i] = Some(*item);\n    }\n    output\n}\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/tests.rs",
    "content": "use std::fmt::Display;\nuse std::hash::Hash;\n\nuse crate::bus_interaction_handler::DefaultBusInteractionHandler;\nuse crate::rule_based_optimizer::driver::{batch_replace_algebraic_constraints, ReplacementAction};\nuse crate::{\n    algebraic_constraint,\n    constraint_system::{BusInteraction, BusInteractionHandler},\n    grouped_expression::{GroupedExpression, NoRangeConstraints},\n    indexed_constraint_system::IndexedConstraintSystem,\n    range_constraint::RangeConstraint,\n    rule_based_optimizer::driver::rule_based_optimization,\n    solver::Solver,\n};\n\nuse expect_test::expect;\nuse itertools::Itertools;\nuse num_traits::Zero;\nuse powdr_number::{BabyBearField, FieldElement, LargeInt};\n\nfn assert_zero<T: FieldElement, V: Hash + Eq + Ord + Clone + Display>(\n    expr: GroupedExpression<T, V>,\n) -> algebraic_constraint::AlgebraicConstraint<GroupedExpression<T, V>> {\n    algebraic_constraint::AlgebraicConstraint::assert_zero(expr)\n}\n\nfn v(name: &str) -> GroupedExpression<BabyBearField, String> {\n    GroupedExpression::from_unknown_variable(name.to_string())\n}\n\nfn c(value: i64) -> GroupedExpression<BabyBearField, String> {\n    GroupedExpression::from_number(BabyBearField::from(value))\n}\n\nfn new_var() -> impl FnMut(&str) -> String {\n    let mut counter = 0;\n    move |prefix: &str| {\n        let name = format!(\"{prefix}_{counter}\");\n        counter += 1;\n        name\n    }\n}\n\nfn handle_variable_range_checker<T: FieldElement>(\n    payload: &[RangeConstraint<T>],\n) -> Vec<RangeConstraint<T>> {\n    const MAX_BITS: u64 = 25;\n    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/var_range/bus.rs\n    // Expects (x, bits), where `x` is in the range [0, 2^bits - 1]\n    let [_x, bits] = payload else {\n        panic!(\"Expected arguments (x, bits)\");\n    };\n    match bits.try_to_single_value() {\n        Some(bits_value) if bits_value.to_degree() <= MAX_BITS => {\n            let 
bits_value = bits_value.to_integer().try_into_u64().unwrap();\n            let mask = (1u64 << bits_value) - 1;\n            vec![RangeConstraint::from_mask(mask), *bits]\n        }\n        _ => {\n            vec![\n                RangeConstraint::from_mask((1u64 << MAX_BITS) - 1),\n                RangeConstraint::from_range(T::from(0), T::from(MAX_BITS)),\n            ]\n        }\n    }\n}\n\nfn try_handle_bus_interaction<T: FieldElement>(\n    bus_interaction: &BusInteraction<RangeConstraint<T>>,\n) -> Option<BusInteraction<RangeConstraint<T>>> {\n    let mult = bus_interaction.multiplicity.try_to_single_value()?;\n    if mult == Zero::zero() {\n        return None;\n    }\n    let bus_id = bus_interaction\n        .bus_id\n        .try_to_single_value()?\n        .to_integer()\n        .try_into_u64()?;\n    let payload_constraints = match bus_id {\n        3 => handle_variable_range_checker(&bus_interaction.payload),\n        _ => return None,\n    };\n    Some(BusInteraction {\n        payload: payload_constraints,\n        ..bus_interaction.clone()\n    })\n}\n\n#[derive(Clone)]\n#[allow(dead_code)]\nstruct TestBusInteractionHandler;\n\nimpl<T: FieldElement> BusInteractionHandler<T> for TestBusInteractionHandler {\n    fn handle_bus_interaction(\n        &self,\n        bus_interaction: BusInteraction<RangeConstraint<T>>,\n    ) -> BusInteraction<RangeConstraint<T>> {\n        try_handle_bus_interaction(&bus_interaction).unwrap_or(bus_interaction)\n    }\n}\n\n#[allow(dead_code)]\nfn bit_constraint(\n    variable: &str,\n    bits: u32,\n) -> BusInteraction<GroupedExpression<BabyBearField, String>> {\n    BusInteraction {\n        bus_id: c(3),\n        payload: vec![v(variable), c(bits as i64)],\n        multiplicity: c(1),\n    }\n}\n\n#[test]\nfn test_rule_based_optimization_empty() {\n    let system: IndexedConstraintSystem<BabyBearField, String> = IndexedConstraintSystem::default();\n    let optimized_system = rule_based_optimization(\n        
system,\n        NoRangeConstraints,\n        DefaultBusInteractionHandler::default(),\n        &mut new_var(),\n        None,\n    );\n    assert_eq!(optimized_system.0.system().algebraic_constraints.len(), 0);\n}\n\n#[test]\nfn test_rule_based_optimization_simple_assignment() {\n    let mut system = IndexedConstraintSystem::default();\n    let x = v(\"x\");\n    system.add_algebraic_constraints([\n        assert_zero(x * BabyBearField::from(7) - c(21)),\n        assert_zero(v(\"y\") * (v(\"y\") - c(1)) - v(\"x\")),\n    ]);\n    let optimized_system = rule_based_optimization(\n        system,\n        NoRangeConstraints,\n        DefaultBusInteractionHandler::default(),\n        &mut new_var(),\n        None,\n    );\n    expect![\"(y) * (y - 1) - 3 = 0\"].assert_eq(&optimized_system.0.to_string());\n}\n\n#[test]\nfn add_with_carry() {\n    // This tests a case of equivalent constraints that appear in the\n    // way \"add with carry\" is performed in openvm.\n    // X and Y end up being equivalent because they are both either\n    // A or A - 256, depending on whether the value of A is between\n    // 0 and 255 or not.\n    // A is the result of an addition with carry.\n    let mut system = IndexedConstraintSystem::default();\n    system.add_algebraic_constraints([\n        assert_zero(\n            (v(\"X\") * c(7) - v(\"A\") * c(7) + c(256) * c(7)) * (v(\"X\") * c(7) - v(\"A\") * c(7)),\n        ),\n        assert_zero((v(\"Y\") - v(\"A\") + c(256)) * (v(\"Y\") - v(\"A\"))),\n    ]);\n    system.add_bus_interactions([bit_constraint(\"X\", 8), bit_constraint(\"Y\", 8)]);\n    let optimized_system = rule_based_optimization(\n        system,\n        NoRangeConstraints,\n        TestBusInteractionHandler,\n        &mut new_var(),\n        None,\n    );\n    // Y has been replaced by X\n    expect![[r#\"\n        (7 * A - 7 * X - 1792) * (7 * A - 7 * X) = 0\n        (A - X - 256) * (A - X) = 0\n        BusInteraction { bus_id: 3, multiplicity: 1, payload: X, 8 }\n 
       BusInteraction { bus_id: 3, multiplicity: 1, payload: X, 8 }\"#]]\n    .assert_eq(&optimized_system.0.to_string());\n}\n\n#[test]\nfn test_rule_based_optimization_quadratic_equality() {\n    let mut system = IndexedConstraintSystem::default();\n    system.add_algebraic_constraints([\n        assert_zero(\n            (c(30720) * v(\"rs1_data__0_1\") + c(7864320) * v(\"rs1_data__1_1\")\n                - c(30720) * v(\"mem_ptr_limbs__0_1\")\n                + c(737280))\n                * (c(30720) * v(\"rs1_data__0_1\") + c(7864320) * v(\"rs1_data__1_1\")\n                    - c(30720) * v(\"mem_ptr_limbs__0_1\")\n                    + c(737281)),\n        ),\n        assert_zero(\n            (c(30720) * v(\"rs1_data__0_1\") + c(7864320) * v(\"rs1_data__1_1\")\n                - c(30720) * v(\"mem_ptr_limbs__0_2\")\n                + c(737280))\n                * (c(30720) * v(\"rs1_data__0_1\") + c(7864320) * v(\"rs1_data__1_1\")\n                    - c(30720) * v(\"mem_ptr_limbs__0_2\")\n                    + c(737281)),\n        ),\n    ]);\n    system.add_bus_interactions([\n        bit_constraint(\"rs1_data__0_1\", 8),\n        bit_constraint(\"rs1_data__1_1\", 8),\n        BusInteraction {\n            bus_id: c(3),\n            multiplicity: c(1),\n            payload: vec![c(-503316480) * v(\"mem_ptr_limbs__0_1\"), c(14)],\n        },\n        BusInteraction {\n            bus_id: c(3),\n            multiplicity: c(1),\n            payload: vec![c(-503316480) * v(\"mem_ptr_limbs__0_2\"), c(14)],\n        },\n    ]);\n    let optimized_system = rule_based_optimization(\n        system,\n        NoRangeConstraints,\n        TestBusInteractionHandler,\n        &mut new_var(),\n        None,\n    );\n    // Note that in the system below, mem_ptr_limbs__0_2 has been eliminated\n    expect![[r#\"\n        (30720 * mem_ptr_limbs__0_1 - 30720 * rs1_data__0_1 - 7864320 * rs1_data__1_1 - 737280) * (30720 * mem_ptr_limbs__0_1 - 30720 * rs1_data__0_1 - 
7864320 * rs1_data__1_1 - 737281) = 0\n        (30720 * mem_ptr_limbs__0_1 - 30720 * rs1_data__0_1 - 7864320 * rs1_data__1_1 - 737280) * (30720 * mem_ptr_limbs__0_1 - 30720 * rs1_data__0_1 - 7864320 * rs1_data__1_1 - 737281) = 0\n        BusInteraction { bus_id: 3, multiplicity: 1, payload: rs1_data__0_1, 8 }\n        BusInteraction { bus_id: 3, multiplicity: 1, payload: rs1_data__1_1, 8 }\n        BusInteraction { bus_id: 3, multiplicity: 1, payload: -(503316480 * mem_ptr_limbs__0_1), 14 }\n        BusInteraction { bus_id: 3, multiplicity: 1, payload: -(503316480 * mem_ptr_limbs__0_1), 14 }\"#]].assert_eq(&optimized_system.0.to_string());\n}\n\n#[test]\nfn test_batch_replace_with_duplicate_constraints() {\n    // Direct test of batch_replace_algebraic_constraints with duplicate constraints\n    // This verifies that the HashSet-based tracking correctly handles duplicates\n    let mut system: IndexedConstraintSystem<BabyBearField, String> =\n        IndexedConstraintSystem::default();\n\n    // Create a system with duplicate constraints\n    system.add_algebraic_constraints([\n        assert_zero(v(\"x\") + v(\"y\")),\n        assert_zero(v(\"x\") + v(\"y\")),\n        assert_zero(v(\"z\") - c(5)),\n    ]);\n\n    assert_eq!(system.system().algebraic_constraints.len(), 3);\n\n    // Replace \"x + y = 0\" and \"z - 5 = 0\" by \"a = 0\"\n    let replacements = vec![ReplacementAction {\n        replace: vec![v(\"x\") + v(\"y\"), v(\"z\") - c(5)],\n        replace_by: vec![v(\"a\")],\n    }];\n\n    // Try to apply the replacement\n    let result = batch_replace_algebraic_constraints(&mut system, replacements, None);\n\n    // The replacement should succeed because we found the constraint to replace (even though it appears twice)\n    assert!(result, \"Replacement should succeed\");\n\n    expect![\"a = 0\"].assert_eq(\n        &system\n            .system()\n            .algebraic_constraints\n            .iter()\n            .format(\"\\n\")\n            
.to_string(),\n    );\n}\n\n#[test]\nfn test_batch_replace_with_duplicate_constraints2() {\n    let mut system: IndexedConstraintSystem<BabyBearField, String> =\n        IndexedConstraintSystem::default();\n\n    system.add_algebraic_constraints([assert_zero(v(\"x\") + v(\"y\")), assert_zero(v(\"z\") - c(5))]);\n\n    // Replacement has \"x + y\" twice, should get reduced to just a single one.\n    let replacements = vec![ReplacementAction {\n        replace: vec![v(\"x\") + v(\"y\"), v(\"x\") + v(\"y\")],\n        replace_by: vec![v(\"a\")],\n    }];\n\n    let result = batch_replace_algebraic_constraints(&mut system, replacements, None);\n\n    assert!(result, \"Replacement should succeed\");\n    expect![[r#\"\n        z - 5 = 0\n        a = 0\"#]]\n    .assert_eq(\n        &system\n            .system()\n            .algebraic_constraints\n            .iter()\n            .format(\"\\n\")\n            .to_string(),\n    );\n}\n\n#[test]\nfn test_batch_replace_with_duplicate_constraints3() {\n    let mut system: IndexedConstraintSystem<BabyBearField, String> =\n        IndexedConstraintSystem::default();\n\n    system.add_algebraic_constraints([\n        // x + y is contained twice, both should be replaced.\n        assert_zero(v(\"x\") + v(\"y\")),\n        assert_zero(v(\"x\") + v(\"y\")),\n        assert_zero(v(\"z\") - c(5)),\n    ]);\n\n    let replacements = vec![ReplacementAction {\n        replace: vec![v(\"x\") + v(\"y\")],\n        replace_by: vec![v(\"a\")],\n    }];\n\n    let result = batch_replace_algebraic_constraints(&mut system, replacements, None);\n\n    assert!(result, \"Replacement should succeed\");\n    expect![[r#\"\n        z - 5 = 0\n        a = 0\"#]]\n    .assert_eq(\n        &system\n            .system()\n            .algebraic_constraints\n            .iter()\n            .format(\"\\n\")\n            .to_string(),\n    );\n}\n\n#[test]\nfn test_batch_replace_with_conflict() {\n    let mut system: 
IndexedConstraintSystem<BabyBearField, String> =\n        IndexedConstraintSystem::default();\n\n    system.add_algebraic_constraints([assert_zero(v(\"x\") + v(\"y\")), assert_zero(v(\"z\") - c(5))]);\n\n    // both actions need \"x + y\", only the first can proceed\n    let replacements = vec![\n        ReplacementAction {\n            replace: vec![v(\"x\") + v(\"y\")],\n            replace_by: vec![v(\"a\")],\n        },\n        ReplacementAction {\n            replace: vec![v(\"x\") + v(\"y\"), v(\"z\") - c(5)],\n            replace_by: vec![v(\"b\")],\n        },\n    ];\n\n    let result = batch_replace_algebraic_constraints(&mut system, replacements, None);\n\n    assert!(result, \"Replacement should succeed\");\n    expect![[r#\"\n        z - 5 = 0\n        a = 0\"#]]\n    .assert_eq(\n        &system\n            .system()\n            .algebraic_constraints\n            .iter()\n            .format(\"\\n\")\n            .to_string(),\n    );\n\n    let mut system: IndexedConstraintSystem<BabyBearField, String> =\n        IndexedConstraintSystem::default();\n\n    system.add_algebraic_constraints([assert_zero(v(\"x\") + v(\"y\")), assert_zero(v(\"z\") - c(5))]);\n    // both actions need \"x + y\", only the first can proceed, now reverse order.\n    let replacements = vec![\n        ReplacementAction {\n            replace: vec![v(\"x\") + v(\"y\"), v(\"z\") - c(5)],\n            replace_by: vec![v(\"b\")],\n        },\n        ReplacementAction {\n            replace: vec![v(\"x\") + v(\"y\")],\n            replace_by: vec![v(\"a\")],\n        },\n    ];\n\n    let result = batch_replace_algebraic_constraints(&mut system, replacements, None);\n\n    assert!(result, \"Replacement should succeed\");\n    expect![\"b = 0\"].assert_eq(\n        &system\n            .system()\n            .algebraic_constraints\n            .iter()\n            .format(\"\\n\")\n            .to_string(),\n    );\n}\n\n#[test]\nfn 
test_rule_split_constraints_based_on_minimal_range() {\n    let mut system = IndexedConstraintSystem::default();\n    //opcode_sub_flag_21 + 2 * opcode_xor_flag_21 + 3 * opcode_or_flag_21 + 4 * opcode_and_flag_21 = 0\n    system.add_algebraic_constraints([assert_zero(\n        v(\"opcode_sub_flag_21\")\n            + c(2) * v(\"opcode_xor_flag_21\")\n            + c(3) * v(\"opcode_or_flag_21\")\n            + c(4) * v(\"opcode_and_flag_21\"),\n    )]);\n\n    let range_constraints = std::collections::HashMap::from([\n        (\"opcode_sub_flag_21\", RangeConstraint::from_mask(0x1u32)),\n        (\"opcode_xor_flag_21\", RangeConstraint::from_mask(0x1u32)),\n        (\"opcode_or_flag_21\", RangeConstraint::from_mask(0x1u32)),\n        (\"opcode_and_flag_21\", RangeConstraint::from_mask(0x1u32)),\n    ]);\n\n    let mut solver = crate::solver::new_solver(\n        system.clone().into(),\n        DefaultBusInteractionHandler::default(),\n    );\n    #[allow(clippy::iter_over_hash_type)]\n    for (var, constraint) in range_constraints {\n        solver.add_range_constraint(&var.to_string(), constraint);\n    }\n\n    let optimized_system = rule_based_optimization(\n        system,\n        solver,\n        DefaultBusInteractionHandler::default(),\n        &mut new_var(),\n        None,\n    );\n    assert_eq!(optimized_system.0.system().algebraic_constraints.len(), 0);\n}\n\n#[test]\nfn one_hot_flags() {\n    let mut system = IndexedConstraintSystem::default();\n    //opcode_sub_flag_21 + 2 * opcode_xor_flag_21 + 3 * opcode_or_flag_21 + 4 * opcode_and_flag_21 = 0\n    system.add_algebraic_constraints([\n        // Boolean flags\n        assert_zero(v(\"flag0\") * (v(\"flag0\") - c(1))),\n        assert_zero(v(\"flag1\") * (v(\"flag1\") - c(1))),\n        assert_zero(v(\"flag2\") * (v(\"flag2\") - c(1))),\n        assert_zero(v(\"flag3\") * (v(\"flag3\") - c(1))),\n        // Exactly one flag is active\n        assert_zero(v(\"flag0\") + v(\"flag1\") + v(\"flag2\") + 
v(\"flag3\") - c(1)),\n        // Flag 2 is active\n        assert_zero(\n            v(\"flag0\") * c(0) + v(\"flag1\") * c(1) + v(\"flag2\") * c(2) + v(\"flag3\") * c(3) - c(2),\n        ),\n        assert_zero(v(\"flag0\") * (v(\"x\") - v(\"y\"))),\n        assert_zero(v(\"flag2\") * (v(\"r\") - v(\"t\"))),\n    ]);\n\n    let optimized_system = rule_based_optimization(\n        system,\n        NoRangeConstraints,\n        DefaultBusInteractionHandler::default(),\n        &mut new_var(),\n        None,\n    );\n\n    expect![\"r - t = 0\"].assert_eq(&optimized_system.0.to_string());\n}\n"
  },
  {
    "path": "constraint-solver/src/rule_based_optimizer/types.rs",
    "content": "use std::fmt::Display;\n\nuse derive_more::{From, Into};\nuse powdr_number::FieldElement;\n\nuse crate::range_constraint::RangeConstraint;\n\n#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, From, Into)]\npub struct Var(usize);\n\nimpl Display for Var {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"v_{}\", self.0)\n    }\n}\n\n#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, From, Into)]\npub struct Expr(usize);\n\n#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]\npub enum Action<T: FieldElement> {\n    UpdateRangeConstraintOnVar(Var, RangeConstraint<T>),\n    SubstituteVariableByConstant(Var, T),\n    /// Substitute the first variable by the second.\n    SubstituteVariableByVariable(Var, Var),\n    /// Replace one algebraic constraint by another.\n    ReplaceAlgebraicConstraintBy(Expr, Expr),\n}\n\n/// Replace a list of algebraic constraints by another list of\n/// algebraic constraints. We use an array of Option instead of\n/// a Vec because this type needs to be `Copy`.\n/// This is a separate type from `Action` because it is much larger.\n#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]\npub struct ReplaceConstraintsAction {\n    /// The constraints to be replaced. Up to 10, increase the size if needed.\n    pub to_replace: [Option<Expr>; 10],\n    /// The constraints to replace by. Up to 5, increase the size if needed.\n    pub replace_by: [Option<Expr>; 5],\n}\n"
  },
  {
    "path": "constraint-solver/src/runtime_constant.rs",
    "content": "use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub};\n\nuse num_traits::{One, Zero};\nuse powdr_number::FieldElement;\n\nuse crate::range_constraint::RangeConstraint;\n\n/// Represents a run-time constant in the constraint solver, built over\n/// a base field type.\n/// The base field type itself (i.e. any T: FieldElement) represents a run-time constant\n/// (which is also a compile-time constant), but the trait lets us represent run-time\n/// constants symbolically as well.\npub trait RuntimeConstant:\n    Sized\n    + Neg<Output = Self>\n    + Clone\n    + From<Self::FieldType>\n    + Add<Output = Self>\n    + AddAssign<Self>\n    + Sub<Output = Self>\n    + Mul<Output = Self>\n    + MulAssign<Self>\n    + PartialEq\n    + Eq\n    + Zero\n    + One\n{\n    type FieldType: FieldElement;\n\n    /// Tries to convert the constant to a single number. This always works for compile-time constants.\n    fn try_to_number(&self) -> Option<Self::FieldType>;\n\n    /// Returns the range constraint for this constant. 
For compile-time constants,\n    /// this will be a single value range constraint.\n    fn range_constraint(&self) -> RangeConstraint<Self::FieldType>;\n\n    /// Divides this constant by another constant, returning a new constant.\n    fn field_div(&self, other: &Self) -> Self {\n        self.clone() * other.field_inverse()\n    }\n\n    /// Returns the multiplicative inverse of this constant.\n    fn field_inverse(&self) -> Self;\n\n    /// Converts a u64 to a run-time constant.\n    fn from_u64(k: u64) -> Self {\n        Self::from(Self::FieldType::from(k))\n    }\n\n    /// Returns whether this constant is known to be zero at compile time.\n    fn is_known_zero(&self) -> bool {\n        self.try_to_number().is_some_and(|n| n.is_zero())\n    }\n\n    /// Returns whether this constant is known to be one at compile time.\n    fn is_known_one(&self) -> bool {\n        self.try_to_number().is_some_and(|n| n.is_one())\n    }\n\n    /// Returns whether this constant is known to be -1 at compile time.\n    fn is_known_minus_one(&self) -> bool {\n        self.try_to_number()\n            .is_some_and(|n| n == -Self::FieldType::from(1))\n    }\n\n    /// Returns whether this constant is known to be non-zero at compile time.\n    /// Note that this could return true even if the constant is not known fully\n    /// at compile time, but it is guaranteed that the constant is not zero.\n    fn is_known_nonzero(&self) -> bool {\n        // Only checking range constraint is enough since if this is a known\n        // fixed value, we will get a range constraint with just a single value.\n        !self.range_constraint().allows_value(0.into())\n    }\n}\n\npub trait Substitutable<V> {\n    /// Substitutes a variable with another constant.\n    fn substitute(&mut self, variable: &V, substitution: &Self);\n}\n\n/// Provides a function to transform the type of variables in an expression.\n/// The expectation is that the variable transformation function is injective, i.e.\n/// two 
different variables cannot become equal through the transformation.\npub trait VarTransformable<V1, V2> {\n    type Transformed;\n\n    /// Transforms `self` by applying the `var_transform` function to all variables.\n    fn transform_var_type(&self, var_transform: &mut impl FnMut(&V1) -> V2) -> Self::Transformed {\n        self.try_transform_var_type(&mut |v| Some(var_transform(v)))\n            .unwrap()\n    }\n\n    fn try_transform_var_type(\n        &self,\n        var_transform: &mut impl FnMut(&V1) -> Option<V2>,\n    ) -> Option<Self::Transformed>;\n}\n\nimpl<T: FieldElement> RuntimeConstant for T {\n    type FieldType = T;\n\n    fn try_to_number(&self) -> Option<Self> {\n        Some(*self)\n    }\n\n    fn range_constraint(&self) -> RangeConstraint<Self::FieldType> {\n        RangeConstraint::from_value(*self)\n    }\n\n    fn field_div(&self, other: &Self) -> Self {\n        *self / *other\n    }\n\n    fn field_inverse(&self) -> Self {\n        T::from(1) / *self\n    }\n}\n\nimpl<T: FieldElement, V> Substitutable<V> for T {\n    fn substitute(&mut self, _variable: &V, _substitution: &Self) {\n        // No-op for numbers.\n    }\n}\n\nimpl<T: FieldElement, V1, V2> VarTransformable<V1, V2> for T {\n    type Transformed = T;\n\n    fn transform_var_type(&self, _var_transform: &mut impl FnMut(&V1) -> V2) -> Self::Transformed {\n        // No variables to transform.\n        *self\n    }\n\n    fn try_transform_var_type(\n        &self,\n        _var_transform: &mut impl FnMut(&V1) -> Option<V2>,\n    ) -> Option<Self::Transformed> {\n        // No variables to transform.\n        Some(*self)\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/solver/base.rs",
    "content": "use derivative::Derivative;\nuse itertools::Itertools;\nuse powdr_number::FieldElement;\n\nuse crate::constraint_system::{\n    AlgebraicConstraint, BusInteraction, BusInteractionHandler, ConstraintRef,\n};\nuse crate::effect::Effect;\nuse crate::grouped_expression::{GroupedExpression, RangeConstraintProvider};\nuse crate::indexed_constraint_system::IndexedConstraintSystemWithQueue;\nuse crate::range_constraint::RangeConstraint;\nuse crate::solver::boolean_extractor::BooleanExtractor;\nuse crate::solver::constraint_splitter::try_split_constraint;\nuse crate::solver::linearizer::Linearizer;\nuse crate::solver::var_transformation::Variable;\nuse crate::solver::{exhaustive_search, Error, Solver, VariableAssignment};\nuse crate::utils::possible_concrete_values;\n\nuse std::collections::{BTreeSet, HashMap, HashSet};\nuse std::fmt::Display;\nuse std::hash::Hash;\nuse std::iter::once;\n\n/// Given a list of constraints, tries to derive as many variable assignments as possible.\n///\n/// It contains two main components that transform constraints: The boolean extractor and the linearizer.\n///\n/// The boolean extractor is run first and tries to turn quadratic constraints into affine constraints by\n/// introducing new boolean variables.\n///\n/// The linearizer is run second and replaces all non-affine sub-components of constraints by new variables.\n/// It also replaces bus interaction fields by new variables.\n///\n/// For both of these transforming components, the original constraints are also kept unmodified.\npub struct BaseSolver<T: FieldElement, V, BusInterHandler, VarDisp> {\n    /// The constraint system to solve. 
During the solving process, any expressions will\n    /// be simplified as much as possible.\n    constraint_system: IndexedConstraintSystemWithQueue<T, V>,\n    /// The handler for bus interactions.\n    bus_interaction_handler: BusInterHandler,\n    /// The currently known range constraints of the variables.\n    range_constraints: RangeConstraints<T, V>,\n    /// The concrete variable assignments or replacements that were derived for variables\n    /// that do not occur in the constraints any more.\n    /// This is cleared with every call to `solve()`.\n    assignments_to_return: Vec<VariableAssignment<T, V>>,\n    /// A cache of expressions that are equivalent to a given expression.\n    equivalent_expressions_cache: HashMap<GroupedExpression<T, V>, Vec<GroupedExpression<T, V>>>,\n    /// A dispenser for fresh variables.\n    var_dispenser: VarDisp,\n    /// The boolean extraction component.\n    boolean_extractor: BooleanExtractor<T, V>,\n    /// The linearizing component.\n    linearizer: Linearizer<T, V>,\n}\n\npub trait VarDispenser<V> {\n    /// Returns a fresh new variable of kind \"boolean\".\n    fn next_boolean(&mut self) -> V;\n\n    /// Returns a fresh new variable of kind \"linear\".\n    fn next_linear(&mut self) -> V;\n\n    /// Returns an iterator over all variables of kind \"linear\" dispensed in the past.\n    fn all_linearized_vars(&self) -> impl Iterator<Item = V>;\n}\n\n#[derive(Default)]\npub struct VarDispenserImpl {\n    next_boolean_id: usize,\n    next_linearized_id: usize,\n}\n\nimpl<V> VarDispenser<Variable<V>> for VarDispenserImpl {\n    fn next_boolean(&mut self) -> Variable<V> {\n        let id = self.next_boolean_id;\n        self.next_boolean_id += 1;\n        Variable::Boolean(id)\n    }\n\n    fn next_linear(&mut self) -> Variable<V> {\n        let id = self.next_linearized_id;\n        self.next_linearized_id += 1;\n        Variable::Linearized(id)\n    }\n\n    /// Returns an iterator over all linearized variables dispensed 
in the past.\n    fn all_linearized_vars(&self) -> impl Iterator<Item = Variable<V>> {\n        (0..self.next_linearized_id).map(Variable::Linearized)\n    }\n}\n\nimpl<T: FieldElement, V, B, VD: Default> BaseSolver<T, V, B, VD> {\n    pub fn new(bus_interaction_handler: B) -> Self {\n        BaseSolver {\n            constraint_system: Default::default(),\n            range_constraints: Default::default(),\n            assignments_to_return: Default::default(),\n            equivalent_expressions_cache: Default::default(),\n            var_dispenser: Default::default(),\n            boolean_extractor: Default::default(),\n            linearizer: Default::default(),\n            bus_interaction_handler,\n        }\n    }\n}\n\nimpl<T, V, BusInter, VD> RangeConstraintProvider<T, V> for BaseSolver<T, V, BusInter, VD>\nwhere\n    V: Clone + Hash + Eq,\n    T: FieldElement,\n{\n    fn get(&self, var: &V) -> RangeConstraint<T> {\n        self.range_constraints.get(var)\n    }\n}\n\nimpl<T: FieldElement + Display, V: Clone + Ord + Hash + Display, BusInter, VD> Display\n    for BaseSolver<T, V, BusInter, VD>\n{\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.constraint_system)\n    }\n}\n\nimpl<T, V, BusInter: BusInteractionHandler<T>, VD: VarDispenser<V>> Solver<T, V>\n    for BaseSolver<T, V, BusInter, VD>\nwhere\n    V: Ord + Clone + Hash + Eq + Display,\n    T: FieldElement,\n{\n    fn solve(&mut self) -> Result<Vec<VariableAssignment<T, V>>, Error> {\n        self.equivalent_expressions_cache.clear();\n        self.loop_until_no_progress()?;\n        let assignments = std::mem::take(&mut self.assignments_to_return);\n        // Apply the deduced assignments to the substitutions we performed\n        // while linearizing and boolean extracting.\n        // We assume that the user of the solver applies the assignments to\n        // their expressions and thus \"incoming\" expressions used in the functions\n      
  // `range_constraint_for_expression` and `are_expressions_known_to_be_different`\n        // will have the assignments applied.\n        self.linearizer.apply_assignments(&assignments);\n        self.boolean_extractor.apply_assignments(&assignments);\n        Ok(assignments)\n    }\n\n    fn add_algebraic_constraints(\n        &mut self,\n        constraints: impl IntoIterator<Item = AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) {\n        self.equivalent_expressions_cache.clear();\n\n        let constraints = constraints\n            .into_iter()\n            .filter(|c| !c.is_redundant())\n            .flat_map(|constr| {\n                self.try_extract_boolean(constr.as_ref())\n                    .into_iter()\n                    .chain(std::iter::once(constr))\n            })\n            // needed because of unique access to the var dispenser / self.\n            .collect_vec()\n            .into_iter()\n            .flat_map(|constr| self.linearize_constraint(constr))\n            .collect_vec();\n\n        self.constraint_system\n            .add_algebraic_constraints(constraints.into_iter().filter(|c| !c.is_redundant()));\n    }\n\n    fn add_bus_interactions(\n        &mut self,\n        bus_interactions: impl IntoIterator<Item = BusInteraction<GroupedExpression<T, V>>>,\n    ) {\n        self.equivalent_expressions_cache.clear();\n        let mut constraints_to_add = vec![];\n        let bus_interactions = bus_interactions\n            .into_iter()\n            .map(|bus_interaction| {\n                self.linearize_bus_interaction(bus_interaction, &mut constraints_to_add)\n            })\n            .collect_vec();\n        // We only substituted by a variable, but the substitution was not yet linearized.\n        self.add_algebraic_constraints(constraints_to_add);\n        self.constraint_system\n            .add_bus_interactions(bus_interactions);\n    }\n\n    fn add_range_constraint(&mut self, variable: &V, constraint: 
RangeConstraint<T>) {\n        self.equivalent_expressions_cache.clear();\n        self.apply_range_constraint_update(variable, constraint);\n    }\n\n    fn retain_variables(&mut self, variables_to_keep: &HashSet<V>) {\n        self.equivalent_expressions_cache.clear();\n        assert!(self.assignments_to_return.is_empty());\n\n        // There are constraints that only contain `Variable::Linearized` that\n        // connect quadratic terms with the original constraints. We could try to find\n        // those, but let's just keep all of them for now.\n        let mut variables_to_keep = variables_to_keep.clone();\n        variables_to_keep.extend(self.var_dispenser.all_linearized_vars());\n\n        self.constraint_system.retain_algebraic_constraints(|c| {\n            c.referenced_unknown_variables()\n                .any(|v| variables_to_keep.contains(v))\n        });\n        self.constraint_system\n            .retain_bus_interactions(|bus_interaction| {\n                bus_interaction\n                    .referenced_unknown_variables()\n                    .any(|v| variables_to_keep.contains(v))\n            });\n        let remaining_variables = self\n            .constraint_system\n            .system()\n            .referenced_unknown_variables()\n            .collect::<HashSet<_>>();\n        self.range_constraints\n            .range_constraints\n            .retain(|v, _| remaining_variables.contains(v));\n    }\n\n    fn range_constraint_for_expression(\n        &self,\n        expr: &GroupedExpression<T, V>,\n    ) -> RangeConstraint<T> {\n        self.linearizer\n            .internalized_versions_of_expression(expr)\n            .fold(RangeConstraint::default(), |acc, expr| {\n                acc.conjunction(&expr.range_constraint(self))\n            })\n    }\n\n    fn try_to_equivalent_constant(&self, expr: &GroupedExpression<T, V>) -> Option<T> {\n        self.linearizer\n            .internalized_versions_of_expression(expr)\n            
.filter_map(|e| e.try_to_number())\n            .next()\n    }\n\n    fn are_expressions_known_to_be_different(\n        &mut self,\n        a: &GroupedExpression<T, V>,\n        b: &GroupedExpression<T, V>,\n    ) -> bool {\n        if let (Some(a), Some(b)) = (a.try_to_known(), b.try_to_known()) {\n            return a != b;\n        }\n        let equivalent_to_a = self.equivalent_expressions(a);\n        let equivalent_to_b = self.equivalent_expressions(b);\n        equivalent_to_a\n            .iter()\n            .cartesian_product(&equivalent_to_b)\n            .any(|(a_eq, b_eq)| {\n                possible_concrete_values(&(a_eq - b_eq), self, 20)\n                    .is_some_and(|mut values| values.all(|value| !value.is_zero()))\n            })\n    }\n}\n\nimpl<T, V, BusInter: BusInteractionHandler<T>, VD: VarDispenser<V>> BaseSolver<T, V, BusInter, VD>\nwhere\n    V: Ord + Clone + Hash + Eq + Display,\n    T: FieldElement,\n{\n    /// Tries to perform boolean extraction on `constr`, i.e. tries to turn quadratic constraints into affine constraints\n    /// by introducing new boolean variables.\n    fn try_extract_boolean(\n        &mut self,\n        constr: AlgebraicConstraint<&GroupedExpression<T, V>>,\n    ) -> Option<AlgebraicConstraint<GroupedExpression<T, V>>> {\n        let result = self\n            .boolean_extractor\n            .try_extract_boolean(constr, || self.var_dispenser.next_boolean())?;\n        if let Some(var) = result.new_unconstrained_boolean_variable {\n            // If we created a boolean variable, we constrain it to be boolean.\n            self.add_range_constraint(&var, RangeConstraint::from_mask(1));\n        }\n        Some(result.constraint)\n    }\n\n    /// Performs linearization of `constr`, i.e. 
replaces all non-affine sub-components of the constraint\n    /// by new variables.\n    /// This function will always return the original constraint as well as the linearized constraints\n    /// and equivalences needed after linearization.\n    fn linearize_constraint(\n        &mut self,\n        constr: AlgebraicConstraint<GroupedExpression<T, V>>,\n    ) -> impl Iterator<Item = AlgebraicConstraint<GroupedExpression<T, V>>> {\n        let mut constrs = vec![constr.clone()];\n        if !constr.expression.is_affine() {\n            let linearized = self.linearizer.linearize_expression(\n                constr.expression,\n                &mut || self.var_dispenser.next_linear(),\n                &mut constrs,\n            );\n            constrs.push(AlgebraicConstraint::assert_zero(linearized));\n        }\n        constrs.into_iter()\n    }\n\n    /// Replaces all bus interaction fields by new variables.\n    /// Adds the equality constraint to `constraint_collection` and returns the modified\n    /// bus interaction.\n    ///\n    /// Note that the constraints added to `constraint_collection` are not yet boolean-extracted or linearized.\n    fn linearize_bus_interaction(\n        &mut self,\n        bus_interaction: BusInteraction<GroupedExpression<T, V>>,\n        constraint_collection: &mut Vec<AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) -> BusInteraction<GroupedExpression<T, V>> {\n        bus_interaction\n            .fields()\n            .map(|expr| {\n                self.linearizer.substitute_by_var(\n                    expr.clone(),\n                    &mut || self.var_dispenser.next_linear(),\n                    constraint_collection,\n                )\n            })\n            .collect()\n    }\n}\n\nimpl<T, V, BusInter: BusInteractionHandler<T>, VD> BaseSolver<T, V, BusInter, VD>\nwhere\n    V: Ord + Clone + Hash + Eq + Display,\n    T: FieldElement,\n    VD: VarDispenser<V>,\n{\n    fn loop_until_no_progress(&mut self) -> 
Result<(), Error> {\n        loop {\n            let mut progress = false;\n            // Try solving constraints in isolation.\n            progress |= self.solve_in_isolation()?;\n\n            if !progress {\n                // This might be expensive, so we only do it if we made no progress\n                // in the previous steps.\n                progress |= self.exhaustive_search()?;\n            }\n\n            if !progress {\n                break;\n            }\n        }\n        Ok(())\n    }\n\n    /// Tries to make progress by solving each constraint in isolation.\n    fn solve_in_isolation(&mut self) -> Result<bool, Error> {\n        let mut progress = false;\n        while let Some(item) = self.constraint_system.pop_front() {\n            let effects = match item {\n                ConstraintRef::AlgebraicConstraint(c) => {\n                    if let Some((v1, expr)) = try_to_simple_equivalence(c) {\n                        self.apply_assignment(&v1, &expr);\n                        continue;\n                    }\n                    let effects = c\n                        .solve(&self.range_constraints)\n                        .map_err(Error::AlgebraicSolverError)?\n                        .effects;\n                    if let Some(components) = try_split_constraint(&c, &self.range_constraints) {\n                        progress |= self.add_algebraic_constraints_if_new(components);\n                    }\n                    effects\n                }\n                ConstraintRef::BusInteraction(b) => b\n                    .solve(&self.bus_interaction_handler, &self.range_constraints)\n                    .map_err(|_| Error::BusInteractionError)?,\n            };\n            for effect in effects {\n                progress |= self.apply_effect(effect);\n            }\n        }\n        Ok(progress)\n    }\n\n    /// Find groups of variables with a small set of possible assignments.\n    /// For each group, performs an exhaustive 
search in the possible assignments\n    /// to deduce new range constraints (also on other variables).\n    /// This might be expensive.\n    fn exhaustive_search(&mut self) -> Result<bool, Error> {\n        log::debug!(\"Starting exhaustive search...\");\n        let mut variable_sets =\n            exhaustive_search::get_brute_force_candidates(self.constraint_system.system(), &*self)\n                .collect_vec();\n        // Start with small sets to make larger ones redundant after some assignments.\n        variable_sets.sort_by_key(|set| set.len());\n\n        log::debug!(\n            \"Found {} sets of variables with few possible assignments. Checking each set...\",\n            variable_sets.len()\n        );\n\n        let mut progress = false;\n        let mut unsuccessful_variable_sets = BTreeSet::new();\n\n        for mut variable_set in variable_sets {\n            variable_set.retain(|v| {\n                self.range_constraints\n                    .get(v)\n                    .try_to_single_value()\n                    .is_none()\n            });\n            if unsuccessful_variable_sets.contains(&variable_set) {\n                // It can happen that we process the same variable set twice because\n                // assignments can make previously different sets equal.\n                // We have processed this variable set before, and it did not\n                // yield new information.\n                // It could be that other assignments created in the meantime\n                // lead to progress but this is rare and we will catch it in the\n                // next loop iteration.\n                continue;\n            }\n            match exhaustive_search::exhaustive_search_on_variable_set(\n                self.constraint_system.system(),\n                &variable_set,\n                &*self,\n                &self.bus_interaction_handler,\n            ) {\n                Ok(assignments) if assignments.is_empty() => {\n              
      // No new information was found.\n                    unsuccessful_variable_sets.insert(variable_set);\n                }\n                Ok(assignments) => {\n                    for (var, rc) in assignments {\n                        progress |= self.apply_range_constraint_update(&var, rc);\n                    }\n                }\n                // Might error out if a contradiction was found.\n                Err(e) => return Err(e),\n            }\n        }\n        Ok(progress)\n    }\n\n    /// Returns a vector of expressions that are equivalent to `expression`.\n    /// The vector is always non-empty, it returns at least `expression` itself.\n    fn equivalent_expressions(\n        &mut self,\n        expression: &GroupedExpression<T, V>,\n    ) -> Vec<GroupedExpression<T, V>> {\n        if expression.is_quadratic() {\n            // This case is too complicated.\n            return vec![expression.clone()];\n        }\n        if let Some(equiv) = self.equivalent_expressions_cache.get(expression) {\n            return equiv.clone();\n        }\n\n        // Go through the constraints related to this expression\n        // and try to solve for the expression\n        let mut exprs = self\n            .constraint_system\n            .system()\n            .constraints_referencing_variables(expression.referenced_unknown_variables())\n            .filter_map(|constr| match constr {\n                ConstraintRef::AlgebraicConstraint(constr) => Some(constr),\n                ConstraintRef::BusInteraction(_) => None,\n            })\n            .flat_map(|constr| constr.try_solve_for_expr(expression))\n            .collect_vec();\n        if exprs.is_empty() {\n            // If we cannot solve for the expression, we just take the expression unmodified.\n            exprs.push(expression.clone());\n        }\n        self.equivalent_expressions_cache\n            .insert(expression.clone(), exprs.clone());\n        exprs\n    }\n\n    fn 
apply_effect(&mut self, effect: Effect<T, V>) -> bool {\n        match effect {\n            Effect::Assignment(v, expr) => {\n                self.apply_assignment(&v, &GroupedExpression::from_runtime_constant(expr))\n            }\n            Effect::RangeConstraint(v, range_constraint) => {\n                self.apply_range_constraint_update(&v, range_constraint)\n            }\n            Effect::Assertion(..) => unreachable!(),\n            // There are no known-but-not-concrete variables, so we should never\n            // encounter a conditional assignment.\n            Effect::ConditionalAssignment { .. } => unreachable!(),\n        }\n    }\n\n    fn apply_range_constraint_update(\n        &mut self,\n        variable: &V,\n        range_constraint: RangeConstraint<T>,\n    ) -> bool {\n        if self.range_constraints.update(variable, &range_constraint) {\n            let new_rc = self.range_constraints.get(variable);\n            if let Some(value) = new_rc.try_to_single_value() {\n                self.apply_assignment(variable, &GroupedExpression::from_number(value));\n            } else {\n                // The range constraint was updated.\n                log::trace!(\"({variable}: {range_constraint})\");\n                self.constraint_system.variable_updated(variable);\n            }\n            true\n        } else {\n            false\n        }\n    }\n\n    fn apply_assignment(&mut self, variable: &V, expr: &GroupedExpression<T, V>) -> bool {\n        log::debug!(\"({variable} := {expr})\");\n        self.constraint_system.substitute_by_unknown(variable, expr);\n\n        let mut vars_to_boolean_constrain = vec![];\n        let new_constraints = self\n            .constraint_system\n            .system()\n            .constraints_referencing_variables(once(variable))\n            .filter_map(|constr| match constr {\n                ConstraintRef::AlgebraicConstraint(c) => Some(c),\n                ConstraintRef::BusInteraction(_) => 
None,\n            })\n            .flat_map(|constr| {\n                let result = self\n                    .boolean_extractor\n                    .try_extract_boolean(constr, &mut || self.var_dispenser.next_boolean())?;\n                vars_to_boolean_constrain.extend(result.new_unconstrained_boolean_variable);\n                Some(result.constraint)\n            })\n            .collect_vec();\n        for v in vars_to_boolean_constrain {\n            self.add_range_constraint(&v, RangeConstraint::from_mask(1));\n        }\n\n        self.add_algebraic_constraints(new_constraints);\n\n        self.assignments_to_return\n            .push((variable.clone(), expr.clone()));\n        true\n    }\n\n    /// Adds constraints that do not yet exist in the system.\n    /// Returns true if at least one new constraint was added.\n    fn add_algebraic_constraints_if_new(\n        &mut self,\n        constraints: impl IntoIterator<Item = AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) -> bool {\n        let constraints_to_add = constraints\n            .into_iter()\n            .filter(|constraint_to_add| !self.contains_algebraic_constraint(constraint_to_add))\n            .collect_vec();\n        if constraints_to_add.is_empty() {\n            false\n        } else {\n            self.add_algebraic_constraints(constraints_to_add);\n            true\n        }\n    }\n\n    /// Returns true if the system contains the given algebraic constraint.\n    fn contains_algebraic_constraint(\n        &self,\n        constraint: &AlgebraicConstraint<GroupedExpression<T, V>>,\n    ) -> bool {\n        let constraint_ref = ConstraintRef::AlgebraicConstraint(constraint.as_ref());\n        let vars = constraint.referenced_unknown_variables();\n        self.constraint_system\n            .system()\n            .constraints_referencing_variables(vars)\n            .contains(&constraint_ref)\n    }\n}\n\n/// If the constraint is equivalent to `X = Y` for some variables `X` and 
`Y`,\n/// returns the \"larger\" variable and the result of solving the constraint\n/// for the variable.\n///\n/// Note: Does not find all cases of equivalence.\nfn try_to_simple_equivalence<T: FieldElement, V: Clone + Ord + Eq>(\n    constr: AlgebraicConstraint<&GroupedExpression<T, V>>,\n) -> Option<(V, GroupedExpression<T, V>)> {\n    if !constr.expression.is_affine() {\n        return None;\n    }\n    if !constr.expression.constant_offset().is_zero() {\n        return None;\n    }\n    let linear = constr.expression.linear_components();\n    let [(v1, c1), (v2, c2)] = linear.collect_vec().try_into().ok()?;\n    // We have `c1 * v1 + c2 * v2 = 0`, which is equivalent to\n    // `v1 = -c2 / c1 * v2`\n    if (-*c2 / *c1).is_one() {\n        Some((\n            v2.clone(),\n            GroupedExpression::from_unknown_variable(v1.clone()),\n        ))\n    } else {\n        None\n    }\n}\n\n/// The currently known range constraints for the variables.\n#[derive(Derivative)]\n#[derivative(Default(bound = \"\"))]\npub struct RangeConstraints<T: FieldElement, V> {\n    pub range_constraints: HashMap<V, RangeConstraint<T>>,\n}\n\nimpl<T: FieldElement, V: Clone + Hash + Eq> RangeConstraintProvider<T, V>\n    for RangeConstraints<T, V>\n{\n    fn get(&self, var: &V) -> RangeConstraint<T> {\n        self.range_constraints.get(var).cloned().unwrap_or_default()\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Hash + Eq> RangeConstraints<T, V> {\n    /// Adds a new range constraint for the variable.\n    /// Returns `true` if the new combined constraint is tighter than the existing one.\n    fn update(&mut self, variable: &V, range_constraint: &RangeConstraint<T>) -> bool {\n        let existing = self.get(variable);\n        let new = existing.conjunction(range_constraint);\n        if new != existing {\n            self.range_constraints.insert(variable.clone(), new);\n            true\n        } else {\n            false\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n   
 use crate::bus_interaction_handler::DefaultBusInteractionHandler;\n    use expect_test::expect;\n\n    use super::*;\n\n    use powdr_number::GoldilocksField;\n\n    type VarName = &'static str;\n    type Var = Variable<VarName>;\n    type Qse = GroupedExpression<GoldilocksField, Var>;\n\n    fn var(name: VarName) -> Qse {\n        Qse::from_unknown_variable(Variable::Original(name))\n    }\n\n    fn constant(value: u64) -> Qse {\n        Qse::from_number(GoldilocksField::from(value))\n    }\n\n    #[test]\n    fn expression_simplification() {\n        let mut solver =\n            BaseSolver::<_, _, _, VarDispenserImpl>::new(DefaultBusInteractionHandler::default());\n        solver.add_algebraic_constraints(\n            [\n                // Boolean flags\n                var(\"flag0\") * (var(\"flag0\") - constant(1)),\n                var(\"flag1\") * (var(\"flag1\") - constant(1)),\n                var(\"flag2\") * (var(\"flag2\") - constant(1)),\n                // Exactly one flag is active\n                var(\"flag0\") + var(\"flag1\") + var(\"flag2\") - constant(1),\n                // This SHOULD simplify to `v - fp - 1`, but is currently not:\n                // https://github.com/powdr-labs/powdr/issues/3653\n                // Note that if we remove `fp` here it works: Exhaustive search figures out\n                // that v = 1 for all possible assignments of the flags.\n                var(\"v\") - var(\"fp\") - (var(\"flag0\") + var(\"flag1\") + var(\"flag2\")),\n            ]\n            .into_iter()\n            .map(AlgebraicConstraint::assert_zero),\n        );\n        solver.solve().unwrap();\n\n        expect![[r#\"\n            (flag0) * (flag0 - 1) = 0\n            flag0 - lin_0 - 1 = 0\n            (flag0) * (lin_0) = 0\n            0 = 0\n            (flag1) * (flag1 - 1) = 0\n            flag1 - lin_2 - 1 = 0\n            (flag1) * (lin_2) = 0\n            0 = 0\n            (flag2) * (flag2 - 1) = 0\n            flag2 - lin_4 - 1 = 
0\n            (flag2) * (lin_4) = 0\n            0 = 0\n            flag0 + flag1 + flag2 - 1 = 0\n            -(flag0 + flag1 + flag2 + fp - v) = 0\"#]]\n        .assert_eq(&solver.to_string());\n    }\n\n    #[test]\n    fn is_known_to_by_nonzero() {\n        let mut solver =\n            BaseSolver::<_, _, _, VarDispenserImpl>::new(DefaultBusInteractionHandler::default());\n        assert!(!solver.are_expressions_known_to_be_different(&constant(0), &constant(0)));\n        assert!(solver.are_expressions_known_to_be_different(&constant(1), &constant(0)));\n        assert!(solver.are_expressions_known_to_be_different(&constant(7), &constant(0)));\n        assert!(solver.are_expressions_known_to_be_different(&-constant(1), &constant(0)));\n\n        assert!(\n            !(solver.are_expressions_known_to_be_different(\n                &(constant(42) - constant(2) * var(\"a\")),\n                &constant(0)\n            ))\n        );\n        assert!(\n            !(solver.are_expressions_known_to_be_different(&(var(\"a\") - var(\"b\")), &constant(0)))\n        );\n\n        solver.add_range_constraint(\n            &Variable::Original(\"a\"),\n            RangeConstraint::from_range(GoldilocksField::from(3), GoldilocksField::from(4)),\n        );\n        solver.add_range_constraint(\n            &Variable::Original(\"b\"),\n            RangeConstraint::from_range(GoldilocksField::from(3), GoldilocksField::from(4)),\n        );\n        assert!(solver.are_expressions_known_to_be_different(&(var(\"a\")), &constant(0)));\n        assert!(solver.are_expressions_known_to_be_different(\n            // If we try all possible assignments of a and b, this expression\n            // can never be zero.\n            &(var(\"a\") - constant(2) * var(\"b\")),\n            &constant(0)\n        ));\n        assert!(!solver.are_expressions_known_to_be_different(\n            // Can be zero for a = 4, b = 3.\n            &(constant(3) * var(\"a\") - constant(4) * var(\"b\")),\n 
           &constant(0)\n        ));\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/solver/boolean_extractor.rs",
    "content": "use std::{cmp::min, collections::HashMap, hash::Hash};\n\nuse derivative::Derivative;\nuse itertools::Itertools;\nuse powdr_number::{FieldElement, LargeInt};\n\nuse crate::{\n    constraint_system::AlgebraicConstraint, grouped_expression::GroupedExpression,\n    indexed_constraint_system::apply_substitutions_to_expressions, solver::VariableAssignment,\n};\n\n#[derive(Derivative)]\n#[derivative(Default(bound = \"\"))]\npub struct BooleanExtractor<T, V> {\n    /// If (expr, Some(z)) is in the map, it means that\n    /// we have transformed a constraint `left * right = 0` into\n    /// `right + z * offset = 0`, where `z` is a new boolean variable\n    /// and `expr = -right / offset = z`.\n    ///\n    /// If (expr, None) is in the map, it means that\n    /// we have transformed a constraint `right * right = 0` into\n    /// `right = 0`, which is a special case where we do not need\n    /// a new boolean variable.\n    substitutions: HashMap<GroupedExpression<T, V>, Option<V>>,\n}\n\npub struct BooleanExtractionValue<T, V> {\n    pub constraint: AlgebraicConstraint<GroupedExpression<T, V>>,\n    pub new_unconstrained_boolean_variable: Option<V>,\n}\n\nimpl<T: FieldElement, V: Ord + Clone + Hash + Eq> BooleanExtractor<T, V> {\n    /// Tries to simplify a quadratic constraint by transforming it into an affine\n    /// constraint that makes use of a new boolean variable.\n    /// NOTE: The boolean constraint is not part of the output.\n    ///\n    /// Returns the new constraint and the new variable if required.\n    ///\n    /// If the same simplification has been performed before, it will\n    /// return None (in particular, it will not request a new variable).\n    ///\n    /// For example `(a + b) * (a + b + 10) = 0` can be transformed into\n    /// `a + b + z * 10 = 0`, where `z` is a new boolean variable.\n    ///\n    /// @param constraint The quadratic constraint to transform.\n    /// @param var_dispenser A function that returns a new variable 
that is assumed to be boolean-constrained.\n    /// It will only be called if the transformation is performed.\n    pub fn try_extract_boolean(\n        &mut self,\n        constraint: AlgebraicConstraint<&GroupedExpression<T, V>>,\n        mut var_dispenser: impl FnMut() -> V,\n    ) -> Option<BooleanExtractionValue<T, V>> {\n        let (left, right) = constraint.expression.try_as_single_product()?;\n        // We want to check if `left` and `right` differ by a constant offset.\n        // Since multiplying the whole constraint by a non-zero constant does\n        // not change the constraint, we also transform `left` by a constant\n        // (non-zero) factor.\n        // So we are looking for an offset `c` and a non-zero constant factor `f`\n        // such that `f * left = right + c`.\n        // Then we can write the original constraint `left * right = 0` as\n        // `(right + c) * right = 0` (we can just ignore `f`).\n        // This is in turn equivalent to `right + z * c = 0`, where `z` is\n        // a new boolean variable.\n\n        // For example, if the constraint was `(2 * a + 2 * b) * (a + b + 10) = 0`, we would\n        // set `factor = 1 / 2`, such that `left * factor - right` is a constant.\n\n        // First, try to find a good factor so that `left` and `right`\n        // likely cancel out except for a constant. 
As a good guess,\n        // we try to match the coefficient of the first variable.\n        let factor = match (\n            left.linear_components().next(),\n            right.linear_components().next(),\n        ) {\n            (Some((left_var, left_coeff)), Some((right_var, right_coeff)))\n                if left_var == right_var =>\n            {\n                *right_coeff / *left_coeff\n            }\n            _ => T::one(),\n        };\n\n        // `constr = 0` is equivalent to `left * right = 0`\n        let offset = &(left.clone() * factor) - right;\n        // We only do the transformation if `offset` is known, because\n        // otherwise the constraint stays quadratic.\n        let offset = *offset.try_to_known()?;\n        // We know that `offset + right = left` and thus\n        // `constr = 0` is equivalent to `right * (right + offset) = 0`\n        // which is equivalent to `right + z * offset = 0` for a new\n        // boolean variable `z`.\n\n        if offset.is_zero() {\n            // In this special case, we do not need a new variable.\n            if self.substitutions.contains_key(right) {\n                None\n            } else {\n                self.substitutions.insert(right.clone(), None);\n                Some(BooleanExtractionValue {\n                    constraint: AlgebraicConstraint::assert_zero(right.clone()),\n                    new_unconstrained_boolean_variable: None,\n                })\n            }\n        } else {\n            // We can substitute the initial constraint using a new boolean variable `z`\n            // either by\n            // `0 = right + z * offset`\n            // or by\n            // `0 = right + (1 - z) * offset = right + offset - z * offset`,\n            // which is equivalent to\n            // `0 = -right - offset + z * offset`.\n            // We use the one that has a smaller constant offset in the resulting expression.\n            let expr = [\n                right.clone(),\n   
             -right - GroupedExpression::from_runtime_constant(offset),\n            ]\n            .into_iter()\n            .min_by_key(|e| {\n                // Return the abs of the constant offset, or None on larger fields.\n                try_to_abs_u64(*e.constant_offset())\n            })\n            .unwrap();\n\n            let key = -&expr * (T::one() / offset);\n            if self.substitutions.contains_key(&key) {\n                // We have already performed this transformation before.\n                return None;\n            }\n\n            if key.try_to_simple_unknown().is_some() {\n                // In this case we don't gain anything because the new variable `z` will just\n                // be equivalent to the single variable in `right`.\n                None\n            } else {\n                let z = var_dispenser();\n\n                self.substitutions.insert(key, Some(z.clone()));\n\n                // We return `expr + z * offset == 0`, which is equivalent to the original constraint.\n                Some(BooleanExtractionValue {\n                    constraint: AlgebraicConstraint::assert_zero(\n                        expr + (GroupedExpression::from_unknown_variable(z.clone()) * offset),\n                    ),\n                    new_unconstrained_boolean_variable: Some(z),\n                })\n            }\n        }\n    }\n}\n\nfn try_to_abs_u64<T: FieldElement>(x: T) -> Option<u64> {\n    let modulus = T::modulus().try_into_u64()?;\n    let x = x.to_integer().try_into_u64()?;\n    Some(min(x, modulus - x))\n}\n\nimpl<T: FieldElement, V: Clone + Eq + Ord + Hash> BooleanExtractor<T, V> {\n    /// Applies the assignments to the stored substitutions.\n    pub fn apply_assignments(&mut self, assignments: &[VariableAssignment<T, V>]) {\n        if assignments.is_empty() {\n            return;\n        }\n        let (exprs, vars): (Vec<_>, Vec<_>) = self.substitutions.drain().unzip();\n        let exprs = 
apply_substitutions_to_expressions(exprs, assignments.iter().cloned());\n        self.substitutions = exprs.into_iter().zip_eq(vars).collect();\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use powdr_number::GoldilocksField;\n\n    use super::*;\n\n    type Var = &'static str;\n    type Qse = GroupedExpression<GoldilocksField, Var>;\n\n    fn var(name: Var) -> Qse {\n        Qse::from_unknown_variable(name)\n    }\n\n    fn constant(value: u64) -> Qse {\n        Qse::from_number(GoldilocksField::from(value))\n    }\n\n    #[test]\n    fn test_extract_boolean() {\n        let mut var_dispenser = || \"z\";\n        let expr = (var(\"a\") + var(\"b\")) * (var(\"a\") + var(\"b\") + constant(10));\n        let mut extractor: BooleanExtractor<_, _> = Default::default();\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser)\n            .unwrap();\n        assert_eq!(result.constraint.to_string(), \"-(a + b + 10 * z) = 0\");\n        assert_eq!(result.new_unconstrained_boolean_variable, Some(\"z\"));\n    }\n\n    #[test]\n    fn test_extract_boolean_square() {\n        let mut var_dispenser = || \"z\";\n        let expr = (var(\"a\") + var(\"b\")) * (var(\"a\") + var(\"b\"));\n        let mut extractor: BooleanExtractor<_, _> = Default::default();\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser)\n            .unwrap();\n        assert_eq!(result.constraint.to_string(), \"a + b = 0\");\n        assert_eq!(result.new_unconstrained_boolean_variable, None);\n    }\n\n    #[test]\n    fn test_extract_boolean_useless() {\n        let mut var_dispenser = || \"z\";\n        let expr = (var(\"a\") - constant(1)) * (var(\"a\"));\n        let mut extractor: BooleanExtractor<_, _> = Default::default();\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser);\n        
assert!(result.is_none());\n\n        let expr = (constant(2) * var(\"a\") - constant(2)) * (constant(2) * var(\"a\"));\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser);\n        assert!(result.is_none());\n    }\n\n    #[test]\n    fn do_not_extract_twice() {\n        let mut var_dispenser = || \"z\";\n        let expr = (var(\"a\") + var(\"b\")) * (var(\"a\") + var(\"b\") + constant(10));\n        let mut extractor: BooleanExtractor<_, _> = Default::default();\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser)\n            .unwrap();\n        assert_eq!(result.constraint.to_string(), \"-(a + b + 10 * z) = 0\");\n        assert_eq!(result.new_unconstrained_boolean_variable, Some(\"z\"));\n\n        assert!(extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser)\n            .is_none());\n\n        // left and right swapped\n        assert!(extractor\n            .try_extract_boolean(\n                AlgebraicConstraint::assert_zero(\n                    &(var(\"a\") + var(\"b\") + constant(10) * (var(\"a\") + var(\"b\")))\n                ),\n                &mut var_dispenser\n            )\n            .is_none());\n\n        let expr2 = (constant(2) * (var(\"a\") + var(\"b\"))) * (var(\"a\") + var(\"b\") + constant(10));\n        assert!(extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr2), &mut var_dispenser)\n            .is_none());\n\n        let expr3 = (var(\"a\") + var(\"b\")) * (constant(2) * (var(\"a\") + var(\"b\") + constant(10)));\n        assert!(extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr3), &mut var_dispenser)\n            .is_none());\n\n        // This is different because the effective constant is different.\n        let expr4 = (var(\"a\") + var(\"b\")) * (constant(2) * 
(var(\"a\") + var(\"b\") + constant(20)));\n        assert_eq!(\n            extractor\n                .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr4), &mut var_dispenser)\n                .unwrap()\n                .constraint\n                .to_string(),\n            \"-(2 * a + 2 * b + 40 * z) = 0\"\n        );\n    }\n\n    #[test]\n    fn do_not_extract_squares_twice() {\n        let mut var_dispenser = || \"z\";\n        let expr = (var(\"a\") + var(\"b\")) * (var(\"a\") + var(\"b\"));\n        let mut extractor: BooleanExtractor<_, _> = Default::default();\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser)\n            .unwrap();\n\n        assert_eq!(result.constraint.to_string(), \"a + b = 0\");\n        assert_eq!(result.new_unconstrained_boolean_variable, None);\n\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser);\n        assert!(result.is_none());\n    }\n\n    #[test]\n    fn apply_assignments() {\n        let mut counter = 0;\n        let vars = (0..10).map(|i| format!(\"z_{i}\")).collect_vec();\n        let mut var_dispenser = || {\n            counter += 1;\n            vars[counter - 1].as_str()\n        };\n        let expr =\n            (var(\"a\") + var(\"b\") + var(\"k\")) * (var(\"a\") + var(\"b\") + var(\"k\") - constant(2));\n        let mut extractor: BooleanExtractor<_, _> = Default::default();\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr), &mut var_dispenser)\n            .unwrap();\n        assert_eq!(result.constraint.to_string(), \"-(a + b + k - 2 * z_0) = 0\");\n        assert_eq!(result.new_unconstrained_boolean_variable, Some(\"z_0\"));\n\n        extractor.apply_assignments(&[(\"k\", -constant(9))]);\n        let expr2 =\n            (var(\"a\") + var(\"b\") - constant(9)) * (var(\"a\") + var(\"b\") - 
constant(9) - constant(2));\n\n        let result = extractor\n            .try_extract_boolean(AlgebraicConstraint::assert_zero(&expr2), &mut var_dispenser);\n        assert!(result.is_none());\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/solver/constraint_splitter.rs",
    "content": "use std::{\n    fmt::Display,\n    ops::{Add, Div},\n};\n\nuse itertools::Itertools;\nuse num_traits::Zero;\nuse powdr_number::{FieldElement, LargeInt};\n\nuse crate::{\n    constraint_system::AlgebraicConstraint,\n    grouped_expression::{GroupedExpression, RangeConstraintProvider},\n    range_constraint::RangeConstraint,\n};\n\n/// Tries to split the given algebraic constraint into a list of equivalent\n/// algebraic constraints.\n/// This is the case for example if the variables in this expression can\n/// be split into different bit areas.\n///\n/// The core idea (which is applied multiple times) is as follows:\n///\n/// Suppose we have the constraint `x + k * y + c = 0` with `x` and `y` being\n/// variables (or expressions containing variables) and `k` and `c` are constants.\n/// Furthermore, the range constraints of `x` and `y` are such that no wrapping\n/// occurs in the operations, i.e. the constraint is equivalent to the same\n/// constraint in the natural numbers.\n///\n/// Then the same constraint is also true modulo `k`, where we get\n/// `x % k + c % k = 0`. If this equation has a unique solution `s` in the range\n/// constraints for `x`, we get a new constraint `x - s = 0`. 
We can subtract\n/// that constraint from the original to get `k * y + c - s = 0` and iterate.\npub fn try_split_constraint<T: FieldElement, V: Clone + Ord + Display>(\n    constraint: &AlgebraicConstraint<&GroupedExpression<T, V>>,\n    range_constraints: &impl RangeConstraintProvider<T, V>,\n) -> Option<Vec<AlgebraicConstraint<GroupedExpression<T, V>>>> {\n    let expression = constraint.expression;\n    if expression.is_quadratic() {\n        // We cannot split quadratic constraints.\n        return None;\n    }\n    if expression\n        .linear_components()\n        .any(|(var, _)| range_constraints.get(var).is_unconstrained())\n    {\n        // If any variable is unconstrained, we cannot split.\n        return None;\n    }\n\n    let mut constant = *expression.constant_offset();\n\n    // Turn the linear part into components (\"coefficient * expression\"),\n    // and combine components with the same coefficient, ending up with\n    // components of the form \"coefficient * (var1 + var2 - var3)\".\n    let mut components = group_components_by_coefficients(\n        expression\n            .linear_components()\n            .map(|(var, coeff)| Component::try_from((var, coeff)).ok())\n            .collect::<Option<Vec<_>>>()?,\n    )\n    .collect_vec();\n    if components.len() < 2 {\n        return None;\n    }\n\n    // The original constraint is equivalent to `sum of components + constant = 0`\n\n    // Now try to split out each component in turn, modifying `components`\n    // and `constant` for every successful split.\n    let mut extracted_parts = vec![];\n    for index in 0..components.len() {\n        let candidate = &components[index];\n        let rest = components\n            .iter()\n            .enumerate()\n            // Filter out the candidate itself and all zero components\n            // because we set components to zero when we extract them instead\n            // of removing them.\n            .filter(|(i, component)| *i != index && 
!component.is_zero())\n            .map(|(_, comp)| (comp.clone() / candidate.coeff).normalize())\n            .collect_vec();\n        if rest.is_empty() {\n            // Nothing to split, we are done.\n            break;\n        }\n\n        // The original constraint is equivalent to\n        // `candidate.expr + rest + constant / candidate.coeff = 0`.\n\n        // The idea is to find some `k` such that the equation has the form\n        // `expr + k * rest' + constant' = 0` and it is equivalent to\n        // the same expression in the natural numbers. Then we apply `x -> x % k` to the whole equation\n        // to obtain `expr % k + constant' % k = 0`. Finally, we check if it has a unique solution.\n\n        // We start by finding a good `k`. It is likely to work better if the factor exists\n        // in all components of `rest`, so the GCD of the coefficients of the components would\n        // be best, but we just try the smallest coefficient.\n        let smallest_coeff_in_rest = rest.iter().map(|comp| comp.coeff).min().unwrap();\n        assert_ne!(smallest_coeff_in_rest, 0.into());\n        assert!(smallest_coeff_in_rest.is_in_lower_half());\n\n        // Try to find the unique value for `candidate.expr` in this equation.\n        if let Some(solution) = find_solution(\n            &candidate.expr,\n            smallest_coeff_in_rest,\n            rest.into_iter()\n                .map(|comp| GroupedExpression::from(comp / smallest_coeff_in_rest))\n                .sum(),\n            constant / candidate.coeff,\n            range_constraints,\n        ) {\n            // We now know that `candidate.expr = solution`, so we add it to the extracted parts.\n            extracted_parts.push(AlgebraicConstraint::assert_eq(\n                candidate.expr.clone(),\n                GroupedExpression::from_number(solution),\n            ));\n            // We remove the candidate (`candidate.coeff * candidate.expr`) from the expression.\n            // To 
balance this out, we add `candidate.coeff * candidate.expr = candidate.coeff * solution`\n            // to the constant.\n            constant += solution * candidate.coeff;\n            components[index] = Zero::zero();\n        }\n    }\n    if extracted_parts.is_empty() {\n        None\n    } else {\n        // We found some independent parts, add the remaining components to the parts\n        // and return them.\n        extracted_parts.push(recombine_components(components, constant));\n        Some(extracted_parts)\n    }\n}\n\n/// Groups a sequence of components (thought of as a sum) by coefficients\n/// so that its sum does not change.\n/// Before grouping, the components are normalized such that the coefficient is always\n/// in the lower half of the field (and the expression might be negated to compensate).\n/// The list is sorted by the coefficient.\nfn group_components_by_coefficients<T: FieldElement, V: Clone + Ord + Display>(\n    components: impl IntoIterator<Item = Component<T, V>>,\n) -> impl Iterator<Item = Component<T, V>> {\n    components\n        .into_iter()\n        .map(|c| c.normalize())\n        .into_grouping_map_by(|c| c.coeff)\n        .sum()\n        .into_iter()\n        .filter(|(_, expr)| !expr.is_zero())\n        .map(|(_, comp)| comp)\n        .sorted_by_key(|comp| comp.coeff.to_integer())\n}\n\n/// If this returns `Some(x)`, then `x` is the only valid value for `expr` in the equation\n/// `expr + coefficient * rest + constant = 0`.\n/// It does not make assumptions about its inputs.\n/// We try to translate the equation to an equation in the natural numbers\n/// and try to find a unique solution.\nfn find_solution<T: FieldElement, V: Clone + Ord + Display>(\n    expr: &GroupedExpression<T, V>,\n    coefficient: T,\n    rest: GroupedExpression<T, V>,\n    constant: T,\n    range_constraints: &impl RangeConstraintProvider<T, V>,\n) -> Option<T> {\n    let expr_rc = expr.range_constraint(range_constraints);\n    let rest_rc = 
rest.range_constraint(range_constraints);\n\n    let unconstrained_range_width = RangeConstraint::<T>::unconstrained().range_width();\n    if expr_rc.range_width() == unconstrained_range_width\n        || rest_rc.range_width() == unconstrained_range_width\n    {\n        // We probably cannot translate this into the natural numbers.\n        return None;\n    }\n\n    // Both range constraints have a \"gap\". We shift the gap such that the\n    // lower bounds for both `expr` and `rest` are zero.\n    if expr_rc.range().0 != 0.into() {\n        let shift = expr_rc.range().0;\n        return find_solution(\n            &(expr - &GroupedExpression::from_number(shift)),\n            coefficient,\n            rest,\n            constant + shift,\n            range_constraints,\n        )\n        .map(|s| s + shift);\n    } else if rest_rc.range().0 != 0.into() {\n        return find_solution(\n            expr,\n            coefficient,\n            rest - GroupedExpression::from_number(rest_rc.range().0),\n            constant + coefficient * rest_rc.range().0,\n            range_constraints,\n        );\n    }\n\n    // rc(expr): [0, max_expr]\n    // rc(rest): [0, max_rest]\n    // If max_expr + k * max_rest < P, then we can translate the equation to the natural numbers:\n    // expr + k * rest = (-constant) % modulus\n\n    let max_expr = expr_rc.range().1;\n    let max_rest = rest_rc.range().1;\n\n    // Evaluate `expr + coefficient * rest` for the largest possible value\n    // and see if it wraps around in the field.\n    if max_expr.to_arbitrary_integer()\n        + coefficient.to_arbitrary_integer() * max_rest.to_arbitrary_integer()\n        >= T::modulus().to_arbitrary_integer()\n    {\n        return None;\n    }\n    // It does not wrap around, so we know that the equation can be translated to the\n    // natural numbers:\n    // expr + coefficient * rest = (-constant) % modulus\n\n    // Next, we apply `x -> x % coefficient` to both sides of the equation 
to get\n    // expr % coefficient = ((-constant) % modulus) % coefficient\n    // Note that at this point, we only get an implication, not an equivalence,\n    // but if the range constraints of `expr` only allow a unique solution,\n    // it holds unconditionally.\n\n    if max_expr.to_integer() >= coefficient.to_integer() + coefficient.to_integer() {\n        // In this case, there are always at least two solutions (ignoring masks and other\n        // constraints).\n        return None;\n    }\n\n    // TODO this only works for fields that fit 64 bits, but that is probably fine for now.\n    let rhs = T::from(\n        (-constant).to_integer().try_into_u64().unwrap()\n            % coefficient.to_integer().try_into_u64().unwrap(),\n    );\n\n    // Now we try `rhs`, `rhs + coefficient`, `rhs + 2 * coefficient`, ...\n    // But because of the check above, we can stop at `2 * coefficient`.\n    (0..=1)\n        .map(|i| rhs + T::from(i) * coefficient)\n        .filter(|candidate| expr_rc.allows_value(*candidate))\n        .exactly_one()\n        .ok()\n}\n\n/// Turns the remaining components and constant into a single constraint,\n/// i.e. 
returns an algebraic constraint that is equivalent to\n/// `sum of components + constant = 0`.\nfn recombine_components<T: FieldElement, V: Clone + Ord + Display>(\n    components: Vec<Component<T, V>>,\n    constant: T,\n) -> AlgebraicConstraint<GroupedExpression<T, V>> {\n    let remaining = components\n        .into_iter()\n        .filter(|comp| !comp.is_zero())\n        .collect_vec();\n    AlgebraicConstraint::assert_zero(match remaining.as_slice() {\n        [Component { coeff, expr }] => {\n            // if there is only one component, we normalize\n            expr + &GroupedExpression::from_number(constant / *coeff)\n        }\n        _ => {\n            remaining\n                .into_iter()\n                .map(|comp| comp.into())\n                .sum::<GroupedExpression<_, _>>()\n                + GroupedExpression::from_number(constant)\n        }\n    })\n}\n\n/// A component of a constraint. Equivalent to the expression `coeff * expr`.\n#[derive(Clone)]\nstruct Component<T, V> {\n    coeff: T,\n    expr: GroupedExpression<T, V>,\n}\n\nimpl<T: FieldElement, V: Clone + Ord + Display> Display for Component<T, V> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{} * ({})\", self.coeff, self.expr)\n    }\n}\n\nimpl<'a, T: FieldElement, V: Ord + Clone + Eq> TryFrom<(&'a V, &'a T)> for Component<T, V> {\n    type Error = ();\n    fn try_from((var, coeff): (&'a V, &'a T)) -> Result<Self, ()> {\n        let coeff = *coeff;\n        let expr = GroupedExpression::from_unknown_variable(var.clone());\n        Ok(Self { coeff, expr })\n    }\n}\n\nimpl<T: FieldElement, V: Ord + Clone + Eq> Component<T, V> {\n    /// Normalize the component such that the coefficient is positive.\n    fn normalize(self) -> Self {\n        if self.coeff.is_in_lower_half() {\n            self\n        } else {\n            Self {\n                coeff: -self.coeff,\n                expr: -self.expr,\n            }\n        }\n    
}\n}\n\nimpl<T: FieldElement, V: Ord + Clone + Eq> Add for Component<T, V> {\n    type Output = Self;\n\n    fn add(self, other: Self) -> Self {\n        assert!(self.coeff == other.coeff);\n        Self {\n            coeff: self.coeff,\n            expr: self.expr + other.expr,\n        }\n    }\n}\n\nimpl<T: FieldElement, V: Ord + Clone + Eq> Div<T> for Component<T, V> {\n    type Output = Self;\n\n    fn div(self, rhs: T) -> Self {\n        assert!(!rhs.is_zero());\n        Self {\n            coeff: self.coeff / rhs,\n            expr: self.expr,\n        }\n    }\n}\n\nimpl<T: FieldElement, V: Ord + Clone + Eq> From<Component<T, V>> for GroupedExpression<T, V> {\n    fn from(comp: Component<T, V>) -> Self {\n        comp.expr * comp.coeff\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Ord> Zero for Component<T, V> {\n    fn zero() -> Self {\n        Self {\n            coeff: T::zero(),\n            expr: GroupedExpression::zero(),\n        }\n    }\n\n    fn is_zero(&self) -> bool {\n        self.coeff.is_zero() || self.expr.is_zero()\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use std::collections::HashMap;\n\n    use expect_test::expect;\n    use itertools::Itertools;\n    use powdr_number::{BabyBearField, GoldilocksField};\n\n    use super::*;\n    use crate::range_constraint::RangeConstraint;\n\n    type Var = &'static str;\n    type Qse = GroupedExpression<GoldilocksField, Var>;\n\n    fn var(name: Var) -> Qse {\n        Qse::from_unknown_variable(name)\n    }\n\n    fn constant(value: u64) -> Qse {\n        Qse::from_number(GoldilocksField::from(value))\n    }\n\n    fn try_split<T: FieldElement, V: Clone + Ord + Display>(\n        expr: GroupedExpression<T, V>,\n        rcs: &impl RangeConstraintProvider<T, V>,\n    ) -> Option<Vec<AlgebraicConstraint<GroupedExpression<T, V>>>> {\n        try_split_constraint(&AlgebraicConstraint::assert_zero(&expr), rcs)\n    }\n\n    #[test]\n    fn split_simple() {\n        let four_bit_rc = 
RangeConstraint::from_mask(0xfu32);\n        let rcs = [\n            (\"x\", four_bit_rc),\n            (\"y\", four_bit_rc),\n            (\"a\", four_bit_rc),\n            (\"b\", four_bit_rc),\n        ]\n        .into_iter()\n        .collect::<HashMap<_, _>>();\n        let expr = var(\"x\") + var(\"y\") * constant(255) - var(\"a\") + var(\"b\") * constant(255);\n        let items = try_split(expr, &rcs).unwrap().iter().join(\", \");\n\n        expect![\"-(a - x) = 0, b + y = 0\"].assert_eq(&items);\n    }\n\n    #[test]\n    fn split_multiple() {\n        let four_bit_rc = RangeConstraint::from_mask(0xfu32);\n        let rcs = [\n            (\"x\", four_bit_rc),\n            (\"y\", four_bit_rc),\n            (\"a\", four_bit_rc),\n            (\"b\", four_bit_rc),\n            (\"r\", four_bit_rc),\n            (\"s\", four_bit_rc),\n            (\"w\", four_bit_rc),\n        ]\n        .into_iter()\n        .collect::<HashMap<_, _>>();\n        let expr = var(\"x\") + var(\"y\") * constant(50) - var(\"a\") + var(\"b\") * constant(50)\n            - var(\"r\") * constant(6000)\n            + var(\"s\") * constant(6000)\n            + var(\"w\") * constant(1200000);\n        let items = try_split(expr, &rcs).unwrap().iter().join(\"\\n\");\n        assert_eq!(\n            items,\n            \"-(a - x) = 0\nb + y = 0\n-(r - s) = 0\nw = 0\"\n        );\n    }\n\n    #[test]\n    fn split_seqz() {\n        // From the seqz instruction:\n        // (b__3_0 - b_msb_f_0) * (b_msb_f_0 + 256 - b__3_0) = 0\n        // After boolean extraction:\n        // b__3_0 - b_msb_f_0 + 256 * x = 0;\n        // or:\n        // b__3_0 - b_msb_f_0 + 256 * (1 - x) = 0;\n\n        let byte_rc = RangeConstraint::from_mask(0xffu32);\n        let bit_rc = RangeConstraint::from_mask(0x1u32);\n        let rcs = [(\"b__3_0\", byte_rc), (\"b_msb_f_0\", byte_rc), (\"x\", bit_rc)]\n            .into_iter()\n            .collect::<HashMap<_, _>>();\n        let expr1 = var(\"b__3_0\") - 
var(\"b_msb_f_0\") + constant(256) * var(\"x\");\n        let items = try_split(expr1, &rcs).unwrap().iter().join(\"\\n\");\n        assert_eq!(\n            items,\n            \"b__3_0 - b_msb_f_0 = 0\nx = 0\"\n        );\n        let expr2 = var(\"b__3_0\") - var(\"b_msb_f_0\") + constant(256) * (var(\"x\") - constant(1));\n        let items = try_split(expr2, &rcs).unwrap().iter().join(\"\\n\");\n        assert_eq!(\n            items,\n            \"b__3_0 - b_msb_f_0 = 0\nx - 1 = 0\"\n        );\n    }\n\n    #[test]\n    fn split_multiple_with_const() {\n        let four_bit_rc = RangeConstraint::from_mask(0xfu32);\n        let rcs = [\n            (\"x\", four_bit_rc),\n            (\"y\", four_bit_rc),\n            (\"a\", four_bit_rc),\n            (\"b\", four_bit_rc),\n            (\"r\", four_bit_rc),\n            (\"s\", four_bit_rc),\n            (\"w\", four_bit_rc),\n        ]\n        .into_iter()\n        .collect::<HashMap<_, _>>();\n        let expr = var(\"x\") + var(\"y\") * constant(64)\n            - var(\"a\")\n            - var(\"b\") * constant(64)\n            - var(\"r\") * constant(65536)\n            + var(\"s\") * constant(65536)\n            + var(\"w\") * constant(0x1000000)\n            - constant(5 * 0x1000000 - 6 + 64 - 5 * 65536);\n\n        let items = try_split(expr, &rcs).unwrap().iter().join(\"\\n\");\n        assert_eq!(\n            items,\n            \"-(a - x - 6) = 0\n-(b - y + 1) = 0\n-(r - s - 5) = 0\nw - 5 = 0\"\n        );\n    }\n\n    #[test]\n    fn split_limb_decomposition() {\n        let four_bit_rc = RangeConstraint::from_mask(0xfu32);\n        let rcs = [\n            (\"l0\", four_bit_rc),\n            (\"l1\", four_bit_rc),\n            (\"l2\", four_bit_rc),\n            (\"l3\", four_bit_rc),\n        ]\n        .into_iter()\n        .collect::<HashMap<_, _>>();\n        let expr = var(\"l0\")\n            + var(\"l1\") * constant(0x10)\n            + var(\"l2\") * constant(0x100)\n            + 
var(\"l3\") * constant(0x1000)\n            - constant(0x1234);\n\n        let items = try_split(expr, &rcs).unwrap().iter().join(\"\\n\");\n        assert_eq!(\n            items,\n            \"l0 - 4 = 0\nl1 - 3 = 0\nl2 - 2 = 0\nl3 - 1 = 0\"\n        );\n    }\n\n    #[test]\n    fn negated_and_unnegated() {\n        // 7864320 * a__0_12 - bool_113 + 314572801\n        // a__0_12 + 256 * bool_113 - 216\n        let byte_rc = RangeConstraint::from_mask(0xffu32);\n        let bit_rc = RangeConstraint::from_mask(0x1u32);\n        let rcs = [(\"a__0_12\", byte_rc), (\"bool_113\", bit_rc)]\n            .into_iter()\n            .collect::<HashMap<_, _>>();\n        let expr1: GroupedExpression<BabyBearField, _> =\n            -(GroupedExpression::from_unknown_variable(\"a__0_12\")\n                * GroupedExpression::from_number(BabyBearField::from(7864320))\n                - GroupedExpression::from_unknown_variable(\"bool_113\")\n                + GroupedExpression::from_number(BabyBearField::from(314572801)));\n\n        // Split `expr1` and `-expr1`, the result should be equivalent.\n        let first = try_split(expr1.clone(), &rcs)\n            .unwrap()\n            .into_iter()\n            .join(\", \");\n        expect![\"bool_113 = 0, -(a__0_12 - 216) = 0\"].assert_eq(&first);\n        let expr2 = -expr1;\n        let second = try_split(expr2, &rcs).unwrap().into_iter().join(\", \");\n        expect![\"-(bool_113) = 0, a__0_12 - 216 = 0\"].assert_eq(&second);\n    }\n\n    #[test]\n    fn wrapping_1() {\n        // -(c__1_3) + 256 * (30720 * c__0_3 - c__2_3) = 1226833928\n        let byte_rc = RangeConstraint::from_mask(0xffu32);\n        let rcs = [\n            (\"c__0_3\", byte_rc),\n            (\"c__1_3\", byte_rc),\n            (\"c__2_3\", byte_rc),\n        ]\n        .into_iter()\n        .collect::<HashMap<_, _>>();\n        let expr: GroupedExpression<BabyBearField, _> =\n            -GroupedExpression::from_unknown_variable(\"c__1_3\")\n       
         + GroupedExpression::from_number(BabyBearField::from(256))\n                    * (GroupedExpression::from_number(BabyBearField::from(30720))\n                        * GroupedExpression::from_unknown_variable(\"c__0_3\")\n                        - GroupedExpression::from_unknown_variable(\"c__2_3\"))\n                - GroupedExpression::from_number(BabyBearField::from(1226833928));\n        let result = try_split(expr.clone(), &rcs).unwrap().iter().join(\", \");\n        expect![\"-(c__1_3 - 248) = 0, c__0_3 - 157 = 0, -(c__2_3 - 30719) = 0\"].assert_eq(&result);\n\n        let mut expr = expr;\n        expr.substitute_by_known(&\"c__0_3\", &BabyBearField::from(157));\n        expr.substitute_by_known(&\"c__1_3\", &BabyBearField::from(248));\n        expr.substitute_by_known(&\"c__2_3\", &BabyBearField::from(30719));\n        assert!(expr.is_zero());\n    }\n\n    #[test]\n    fn wrapping_2() {\n        // bool_17 + 1069547521 * (a__0_0) = 943718400\n        let bit_rc = RangeConstraint::from_mask(0x1u32);\n        let rcs = [(\"bool_17\", bit_rc), (\"a__0_0\", bit_rc)]\n            .into_iter()\n            .collect::<HashMap<_, _>>();\n        let expr: GroupedExpression<BabyBearField, _> =\n            GroupedExpression::from_unknown_variable(\"bool_17\")\n                + GroupedExpression::from_number(BabyBearField::from(1069547521))\n                    * GroupedExpression::from_unknown_variable(\"a__0_0\")\n                - GroupedExpression::from_number(BabyBearField::from(943718400));\n        let result = try_split(expr.clone(), &rcs).unwrap().iter().join(\", \");\n        expect![\"bool_17 = 0, -(a__0_0 + 1) = 0\"].assert_eq(&result);\n    }\n\n    #[test]\n    fn split_at_boundary() {\n        let bit_rc = RangeConstraint::from_mask(0x1u32);\n        let limb_rc = RangeConstraint::from_mask(0x7fffu32);\n        let rcs = [\n            (\"bool_103\", bit_rc),\n            (\"to_pc_least_sig_bit_4\", bit_rc),\n            
(\"to_pc_limbs__0_4\", limb_rc),\n        ]\n        .into_iter()\n        .collect::<HashMap<_, _>>();\n        let expr: GroupedExpression<BabyBearField, _> =\n            GroupedExpression::from_unknown_variable(\"bool_103\")\n                + GroupedExpression::from_number(BabyBearField::from(30720))\n                    * (GroupedExpression::from_unknown_variable(\"to_pc_least_sig_bit_4\")\n                        + GroupedExpression::from_number(BabyBearField::from(2))\n                            * GroupedExpression::from_unknown_variable(\"to_pc_limbs__0_4\"))\n                - GroupedExpression::from_number(BabyBearField::from(30720 * 123 + 1));\n        let items = try_split(expr, &rcs).unwrap().iter().join(\", \");\n        assert_eq!(\n            items,\n            \"bool_103 - 1 = 0, to_pc_least_sig_bit_4 - 1 = 0, to_pc_limbs__0_4 - 61 = 0\"\n        );\n    }\n\n    #[test]\n    fn bit_decomposition_bug() {\n        // This tests against a bug that was present in the old bit\n        // decomposition algorithm.\n        let lin = var(\"lin\");\n        let result = var(\"result\");\n        let constr = lin.clone() - constant(4) * result.clone() - constant(4);\n        let range_constraints = HashMap::from([\n            (\"lin\", RangeConstraint::from_mask(0x8u32)),\n            (\"result\", RangeConstraint::from_mask(0x1u32)),\n        ]);\n        // We try to solve `lin - 4 * result = 4` and the problem is\n        // that we cannot assign `lin = 4 & mask` for some mask, since\n        // it needs to be assigned `8`.\n        assert!(try_split(constr, &range_constraints).is_none());\n    }\n\n    #[test]\n    fn split_fail_overlapping() {\n        let four_bit_rc = RangeConstraint::from_mask(0xfu32);\n        let rcs = [(\"x\", four_bit_rc), (\"y\", four_bit_rc)]\n            .into_iter()\n            .collect::<HashMap<_, _>>();\n        // The RC of x is not tight enough\n        let expr = var(\"x\") + var(\"y\") * constant(2);\n        
assert!(try_split(expr, &rcs).is_none());\n    }\n\n    #[test]\n    fn split_fail_not_unique() {\n        let four_bit_rc = RangeConstraint::from_mask(0xfu32);\n        let rcs = [(\"x\", four_bit_rc), (\"y\", four_bit_rc), (\"z\", four_bit_rc)]\n            .into_iter()\n            .collect::<HashMap<_, _>>();\n        // There are multiple ways to solve the modulo equation.\n        let expr = (var(\"x\") - var(\"y\")) + constant(16) * var(\"z\") - constant(1);\n        assert!(try_split(expr, &rcs).is_none());\n\n        // If we adjust the constant, it works.\n        let expr = (var(\"x\") - var(\"y\")) + constant(16) * var(\"z\") - constant(0);\n        let result = try_split(expr.clone(), &rcs).unwrap().iter().join(\", \");\n        expect![\"x - y = 0, z = 0\"].assert_eq(&result);\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/solver/exhaustive_search.rs",
    "content": "use itertools::Itertools;\nuse powdr_number::FieldElement;\nuse powdr_number::LargeInt;\n\nuse crate::constraint_system::BusInteractionHandler;\nuse crate::constraint_system::ConstraintRef;\nuse crate::effect::Effect;\nuse crate::grouped_expression::RangeConstraintProvider;\nuse crate::indexed_constraint_system::IndexedConstraintSystem;\nuse crate::range_constraint::RangeConstraint;\nuse crate::utils::{get_all_possible_assignments, has_few_possible_assignments};\n\nuse std::collections::btree_map::Entry;\nuse std::collections::{BTreeMap, BTreeSet};\nuse std::fmt::Display;\nuse std::hash::Hash;\n\nuse super::Error;\n\n/// The maximum number of possible assignments to try when doing exhaustive search.\nconst MAX_SEARCH_WIDTH: u64 = 1 << 10;\n/// The maximum range width of a variable to be considered for exhaustive search.\nconst MAX_VAR_RANGE_WIDTH: u64 = 5;\n\n/// Goes through all possible assignments for the given variables and tries no deduce\n/// new range constraints (on any variable) for each of the assignments. 
Returns the union of the obtained\n/// range constraints over all assignments.\n/// Can also return range constraints for the input variables if some of them lead\n/// to a contradiction.\n/// Returns an error if all assignments are contradictory.\npub fn exhaustive_search_on_variable_set<T: FieldElement, V: Clone + Hash + Ord + Eq + Display>(\n    constraint_system: &IndexedConstraintSystem<T, V>,\n    variables: &BTreeSet<V>,\n    range_constraints: impl RangeConstraintProvider<T, V> + Clone,\n    bus_interaction_handler: &impl BusInteractionHandler<T>,\n) -> Result<BTreeMap<V, RangeConstraint<T>>, Error> {\n    let mut new_constraints =\n        get_all_possible_assignments(variables.iter().cloned(), &range_constraints).filter_map(\n            |assignments| {\n                derive_new_range_constraints(\n                    constraint_system,\n                    assignments,\n                    &range_constraints,\n                    bus_interaction_handler,\n                )\n                .ok()\n            },\n        );\n    let Some(first_assignment_constraints) = new_constraints.next() else {\n        // No assignment satisfied the constraint system.\n        return Err(Error::ExhaustiveSearchError);\n    };\n    // Compute the disjunction of the effects of each assignment.\n    let result = new_constraints.try_fold(first_assignment_constraints, |mut acc, new_constr| {\n        for (var, rc) in &mut acc {\n            let other_rc = new_constr.get(var).cloned().unwrap_or_default();\n            *rc = rc.disjunction(&other_rc)\n        }\n        // Remove the constraints that are not better than the ones we already know.\n        acc.retain(|v, rc| range_constraints.get(v) != *rc);\n        if acc.is_empty() {\n            // Exiting early here is crucial for performance.\n            // This is not an error though, it only means we could not find an improvement.\n            return Err(());\n        }\n        Ok(acc)\n    });\n    match result 
{\n        Ok(assignments) => Ok(assignments),\n        Err(_) => Ok(Default::default()),\n    }\n}\n\n/// Returns all unique sets of variables that appear together in an identity\n/// (either in an algebraic constraint or in the same field of a bus interaction),\n/// IF the number of possible assignments is less than `MAX_SEARCH_WIDTH`.\npub fn get_brute_force_candidates<'a, T: FieldElement, V: Clone + Hash + Ord>(\n    constraint_system: &'a IndexedConstraintSystem<T, V>,\n    rc: impl RangeConstraintProvider<T, V> + Clone + 'a,\n) -> impl Iterator<Item = BTreeSet<V>> + 'a {\n    constraint_system\n        .algebraic_constraints()\n        .iter()\n        .map(|c| &c.expression)\n        .chain(\n            constraint_system\n                .bus_interactions()\n                .iter()\n                .flat_map(|b| b.fields()),\n        )\n        .map(|expression| {\n            expression\n                .referenced_unknown_variables()\n                .cloned()\n                .collect::<BTreeSet<_>>()\n        })\n        .unique()\n        .filter_map(move |variables| {\n            match is_candidate_for_exhaustive_search(&variables, &rc) {\n                true => Some(variables),\n                false => {\n                    // It could be that only one variable has a large range, but that the rest uniquely determine it.\n                    // In that case, searching through all combinations of the other variables would be enough.\n                    // Check if removing the variable results in a small enough set of possible assignments.\n                    let num_variables = variables.len();\n                    let variables_without_largest_range = variables\n                        .into_iter()\n                        .sorted_by(|a, b| rc.get(a).size_estimate().cmp(&rc.get(b).size_estimate()))\n                        .take(num_variables - 1)\n                        .collect::<BTreeSet<_>>();\n                    
is_candidate_for_exhaustive_search(&variables_without_largest_range, &rc)\n                        .then_some(variables_without_largest_range)\n                }\n            }\n        })\n        .filter(|variables| !variables.is_empty())\n        .unique()\n}\n\nfn is_candidate_for_exhaustive_search<T: FieldElement, V: Clone + Ord>(\n    variables: &BTreeSet<V>,\n    rc: &impl RangeConstraintProvider<T, V>,\n) -> bool {\n    has_few_possible_assignments(variables.iter().cloned(), rc, MAX_SEARCH_WIDTH)\n        && has_small_max_range_constraint_size(variables.iter().cloned(), rc, MAX_VAR_RANGE_WIDTH)\n}\n\nfn has_small_max_range_constraint_size<T: FieldElement, V: Clone + Ord>(\n    mut variables: impl Iterator<Item = V>,\n    rc: &impl RangeConstraintProvider<T, V>,\n    threshold: u64,\n) -> bool {\n    variables.all(|v| {\n        if let Some(size) = rc.get(&v).size_estimate().try_into_u64() {\n            size <= threshold\n        } else {\n            false\n        }\n    })\n}\n\n/// The provided assignments lead to a contradiction in the constraint system.\nstruct ContradictingConstraintError;\n\n/// Given a list of assignments of concrete values to variables, tries to derive\n/// new range constraints from them. 
To keep this function relatively fast,\n/// only tries to solve each algebraic or bus constraint in isolation.\n/// Fails if any of the assignments *directly* contradicts any of the constraints.\n/// Note that getting an OK(_) here does not mean that there is no contradiction, as\n/// this function only does one step of the derivation.\nfn derive_new_range_constraints<T: FieldElement, V: Clone + Hash + Ord + Eq + Display>(\n    constraint_system: &IndexedConstraintSystem<T, V>,\n    assignments: BTreeMap<V, T>,\n    range_constraints: &impl RangeConstraintProvider<T, V>,\n    bus_interaction_handler: &impl BusInteractionHandler<T>,\n) -> Result<BTreeMap<V, RangeConstraint<T>>, ContradictingConstraintError> {\n    let effects = constraint_system\n        .constraints_referencing_variables(assignments.keys())\n        .map(|constraint| match constraint {\n            ConstraintRef::AlgebraicConstraint(identity) => {\n                let mut identity = identity.cloned();\n                for (variable, value) in assignments.iter() {\n                    identity.substitute_by_known(variable, value);\n                }\n                identity\n                    .as_ref()\n                    .solve(range_constraints)\n                    .map(|result| result.effects)\n                    .map_err(|_| ContradictingConstraintError)\n            }\n            ConstraintRef::BusInteraction(bus_interaction) => {\n                let mut bus_interaction = bus_interaction.clone();\n                for (variable, value) in assignments.iter() {\n                    bus_interaction\n                        .fields_mut()\n                        .for_each(|expr| expr.substitute_by_known(variable, value))\n                }\n                bus_interaction\n                    .solve(bus_interaction_handler, range_constraints)\n                    .map_err(|_| ContradictingConstraintError)\n            }\n        })\n        // Early return if any constraint leads to a 
contradiction.\n        .collect::<Result<Vec<_>, _>>()?;\n\n    effects\n        .into_iter()\n        .flatten()\n        .filter_map(|effect| match effect {\n            Effect::Assignment(variable, value) => {\n                // Turn assignment into range constraint, we can recover it later.\n                Some((variable, RangeConstraint::from_value(value)))\n            }\n            Effect::RangeConstraint(variable, rc) => Some((variable, rc)),\n            _ => None,\n        })\n        .chain(\n            assignments\n                .into_iter()\n                .map(|(v, val)| (v, RangeConstraint::from_value(val))),\n        )\n        // All range constraints in this iterator hold simultaneously,\n        // so we compute the intersection for each variable.\n        .try_fold(BTreeMap::new(), |mut map, (variable, rc)| {\n            match map.entry(variable.clone()) {\n                Entry::Vacant(entry) => {\n                    entry.insert(rc);\n                }\n                Entry::Occupied(mut entry) => {\n                    let existing = entry.get();\n                    if existing.is_disjoint(&rc) {\n                        return Err(ContradictingConstraintError);\n                    }\n                    entry.insert(existing.conjunction(&rc));\n                }\n            }\n            Ok(map)\n        })\n}\n"
  },
  {
    "path": "constraint-solver/src/solver/linearizer.rs",
    "content": "use std::collections::HashMap;\nuse std::hash::Hash;\n\nuse derivative::Derivative;\nuse itertools::Itertools;\nuse powdr_number::FieldElement;\n\nuse crate::constraint_system::AlgebraicConstraint;\nuse crate::grouped_expression::GroupedExpression;\nuse crate::grouped_expression::GroupedExpressionComponent;\nuse crate::indexed_constraint_system::apply_substitutions_to_expressions;\nuse crate::solver::VariableAssignment;\n\n/// Solver component that substitutes non-affine sub-expressions\n/// by new variables (or constants if those variables have been determined\n/// later on to have a constant value).\n#[derive(Derivative)]\n#[derivative(Default(bound = \"\"))]\npub struct Linearizer<T, V> {\n    substitutions: HashMap<GroupedExpression<T, V>, GroupedExpression<T, V>>,\n}\n\nimpl<T: FieldElement, V: Clone + Eq + Ord + Hash> Linearizer<T, V> {\n    /// Linearizes the expression by introducing new variables for\n    /// non-affine parts. The new constraints are appended to\n    /// `constraint_collection` and must be added to the system.\n    /// The linearized expression is returned.\n    pub fn linearize_expression(\n        &mut self,\n        expr: GroupedExpression<T, V>,\n        var_dispenser: &mut impl FnMut() -> V,\n        constraint_collection: &mut impl Extend<AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) -> GroupedExpression<T, V> {\n        if expr.is_affine() {\n            return expr;\n        }\n        expr.into_summands()\n            .map(|c| match c {\n                GroupedExpressionComponent::Quadratic(l, r) => {\n                    let l = self.linearize_and_substitute_by_var(\n                        l,\n                        var_dispenser,\n                        constraint_collection,\n                    );\n                    let r = self.linearize_and_substitute_by_var(\n                        r,\n                        var_dispenser,\n                        constraint_collection,\n                    
);\n                    self.substitute_by_var(l * r, var_dispenser, constraint_collection)\n                }\n                GroupedExpressionComponent::Linear(v, coeff) => {\n                    GroupedExpression::from_unknown_variable(v) * coeff\n                }\n                GroupedExpressionComponent::Constant(c) => {\n                    GroupedExpression::from_runtime_constant(c)\n                }\n            })\n            .sum()\n    }\n\n    /// Tries to linearize the expression according to already existing substitutions.\n    pub fn try_linearize_existing(\n        &self,\n        expr: GroupedExpression<T, V>,\n    ) -> Option<GroupedExpression<T, V>> {\n        if expr.is_affine() {\n            return Some(expr);\n        }\n        Some(\n            expr.into_summands()\n                .map(|c| match c {\n                    GroupedExpressionComponent::Quadratic(l, r) => {\n                        let l =\n                            self.try_substitute_by_existing_var(&self.try_linearize_existing(l)?)?;\n                        let r =\n                            self.try_substitute_by_existing_var(&self.try_linearize_existing(r)?)?;\n                        self.try_substitute_by_existing_var(&(l * r))\n                    }\n                    GroupedExpressionComponent::Linear(v, coeff) => {\n                        Some(GroupedExpression::from_unknown_variable(v) * coeff)\n                    }\n                    GroupedExpressionComponent::Constant(c) => {\n                        Some(GroupedExpression::from_runtime_constant(c))\n                    }\n                })\n                .collect::<Option<Vec<_>>>()?\n                .into_iter()\n                .sum(),\n        )\n    }\n\n    /// Linearizes the expression and substitutes the expression by a single variable.\n    /// The substitution is not performed if the expression is a constant or a single\n    /// variable (without coefficient).\n    fn 
linearize_and_substitute_by_var(\n        &mut self,\n        expr: GroupedExpression<T, V>,\n        var_dispenser: &mut impl FnMut() -> V,\n        constraint_collection: &mut impl Extend<AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) -> GroupedExpression<T, V> {\n        let linearized = self.linearize_expression(expr, var_dispenser, constraint_collection);\n        self.substitute_by_var(linearized, var_dispenser, constraint_collection)\n    }\n\n    /// Substitutes the given expression by a single variable using the variable dispenser,\n    /// unless the expression is already just a single variable or constant. Re-uses substitutions\n    /// that were made in the past.\n    /// Adds the equality constraint to `constraint_collection` and returns the variable\n    /// as an expression.\n    pub fn substitute_by_var(\n        &mut self,\n        expr: GroupedExpression<T, V>,\n        var_dispenser: &mut impl FnMut() -> V,\n        constraint_collection: &mut impl Extend<AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) -> GroupedExpression<T, V> {\n        if let Some(var) = self.try_substitute_by_existing_var(&expr) {\n            var\n        } else {\n            let var = var_dispenser();\n            self.substitutions.insert(\n                expr.clone(),\n                GroupedExpression::from_unknown_variable(var.clone()),\n            );\n            let var = GroupedExpression::from_unknown_variable(var);\n            constraint_collection.extend([AlgebraicConstraint::assert_zero(expr - var.clone())]);\n            var\n        }\n    }\n\n    /// Tries to substitute the given expression by an existing variable.\n    pub fn try_substitute_by_existing_var(\n        &self,\n        expr: &GroupedExpression<T, V>,\n    ) -> Option<GroupedExpression<T, V>> {\n        if expr.try_to_known().is_some() || expr.try_to_simple_unknown().is_some() {\n            Some(expr.clone())\n        } else {\n            
self.substitutions.get(expr).cloned()\n        }\n    }\n\n    /// Returns an iterator over expressions equivalent to `expr` with the idea that\n    /// they might allow to answer a query better or worse.\n    /// It usually returns the original expression, a single variable that it was\n    /// substituted into during a previous linearization and a previously linearized version.\n    pub fn internalized_versions_of_expression(\n        &self,\n        expr: &GroupedExpression<T, V>,\n    ) -> impl Iterator<Item = GroupedExpression<T, V>> + Clone {\n        let direct = expr.clone();\n        // See if we have a direct substitution for the expression by a variable.\n        let simple_substituted = self.try_substitute_by_existing_var(expr);\n        // Try to re-do the linearization\n        let substituted = self.try_linearize_existing(expr.clone());\n        std::iter::once(direct)\n            .chain(simple_substituted)\n            .chain(substituted)\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq + Ord + Hash> Linearizer<T, V> {\n    /// Applies the assignments to the stored substitutions.\n    pub fn apply_assignments(&mut self, assignments: &[VariableAssignment<T, V>]) {\n        if assignments.is_empty() {\n            return;\n        }\n        let (exprs, vars): (Vec<_>, Vec<_>) = self.substitutions.drain().unzip();\n        let exprs = apply_substitutions_to_expressions(exprs, assignments.iter().cloned());\n        let vars = apply_substitutions_to_expressions(vars, assignments.iter().cloned());\n        self.substitutions = exprs.into_iter().zip_eq(vars).collect();\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use expect_test::expect;\n    use powdr_number::GoldilocksField;\n\n    use super::*;\n    use crate::{\n        bus_interaction_handler::DefaultBusInteractionHandler,\n        constraint_system::BusInteraction,\n        solver::{\n            base::{BaseSolver, VarDispenserImpl},\n            var_transformation::Variable,\n            Solver,\n  
      },\n    };\n\n    type Qse = GroupedExpression<GoldilocksField, Variable<&'static str>>;\n\n    fn var(name: &'static str) -> Qse {\n        GroupedExpression::from_unknown_variable(Variable::from(name))\n    }\n\n    fn constant(value: u64) -> Qse {\n        GroupedExpression::from_number(GoldilocksField::from(value))\n    }\n\n    #[test]\n    fn linearization() {\n        let mut var_counter = 0usize;\n        let mut linearizer = Linearizer::default();\n        let expr = var(\"x\") + var(\"y\") * (var(\"z\") + constant(1)) * (var(\"x\") - constant(1));\n        let mut constraints_to_add = vec![];\n        let linearized = linearizer.linearize_expression(\n            expr,\n            &mut || {\n                let var = Variable::Linearized(var_counter);\n                var_counter += 1;\n                var\n            },\n            &mut constraints_to_add,\n        );\n        assert_eq!(linearized.to_string(), \"x + lin_3\");\n        assert_eq!(\n            constraints_to_add.into_iter().format(\"\\n\").to_string(),\n            \"z - lin_0 + 1 = 0\\n(y) * (lin_0) - lin_1 = 0\\nx - lin_2 - 1 = 0\\n(lin_1) * (lin_2) - lin_3 = 0\"\n        );\n    }\n\n    #[test]\n    fn solver_transforms() {\n        let mut solver =\n            BaseSolver::<_, _, _, VarDispenserImpl>::new(DefaultBusInteractionHandler::default());\n        solver.add_algebraic_constraints(\n            [\n                (var(\"x\") + var(\"y\")) * (var(\"z\") + constant(1)) * (var(\"x\") - constant(1)),\n                (var(\"a\") + var(\"b\")) * (var(\"c\") - constant(2)),\n            ]\n            .into_iter()\n            .map(AlgebraicConstraint::assert_zero),\n        );\n        solver.add_bus_interactions(vec![BusInteraction {\n            bus_id: constant(1),\n            payload: vec![var(\"x\") + var(\"y\"), -var(\"a\"), var(\"a\")],\n            multiplicity: var(\"z\") + constant(1),\n        }]);\n        // Below, it is important that in the bus 
interaction,\n        // `a` is not replaced and that the first payload re-uses the\n        // already linearized `x + y`.\n        expect!([r#\"\n            ((x + y) * (z + 1)) * (x - 1) = 0\n            x + y - lin_0 = 0\n            z - lin_1 + 1 = 0\n            (lin_0) * (lin_1) - lin_2 = 0\n            x - lin_3 - 1 = 0\n            (lin_2) * (lin_3) - lin_4 = 0\n            lin_4 = 0\n            (a + b) * (c - 2) = 0\n            a + b - lin_5 = 0\n            c - lin_6 - 2 = 0\n            (lin_5) * (lin_6) - lin_7 = 0\n            lin_7 = 0\n            -(a + lin_8) = 0\n            BusInteraction { bus_id: 1, multiplicity: lin_1, payload: lin_0, lin_8, a }\"#])\n        .assert_eq(&solver.to_string());\n        let assignments = solver.solve().unwrap();\n        expect!([r#\"\n            lin_4 = 0\n            lin_7 = 0\"#])\n        .assert_eq(\n            &assignments\n                .iter()\n                .map(|(var, value)| format!(\"{var} = {value}\"))\n                .join(\"\\n\"),\n        );\n\n        expect!([r#\"\n            ((x + y) * (z + 1)) * (x - 1) = 0\n            x + y - lin_0 = 0\n            z - lin_1 + 1 = 0\n            (lin_0) * (lin_1) - lin_2 = 0\n            x - lin_3 - 1 = 0\n            (lin_2) * (lin_3) = 0\n            0 = 0\n            (a + b) * (c - 2) = 0\n            a + b - lin_5 = 0\n            c - lin_6 - 2 = 0\n            (lin_5) * (lin_6) = 0\n            0 = 0\n            -(a + lin_8) = 0\n            BusInteraction { bus_id: 1, multiplicity: lin_1, payload: lin_0, lin_8, a }\"#])\n        .assert_eq(&solver.to_string());\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/solver/var_transformation.rs",
    "content": "use powdr_number::FieldElement;\n\nuse crate::constraint_system::{AlgebraicConstraint, BusInteraction};\nuse crate::grouped_expression::{GroupedExpression, RangeConstraintProvider};\nuse crate::range_constraint::RangeConstraint;\nuse crate::runtime_constant::VarTransformable;\nuse crate::solver::{Error, Solver, VariableAssignment};\n\nuse std::collections::HashSet;\nuse std::fmt::{Debug, Display};\nuse std::hash::Hash;\n\n/// We introduce new variables.\n/// This enum avoids clashes with the original variables.\n#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]\npub enum Variable<V> {\n    /// A regular variable that also exists in the original system.\n    Original(V),\n    /// A new boolean-constrained variable that was introduced by the solver.\n    Boolean(usize),\n    /// A new variable introduced by the linearizer.\n    Linearized(usize),\n}\n\nimpl<V> From<V> for Variable<V> {\n    /// Converts a regular variable to a `Variable`.\n    fn from(v: V) -> Self {\n        Variable::Original(v)\n    }\n}\n\nimpl<V: Clone> From<&V> for Variable<V> {\n    /// Converts a regular variable to a `Variable`.\n    fn from(v: &V) -> Self {\n        Variable::Original(v.clone())\n    }\n}\n\nimpl<V: Clone> Variable<V> {\n    pub fn try_to_original(&self) -> Option<V> {\n        match self {\n            Variable::Original(v) => Some(v.clone()),\n            _ => None,\n        }\n    }\n}\n\nimpl<V: Display> Display for Variable<V> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Variable::Original(v) => write!(f, \"{v}\"),\n            Variable::Boolean(i) => write!(f, \"bool_{i}\"),\n            Variable::Linearized(i) => write!(f, \"lin_{i}\"),\n        }\n    }\n}\n\n/// A solver that transforms variables from one type to another,\npub struct VarTransformation<T, V, S> {\n    solver: S,\n    _phantom: std::marker::PhantomData<(T, V)>,\n}\n\nimpl<T, V, S> VarTransformation<T, V, 
S>\nwhere\n    T: FieldElement,\n    V: Clone + Eq,\n    S: Solver<T, Variable<V>>,\n{\n    pub fn new(solver: S) -> Self {\n        Self {\n            solver,\n            _phantom: std::marker::PhantomData,\n        }\n    }\n}\n\nimpl<T, V, S> RangeConstraintProvider<T, V> for VarTransformation<T, V, S>\nwhere\n    T: FieldElement,\n    S: RangeConstraintProvider<T, Variable<V>>,\n    V: Clone,\n{\n    fn get(&self, var: &V) -> RangeConstraint<T> {\n        self.solver.get(&Variable::from(var))\n    }\n}\n\nimpl<T, V, S: Display> Display for VarTransformation<T, V, S> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.solver)\n    }\n}\n\nimpl<T, V, S> Solver<T, V> for VarTransformation<T, V, S>\nwhere\n    T: FieldElement,\n    V: Ord + Clone + Eq + Hash + Display,\n    S: Solver<T, Variable<V>>,\n{\n    /// Solves the system and ignores all assignments that contain a new variable\n    /// (either on the LHS or the RHS).\n    fn solve(&mut self) -> Result<Vec<VariableAssignment<T, V>>, Error> {\n        let assignments = self.solver.solve()?;\n        Ok(assignments\n            .into_iter()\n            .filter_map(|(v, expr)| {\n                assert!(expr.is_affine());\n                let v = v.try_to_original()?;\n                let expr = expr.try_transform_var_type(&mut |v| v.try_to_original())?;\n                Some((v, expr))\n            })\n            .collect())\n    }\n\n    fn add_algebraic_constraints(\n        &mut self,\n        constraints: impl IntoIterator<Item = AlgebraicConstraint<GroupedExpression<T, V>>>,\n    ) {\n        self.solver\n            .add_algebraic_constraints(constraints.into_iter().map(|c| transform_constraint(&c)));\n    }\n\n    fn add_bus_interactions(\n        &mut self,\n        bus_interactions: impl IntoIterator<Item = BusInteraction<GroupedExpression<T, V>>>,\n    ) {\n        self.solver.add_bus_interactions(\n            bus_interactions\n             
   .into_iter()\n                .map(|bus_interaction| bus_interaction.fields().map(transform_expr).collect()),\n        )\n    }\n\n    fn add_range_constraint(&mut self, variable: &V, constraint: RangeConstraint<T>) {\n        self.solver\n            .add_range_constraint(&variable.into(), constraint);\n    }\n\n    fn retain_variables(&mut self, variables_to_keep: &HashSet<V>) {\n        // This will cause constraints to be deleted if they\n        // only contain newly added variables.\n        let variables_to_keep = variables_to_keep\n            .iter()\n            .map(From::from)\n            .collect::<HashSet<_>>();\n        self.solver.retain_variables(&variables_to_keep);\n    }\n\n    fn range_constraint_for_expression(\n        &self,\n        expr: &GroupedExpression<T, V>,\n    ) -> RangeConstraint<T> {\n        self.solver\n            .range_constraint_for_expression(&transform_expr(expr))\n    }\n\n    fn try_to_equivalent_constant(&self, expr: &GroupedExpression<T, V>) -> Option<T> {\n        self.solver\n            .try_to_equivalent_constant(&transform_expr(expr))\n    }\n\n    fn are_expressions_known_to_be_different(\n        &mut self,\n        a: &GroupedExpression<T, V>,\n        b: &GroupedExpression<T, V>,\n    ) -> bool {\n        let a = transform_expr(a);\n        let b = transform_expr(b);\n        self.solver.are_expressions_known_to_be_different(&a, &b)\n    }\n}\n\nfn transform_expr<T: FieldElement, V: Ord + Clone>(\n    expr: &GroupedExpression<T, V>,\n) -> GroupedExpression<T, Variable<V>> {\n    expr.transform_var_type(&mut |v| v.into())\n}\n\nfn transform_constraint<T: FieldElement, V: Ord + Clone>(\n    constraint: &AlgebraicConstraint<GroupedExpression<T, V>>,\n) -> AlgebraicConstraint<GroupedExpression<T, Variable<V>>> {\n    AlgebraicConstraint::assert_zero(transform_expr(&constraint.expression))\n}\n"
  },
  {
    "path": "constraint-solver/src/solver.rs",
    "content": "use powdr_number::FieldElement;\n\nuse crate::constraint_system::{\n    AlgebraicConstraint, BusInteraction, BusInteractionHandler, ConstraintSystem,\n};\nuse crate::grouped_expression::GroupedExpression;\nuse crate::range_constraint::RangeConstraint;\nuse crate::solver::base::{BaseSolver, VarDispenserImpl};\nuse crate::solver::var_transformation::VarTransformation;\n\nuse super::grouped_expression::RangeConstraintProvider;\n\nuse crate::algebraic_constraint::solve::Error as AlgebraicSolverError;\nuse std::collections::HashSet;\nuse std::fmt::{Debug, Display};\nuse std::hash::Hash;\n\nmod base;\nmod boolean_extractor;\nmod constraint_splitter;\nmod exhaustive_search;\nmod linearizer;\nmod var_transformation;\n\n/// Solve a constraint system, i.e. derive assignments for variables in the system.\npub fn solve_system<T, V>(\n    constraint_system: ConstraintSystem<T, V>,\n    bus_interaction_handler: impl BusInteractionHandler<T>,\n) -> Result<Vec<VariableAssignment<T, V>>, Error>\nwhere\n    T: FieldElement,\n    V: Ord + Clone + Hash + Eq + Display,\n{\n    new_solver(constraint_system, bus_interaction_handler).solve()\n}\n\n/// Creates a new solver for the given system and bus interaction handler.\npub fn new_solver<T, V>(\n    constraint_system: ConstraintSystem<T, V>,\n    bus_interaction_handler: impl BusInteractionHandler<T>,\n) -> impl Solver<T, V>\nwhere\n    T: FieldElement,\n    V: Ord + Clone + Hash + Eq + Display,\n{\n    let mut solver = VarTransformation::new(BaseSolver::<_, _, _, VarDispenserImpl>::new(\n        bus_interaction_handler,\n    ));\n    solver.add_algebraic_constraints(constraint_system.algebraic_constraints);\n    solver.add_bus_interactions(constraint_system.bus_interactions);\n    solver\n}\n\npub trait Solver<T: FieldElement, V>: RangeConstraintProvider<T, V> + Sized {\n    /// Solves the constraints as far as possible, returning concrete variable\n    /// assignments. 
Does not return the same assignments again if called more than once.\n    fn solve(&mut self) -> Result<Vec<VariableAssignment<T, V>>, Error>;\n\n    /// Adds a new algebraic constraint to the system.\n    fn add_algebraic_constraints(\n        &mut self,\n        constraints: impl IntoIterator<Item = AlgebraicConstraint<GroupedExpression<T, V>>>,\n    );\n\n    /// Adds a new bus interaction to the system.\n    fn add_bus_interactions(\n        &mut self,\n        bus_interactions: impl IntoIterator<Item = BusInteraction<GroupedExpression<T, V>>>,\n    );\n\n    /// Adds a new range constraint for the variable.\n    fn add_range_constraint(&mut self, var: &V, constraint: RangeConstraint<T>);\n\n    /// Permits the solver to remove all variables except those in `variables_to_keep`.\n    /// This should only keep the constraints that reference at least one of the variables.\n    fn retain_variables(&mut self, variables_to_keep: &HashSet<V>);\n\n    /// Returns the best known range constraint for the given expression.\n    fn range_constraint_for_expression(&self, expr: &GroupedExpression<T, V>)\n        -> RangeConstraint<T>;\n\n    /// If the solver can determine the given expression to always have a constant\n    /// value, returns that value. Otherwise, returns `None`.\n    /// Note that if this function returns `x` on input `e`, replacing `e`\n    /// by `x` in a system does not always yield an equivalent system - it might\n    /// be less strict. Replacing and afterwards adding `e = x` does yield an\n    /// equivalent system, though.\n    fn try_to_equivalent_constant(&self, expr: &GroupedExpression<T, V>) -> Option<T>;\n\n    /// Returns `true` if `a` and `b` are different for all satisfying assignments.\n    /// In other words, `a - b` does not allow the value zero.\n    /// If this function returns `false`, it does not mean that `a` and `b` are equal,\n    /// i.e. 
a function always returning `false` here satisfies the trait.\n    fn are_expressions_known_to_be_different(\n        &mut self,\n        a: &GroupedExpression<T, V>,\n        b: &GroupedExpression<T, V>,\n    ) -> bool;\n}\n\n/// An error occurred while solving the constraint system.\n/// This means that the constraint system is unsatisfiable.\n#[derive(Debug, PartialEq, Eq)]\npub enum Error {\n    /// An error occurred while calling `GroupedExpression::solve`\n    AlgebraicSolverError(AlgebraicSolverError),\n    /// The bus interaction handler reported that some sent data was invalid.\n    BusInteractionError,\n    /// During exhaustive search, we came across a combination of variables for which\n    /// no assignment would satisfy all the constraints.\n    ExhaustiveSearchError,\n}\n\n/// An assignment of a variable.\npub type VariableAssignment<T, V> = (V, GroupedExpression<T, V>);\n"
  },
  {
    "path": "constraint-solver/src/symbolic_expression.rs",
    "content": "use auto_enums::auto_enum;\nuse num_traits::{One, Zero};\nuse std::hash::Hash;\nuse std::ops::Sub;\nuse std::ops::{AddAssign, MulAssign};\nuse std::{\n    fmt::{self, Display, Formatter},\n    iter,\n    ops::{Add, Mul, Neg},\n    sync::Arc,\n};\n\nuse powdr_number::{ExpressionConvertible, FieldElement};\n\nuse crate::runtime_constant::{RuntimeConstant, Substitutable, VarTransformable};\n\nuse super::range_constraint::RangeConstraint;\n\n/// A value that is known at run-time, defined through a complex expression\n/// involving known cells or variables and compile-time constants.\n/// Each of the sub-expressions can have its own range constraint.\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub enum SymbolicExpression<T: FieldElement, S> {\n    /// A concrete constant value known at compile time.\n    Concrete(T),\n    /// A symbolic value known at run-time, referencing a cell,\n    /// an input, a local variable or whatever it is used for.\n    Symbol(S, RangeConstraint<T>),\n    BinaryOperation(Arc<Self>, BinaryOperator, Arc<Self>, RangeConstraint<T>),\n    UnaryOperation(UnaryOperator, Arc<Self>, RangeConstraint<T>),\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub enum BinaryOperator {\n    Add,\n    Sub,\n    Mul,\n    /// Finite field division.\n    Div,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub enum UnaryOperator {\n    Neg,\n}\n\nimpl<T: FieldElement, S> SymbolicExpression<T, S> {\n    /// Returns all direct children of this expression.\n    /// Does specifically not implement the `Children` trait, because it does not go\n    /// well with recursive types.\n    #[auto_enum(Iterator)]\n    fn children(&self) -> impl Iterator<Item = &SymbolicExpression<T, S>> {\n        match self {\n            SymbolicExpression::BinaryOperation(lhs, _, rhs, _) => {\n                [lhs.as_ref(), rhs.as_ref()].into_iter()\n            }\n            SymbolicExpression::UnaryOperation(_, expr, _) => 
iter::once(expr.as_ref()),\n            SymbolicExpression::Concrete(_) | SymbolicExpression::Symbol(..) => iter::empty(),\n        }\n    }\n\n    /// Returns an iterator over all direct and indirect children of this expression, including\n    /// the expression itself.\n    pub fn all_children(&self) -> Box<dyn Iterator<Item = &SymbolicExpression<T, S>> + '_> {\n        Box::new(iter::once(self).chain(self.children().flat_map(|e| e.all_children())))\n    }\n}\n\nimpl<T: FieldElement, S> SymbolicExpression<T, S> {\n    pub fn from_symbol(symbol: S, rc: RangeConstraint<T>) -> Self {\n        if let Some(v) = rc.try_to_single_value() {\n            SymbolicExpression::Concrete(v)\n        } else {\n            SymbolicExpression::Symbol(symbol, rc)\n        }\n    }\n}\n\nimpl<T: FieldElement, S: Clone + Eq> SymbolicExpression<T, S> {\n    /// Applies a variable substitution and returns a modified version if there was a change.\n    pub fn compute_substitution(&self, variable: &S, substitution: &Self) -> Option<Self> {\n        match self {\n            SymbolicExpression::Concrete(_) => None,\n            SymbolicExpression::Symbol(v, _) => (v == variable).then(|| substitution.clone()),\n            SymbolicExpression::BinaryOperation(left, op, right, _) => {\n                let (l, r) = match (\n                    left.compute_substitution(variable, substitution),\n                    right.compute_substitution(variable, substitution),\n                ) {\n                    (None, None) => return None,\n                    (Some(l), None) => (l, (**right).clone()),\n                    (None, Some(r)) => ((**left).clone(), r),\n                    (Some(l), Some(r)) => (l, r),\n                };\n                match op {\n                    BinaryOperator::Add => Some(l + r),\n                    BinaryOperator::Sub => Some(l - r),\n                    BinaryOperator::Mul => Some(l * r),\n                    BinaryOperator::Div => Some(l.field_div(&r)),\n 
               }\n            }\n            SymbolicExpression::UnaryOperation(op, inner, _) => {\n                let inner = inner.compute_substitution(variable, substitution)?;\n                match op {\n                    UnaryOperator::Neg => Some(-inner),\n                }\n            }\n        }\n    }\n\n    /// Applies a variable substitution in place.\n    pub fn substitute(&mut self, variable: &S, substitution: &Self) {\n        if let Some(updated) = self.compute_substitution(variable, substitution) {\n            *self = updated;\n        }\n    }\n}\n\nimpl<T: FieldElement, V> ExpressionConvertible<T, V> for SymbolicExpression<T, V> {\n    /// Turns a SymbolicExpression into an expression over its variables, essentially\n    /// making all variables unknown variables.\n    ///\n    /// Fails in case a division operation is used.\n    fn try_to_expression<\n        E: Add<E, Output = E> + Sub<E, Output = E> + Mul<E, Output = E> + Neg<Output = E>,\n    >(\n        &self,\n        number_converter: &impl Fn(&T) -> E,\n        var_converter: &impl Fn(&V) -> E,\n        try_to_number: &impl Fn(&E) -> Option<T>,\n    ) -> Option<E> {\n        Some(match self {\n            SymbolicExpression::Concrete(value) => number_converter(value),\n            SymbolicExpression::Symbol(var, _) => var_converter(var),\n            SymbolicExpression::BinaryOperation(left, op, right, _) => {\n                let left =\n                    left.try_to_expression(number_converter, var_converter, try_to_number)?;\n                let right =\n                    right.try_to_expression(number_converter, var_converter, try_to_number)?;\n                match op {\n                    BinaryOperator::Add => left + right,\n                    BinaryOperator::Sub => left - right,\n                    BinaryOperator::Mul => left * right,\n                    BinaryOperator::Div => {\n                        if let Some(right) = try_to_number(&right) {\n                   
         left * number_converter(&(T::from(1) / right))\n                        } else {\n                            return None;\n                        }\n                    }\n                }\n            }\n            SymbolicExpression::UnaryOperation(op, inner, _) => {\n                let inner =\n                    inner.try_to_expression(number_converter, var_converter, try_to_number)?;\n                match op {\n                    UnaryOperator::Neg => -inner,\n                }\n            }\n        })\n    }\n}\n\nimpl<T: FieldElement, S1: Ord + Clone, S2: Ord + Clone> VarTransformable<S1, S2>\n    for SymbolicExpression<T, S1>\n{\n    type Transformed = SymbolicExpression<T, S2>;\n\n    fn try_transform_var_type(\n        &self,\n        var_transform: &mut impl FnMut(&S1) -> Option<S2>,\n    ) -> Option<SymbolicExpression<T, S2>> {\n        Some(match self {\n            SymbolicExpression::Concrete(n) => SymbolicExpression::Concrete(*n),\n            SymbolicExpression::Symbol(v, rc) => {\n                SymbolicExpression::from_symbol(var_transform(v)?, *rc)\n            }\n            SymbolicExpression::BinaryOperation(lhs, op, rhs, rc) => {\n                SymbolicExpression::BinaryOperation(\n                    Arc::new(lhs.try_transform_var_type(var_transform)?),\n                    *op,\n                    Arc::new(rhs.try_transform_var_type(var_transform)?),\n                    *rc,\n                )\n            }\n            SymbolicExpression::UnaryOperation(op, inner, rc) => {\n                SymbolicExpression::UnaryOperation(\n                    *op,\n                    Arc::new(inner.try_transform_var_type(var_transform)?),\n                    *rc,\n                )\n            }\n        })\n    }\n}\n\n/// Display for affine symbolic expressions, for informational purposes only.\nimpl<T: FieldElement, V: Display> Display for SymbolicExpression<T, V> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result 
{\n        match self {\n            SymbolicExpression::Concrete(n) => {\n                if n.is_in_lower_half() {\n                    write!(f, \"{n}\")\n                } else {\n                    write!(f, \"-{}\", -*n)\n                }\n            }\n            SymbolicExpression::Symbol(name, _) => write!(f, \"{name}\"),\n            SymbolicExpression::BinaryOperation(lhs, op, rhs, _) => {\n                write!(f, \"({lhs} {op} {rhs})\")\n            }\n            SymbolicExpression::UnaryOperation(op, expr, _) => write!(f, \"{op}{expr}\"),\n        }\n    }\n}\n\nimpl Display for BinaryOperator {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            BinaryOperator::Add => write!(f, \"+\"),\n            BinaryOperator::Sub => write!(f, \"-\"),\n            BinaryOperator::Mul => write!(f, \"*\"),\n            BinaryOperator::Div => write!(f, \"/\"),\n        }\n    }\n}\n\nimpl Display for UnaryOperator {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            UnaryOperator::Neg => write!(f, \"-\"),\n        }\n    }\n}\n\nimpl<T: FieldElement, V> From<T> for SymbolicExpression<T, V> {\n    fn from(n: T) -> Self {\n        SymbolicExpression::Concrete(n)\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Add for &SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, V>;\n\n    fn add(self, rhs: Self) -> Self::Output {\n        if self.is_known_zero() {\n            return rhs.clone();\n        }\n        if rhs.is_known_zero() {\n            return self.clone();\n        }\n        match (self, rhs) {\n            (SymbolicExpression::Concrete(a), SymbolicExpression::Concrete(b)) => {\n                SymbolicExpression::Concrete(*a + *b)\n            }\n            (SymbolicExpression::UnaryOperation(UnaryOperator::Neg, negated, _), other)\n            | (other, SymbolicExpression::UnaryOperation(UnaryOperator::Neg, negated, _))\n                if 
negated.as_ref() == other =>\n            {\n                T::from(0).into()\n            }\n            _ => SymbolicExpression::BinaryOperation(\n                Arc::new(self.clone()),\n                BinaryOperator::Add,\n                Arc::new(rhs.clone()),\n                self.range_constraint().combine_sum(&rhs.range_constraint()),\n            ),\n        }\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Add for SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, V>;\n    fn add(self, rhs: Self) -> Self::Output {\n        &self + &rhs\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> AddAssign for SymbolicExpression<T, V> {\n    fn add_assign(&mut self, rhs: Self) {\n        *self = self.clone() + rhs;\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Sub for &SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, V>;\n\n    fn sub(self, rhs: Self) -> Self::Output {\n        if self.is_known_zero() {\n            return -rhs.clone();\n        }\n        if rhs.is_known_zero() {\n            return self.clone();\n        }\n        match (self, rhs) {\n            (SymbolicExpression::Concrete(a), SymbolicExpression::Concrete(b)) => {\n                SymbolicExpression::Concrete(*a - *b)\n            }\n            (a, b) if a == b => T::from(0).into(),\n            _ => SymbolicExpression::BinaryOperation(\n                Arc::new(self.clone()),\n                BinaryOperator::Sub,\n                Arc::new(rhs.clone()),\n                self.range_constraint()\n                    .combine_sum(&rhs.range_constraint().neg()),\n            ),\n        }\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Sub for SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, V>;\n    fn sub(self, rhs: Self) -> Self::Output {\n        &self - &rhs\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Neg for &SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, V>;\n\n    fn neg(self) -> Self::Output {\n    
    match self {\n            SymbolicExpression::Concrete(n) => SymbolicExpression::Concrete(-*n),\n            SymbolicExpression::UnaryOperation(UnaryOperator::Neg, expr, _) => {\n                expr.as_ref().clone()\n            }\n            SymbolicExpression::BinaryOperation(lhs, BinaryOperator::Add, rhs, _) => {\n                -(**lhs).clone() + -(**rhs).clone()\n            }\n            SymbolicExpression::BinaryOperation(lhs, BinaryOperator::Sub, rhs, _) => {\n                SymbolicExpression::BinaryOperation(\n                    rhs.clone(),\n                    BinaryOperator::Sub,\n                    lhs.clone(),\n                    self.range_constraint().multiple(-T::from(1)),\n                )\n            }\n            SymbolicExpression::BinaryOperation(lhs, BinaryOperator::Mul, rhs, _)\n                if matches!(**lhs, SymbolicExpression::Concrete(_)) =>\n            {\n                SymbolicExpression::BinaryOperation(\n                    Arc::new(-(**lhs).clone()),\n                    BinaryOperator::Mul,\n                    rhs.clone(),\n                    self.range_constraint().multiple(-T::from(1)),\n                )\n            }\n            SymbolicExpression::BinaryOperation(lhs, BinaryOperator::Mul, rhs, _)\n                if matches!(**rhs, SymbolicExpression::Concrete(_)) =>\n            {\n                SymbolicExpression::BinaryOperation(\n                    lhs.clone(),\n                    BinaryOperator::Mul,\n                    Arc::new(-(**rhs).clone()),\n                    self.range_constraint().multiple(-T::from(1)),\n                )\n            }\n            _ => SymbolicExpression::UnaryOperation(\n                UnaryOperator::Neg,\n                Arc::new(self.clone()),\n                self.range_constraint().multiple(-T::from(1)),\n            ),\n        }\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Neg for SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, 
V>;\n    fn neg(self) -> Self::Output {\n        -&self\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Mul for &SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, V>;\n\n    fn mul(self, rhs: Self) -> Self::Output {\n        if let (SymbolicExpression::Concrete(a), SymbolicExpression::Concrete(b)) = (self, rhs) {\n            SymbolicExpression::Concrete(*a * *b)\n        } else if self.is_known_zero() || rhs.is_known_zero() {\n            SymbolicExpression::Concrete(T::from(0))\n        } else if self.is_known_one() {\n            rhs.clone()\n        } else if rhs.is_known_one() {\n            self.clone()\n        } else if self.is_known_minus_one() {\n            -rhs\n        } else if rhs.is_known_minus_one() {\n            -self\n        } else {\n            SymbolicExpression::BinaryOperation(\n                Arc::new(self.clone()),\n                BinaryOperator::Mul,\n                Arc::new(rhs.clone()),\n                self.range_constraint()\n                    .combine_product(&rhs.range_constraint()),\n            )\n        }\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Mul for SymbolicExpression<T, V> {\n    type Output = SymbolicExpression<T, V>;\n    fn mul(self, rhs: Self) -> Self {\n        &self * &rhs\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> MulAssign for SymbolicExpression<T, V> {\n    fn mul_assign(&mut self, rhs: Self) {\n        *self = self.clone() * rhs;\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> Zero for SymbolicExpression<T, V> {\n    fn zero() -> Self {\n        SymbolicExpression::Concrete(T::from(0))\n    }\n\n    fn is_zero(&self) -> bool {\n        self.is_known_zero()\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> One for SymbolicExpression<T, V> {\n    fn one() -> Self {\n        SymbolicExpression::Concrete(T::from(1))\n    }\n\n    fn is_one(&self) -> bool {\n        self.is_known_one()\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Eq> RuntimeConstant for 
SymbolicExpression<T, V> {\n    type FieldType = T;\n\n    fn try_to_number(&self) -> Option<Self::FieldType> {\n        match self {\n            SymbolicExpression::Concrete(n) => Some(*n),\n            SymbolicExpression::Symbol(..)\n            | SymbolicExpression::BinaryOperation(..)\n            | SymbolicExpression::UnaryOperation(..) => None,\n        }\n    }\n\n    fn range_constraint(&self) -> RangeConstraint<Self::FieldType> {\n        match self {\n            SymbolicExpression::Concrete(v) => RangeConstraint::from_value(*v),\n            SymbolicExpression::Symbol(.., rc)\n            | SymbolicExpression::BinaryOperation(.., rc)\n            | SymbolicExpression::UnaryOperation(.., rc) => *rc,\n        }\n    }\n\n    /// Field element division.\n    /// If you use this, you must ensure that the divisor is not zero.\n    fn field_div(&self, rhs: &Self) -> Self {\n        if let (SymbolicExpression::Concrete(a), SymbolicExpression::Concrete(b)) = (self, rhs) {\n            assert!(b != &T::from(0));\n            SymbolicExpression::Concrete(*a / *b)\n        } else if self.is_known_zero() {\n            SymbolicExpression::Concrete(T::from(0))\n        } else if rhs.is_known_one() {\n            self.clone()\n        } else if rhs.is_known_minus_one() {\n            -self\n        } else {\n            // TODO other simplifications like `-x / -y => x / y`, `-x / concrete => x / -concrete`, etc.\n            SymbolicExpression::BinaryOperation(\n                Arc::new(self.clone()),\n                BinaryOperator::Div,\n                Arc::new(rhs.clone()),\n                Default::default(),\n            )\n        }\n    }\n\n    /// Returns the multiplicative inverse in the field.\n    fn field_inverse(&self) -> Self {\n        if let SymbolicExpression::Concrete(x) = self {\n            assert!(x != &T::from(0));\n            SymbolicExpression::Concrete(T::from(1) / *x)\n        } else if let SymbolicExpression::BinaryOperation(x, 
BinaryOperator::Div, y, _) = self {\n            SymbolicExpression::BinaryOperation(\n                y.clone(),\n                BinaryOperator::Div,\n                x.clone(),\n                Default::default(),\n            )\n        } else {\n            SymbolicExpression::BinaryOperation(\n                Arc::new(Self::from(T::from(1))),\n                BinaryOperator::Div,\n                Arc::new(self.clone()),\n                Default::default(),\n            )\n        }\n    }\n\n    fn from_u64(k: u64) -> Self {\n        SymbolicExpression::Concrete(T::from(k))\n    }\n}\n\nimpl<T: FieldElement, V: Clone + Hash + Eq + Ord> Substitutable<V> for SymbolicExpression<T, V> {\n    fn substitute(&mut self, variable: &V, substitution: &Self) {\n        SymbolicExpression::substitute(self, variable, substitution);\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/system_splitter.rs",
    "content": "use std::hash::Hash;\nuse std::{collections::BTreeSet, fmt::Display};\n\nuse crate::constraint_system::{AlgebraicConstraint, ConstraintRef};\nuse crate::reachability::reachable_variables;\nuse crate::{\n    constraint_system::ConstraintSystem, indexed_constraint_system::IndexedConstraintSystem,\n    runtime_constant::RuntimeConstant,\n};\n\n/// Splits the constraint system into independent subsets.\n/// Each variable occurs in exactly one subset and all constraints referencing a\n/// certain variable have to be in the same subsystem.\n/// Note that the list of derived variables in the returned set is empty,\n/// but derived variables do occur in the constraints.\npub fn split_system<T: RuntimeConstant, V: Clone + Ord + Hash + Display>(\n    constraint_system: IndexedConstraintSystem<T, V>,\n) -> Vec<ConstraintSystem<T, V>> {\n    // We cleanup and re-index the constraint system, otherwise we get too many\n    // empty systems due to variables that have already been substituted.\n    let mut constraint_system: ConstraintSystem<T, V> = constraint_system.into();\n    constraint_system\n        .algebraic_constraints\n        .retain(|constr| !constr.is_redundant());\n    let constraint_system: IndexedConstraintSystem<T, V> = constraint_system.into();\n\n    let mut systems = Vec::new();\n    let mut remaining_variables: BTreeSet<_> = constraint_system.variables().cloned().collect();\n\n    while let Some(v) = remaining_variables.pop_first() {\n        let variables_to_extract = reachable_variables([v.clone()], &constraint_system);\n\n        let mut algebraic_constraints = Vec::new();\n        let mut bus_interactions = Vec::new();\n        for constr in constraint_system.constraints_referencing_variables(&variables_to_extract) {\n            match constr {\n                ConstraintRef::AlgebraicConstraint(algebraic_constraint) => algebraic_constraints\n                    .push(AlgebraicConstraint::assert_zero(\n                        
algebraic_constraint.expression.clone(),\n                    )),\n                ConstraintRef::BusInteraction(bus_interaction) => {\n                    bus_interactions.push(bus_interaction.clone())\n                }\n            }\n        }\n        systems.push(ConstraintSystem {\n            algebraic_constraints,\n            bus_interactions,\n            derived_variables: Vec::new(),\n        });\n        // Fine to iterate over a hash set here since the order in which we remove\n        // is not relevant.\n        #[allow(clippy::iter_over_hash_type)]\n        for v in variables_to_extract {\n            remaining_variables.remove(&v);\n        }\n    }\n    systems\n}\n"
  },
  {
    "path": "constraint-solver/src/test_utils.rs",
    "content": "use powdr_number::GoldilocksField;\n\nuse crate::{\n    constraint_system::{AlgebraicConstraint, BusInteraction, ConstraintSystem},\n    grouped_expression::GroupedExpression,\n    runtime_constant::RuntimeConstant,\n    symbolic_expression::SymbolicExpression,\n};\n\npub type Var = &'static str;\npub type Qse = GroupedExpression<SymbolicExpression<GoldilocksField, Var>, Var>;\n\npub fn var(name: Var) -> Qse {\n    Qse::from_unknown_variable(name)\n}\n\npub fn constant(value: u64) -> Qse {\n    Qse::from_number(GoldilocksField::from(value))\n}\n\nimpl<T: RuntimeConstant, V> ConstraintSystem<T, V> {\n    pub fn with_constraints(\n        mut self,\n        constraints: Vec<impl Into<AlgebraicConstraint<GroupedExpression<T, V>>>>,\n    ) -> Self {\n        self.algebraic_constraints\n            .extend(constraints.into_iter().map(Into::into));\n        self\n    }\n\n    pub fn with_bus_interactions(\n        mut self,\n        bus_interactions: Vec<impl Into<BusInteraction<GroupedExpression<T, V>>>>,\n    ) -> Self {\n        self.bus_interactions\n            .extend(bus_interactions.into_iter().map(Into::into));\n        self\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/utils.rs",
    "content": "use std::collections::BTreeMap;\nuse std::hash::Hash;\n\nuse itertools::Itertools;\nuse powdr_number::{FieldElement, LargeInt};\n\nuse crate::grouped_expression::{GroupedExpression, RangeConstraintProvider};\nuse crate::runtime_constant::{RuntimeConstant, Substitutable};\n\n/// Returns `true` if the number of possible assignments for the variables given the\n/// range constraints fits a `u64` and is at most `threshold`.\npub fn has_few_possible_assignments<T: FieldElement, V: Clone + Ord>(\n    variables: impl Iterator<Item = V>,\n    rc: &impl RangeConstraintProvider<T, V>,\n    threshold: u64,\n) -> bool {\n    variables\n        .map(|v| rc.get(&v))\n        .map(|rc| rc.size_estimate().try_into_u64())\n        .try_fold(1u64, |acc, x| acc.checked_mul(x?))\n        .is_some_and(|count| count <= threshold)\n}\n\n/// Returns all possible assignments for the given variables that satisfy their\n/// range constraints.\n///\n/// Note that it should be verified that the returned sequence is\n/// \"small\" before calling this function, for example using\n/// the function `has_few_possible_assignments`.\npub fn get_all_possible_assignments<T: FieldElement, V: Clone + Ord>(\n    variables: impl IntoIterator<Item = V>,\n    rc: &impl RangeConstraintProvider<T, V>,\n) -> impl Iterator<Item = BTreeMap<V, T>> {\n    variables\n        .into_iter()\n        .map(|v| {\n            rc.get(&v)\n                .allowed_values()\n                .collect_vec()\n                .into_iter()\n                .map(move |value| (v.clone(), value))\n        })\n        .multi_cartesian_product()\n        .map(|assignment| assignment.into_iter().collect::<BTreeMap<_, _>>())\n}\n\n/// Returns all possible concrete values for `expr` using exhaustive search.\n/// Returns None if the number of possible assignments exceeds `max_elements`.\npub fn possible_concrete_values<\n    'a,\n    T: RuntimeConstant + Substitutable<V> + Clone,\n    V: Clone + Ord + Hash,\n>(\n    
expr: &'a GroupedExpression<T, V>,\n    rc: &'a impl RangeConstraintProvider<T::FieldType, V>,\n    max_elements: u64,\n) -> Option<impl Iterator<Item = T> + 'a> {\n    let variables = expr.referenced_unknown_variables().cloned().collect_vec();\n    if has_few_possible_assignments(variables.iter().cloned(), rc, max_elements) {\n        Some(\n            get_all_possible_assignments(variables, rc).map(|assignment| {\n                let mut expr = expr.clone();\n                for (variable, value) in assignment.iter() {\n                    expr.substitute_by_known(variable, &T::from(*value));\n                }\n                // We substitute all variables, so this has to be a runtime constant.\n                expr.try_to_known().unwrap().clone()\n            }),\n        )\n    } else {\n        // If there are too many possible assignments, we do not try to perform exhaustive search.\n        None\n    }\n}\n"
  },
  {
    "path": "constraint-solver/src/variable_update.rs",
    "content": "use powdr_number::FieldElement;\n\nuse super::range_constraint::RangeConstraint;\n\n/// An update representing new information about a variable.\n#[derive(Debug, Clone)]\npub struct VariableUpdate<T: FieldElement, V, R> {\n    pub variable: V,\n    pub update: UpdateKind<T, R>,\n}\n\n#[derive(Debug, Clone)]\npub enum UpdateKind<T: FieldElement, R> {\n    /// We have updated range constraints for the variable.\n    RangeConstraintUpdate(RangeConstraint<T>),\n    /// The variable is to be replaced by a different expression.\n    Replace(R),\n}\n"
  },
  {
    "path": "constraint-solver/tests/solver.rs",
    "content": "use std::collections::BTreeMap;\n\nuse num_traits::identities::{One, Zero};\nuse powdr_constraint_solver::{\n    bus_interaction_handler::DefaultBusInteractionHandler,\n    constraint_system::{BusInteraction, BusInteractionHandler, ConstraintSystem},\n    grouped_expression::GroupedExpression,\n    range_constraint::RangeConstraint,\n    solver::{solve_system, Error},\n};\nuse powdr_number::{FieldElement, GoldilocksField, LargeInt};\nuse test_log::test;\n\nuse pretty_assertions::assert_eq;\n\ntype Var = &'static str;\n\nfn var(name: Var) -> GroupedExpression<GoldilocksField, Var> {\n    GroupedExpression::from_unknown_variable(name)\n}\n\nfn constant(value: u64) -> GroupedExpression<GoldilocksField, Var> {\n    GroupedExpression::from_number(GoldilocksField::from(value))\n}\n\npub fn assert_solve_result<B: BusInteractionHandler<GoldilocksField>>(\n    system: ConstraintSystem<GoldilocksField, Var>,\n    bus_interaction_handler: B,\n    expected_assignments: Vec<(Var, GoldilocksField)>,\n) {\n    let final_state = solve_system(system, bus_interaction_handler).unwrap();\n    let expected_final_state = expected_assignments.into_iter().collect();\n    assert_expected_state(final_state, expected_final_state);\n}\n\nfn assert_expected_state(\n    final_state: impl IntoIterator<Item = (Var, GroupedExpression<GoldilocksField, Var>)>,\n    expected_final_state: BTreeMap<Var, GoldilocksField>,\n) {\n    let final_state = final_state.into_iter().collect::<BTreeMap<_, _>>();\n    assert_eq!(\n        final_state.keys().collect::<Vec<_>>(),\n        expected_final_state.keys().collect::<Vec<_>>(),\n        \"Different set of variables\"\n    );\n\n    let mut error = false;\n    for (variable, value) in expected_final_state {\n        // Compare string representation, so that range constraints are ignored.\n        if final_state[variable].to_string() != value.to_string() {\n            log::error!(\"Mismatch for variable {variable}:\");\n            
log::error!(\"  Expected: {value}\");\n            log::error!(\"  Actual:   {}\", final_state[variable]);\n            error = true;\n        }\n    }\n    assert!(!error, \"Final state does not match expected state\");\n}\n\n#[test]\nfn single_variable() {\n    assert_solve_result(\n        ConstraintSystem::default().with_constraints(vec![var(\"x\") - constant(5)]),\n        DefaultBusInteractionHandler::default(),\n        vec![(\"x\", 5.into())],\n    );\n}\n\n#[test]\nfn concretely_solvable() {\n    let constraint_system = ConstraintSystem::default().with_constraints(vec![\n        var(\"a\") - constant(2),\n        var(\"b\") - constant(3),\n        // c = a * b = 6\n        var(\"c\") - var(\"a\") * var(\"b\"),\n        // d = c * 4 - a = 22\n        var(\"d\") - (var(\"c\") * constant(4) - var(\"a\")),\n    ]);\n    assert_solve_result(\n        constraint_system,\n        DefaultBusInteractionHandler::default(),\n        vec![\n            (\"a\", 2.into()),\n            (\"b\", 3.into()),\n            (\"c\", 6.into()),\n            (\"d\", 22.into()),\n        ],\n    );\n}\n\n#[test]\nfn bit_decomposition() {\n    let constraint_system = ConstraintSystem::default().with_constraints(vec![\n        // 4 bit-constrained variables:\n        var(\"b0\") * (var(\"b0\") - constant(1)),\n        var(\"b1\") * (var(\"b1\") - constant(1)),\n        var(\"b2\") * (var(\"b2\") - constant(1)),\n        var(\"b3\") * (var(\"b3\") - constant(1)),\n        // Bit-decomposition of a concrete value:\n        var(\"b0\") + var(\"b1\") * constant(2) + var(\"b2\") * constant(4) + var(\"b3\") * constant(8)\n            - constant(0b1110),\n    ]);\n\n    assert_solve_result(\n        constraint_system,\n        DefaultBusInteractionHandler::default(),\n        vec![\n            (\"b0\", 0.into()),\n            (\"b1\", 1.into()),\n            (\"b2\", 1.into()),\n            (\"b3\", 1.into()),\n        ],\n    );\n}\n\nconst BYTE_BUS_ID: u64 = 42;\nconst XOR_BUS_ID: u64 = 
43;\n\nstruct TestBusInteractionHandler;\nimpl BusInteractionHandler<GoldilocksField> for TestBusInteractionHandler {\n    fn handle_bus_interaction(\n        &self,\n        bus_interaction: BusInteraction<RangeConstraint<GoldilocksField>>,\n    ) -> BusInteraction<RangeConstraint<GoldilocksField>> {\n        let (Some(bus_id), Some(multiplicity)) = (\n            bus_interaction.bus_id.try_to_single_value(),\n            bus_interaction.multiplicity.try_to_single_value(),\n        ) else {\n            return bus_interaction;\n        };\n\n        if multiplicity.is_zero() {\n            return bus_interaction;\n        }\n\n        assert!(multiplicity.is_one(), \"Only expected send interactions\");\n        let byte_constraint = RangeConstraint::from_mask(0xffu32);\n        let payload_constraints = match bus_id.to_integer().try_into_u64().unwrap() {\n            BYTE_BUS_ID => {\n                assert_eq!(bus_interaction.payload.len(), 1);\n                vec![byte_constraint]\n            }\n            XOR_BUS_ID => {\n                assert_eq!(bus_interaction.payload.len(), 3);\n                if let (Some(a), Some(b)) = (\n                    bus_interaction.payload[0].try_to_single_value(),\n                    bus_interaction.payload[1].try_to_single_value(),\n                ) {\n                    // Both inputs are known, can compute result concretely\n                    let result = GoldilocksField::from(\n                        a.to_integer().try_into_u64().unwrap()\n                            ^ b.to_integer().try_into_u64().unwrap(),\n                    );\n                    vec![\n                        bus_interaction.payload[0],\n                        bus_interaction.payload[1],\n                        RangeConstraint::from_value(result),\n                    ]\n                } else {\n                    vec![byte_constraint; 3]\n                }\n            }\n            _ => {\n                panic!(\"Unexpected bus ID: 
{bus_id}\");\n            }\n        };\n        BusInteraction {\n            payload: payload_constraints,\n            ..bus_interaction\n        }\n    }\n}\n\nfn send(\n    bus_id: u64,\n    payload: Vec<GroupedExpression<GoldilocksField, Var>>,\n) -> BusInteraction<GroupedExpression<GoldilocksField, Var>> {\n    BusInteraction {\n        multiplicity: constant(1),\n        bus_id: constant(bus_id),\n        payload,\n    }\n}\n\n#[test]\nfn byte_decomposition() {\n    let constraint_system = ConstraintSystem::default()\n        .with_constraints(vec![\n            // Byte-decomposition of a concrete value:\n            var(\"b0\")\n                + var(\"b1\") * constant(1 << 8)\n                + var(\"b2\") * constant(1 << 16)\n                + var(\"b3\") * constant(1 << 24)\n                - constant(0xabcdef12),\n        ])\n        .with_bus_interactions(\n            // Byte range constraints on b0..3\n            (0..4)\n                .map(|i| send(BYTE_BUS_ID, vec![var(format!(\"b{i}\").leak())]))\n                .collect(),\n        );\n\n    assert_solve_result(\n        constraint_system,\n        TestBusInteractionHandler,\n        vec![\n            (\"b0\", 0x12.into()),\n            (\"b1\", 0xef.into()),\n            (\"b2\", 0xcd.into()),\n            (\"b3\", 0xab.into()),\n        ],\n    );\n}\n\n#[test]\nfn xor() {\n    let constraint_system = ConstraintSystem::default()\n        .with_constraints(vec![\n            // a and b are the byte decomposition of 0xa00b\n            // Note that solving this requires range constraints on a and b\n            constant(1 << 8) * var(\"a\") + var(\"b\") - constant(0xa00b),\n        ])\n        // Send (a, b, c) to the XOR table.\n        // Initially, this should return the required range constraints for a and b.\n        // Once a and b are known concretely, c can be computed concretely as well.\n        .with_bus_interactions(vec![send(XOR_BUS_ID, vec![var(\"a\"), var(\"b\"), 
var(\"c\")])]);\n\n    assert_solve_result(\n        constraint_system,\n        TestBusInteractionHandler,\n        vec![(\"a\", 0xa0.into()), (\"b\", 0x0b.into()), (\"c\", 0xab.into())],\n    );\n}\n\n#[test]\nfn xor_invalid() {\n    let constraint_system = ConstraintSystem::default()\n        .with_constraints(vec![\n            var(\"a\") - constant(0xa0),\n            var(\"b\") - constant(0x0b),\n            var(\"c\") - constant(0xff),\n        ])\n        .with_bus_interactions(vec![send(XOR_BUS_ID, vec![var(\"a\"), var(\"b\"), var(\"c\")])]);\n\n    match solve_system(constraint_system, TestBusInteractionHandler) {\n        Err(e) => assert_eq!(e, Error::BusInteractionError),\n        _ => panic!(\"Expected error!\"),\n    }\n}\n\n#[test]\nfn one_hot_flags() {\n    let constraint_system = ConstraintSystem::default().with_constraints(vec![\n        // Boolean flags\n        var(\"flag0\") * (var(\"flag0\") - constant(1)),\n        var(\"flag1\") * (var(\"flag1\") - constant(1)),\n        var(\"flag2\") * (var(\"flag2\") - constant(1)),\n        var(\"flag3\") * (var(\"flag3\") - constant(1)),\n        // Exactly one flag is active\n        var(\"flag0\") + var(\"flag1\") + var(\"flag2\") + var(\"flag3\") - constant(1),\n        // Flag 2 is active\n        var(\"flag0\") * constant(0)\n            + var(\"flag1\") * constant(1)\n            + var(\"flag2\") * constant(2)\n            + var(\"flag3\") * constant(3)\n            - constant(2),\n    ]);\n\n    // This can be solved via backtracking: There are 16 possible assignments\n    // for the 4 flags, but only 1 of them satisfies all the constraints.\n    assert_solve_result(\n        constraint_system,\n        DefaultBusInteractionHandler::default(),\n        vec![\n            (\"flag0\", 0.into()),\n            (\"flag1\", 0.into()),\n            (\"flag2\", 1.into()),\n            (\"flag3\", 0.into()),\n        ],\n    );\n}\n\n#[test]\nfn binary_flags() {\n    let bit_to_expression = |bit, var| 
match bit {\n        true => var,\n        false => constant(1) - var,\n    };\n    let index_to_expression = |i: usize| -> GroupedExpression<GoldilocksField, Var> {\n        (0..3)\n            .map(move |j| bit_to_expression(i & (1 << j) != 0, var(format!(\"flag{j}\").leak())))\n            .fold(constant(1), |acc, x| acc * x)\n    };\n    let constraint_system = ConstraintSystem::default().with_constraints(vec![\n        // Boolean flags\n        var(\"flag0\") * (var(\"flag0\") - constant(1)),\n        var(\"flag1\") * (var(\"flag1\") - constant(1)),\n        var(\"flag2\") * (var(\"flag2\") - constant(1)),\n        index_to_expression(0b000) * constant(101)\n            + index_to_expression(0b001) * constant(102)\n            + index_to_expression(0b010) * constant(103)\n            + index_to_expression(0b011) * constant(104)\n            + index_to_expression(0b100) * constant(105)\n            + index_to_expression(0b101) * constant(106)\n            + index_to_expression(0b110) * constant(107)\n            + index_to_expression(0b111) * constant(108)\n            - constant(104),\n    ]);\n\n    assert_solve_result(\n        constraint_system,\n        DefaultBusInteractionHandler::default(),\n        vec![\n            (\"flag0\", 1.into()),\n            (\"flag1\", 1.into()),\n            (\"flag2\", 0.into()),\n        ],\n    );\n}\n\n#[test]\nfn ternary_flags() {\n    // Implementing this logic in the OpenVM load/store chip:\n    // https://github.com/openvm-org/openvm/blob/v1.2.0/extensions/rv32im/circuit/src/loadstore/core.rs#L110-L139\n    let two_inv = GroupedExpression::from_number(GoldilocksField::one() / GoldilocksField::from(2));\n    let neg_one = GroupedExpression::from_number(-GoldilocksField::one());\n    let sum = var(\"flag0\") + var(\"flag1\") + var(\"flag2\") + var(\"flag3\");\n    // The flags must be 0, 1, or 2, and their sum must be 1 or 2.\n    // Given these constraints, there are 14 possible assignments. 
The following\n    // expressions evaluate to 1 for exactly one of them, and otherwise to 0:\n    let cases = vec![\n        // (2, 0, 0, 0), (0, 2, 0, 0), (0, 0, 2, 0), (0, 0, 0, 2)\n        var(\"flag0\") * (var(\"flag0\") - constant(1)) * two_inv.clone(),\n        var(\"flag1\") * (var(\"flag1\") - constant(1)) * two_inv.clone(),\n        var(\"flag2\") * (var(\"flag2\") - constant(1)) * two_inv.clone(),\n        var(\"flag3\") * (var(\"flag3\") - constant(1)) * two_inv.clone(),\n        // (1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)\n        var(\"flag0\") * (sum.clone() - constant(2)) * neg_one.clone(),\n        var(\"flag1\") * (sum.clone() - constant(2)) * neg_one.clone(),\n        var(\"flag2\") * (sum.clone() - constant(2)) * neg_one.clone(),\n        var(\"flag3\") * (sum.clone() - constant(2)) * neg_one.clone(),\n        // (1, 1, 0, 0), (1, 0, 1, 0), (1, 0, 0, 1), (0, 1, 1, 0), (0, 1, 0, 1), (0, 0, 1, 1)\n        var(\"flag0\") * var(\"flag1\"),\n        var(\"flag0\") * var(\"flag2\"),\n        var(\"flag0\") * var(\"flag3\"),\n        var(\"flag1\") * var(\"flag2\"),\n        var(\"flag1\") * var(\"flag3\"),\n        var(\"flag2\") * var(\"flag3\"),\n    ];\n    let constraint_system = ConstraintSystem::default().with_constraints(vec![\n        // All flags are either 0, 1, or 2.\n        var(\"flag0\") * (var(\"flag0\") - constant(1)) * (var(\"flag0\") - constant(2)),\n        var(\"flag1\") * (var(\"flag1\") - constant(1)) * (var(\"flag1\") - constant(2)),\n        var(\"flag2\") * (var(\"flag2\") - constant(1)) * (var(\"flag2\") - constant(2)),\n        var(\"flag3\") * (var(\"flag3\") - constant(1)) * (var(\"flag3\") - constant(2)),\n        // The sum of flags is either 1 or 2.\n        (sum.clone() - constant(1)) * (sum.clone() - constant(2)),\n        // Of the expressions in `cases`, exactly one must evaluate to 1.\n        // From this constraint, it can be derived that it must be one of case 3, 4, 5, or 6.\n        
cases[0].clone() * constant(1)\n            + (cases[1].clone() + cases[2].clone()) * constant(2)\n            + (cases[3].clone() + cases[4].clone() + cases[5].clone() + cases[6].clone())\n                * constant(3)\n            + cases[7].clone() * constant(4)\n            + (cases[8].clone() + cases[9].clone()) * constant(5)\n            + (cases[10].clone() + cases[11].clone() + cases[12].clone() + cases[13].clone())\n                * constant(6)\n            - constant(3),\n        // We don't know which case is active, but for any of the cases that it could be,\n        // is_load would be 1, so we should be able to solve for it.\n        var(\"is_load\")\n            - (cases[0].clone()\n                + cases[1].clone()\n                + cases[2].clone()\n                + cases[3].clone()\n                + cases[4].clone()\n                + cases[5].clone()\n                + cases[6].clone()),\n    ]);\n\n    assert_solve_result(\n        constraint_system,\n        DefaultBusInteractionHandler::default(),\n        vec![(\"is_load\", 1.into())],\n    );\n}\n\n#[test]\nfn bit_decomposition_bug() {\n    let algebraic_constraints = vec![\n        var(\"cmp_result_0\") * (var(\"cmp_result_0\") - constant(1)),\n        var(\"imm_0\") - constant(8),\n        var(\"cmp_result_0\") * var(\"imm_0\")\n            - constant(4) * var(\"cmp_result_0\")\n            - var(\"BusInteractionField(10, 2)\")\n            + constant(4),\n        (var(\"BusInteractionField(10, 2)\") - constant(4))\n            * (var(\"BusInteractionField(10, 2)\") - constant(8)),\n    ];\n    let constraint_system = ConstraintSystem::default().with_constraints(algebraic_constraints);\n    // The solver used to infer more assignments due to a bug\n    // in the bit decomposition logic.\n    assert_solve_result(\n        constraint_system,\n        DefaultBusInteractionHandler::default(),\n        vec![(\"imm_0\", 8.into())],\n    );\n}\n"
  },
  {
    "path": "expression/Cargo.toml",
    "content": "[package]\nname = \"powdr-expression\"\ndescription = \"powdr expression type\"\nversion = { workspace = true }\nedition = { workspace = true }\nlicense = { workspace = true }\nhomepage = { workspace = true }\nrepository = { workspace = true }\n\n[dependencies]\npowdr-number.workspace = true\n\nnum-traits.workspace = true\nderive_more.workspace = true\nserde = { version = \"1.0\", default-features = false, features = [\"alloc\", \"derive\", \"rc\"] }\nschemars = { version = \"0.8.16\", features = [\"preserve_order\"]}\n\n[dev-dependencies]\ntest-log.workspace = true\npretty_assertions.workspace = true\nserde_json.workspace = true\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "expression/src/display.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\nuse crate::{\n    AlgebraicBinaryOperation, AlgebraicBinaryOperator, AlgebraicExpression,\n    AlgebraicUnaryOperation, AlgebraicUnaryOperator,\n};\n\ntype ExpressionPrecedence = u64;\ntrait Precedence {\n    fn precedence(&self) -> Option<ExpressionPrecedence>;\n}\n\nimpl Precedence for AlgebraicUnaryOperator {\n    fn precedence(&self) -> Option<ExpressionPrecedence> {\n        Some(match self {\n            AlgebraicUnaryOperator::Minus => 1,\n        })\n    }\n}\n\nimpl Precedence for AlgebraicBinaryOperator {\n    fn precedence(&self) -> Option<ExpressionPrecedence> {\n        Some(match self {\n            Self::Mul => 3,\n            Self::Add | Self::Sub => 4,\n        })\n    }\n}\n\nimpl<T, R> Precedence for AlgebraicExpression<T, R> {\n    fn precedence(&self) -> Option<ExpressionPrecedence> {\n        match self {\n            AlgebraicExpression::UnaryOperation(operation) => operation.op.precedence(),\n            AlgebraicExpression::BinaryOperation(operation) => operation.op.precedence(),\n            AlgebraicExpression::Number(..) | AlgebraicExpression::Reference(..) 
=> None,\n        }\n    }\n}\n\nimpl<T: Display, R: Display> Display for AlgebraicBinaryOperation<T, R> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let op_precedence = self.op.precedence().unwrap();\n        let use_left_parentheses = match self.left.precedence() {\n            Some(left_precedence) => left_precedence > op_precedence,\n            None => false,\n        };\n\n        let use_right_parentheses = match self.right.precedence() {\n            Some(right_precedence) => right_precedence >= op_precedence,\n            None => false,\n        };\n\n        let left_string = if use_left_parentheses {\n            format!(\"({})\", self.left)\n        } else {\n            format!(\"{}\", self.left)\n        };\n        let right_string = if use_right_parentheses {\n            format!(\"({})\", self.right)\n        } else {\n            format!(\"{}\", self.right)\n        };\n\n        write!(f, \"{left_string} {} {right_string}\", self.op)\n    }\n}\n\nimpl<T: Display, R: Display> Display for AlgebraicUnaryOperation<T, R> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let exp_string = match (self.op.precedence(), self.expr.precedence()) {\n            (Some(precedence), Some(inner_precedence)) if precedence < inner_precedence => {\n                format!(\"({})\", self.expr)\n            }\n            _ => {\n                format!(\"{}\", self.expr)\n            }\n        };\n\n        write!(f, \"{}{exp_string}\", self.op)\n    }\n}\n\nimpl Display for AlgebraicUnaryOperator {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            AlgebraicUnaryOperator::Minus => write!(f, \"-\"),\n        }\n    }\n}\n\nimpl Display for AlgebraicBinaryOperator {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            AlgebraicBinaryOperator::Add => write!(f, \"+\"),\n            AlgebraicBinaryOperator::Sub => write!(f, \"-\"),\n           
 AlgebraicBinaryOperator::Mul => write!(f, \"*\"),\n        }\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use powdr_number::GoldilocksField;\n    use pretty_assertions::assert_eq;\n    use test_log::test;\n\n    use super::AlgebraicExpression;\n\n    fn test_display(expr: AlgebraicExpression<GoldilocksField, &str>, expected: &str) {\n        assert_eq!(expr.to_string(), expected);\n    }\n\n    #[test]\n    fn binary_op() {\n        let x = AlgebraicExpression::Reference(\"x\");\n        let y = AlgebraicExpression::Reference(\"y\");\n        let z = AlgebraicExpression::Reference(\"z\");\n        // Don't add extra\n        test_display(x.clone() + y.clone() + z.clone(), \"x + y + z\");\n        test_display(x.clone() * y.clone() * z.clone(), \"x * y * z\");\n        // Remove unneeded\n        test_display(-x.clone() + y.clone() * z.clone(), \"-x + y * z\");\n        test_display((x.clone() * y.clone()) * z.clone(), \"x * y * z\");\n        test_display(x.clone() - (y.clone() + z.clone()), \"x - (y + z)\");\n        test_display((x.clone() * y.clone()) + z.clone(), \"x * y + z\");\n        // Observe associativity\n        test_display(x.clone() * (y.clone() * z.clone()), \"x * (y * z)\");\n        test_display(x.clone() + (y.clone() + z.clone()), \"x + (y + z)\");\n        // Don't remove needed\n        test_display((x.clone() + y.clone()) * z.clone(), \"(x + y) * z\");\n        test_display(-(x.clone() + y.clone()), \"-(x + y)\");\n    }\n}\n"
  },
  {
    "path": "expression/src/lib.rs",
    "content": "use std::{\n    iter,\n    ops::{self, Add, Mul, Neg, Sub},\n};\n\nuse powdr_number::ExpressionConvertible;\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\npub mod display;\npub mod visitors;\n\n#[derive(\n    Debug,\n    PartialEq,\n    Eq,\n    PartialOrd,\n    Ord,\n    Clone,\n    Serialize,\n    Deserialize,\n    JsonSchema,\n    Hash,\n    derive_more::Display,\n)]\npub enum AlgebraicExpression<T, R> {\n    #[serde(untagged)]\n    Reference(R),\n    #[serde(untagged)]\n    Number(T),\n    #[serde(untagged, serialize_with = \"serialize_binary_operation\")]\n    BinaryOperation(AlgebraicBinaryOperation<T, R>),\n    #[serde(untagged, serialize_with = \"serialize_unary_operation\")]\n    UnaryOperation(AlgebraicUnaryOperation<T, R>),\n}\n\n#[derive(\n    Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Serialize, Deserialize, JsonSchema, Hash,\n)]\npub struct AlgebraicBinaryOperation<T, R> {\n    pub left: Box<AlgebraicExpression<T, R>>,\n    pub op: AlgebraicBinaryOperator,\n    pub right: Box<AlgebraicExpression<T, R>>,\n}\n\n#[derive(\n    Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize, Deserialize, JsonSchema, Hash,\n)]\npub enum AlgebraicBinaryOperator {\n    #[serde(rename = \"+\")]\n    Add,\n    #[serde(rename = \"-\")]\n    Sub,\n    #[serde(rename = \"*\")]\n    Mul,\n}\n\n#[derive(\n    Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Serialize, Deserialize, JsonSchema, Hash,\n)]\npub struct AlgebraicUnaryOperation<T, R> {\n    pub op: AlgebraicUnaryOperator,\n    pub expr: Box<AlgebraicExpression<T, R>>,\n}\n\n#[derive(\n    Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize, Deserialize, JsonSchema, Hash,\n)]\npub enum AlgebraicUnaryOperator {\n    #[serde(rename = \"-\")]\n    Minus,\n}\n\nimpl<T, R> AlgebraicExpression<T, R> {\n    /// Returns an iterator over all (top-level) expressions in this expression.\n    /// This specifically does not implement the Children trait because otherwise it\n 
   /// would have a wrong implementation of ExpressionVisitable (which is implemented\n    /// generically for all types that implement Children<Expr>).\n    fn children(&self) -> Box<dyn Iterator<Item = &AlgebraicExpression<T, R>> + '_> {\n        match self {\n            AlgebraicExpression::Reference(_) | AlgebraicExpression::Number(_) => {\n                Box::new(iter::empty())\n            }\n            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {\n                left, right, ..\n            }) => Box::new([left.as_ref(), right.as_ref()].into_iter()),\n            AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation { expr: e, .. }) => {\n                Box::new([e.as_ref()].into_iter())\n            }\n        }\n    }\n    /// Returns an iterator over all (top-level) expressions in this expression.\n    /// This specifically does not implement the Children trait because otherwise it\n    /// would have a wrong implementation of ExpressionVisitable (which is implemented\n    /// generically for all types that implement Children<Expr>).\n    fn children_mut(&mut self) -> Box<dyn Iterator<Item = &mut AlgebraicExpression<T, R>> + '_> {\n        match self {\n            AlgebraicExpression::Reference(_) | AlgebraicExpression::Number(_) => {\n                Box::new(iter::empty())\n            }\n            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {\n                left, right, ..\n            }) => Box::new([left.as_mut(), right.as_mut()].into_iter()),\n            AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation { expr: e, .. }) => {\n                Box::new([e.as_mut()].into_iter())\n            }\n        }\n    }\n\n    /// Returns the degree of the expressions\n    pub fn degree(&self) -> usize {\n        match self {\n            AlgebraicExpression::Reference(..) 
=> 1,\n            // Multiplying two expressions adds their degrees\n            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {\n                op: AlgebraicBinaryOperator::Mul,\n                left,\n                right,\n            }) => left.degree() + right.degree(),\n            // In all other cases, we take the maximum of the degrees of the children\n            _ => self.children().map(|e| e.degree()).max().unwrap_or(0),\n        }\n    }\n\n    pub fn new_binary(left: Self, op: AlgebraicBinaryOperator, right: Self) -> Self {\n        AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {\n            left: Box::new(left),\n            op,\n            right: Box::new(right),\n        })\n    }\n\n    pub fn new_unary(op: AlgebraicUnaryOperator, expr: Self) -> Self {\n        AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation {\n            op,\n            expr: Box::new(expr),\n        })\n    }\n}\n\nimpl<T, R> ops::Add for AlgebraicExpression<T, R> {\n    type Output = Self;\n\n    fn add(self, rhs: Self) -> Self::Output {\n        Self::new_binary(self, AlgebraicBinaryOperator::Add, rhs)\n    }\n}\n\nimpl<T, R> ops::Sub for AlgebraicExpression<T, R> {\n    type Output = Self;\n\n    fn sub(self, rhs: Self) -> Self::Output {\n        Self::new_binary(self, AlgebraicBinaryOperator::Sub, rhs)\n    }\n}\n\nimpl<T, R> ops::Neg for AlgebraicExpression<T, R> {\n    type Output = Self;\n\n    fn neg(self) -> Self::Output {\n        Self::new_unary(AlgebraicUnaryOperator::Minus, self)\n    }\n}\n\nimpl<T, R> ops::Mul for AlgebraicExpression<T, R> {\n    type Output = Self;\n\n    fn mul(self, rhs: Self) -> Self::Output {\n        Self::new_binary(self, AlgebraicBinaryOperator::Mul, rhs)\n    }\n}\n\nimpl<T, R> From<T> for AlgebraicExpression<T, R> {\n    fn from(value: T) -> Self {\n        AlgebraicExpression::Number(value)\n    }\n}\n\nimpl<T, R> ExpressionConvertible<T, R> for AlgebraicExpression<T, R> {\n    fn 
to_expression<\n        E: Add<E, Output = E> + Sub<E, Output = E> + Mul<E, Output = E> + Neg<Output = E>,\n    >(\n        &self,\n        number_converter: &impl Fn(&T) -> E,\n        var_converter: &impl Fn(&R) -> E,\n    ) -> E {\n        match self {\n            AlgebraicExpression::Reference(r) => var_converter(r),\n            AlgebraicExpression::Number(n) => number_converter(n),\n            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation { left, op, right }) => {\n                let left = left.to_expression(number_converter, var_converter);\n                let right = right.to_expression(number_converter, var_converter);\n\n                match op {\n                    AlgebraicBinaryOperator::Add => left + right,\n                    AlgebraicBinaryOperator::Sub => left - right,\n                    AlgebraicBinaryOperator::Mul => left * right,\n                }\n            }\n            AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation { op, expr }) => match op {\n                AlgebraicUnaryOperator::Minus => {\n                    -expr.to_expression(number_converter, var_converter)\n                }\n            },\n        }\n    }\n}\n\nfn serialize_unary_operation<S, T, R>(\n    un_op: &AlgebraicUnaryOperation<T, R>,\n    serializer: S,\n) -> Result<S::Ok, S::Error>\nwhere\n    S: serde::Serializer,\n    T: Serialize,\n    R: Serialize,\n{\n    (&un_op.op, un_op.expr.as_ref()).serialize(serializer)\n}\n\nfn serialize_binary_operation<S, T, R>(\n    bin_op: &AlgebraicBinaryOperation<T, R>,\n    serializer: S,\n) -> Result<S::Ok, S::Error>\nwhere\n    S: serde::Serializer,\n    T: Serialize,\n    R: Serialize,\n{\n    (bin_op.left.as_ref(), &bin_op.op, bin_op.right.as_ref()).serialize(serializer)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_serde() {\n        let x: AlgebraicExpression<u32, &'static str> = AlgebraicExpression::from(5)\n            * 
AlgebraicExpression::Reference(\"x\")\n            - AlgebraicExpression::from(3);\n        let serialized = serde_json::to_string(&x).unwrap();\n        assert_eq!(serialized, r#\"[[5,\"*\",\"x\"],\"-\",3]\"#);\n        let deserialized = serde_json::from_str(&serialized).unwrap();\n        assert_eq!(x, deserialized);\n    }\n}\n"
  },
  {
    "path": "expression/src/visitors.rs",
    "content": "use std::{iter, ops::ControlFlow};\n\nuse crate::AlgebraicExpression;\n\n/// Generic trait that allows to iterate over sub-structures.\n///\n/// It is only meant to iterate non-recursively over the direct children.\n/// Self and O do not have to be the same type and we can also have\n/// Children<O1> and Children<O2> implemented for the same type,\n/// if the goal is to iterate over sub-structures of different kinds.\npub trait Children<O> {\n    /// Returns an iterator over all direct children of kind O in this object.\n    fn children(&self) -> Box<dyn Iterator<Item = &O> + '_>;\n    /// Returns an iterator over all direct children of kind Q in this object.\n    fn children_mut(&mut self) -> Box<dyn Iterator<Item = &mut O> + '_>;\n}\n\npub trait AllChildren<O> {\n    /// Returns an iterator over all direct and indirect children of kind `O` in this object.\n    /// If `O` and `Self` are the same type, also includes `self`.\n    /// Pre-order visitor.\n    fn all_children(&self) -> Box<dyn Iterator<Item = &O> + '_>;\n}\n\n#[derive(Clone, Copy, Debug, PartialEq, Eq)]\npub enum VisitOrder {\n    Pre,\n    Post,\n}\n\n/// A trait to be implemented by an AST node.\n///\n/// The idea is that it calls a callback function on each of the sub-nodes\n/// that are expressions.\n/// The difference to the Children<Expr> trait is that ExpressionVisitable\n/// visits recursively.\n/// If a node implements Children<Expr>, it also implements ExpressionVisitable<Expr>.\npub trait ExpressionVisitable<Expr> {\n    /// Traverses the AST and calls `f` on each Expression in pre-order.\n    fn pre_visit_expressions_mut<F>(&mut self, f: &mut F)\n    where\n        F: FnMut(&mut Expr),\n    {\n        let _ = self.visit_expressions_mut(\n            &mut move |e| {\n                f(e);\n                ControlFlow::Continue::<()>(())\n            },\n            VisitOrder::Pre,\n        );\n    }\n\n    /// Traverses the AST and calls `f` on each Expression in 
post-order.\n    fn post_visit_expressions_mut<F>(&mut self, f: &mut F)\n    where\n        F: FnMut(&mut Expr),\n    {\n        let _ = self.visit_expressions_mut(\n            &mut move |e| {\n                f(e);\n                ControlFlow::Continue::<()>(())\n            },\n            VisitOrder::Post,\n        );\n    }\n\n    fn visit_expressions<F, B>(&self, f: &mut F, order: VisitOrder) -> ControlFlow<B>\n    where\n        F: FnMut(&Expr) -> ControlFlow<B>;\n\n    fn visit_expressions_mut<F, B>(&mut self, f: &mut F, order: VisitOrder) -> ControlFlow<B>\n    where\n        F: FnMut(&mut Expr) -> ControlFlow<B>;\n}\n\nimpl<Expr: ExpressionVisitable<Expr>, C: Children<Expr>> ExpressionVisitable<Expr> for C {\n    fn visit_expressions_mut<F, B>(&mut self, f: &mut F, o: VisitOrder) -> ControlFlow<B>\n    where\n        F: FnMut(&mut Expr) -> ControlFlow<B>,\n    {\n        self.children_mut()\n            .try_for_each(|child| child.visit_expressions_mut(f, o))\n    }\n\n    fn visit_expressions<F, B>(&self, f: &mut F, o: VisitOrder) -> ControlFlow<B>\n    where\n        F: FnMut(&Expr) -> ControlFlow<B>,\n    {\n        self.children()\n            .try_for_each(|child| child.visit_expressions(f, o))\n    }\n}\n\nimpl<Expr: AllChildren<Expr>, C: Children<Expr>> AllChildren<Expr> for C {\n    fn all_children(&self) -> Box<dyn Iterator<Item = &Expr> + '_> {\n        Box::new(self.children().flat_map(|e| e.all_children()))\n    }\n}\n\nimpl<T, R> ExpressionVisitable<AlgebraicExpression<T, R>> for AlgebraicExpression<T, R> {\n    fn visit_expressions_mut<F, B>(&mut self, f: &mut F, o: VisitOrder) -> ControlFlow<B>\n    where\n        F: FnMut(&mut AlgebraicExpression<T, R>) -> ControlFlow<B>,\n    {\n        if o == VisitOrder::Pre {\n            f(self)?;\n        }\n        self.children_mut()\n            .try_for_each(|e| e.visit_expressions_mut(f, o))?;\n        if o == VisitOrder::Post {\n            f(self)?;\n        }\n        
ControlFlow::Continue(())\n    }\n\n    fn visit_expressions<F, B>(&self, f: &mut F, o: VisitOrder) -> ControlFlow<B>\n    where\n        F: FnMut(&AlgebraicExpression<T, R>) -> ControlFlow<B>,\n    {\n        if o == VisitOrder::Pre {\n            f(self)?;\n        }\n        self.children()\n            .try_for_each(|e| e.visit_expressions(f, o))?;\n        if o == VisitOrder::Post {\n            f(self)?;\n        }\n        ControlFlow::Continue(())\n    }\n}\n\nimpl<T, R> AllChildren<AlgebraicExpression<T, R>> for AlgebraicExpression<T, R> {\n    fn all_children(&self) -> Box<dyn Iterator<Item = &AlgebraicExpression<T, R>> + '_> {\n        Box::new(iter::once(self).chain(self.children().flat_map(|e| e.all_children())))\n    }\n}\n"
  },
  {
    "path": "isa-utils/Cargo.toml",
    "content": "[package]\nname = \"powdr-isa-utils\"\ndescription = \"powdr utilities for translating from native ISA code (RISCV for now)\"\nversion = { workspace = true }\nedition = { workspace = true }\nlicense = { workspace = true }\nhomepage = { workspace = true }\nrepository = { workspace = true }\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "isa-utils/src/lib.rs",
    "content": "/// A single 32-bit data value.\npub enum SingleDataValue {\n    /// A literal value.\n    Value(u32),\n    /// The value of a pointer to a text label. Since there may not be a\n    /// 1-to-1 correspondence between native ISAs and Powdr ASM instructions,\n    /// this is passed unresolved to the code generator.\n    LabelReference(String),\n    /// Currently not supported.\n    Offset(String, String),\n}\n\npub fn quote(s: &str) -> String {\n    // TODO more things to quote\n    format!(\"\\\"{}\\\"\", s.replace('\\\\', \"\\\\\\\\\").replace('\\\"', \"\\\\\\\"\"))\n}\n\npub fn escape_label(l: &str) -> String {\n    // TODO make this proper\n    l.replace('.', \"_dot_\")\n        .replace('/', \"_slash_\")\n        .replace(\"[]\", \"_slice_\")\n        .replace(\",\", \"_comma_\")\n        .replace(\"(\", \"_left_parens_\")\n        .replace(\")\", \"_right_parens_\")\n        .replace(\"[\", \"_left_square_\")\n        .replace(\"]\", \"_right_square_\")\n        .replace(\"{\", \"_left_brace_\")\n        .replace(\"}\", \"_right_brace_\")\n        .replace(\" \", \"_space_\")\n        .replace(\"'\", \"_quote_\")\n        .replace(\"*\", \"_deref_\")\n}\n"
  },
  {
    "path": "number/Cargo.toml",
    "content": "[package]\nname = \"powdr-number\"\ndescription = \"powdr finite field definitions\"\nversion = { workspace = true }\nedition = { workspace = true }\nlicense = { workspace = true }\nhomepage = { workspace = true }\nrepository = { workspace = true }\n\n[dependencies]\nark-bn254 = { version = \"0.4.0\", default-features = false, features = [\n  \"scalar_field\",\n] }\nark-ff = \"0.4.2\"\nark-serialize = \"0.4.2\"\np3-baby-bear = { git = \"https://github.com/plonky3/Plonky3.git\", rev = \"2192432ddf28e7359dd2c577447886463e6124f0\" }\np3-koala-bear = { git = \"https://github.com/plonky3/Plonky3.git\", rev = \"2192432ddf28e7359dd2c577447886463e6124f0\" }\np3-mersenne-31 = { git = \"https://github.com/plonky3/Plonky3.git\", rev = \"2192432ddf28e7359dd2c577447886463e6124f0\" }\np3-field = { git = \"https://github.com/plonky3/Plonky3.git\", rev = \"2192432ddf28e7359dd2c577447886463e6124f0\" }\nnum-bigint = { version = \"0.4.3\", features = [\"serde\"] }\nnum-traits.workspace = true\ncsv = \"1.3\"\nserde = { version = \"1.0\", default-features = false, features = [\n  \"alloc\",\n  \"derive\",\n  \"rc\",\n] }\nserde_with = \"3.6.1\"\nschemars = { version = \"0.8.16\", features = [\"preserve_order\"] }\nibig = { version = \"0.3.6\", features = [\"serde\"] }\nserde_cbor.workspace = true\nderive_more.workspace = true\n\n[dev-dependencies]\ntest-log.workspace = true\nenv_logger.workspace = true\n\n[package.metadata.cargo-udeps.ignore]\ndevelopment = [\"env_logger\"]\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "number/src/baby_bear.rs",
    "content": "use p3_baby_bear::BabyBear;\n\nuse crate::powdr_field_plonky3;\n\npowdr_field_plonky3!(BabyBearField, BabyBear);\n\n#[cfg(test)]\nmod test {\n    use crate::traits::int_from_hex_str;\n    use test_log::test;\n\n    use super::*;\n\n    #[test]\n    fn bitwise() {\n        let n = int_from_hex_str::<BabyBearField>(\"00ff00ff\");\n        let p = int_from_hex_str::<BabyBearField>(\"f00ff00f\");\n        let not_n = int_from_hex_str::<BabyBearField>(\"ff00ff00\");\n        let n_shr_4 = int_from_hex_str::<BabyBearField>(\"000ff00f\");\n        let n_shl_4 = int_from_hex_str::<BabyBearField>(\"0ff00ff0\");\n        let n_or_p = int_from_hex_str::<BabyBearField>(\"f0fff0ff\");\n        let n_and_p = int_from_hex_str::<BabyBearField>(\"000f000f\");\n        let n_xor_p = int_from_hex_str::<BabyBearField>(\"f0f0f0f0\");\n\n        assert_eq!(n.not().not(), n);\n        assert_eq!(n.not(), not_n);\n        assert_eq!(n >> 4, n_shr_4);\n        assert_eq!(n << 4, n_shl_4);\n        assert_eq!(n & p, n_and_p);\n        assert_eq!(n | p, n_or_p);\n        assert_eq!(n ^ p, n_xor_p);\n    }\n\n    #[test]\n    fn zero_one() {\n        let x = BabyBearField::ZERO;\n        assert_eq!(x, BabyBearField::zero());\n        assert_eq!(x.to_canonical_u32(), 0);\n        let y = BabyBearField::ONE;\n        assert_eq!(y, BabyBearField::one());\n        assert_eq!(y.to_canonical_u32(), 1);\n        let z = x + y + y;\n        assert_eq!(z.to_canonical_u32(), 2);\n    }\n\n    #[test]\n    fn lower_half() {\n        let x = BabyBearField::from(0);\n        assert!(x.is_in_lower_half());\n        assert!(!(x - 1.into()).is_in_lower_half());\n\n        let y = BabyBearField::from_str_radix(\"3c000000\", 16).unwrap();\n        assert!(y.is_in_lower_half());\n        assert!(!(y + 1.into()).is_in_lower_half());\n    }\n\n    #[test]\n    #[should_panic]\n    fn integer_div_by_zero() {\n        let _ = BabyBearField::from(1).to_arbitrary_integer()\n            / 
BabyBearField::from(0).to_arbitrary_integer();\n    }\n\n    #[test]\n    #[should_panic]\n    fn div_by_zero() {\n        let _ = BabyBearField::from(1) / BabyBearField::from(0);\n    }\n\n    #[test]\n    fn to_signed_integer() {\n        let values = [\n            i16::MIN as i64,\n            i16::MIN as i64 + 1,\n            i16::MIN as i64 + 4242,\n            -0x6faa21,\n            -3456,\n            -1,\n            0,\n            0x6faa21,\n            1,\n            3456,\n            i16::MAX as i64 - 4242,\n            i16::MAX as i64 - 1,\n            i16::MAX as i64,\n        ];\n        for &value in &values {\n            let field_value = BabyBearField::from(value);\n            let signed_integer_value = field_value.to_signed_integer();\n            assert_eq!(signed_integer_value, value.into());\n        }\n    }\n}\n"
  },
  {
    "path": "number/src/bn254.rs",
    "content": "use ark_bn254::Fr;\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\npowdr_field!(Bn254Field, Fr);\n\n#[cfg(test)]\nmod tests {\n    use std::ops::*;\n\n    use super::Bn254Field;\n    use crate::{traits::int_from_hex_str, FieldElement};\n    use test_log::test;\n\n    #[test]\n    fn bitwise() {\n        let n = int_from_hex_str::<Bn254Field>(\n            \"00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff\",\n        );\n        let p = int_from_hex_str::<Bn254Field>(\n            \"000ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00f\",\n        );\n        let not_n = int_from_hex_str::<Bn254Field>(\n            \"ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00\",\n        );\n        let n_shr_4 = int_from_hex_str::<Bn254Field>(\n            \"000ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00f\",\n        );\n        let n_shl_4 = int_from_hex_str::<Bn254Field>(\n            \"0ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff0\",\n        );\n        let n_or_p = int_from_hex_str::<Bn254Field>(\n            \"00fff0fff0fff0fff0fff0fff0fff0fff0fff0fff0fff0fff0fff0fff0fff0ff\",\n        );\n        let n_and_p = int_from_hex_str::<Bn254Field>(\n            \"000f000f000f000f000f000f000f000f000f000f000f000f000f000f000f000f\",\n        );\n        let n_xor_p = int_from_hex_str::<Bn254Field>(\n            \"00f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0\",\n        );\n\n        assert_eq!(n.not().not(), n);\n        assert_eq!(n.not(), not_n);\n        assert_eq!(n >> 4, n_shr_4);\n        assert_eq!(n << 4, n_shl_4);\n        assert_eq!(n & p, n_and_p);\n        assert_eq!(n | p, n_or_p);\n        assert_eq!(n ^ p, n_xor_p);\n    }\n\n    #[test]\n    fn minus_one() {\n        let minus_one = Bn254Field::from(0) - Bn254Field::from(1);\n        assert_eq!(\n            minus_one.to_arbitrary_integer(),\n            
crate::BigUint::from_str_radix(\n                \"21888242871839275222246405745257275088548364400416034343698204186575808495616\",\n                10\n            )\n            .unwrap()\n        );\n    }\n\n    #[test]\n    fn format() {\n        let one = Bn254Field::from(1);\n        assert_eq!(format!(\"{one:x}\"), \"1\");\n        let minus_one = Bn254Field::from(0) - Bn254Field::from(1);\n        assert_eq!(\n            format!(\"{minus_one:x}\"),\n            \"30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000000\"\n        );\n    }\n\n    #[test]\n    #[should_panic]\n    fn integer_div_by_zero() {\n        let _ =\n            Bn254Field::from(1).to_arbitrary_integer() / Bn254Field::from(0).to_arbitrary_integer();\n    }\n\n    #[test]\n    #[should_panic]\n    fn div_by_zero() {\n        let _ = Bn254Field::from(1) / Bn254Field::from(0);\n    }\n\n    #[test]\n    fn to_signed_integer() {\n        let values = [\n            i32::MIN as i64,\n            i32::MIN as i64 + 1,\n            i32::MIN as i64 + 4242,\n            -0x6faa2185,\n            -3456,\n            -1,\n            0,\n            0x6faa2185,\n            1,\n            3456,\n            i32::MAX as i64 - 4242,\n            i32::MAX as i64 - 1,\n            i32::MAX as i64,\n        ];\n        for &value in &values {\n            let field_value = Bn254Field::from(value);\n            let signed_integer_value = field_value.to_signed_integer();\n            assert_eq!(signed_integer_value, value.into());\n        }\n    }\n}\n"
  },
  {
    "path": "number/src/expression_convertible.rs",
    "content": "use std::ops::{Add, Mul, Neg, Sub};\n\nuse crate::FieldElement;\n\npub trait ExpressionConvertible<T, V> {\n    /// Converts `self` into a structure that supports algebraic operations.\n    ///\n    /// Fails in case a non-algebraic operation is used.\n    ///\n    /// The `try_to_number` function is used to check if some conversions can be simplified.\n    ///\n    /// This or `to_expression` must be implemented.\n    fn try_to_expression<\n        E: Add<E, Output = E> + Sub<E, Output = E> + Mul<E, Output = E> + Neg<Output = E>,\n    >(\n        &self,\n        number_converter: &impl Fn(&T) -> E,\n        var_converter: &impl Fn(&V) -> E,\n        _try_to_number: &impl Fn(&E) -> Option<T>,\n    ) -> Option<E> {\n        Some(self.to_expression(number_converter, var_converter))\n    }\n\n    /// Converts `self` into a structure that supports algebraic operations.\n    ///\n    /// This or `try_to_expression` must be implemented.\n    fn to_expression<\n        E: Add<E, Output = E> + Sub<E, Output = E> + Mul<E, Output = E> + Neg<Output = E>,\n    >(\n        &self,\n        number_converter: &impl Fn(&T) -> E,\n        var_converter: &impl Fn(&V) -> E,\n    ) -> E {\n        self.try_to_expression(number_converter, var_converter, &|_| unreachable!())\n            .unwrap()\n    }\n}\n\nimpl<V, T: FieldElement> ExpressionConvertible<T, V> for T {\n    fn to_expression<\n        E: Add<E, Output = E> + Sub<E, Output = E> + Mul<E, Output = E> + Neg<Output = E>,\n    >(\n        &self,\n        number_converter: &impl Fn(&T) -> E,\n        _var_converter: &impl Fn(&V) -> E,\n    ) -> E {\n        number_converter(self)\n    }\n}\n"
  },
  {
    "path": "number/src/goldilocks.rs",
    "content": "use std::fmt::LowerHex;\nuse std::ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Not, Sub, SubAssign};\nuse std::str::FromStr;\n\nuse ark_ff::{One, Zero};\n\nuse num_traits::{ConstOne, ConstZero};\nuse schemars::JsonSchema;\nuse serde::{Deserialize, Serialize};\n\nuse core::fmt::{self, Debug, Formatter};\nuse core::hash::Hash;\n#[cfg(target_arch = \"x86_64\")]\nuse core::hint::unreachable_unchecked;\n\nuse crate::{BigUint, FieldElement, KnownField, LargeInt};\n\n// This implementation is adapted from plonky2. The main change is that we ensure that the stored\n// value is always less than the field modulus, since we do conversions from and to canonical\n// integers all the time.\n\nconst EPSILON: u64 = (1 << 32) - 1;\n\n#[derive(\n    Clone,\n    Copy,\n    PartialEq,\n    Eq,\n    Debug,\n    Default,\n    PartialOrd,\n    Ord,\n    Hash,\n    Serialize,\n    Deserialize,\n    JsonSchema,\n    derive_more::Display,\n)]\n#[repr(transparent)]\npub struct GoldilocksField(u64);\n\nimpl GoldilocksField {\n    const ORDER: u64 = 0xFFFFFFFF00000001;\n\n    /// Returns the inverse of the field element, using Fermat's little theorem.\n    /// The inverse of `a` is computed as `a^(p-2)`, where `p` is the prime order of the field.\n    ///\n    /// Mathematically, this is equivalent to:\n    ///                $a^(p-1)     = 1 (mod p)$\n    ///                $a^(p-2) * a = 1 (mod p)$\n    /// Therefore      $a^(p-2)     = a^-1 (mod p)$\n    ///\n    /// The following code has been adapted from winterfell/math/src/field/f64/mod.rs\n    /// located at <https://github.com/facebook/winterfell>.\n    fn try_inverse(&self) -> Option<Self> {\n        if self.is_zero() {\n            return None;\n        }\n\n        // compute base^(P - 2) using 72 multiplications\n        // The exponent P - 2 is represented in binary as:\n        // 0b1111111111111111111111111111111011111111111111111111111111111111\n\n        // compute base^11\n        let t2 = self.square() * 
*self;\n\n        // compute base^111\n        let t3 = t2.square() * *self;\n\n        // compute base^111111 (6 ones)\n        // repeatedly square t3 3 times and multiply by t3\n        let t6 = exp_acc::<3>(t3, t3);\n\n        // compute base^111111111111 (12 ones)\n        // repeatedly square t6 6 times and multiply by t6\n        let t12 = exp_acc::<6>(t6, t6);\n\n        // compute base^111111111111111111111111 (24 ones)\n        // repeatedly square t12 12 times and multiply by t12\n        let t24 = exp_acc::<12>(t12, t12);\n\n        // compute base^1111111111111111111111111111111 (31 ones)\n        // repeatedly square t24 6 times and multiply by t6 first. then square t30 and\n        // multiply by base\n        let t30 = exp_acc::<6>(t24, t6);\n        let t31 = t30.square() * *self;\n\n        // compute base^111111111111111111111111111111101111111111111111111111111111111\n        // repeatedly square t31 32 times and multiply by t31\n        let t63 = exp_acc::<32>(t31, t31);\n\n        // compute base^1111111111111111111111111111111011111111111111111111111111111111\n        Some(t63.square() * *self)\n    }\n\n    fn square(&self) -> Self {\n        *self * *self\n    }\n\n    fn exp_power_of_2(&self, power_log: usize) -> Self {\n        let mut res = *self;\n        for _ in 0..power_log {\n            res = res.square();\n        }\n        res\n    }\n\n    #[inline(always)]\n    fn from_canonical_u64(n: u64) -> Self {\n        debug_assert!(n < Self::ORDER);\n        Self(n)\n    }\n\n    #[inline]\n    fn from_noncanonical_i64(n: i64) -> Self {\n        Self::from_canonical_u64(if n < 0 {\n            // If n < 0, then this is guaranteed to overflow since\n            // both arguments have their high bit set, so the result\n            // is in the canonical range.\n            Self::ORDER.wrapping_add(n as u64)\n        } else {\n            n as u64\n        })\n    }\n\n    #[inline]\n    fn to_canonical_u64(self) -> u64 {\n        
self.0\n    }\n}\n\n#[inline]\nfn wrap(x: u64) -> u64 {\n    if x >= GoldilocksField::ORDER {\n        x - GoldilocksField::ORDER\n    } else {\n        x\n    }\n}\n\nimpl Neg for GoldilocksField {\n    type Output = Self;\n\n    #[inline]\n    fn neg(self) -> Self {\n        if self.is_zero() {\n            Self::ZERO\n        } else {\n            Self(Self::ORDER - self.to_canonical_u64())\n        }\n    }\n}\n\nimpl Add for GoldilocksField {\n    type Output = Self;\n\n    #[inline]\n    #[allow(clippy::suspicious_arithmetic_impl)]\n    fn add(self, rhs: Self) -> Self {\n        let (sum, over) = self.0.overflowing_add(rhs.0);\n        let (sum, over) = sum.overflowing_add((over as u64) * EPSILON);\n        debug_assert!(!over);\n        Self(wrap(sum))\n    }\n}\n\nimpl AddAssign for GoldilocksField {\n    #[inline]\n    fn add_assign(&mut self, rhs: Self) {\n        *self = *self + rhs\n    }\n}\n\nimpl Sub for GoldilocksField {\n    type Output = Self;\n\n    #[inline]\n    #[allow(clippy::suspicious_arithmetic_impl)]\n    fn sub(self, rhs: Self) -> Self {\n        let (diff, under) = self.0.overflowing_sub(rhs.0);\n        let (diff, under) = diff.overflowing_sub((under as u64) * EPSILON);\n        debug_assert!(!under);\n        Self(wrap(diff))\n    }\n}\n\nimpl SubAssign for GoldilocksField {\n    #[inline]\n    fn sub_assign(&mut self, rhs: Self) {\n        *self = *self - rhs\n    }\n}\n\nimpl Mul for GoldilocksField {\n    type Output = Self;\n\n    fn mul(self, rhs: Self) -> Self {\n        reduce128((self.0 as u128) * (rhs.0 as u128))\n    }\n}\n\nimpl MulAssign for GoldilocksField {\n    fn mul_assign(&mut self, rhs: Self) {\n        *self = *self * rhs\n    }\n}\n\nimpl Div for GoldilocksField {\n    type Output = Self;\n\n    #[allow(clippy::suspicious_arithmetic_impl)]\n    fn div(self, rhs: Self) -> Self::Output {\n        self * rhs.try_inverse().unwrap()\n    }\n}\n\n/// Fast addition modulo ORDER for x86-64.\n/// This function is marked 
unsafe for the following reasons:\n///   - It is only correct if x + y < 2**64 + ORDER = 0x1ffffffff00000001.\n///   - It is only faster in some circumstances. In particular, on x86 it overwrites both inputs in\n///     the registers, so its use is not recommended when either input will be used again.\n#[inline(always)]\n#[cfg(target_arch = \"x86_64\")]\nunsafe fn add_no_canonicalize_trashing_input(x: u64, y: u64) -> u64 {\n    let res_wrapped: u64;\n    let adjustment: u64;\n    core::arch::asm!(\n        \"add {0}, {1}\",\n        // Trick. The carry flag is set iff the addition overflowed.\n        // sbb x, y does x := x - y - CF. In our case, x and y are both {1:e}, so it simply does\n        // {1:e} := 0xffffffff on overflow and {1:e} := 0 otherwise. {1:e} is the low 32 bits of\n        // {1}; the high 32-bits are zeroed on write. In the end, we end up with 0xffffffff in {1}\n        // on overflow; this happens to be EPSILON.\n        // Note that the CPU does not realize that the result of sbb x, x does not actually depend\n        // on x. We must write the result to a register that we know to be ready. We have a\n        // dependency on {1} anyway, so let's use it.\n        \"sbb {1:e}, {1:e}\",\n        inlateout(reg) x => res_wrapped,\n        inlateout(reg) y => adjustment,\n        options(pure, nomem, nostack),\n    );\n    assume(x != 0 || (res_wrapped == y && adjustment == 0));\n    assume(y != 0 || (res_wrapped == x && adjustment == 0));\n    // Add EPSILON == subtract ORDER.\n    // Cannot overflow unless the assumption that x + y < 2**64 + ORDER is incorrect.\n    res_wrapped + adjustment\n}\n\n#[inline(always)]\n#[cfg(not(target_arch = \"x86_64\"))]\nconst unsafe fn add_no_canonicalize_trashing_input(x: u64, y: u64) -> u64 {\n    let (res_wrapped, carry) = x.overflowing_add(y);\n    // Below cannot overflow unless the assumption that x + y < 2**64 + ORDER is incorrect.\n    res_wrapped + EPSILON * (carry as u64)\n}\n\n/// Reduces to a 64-bit value. 
The result is in canonical form.\n#[inline]\nfn reduce128(x: u128) -> GoldilocksField {\n    let (x_lo, x_hi) = split(x); // This is a no-op\n    let x_hi_hi = x_hi >> 32;\n    let x_hi_lo = x_hi & EPSILON;\n\n    let (mut t0, borrow) = x_lo.overflowing_sub(x_hi_hi);\n    if borrow {\n        branch_hint(); // A borrow is exceedingly rare. It is faster to branch.\n        t0 -= EPSILON; // Cannot underflow.\n    }\n    let t1 = x_hi_lo * EPSILON;\n    let t2 = unsafe { add_no_canonicalize_trashing_input(t0, t1) };\n\n    GoldilocksField(wrap(t2))\n}\n\n/// Squares the base N number of times and multiplies the result by the tail value.\n#[inline(always)]\nfn exp_acc<const N: usize>(base: GoldilocksField, tail: GoldilocksField) -> GoldilocksField {\n    base.exp_power_of_2(N) * tail\n}\n\n#[inline]\nconst fn split(x: u128) -> (u64, u64) {\n    (x as u64, (x >> 64) as u64)\n}\n\n#[inline(always)]\n#[cfg(target_arch = \"x86_64\")]\npub fn assume(p: bool) {\n    debug_assert!(p);\n    if !p {\n        unsafe {\n            unreachable_unchecked();\n        }\n    }\n}\n\n/// Try to force Rust to emit a branch. Example:\n///     if x > 2 {\n///         y = foo();\n///         branch_hint();\n///     } else {\n///         y = bar();\n///     }\n/// This function has no semantics. It is a hint only.\n#[inline(always)]\npub fn branch_hint() {\n    // NOTE: These are the currently supported assembly architectures. 
See the\n    // [nightly reference](https://doc.rust-lang.org/nightly/reference/inline-assembly.html) for\n    // the most up-to-date list.\n    #[cfg(any(\n        target_arch = \"aarch64\",\n        target_arch = \"arm\",\n        target_arch = \"riscv32\",\n        target_arch = \"riscv64\",\n        target_arch = \"x86\",\n        target_arch = \"x86_64\",\n    ))]\n    unsafe {\n        core::arch::asm!(\"\", options(nomem, nostack, preserves_flags));\n    }\n}\n\nimpl FieldElement for GoldilocksField {\n    type Integer = GLLargeInt;\n\n    const BITS: u32 = 64;\n\n    fn to_degree(&self) -> crate::DegreeType {\n        self.to_canonical_u64()\n    }\n\n    fn to_integer(&self) -> Self::Integer {\n        self.to_canonical_u64().into()\n    }\n\n    #[inline]\n    fn modulus() -> Self::Integer {\n        Self::ORDER.into()\n    }\n\n    fn pow(self, exp: Self::Integer) -> Self {\n        let mut exp = exp.0;\n        if exp == 0 {\n            return 1.into();\n        } else if exp == 1 {\n            return self;\n        }\n        let mut x = self;\n        let mut r: Self = 1.into();\n        while exp >= 2 {\n            if exp & 1 != 0 {\n                r *= x;\n            }\n            x = x.square();\n            exp >>= 1;\n        }\n        r * x\n    }\n\n    fn to_bytes_le(&self) -> Vec<u8> {\n        self.to_canonical_u64().to_le_bytes().to_vec()\n    }\n\n    fn from_bytes_le(bytes: &[u8]) -> Self {\n        wrap(u64::try_from(BigUint::from_le_bytes(bytes)).unwrap()).into()\n    }\n\n    fn from_str_radix(s: &str, radix: u32) -> Result<Self, String> {\n        let n = u64::from_str_radix(s, radix).map_err(|e| e.to_string())?;\n        if n < Self::ORDER {\n            Ok(Self::from_canonical_u64(n))\n        } else {\n            Err(format!(\"Number \\\"{s}\\\" too large for Goldilocks field.\"))\n        }\n    }\n\n    fn checked_from(value: ibig::UBig) -> Option<Self> {\n        if value < Self::modulus().to_arbitrary_integer() {\n      
      Some(u64::try_from(value).unwrap().into())\n        } else {\n            None\n        }\n    }\n\n    fn is_in_lower_half(&self) -> bool {\n        self.to_canonical_u64() <= (Self::ORDER - 1) / 2\n    }\n\n    fn known_field() -> Option<crate::KnownField> {\n        Some(KnownField::GoldilocksField)\n    }\n\n    fn has_direct_repr() -> bool {\n        true\n    }\n}\n\nimpl LowerHex for GoldilocksField {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        LowerHex::fmt(&self.to_canonical_u64(), f)\n    }\n}\n\nimpl From<bool> for GoldilocksField {\n    fn from(b: bool) -> Self {\n        Self(b as u64)\n    }\n}\n\nimpl From<i64> for GoldilocksField {\n    fn from(n: i64) -> Self {\n        Self::from_noncanonical_i64(n)\n    }\n}\n\nimpl From<i32> for GoldilocksField {\n    fn from(n: i32) -> Self {\n        From::<i64>::from(n as i64)\n    }\n}\n\nimpl From<u32> for GoldilocksField {\n    fn from(n: u32) -> Self {\n        Self::from_canonical_u64(n as u64)\n    }\n}\n\nimpl From<u64> for GoldilocksField {\n    #[inline]\n    fn from(n: u64) -> Self {\n        Self(wrap(n))\n    }\n}\n\nimpl From<crate::BigUint> for GoldilocksField {\n    fn from(n: crate::BigUint) -> Self {\n        u64::try_from(n).unwrap().into()\n    }\n}\n\nimpl From<GLLargeInt> for GoldilocksField {\n    #[inline]\n    fn from(n: GLLargeInt) -> Self {\n        Self(wrap(n.0))\n    }\n}\n\nimpl ConstZero for GoldilocksField {\n    const ZERO: Self = Self(0);\n}\n\nimpl Zero for GoldilocksField {\n    fn zero() -> Self {\n        Self::ZERO\n    }\n\n    fn is_zero(&self) -> bool {\n        self.0 == 0\n    }\n}\n\nimpl ConstOne for GoldilocksField {\n    const ONE: Self = Self(1);\n}\n\nimpl One for GoldilocksField {\n    fn one() -> Self {\n        Self::ONE\n    }\n\n    fn is_one(&self) -> bool {\n        self.to_canonical_u64() == 1\n    }\n}\n\nimpl FromStr for GoldilocksField {\n    type Err = String;\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n    
    let n = BigUint::from_str(s).map_err(|e| e.to_string())?;\n        let modulus = Self::modulus();\n        if n >= modulus.to_arbitrary_integer() {\n            Err(format!(\"Decimal number \\\"{s}\\\" too large for field.\"))\n        } else {\n            Ok(n.into())\n        }\n    }\n}\n\n#[derive(\n    Clone,\n    Copy,\n    PartialEq,\n    Eq,\n    Debug,\n    Default,\n    PartialOrd,\n    Ord,\n    Hash,\n    derive_more::Display,\n    Serialize,\n    Deserialize,\n    JsonSchema,\n    derive_more::Mul,\n    derive_more::Add,\n    derive_more::Sub,\n    derive_more::AddAssign,\n    derive_more::SubAssign,\n    derive_more::MulAssign,\n    derive_more::Shr,\n    derive_more::Shl,\n    derive_more::BitAnd,\n    derive_more::BitOr,\n    derive_more::BitXor,\n    derive_more::BitAndAssign,\n    derive_more::BitOrAssign,\n    derive_more::BitXorAssign,\n)]\npub struct GLLargeInt(u64);\n\nimpl LargeInt for GLLargeInt {\n    const MAX: Self = Self(u64::MAX);\n    const NUM_BITS: usize = 64;\n\n    fn to_arbitrary_integer(self) -> ibig::UBig {\n        self.0.into()\n    }\n\n    fn num_bits(&self) -> usize {\n        Self::NUM_BITS - self.0.leading_zeros() as usize\n    }\n\n    fn one() -> Self {\n        Self(1)\n    }\n\n    fn is_one(&self) -> bool {\n        self.0 == 1\n    }\n\n    fn try_into_u64(&self) -> Option<u64> {\n        Some(self.0)\n    }\n\n    fn try_into_u32(&self) -> Option<u32> {\n        u32::try_from(self.0).ok()\n    }\n\n    fn from_hex(s: &str) -> Self {\n        Self(u64::from_str_radix(s, 16).unwrap())\n    }\n}\n\nimpl From<u32> for GLLargeInt {\n    fn from(value: u32) -> Self {\n        Self(value as u64)\n    }\n}\n\nimpl From<u64> for GLLargeInt {\n    fn from(value: u64) -> Self {\n        Self(value)\n    }\n}\n\nimpl Zero for GLLargeInt {\n    fn zero() -> Self {\n        Self(0)\n    }\n\n    fn is_zero(&self) -> bool {\n        self.0 == 0\n    }\n}\n\nimpl ConstZero for GLLargeInt {\n    const ZERO: Self = 
Self(0);\n}\n\nimpl LowerHex for GLLargeInt {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        LowerHex::fmt(&self.0, f)\n    }\n}\n\nimpl Not for GLLargeInt {\n    type Output = Self;\n\n    fn not(self) -> Self::Output {\n        Self(!self.0)\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use crate::traits::int_from_hex_str;\n    use test_log::test;\n\n    use super::*;\n\n    #[test]\n    fn bitwise() {\n        let n = int_from_hex_str::<GoldilocksField>(\"00ff00ff00ff00ff\");\n        let p = int_from_hex_str::<GoldilocksField>(\"000ff00ff00ff00f\");\n        let not_n = int_from_hex_str::<GoldilocksField>(\"ff00ff00ff00ff00\");\n        let n_shr_4 = int_from_hex_str::<GoldilocksField>(\"000ff00ff00ff00f\");\n        let n_shl_4 = int_from_hex_str::<GoldilocksField>(\"0ff00ff00ff00ff0\");\n        let n_or_p = int_from_hex_str::<GoldilocksField>(\"00fff0fff0fff0ff\");\n        let n_and_p = int_from_hex_str::<GoldilocksField>(\"000f000f000f000f\");\n        let n_xor_p = int_from_hex_str::<GoldilocksField>(\"00f0f0f0f0f0f0f0\");\n\n        assert_eq!(n.not().not(), n);\n        assert_eq!(n.not(), not_n);\n        assert_eq!(n >> 4, n_shr_4);\n        assert_eq!(n << 4, n_shl_4);\n        assert_eq!(n & p, n_and_p);\n        assert_eq!(n | p, n_or_p);\n        assert_eq!(n ^ p, n_xor_p);\n    }\n\n    #[test]\n    fn lower_half() {\n        let x = GoldilocksField::from(0);\n        assert!(x.is_in_lower_half());\n        assert!(!(x - 1.into()).is_in_lower_half());\n\n        let y = GoldilocksField::from_str_radix(\"7fffffff80000000\", 16).unwrap();\n        assert!(y.is_in_lower_half());\n        assert!(!(y + 1.into()).is_in_lower_half());\n    }\n\n    #[test]\n    fn from_str_radix_rejects_modulus() {\n        // ORDER = 0xffffffff00000001, should be rejected\n        assert!(GoldilocksField::from_str_radix(\"ffffffff00000001\", 16).is_err());\n    }\n\n    #[test]\n    fn from_str_radix_accepts_order_minus_one() {\n        // ORDER - 1 
= 0xffffffff00000000, should be accepted and equal to the literal value\n        let v = GoldilocksField::from_str_radix(\"ffffffff00000000\", 16).unwrap();\n        assert_eq!(v.to_canonical_u64(), 0xffff_ffff_0000_0000);\n    }\n\n    #[test]\n    #[should_panic]\n    fn integer_div_by_zero() {\n        let _ = GoldilocksField::from(1).to_arbitrary_integer()\n            / GoldilocksField::from(0).to_arbitrary_integer();\n    }\n\n    #[test]\n    #[should_panic]\n    fn div_by_zero() {\n        let _ = GoldilocksField::from(1) / GoldilocksField::from(0);\n    }\n}\n"
  },
  {
    "path": "number/src/koala_bear.rs",
    "content": "use p3_koala_bear::KoalaBear;\n\nuse crate::powdr_field_plonky3;\n\npowdr_field_plonky3!(KoalaBearField, KoalaBear);\n\n#[cfg(test)]\nmod test {\n    use crate::traits::int_from_hex_str;\n    use test_log::test;\n\n    use super::*;\n\n    #[test]\n    fn bitwise() {\n        let n = int_from_hex_str::<KoalaBearField>(\"00ff00ff\");\n        let p = int_from_hex_str::<KoalaBearField>(\"f00ff00f\");\n        let not_n = int_from_hex_str::<KoalaBearField>(\"ff00ff00\");\n        let n_shr_4 = int_from_hex_str::<KoalaBearField>(\"000ff00f\");\n        let n_shl_4 = int_from_hex_str::<KoalaBearField>(\"0ff00ff0\");\n        let n_or_p = int_from_hex_str::<KoalaBearField>(\"f0fff0ff\");\n        let n_and_p = int_from_hex_str::<KoalaBearField>(\"000f000f\");\n        let n_xor_p = int_from_hex_str::<KoalaBearField>(\"f0f0f0f0\");\n\n        assert_eq!(n.not().not(), n);\n        assert_eq!(n.not(), not_n);\n        assert_eq!(n >> 4, n_shr_4);\n        assert_eq!(n << 4, n_shl_4);\n        assert_eq!(n & p, n_and_p);\n        assert_eq!(n | p, n_or_p);\n        assert_eq!(n ^ p, n_xor_p);\n    }\n\n    #[test]\n    fn zero_one() {\n        let x = KoalaBearField::ZERO;\n        assert_eq!(x, KoalaBearField::zero());\n        assert_eq!(x.to_canonical_u32(), 0);\n        let y = KoalaBearField::ONE;\n        assert_eq!(y, KoalaBearField::one());\n        assert_eq!(y.to_canonical_u32(), 1);\n        let z = x + y + y;\n        assert_eq!(z.to_canonical_u32(), 2);\n    }\n\n    #[test]\n    fn lower_half() {\n        let x = KoalaBearField::from(0);\n        assert!(x.is_in_lower_half());\n        assert!(!(x - 1.into()).is_in_lower_half());\n\n        let y = KoalaBearField::from_str_radix(\"3f800000\", 16).unwrap();\n        assert!(y.is_in_lower_half());\n        assert!(!(y + 1.into()).is_in_lower_half());\n    }\n\n    #[test]\n    #[should_panic]\n    fn integer_div_by_zero() {\n        let _ = KoalaBearField::from(1).to_arbitrary_integer()\n         
   / KoalaBearField::from(0).to_arbitrary_integer();\n    }\n\n    #[test]\n    #[should_panic]\n    fn div_by_zero() {\n        let _ = KoalaBearField::from(1) / KoalaBearField::from(0);\n    }\n}\n"
  },
  {
    "path": "number/src/lib.rs",
    "content": "//! Numerical types used across powdr\n\n#[macro_use]\nmod macros;\nmod baby_bear;\nmod bn254;\nmod goldilocks;\nmod koala_bear;\nmod mersenne31;\n#[macro_use]\nmod plonky3_macros;\nmod expression_convertible;\nmod serialize;\nmod traits;\n\npub use serialize::{\n    buffered_write_file, read_polys_csv_file, write_polys_csv_file, CsvRenderMode, ReadWrite,\n};\n\npub use baby_bear::BabyBearField;\npub use bn254::Bn254Field;\npub use expression_convertible::ExpressionConvertible;\npub use goldilocks::GoldilocksField;\npub use koala_bear::KoalaBearField;\npub use mersenne31::Mersenne31Field;\npub use traits::{FieldSize, KnownField};\n\npub use ibig::{IBig as BigInt, UBig as BigUint};\npub use traits::{FieldElement, LargeInt};\n/// An arbitrary precision big integer, to be used as a last recourse\n/// The type of polynomial degrees and indices into columns.\npub type DegreeType = u64;\n\n/// Returns Some(i) if n == 2**i and None otherwise.\npub fn log2_exact(n: BigUint) -> Option<usize> {\n    n.trailing_zeros()\n        .filter(|zeros| n == (BigUint::from(1u32) << zeros))\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use test_log::test;\n\n    #[test]\n    fn log2_exact_function() {\n        assert_eq!(log2_exact(0u32.into()), None);\n        assert_eq!(log2_exact(1u32.into()), Some(0));\n        assert_eq!(log2_exact(2u32.into()), Some(1));\n        assert_eq!(log2_exact(4u32.into()), Some(2));\n        assert_eq!(log2_exact(BigUint::from(1u32) << 300), Some(300));\n        assert_eq!(log2_exact(17u32.into()), None);\n    }\n}\n"
  },
  {
    "path": "number/src/macros.rs",
    "content": "macro_rules! powdr_field {\n    ($name:ident, $ark_type:ty) => {\n        use crate::{\n            traits::{FieldElement, KnownField, LargeInt},\n            BigUint, DegreeType,\n        };\n        use ark_ff::{BigInteger, Field, PrimeField};\n        use num_traits::{ConstOne, ConstZero, One, Zero};\n        use std::fmt;\n        use std::ops::*;\n        use std::str::FromStr;\n\n        #[derive(\n            Clone,\n            Copy,\n            PartialEq,\n            Eq,\n            Debug,\n            Default,\n            PartialOrd,\n            Ord,\n            Hash,\n            Serialize,\n            Deserialize,\n            JsonSchema,\n        )]\n        pub struct $name {\n            #[serde(\n                serialize_with = \"crate::serialize::ark_se\",\n                deserialize_with = \"crate::serialize::ark_de\"\n            )]\n            #[schemars(skip)]\n            value: $ark_type,\n        }\n\n        #[derive(Clone, Copy, PartialEq, Eq, Debug, Default, PartialOrd, Ord, Hash)]\n        pub struct LargeIntImpl {\n            value: <$ark_type as PrimeField>::BigInt,\n        }\n\n        impl fmt::Display for LargeIntImpl {\n            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n                write!(f, \"{}\", self.value)\n            }\n        }\n\n        impl fmt::LowerHex for LargeIntImpl {\n            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n                let val = self.to_arbitrary_integer();\n\n                fmt::LowerHex::fmt(&val, f)\n            }\n        }\n\n        impl LargeIntImpl {\n            const fn new(value: <$ark_type as PrimeField>::BigInt) -> Self {\n                Self { value }\n            }\n        }\n\n        impl From<u32> for LargeIntImpl {\n            fn from(v: u32) -> Self {\n                Self::new(v.into())\n            }\n        }\n\n        impl From<u64> for LargeIntImpl {\n            fn from(v: u64) -> Self {\n           
     Self::new(v.into())\n            }\n        }\n\n        impl Shl<usize> for LargeIntImpl {\n            type Output = Self;\n\n            fn shl(self, other: usize) -> Self {\n                (BigUint::from_le_bytes(&self.value.to_bytes_le()) << other)\n                    .try_into()\n                    .unwrap()\n            }\n        }\n\n        impl Shr<usize> for LargeIntImpl {\n            type Output = Self;\n\n            fn shr(self, other: usize) -> Self {\n                (BigUint::from_le_bytes(&self.value.to_bytes_le()) >> other)\n                    .try_into()\n                    .unwrap()\n            }\n        }\n\n        impl BitAnd for LargeIntImpl {\n            type Output = Self;\n\n            fn bitand(mut self, other: Self) -> Self {\n                for (x, y) in self\n                    .value\n                    .as_mut()\n                    .iter_mut()\n                    .zip(other.value.as_ref().iter())\n                {\n                    *x &= y;\n                }\n                self\n            }\n        }\n\n        impl BitOr for LargeIntImpl {\n            type Output = Self;\n\n            fn bitor(mut self, other: Self) -> Self {\n                for (x, y) in self\n                    .value\n                    .as_mut()\n                    .iter_mut()\n                    .zip(other.value.as_ref().iter())\n                {\n                    *x |= y;\n                }\n                self\n            }\n        }\n\n        impl BitXor for LargeIntImpl {\n            type Output = Self;\n\n            fn bitxor(mut self, other: Self) -> Self {\n                for (x, y) in self\n                    .value\n                    .as_mut()\n                    .iter_mut()\n                    .zip(other.value.as_ref().iter())\n                {\n                    *x ^= y;\n                }\n                self\n            }\n        }\n\n        impl BitOrAssign for LargeIntImpl {\n         
   fn bitor_assign(&mut self, other: Self) {\n                for (x, y) in self\n                    .value\n                    .as_mut()\n                    .iter_mut()\n                    .zip(other.value.as_ref().iter())\n                {\n                    *x |= y;\n                }\n            }\n        }\n\n        impl BitAndAssign for LargeIntImpl {\n            fn bitand_assign(&mut self, other: Self) {\n                for (x, y) in self\n                    .value\n                    .as_mut()\n                    .iter_mut()\n                    .zip(other.value.as_ref().iter())\n                {\n                    *x &= y;\n                }\n            }\n        }\n\n        impl Not for LargeIntImpl {\n            type Output = Self;\n\n            fn not(mut self) -> Self::Output {\n                for limb in self.value.as_mut() {\n                    *limb = !*limb;\n                }\n                self\n            }\n        }\n\n        impl AddAssign for LargeIntImpl {\n            fn add_assign(&mut self, other: Self) {\n                self.value.add_with_carry(&other.value);\n            }\n        }\n\n        impl Add for LargeIntImpl {\n            type Output = Self;\n            fn add(mut self, other: Self) -> Self {\n                self.add_assign(other);\n                self\n            }\n        }\n\n        impl SubAssign for LargeIntImpl {\n            fn sub_assign(&mut self, other: Self) {\n                self.value.sub_with_borrow(&other.value);\n            }\n        }\n\n        impl Sub for LargeIntImpl {\n            type Output = Self;\n            fn sub(mut self, other: Self) -> Self {\n                self.sub_assign(other);\n                self\n            }\n        }\n\n        impl Zero for LargeIntImpl {\n            #[inline]\n            fn zero() -> Self {\n                LargeIntImpl::new(<$ark_type as PrimeField>::BigInt::zero())\n            }\n            #[inline]\n            
fn is_zero(&self) -> bool {\n                self.value.is_zero()\n            }\n        }\n\n        impl TryFrom<BigUint> for LargeIntImpl {\n            type Error = ();\n\n            fn try_from(n: BigUint) -> Result<Self, ()> {\n                let n = num_bigint::BigUint::from_bytes_le(&n.to_le_bytes());\n                Ok(Self {\n                    value: <$ark_type as PrimeField>::BigInt::try_from(n)?,\n                })\n            }\n        }\n\n        impl LargeInt for LargeIntImpl {\n            const MAX: Self = LargeIntImpl::new(<$ark_type as PrimeField>::BigInt::new(\n                [u64::MAX; <$ark_type as PrimeField>::BigInt::NUM_LIMBS],\n            ));\n            const NUM_BITS: usize = <$ark_type as PrimeField>::BigInt::NUM_LIMBS * 64;\n            #[inline]\n            fn to_arbitrary_integer(self) -> BigUint {\n                BigUint::from_le_bytes(&self.value.to_bytes_le())\n            }\n            fn num_bits(&self) -> usize {\n                self.value.num_bits() as usize\n            }\n            #[inline]\n            fn one() -> Self {\n                LargeIntImpl::new(<$ark_type as PrimeField>::BigInt::one())\n            }\n            #[inline]\n            fn is_one(&self) -> bool {\n                self.value == <$ark_type as PrimeField>::BigInt::one()\n            }\n\n            fn try_into_u64(&self) -> Option<u64> {\n                for v in self.value.0[1..].iter() {\n                    if *v != 0 {\n                        return None;\n                    }\n                }\n                Some(self.value.0[0])\n            }\n\n            fn try_into_u32(&self) -> Option<u32> {\n                let v = self.try_into_u64()?;\n                v.try_into().ok()\n            }\n\n            fn from_hex(s: &str) -> Self {\n                BigUint::from_str_radix(s, 16).unwrap().try_into().unwrap()\n            }\n        }\n\n        impl ConstZero for LargeIntImpl {\n            const ZERO: Self = 
LargeIntImpl::new(<$ark_type as PrimeField>::BigInt::zero());\n        }\n\n        impl From<BigUint> for $name {\n            fn from(n: BigUint) -> Self {\n                let n = num_bigint::BigUint::from_bytes_le(&n.to_le_bytes());\n                Self { value: n.into() }\n            }\n        }\n\n        impl From<LargeIntImpl> for $name {\n            fn from(n: LargeIntImpl) -> Self {\n                Self {\n                    value: n.value.into(),\n                }\n            }\n        }\n\n        impl From<u32> for $name {\n            fn from(n: u32) -> Self {\n                (<$ark_type>::from(n)).into()\n            }\n        }\n\n        impl From<u64> for $name {\n            fn from(n: u64) -> Self {\n                (<$ark_type>::from(n)).into()\n            }\n        }\n\n        impl From<i32> for $name {\n            fn from(n: i32) -> Self {\n                (<$ark_type>::from(n)).into()\n            }\n        }\n\n        impl From<i64> for $name {\n            fn from(n: i64) -> Self {\n                (<$ark_type>::from(n)).into()\n            }\n        }\n\n        impl From<bool> for $name {\n            fn from(n: bool) -> Self {\n                (<$ark_type>::from(n)).into()\n            }\n        }\n\n        impl FromStr for $name {\n            type Err = String;\n            fn from_str(s: &str) -> Result<Self, Self::Err> {\n                let n = BigUint::from_str(s).map_err(|e| e.to_string())?;\n                let modulus = <$ark_type>::MODULUS.to_bytes_le();\n                if n >= BigUint::from_le_bytes(&modulus) {\n                    Err(format!(\"Decimal number \\\"{s}\\\" too large for field.\"))\n                } else {\n                    Ok(n.into())\n                }\n            }\n        }\n\n        impl fmt::LowerHex for $name {\n            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n                fmt::LowerHex::fmt(&self.to_integer(), f)\n            }\n        }\n\n        
impl FieldElement for $name {\n            type Integer = LargeIntImpl;\n            const BITS: u32 = <$ark_type>::MODULUS_BIT_SIZE;\n\n            fn known_field() -> Option<KnownField> {\n                Some(KnownField::$name)\n            }\n\n            fn from_str_radix(s: &str, radix: u32) -> Result<Self, String> {\n                let n = BigUint::from_str_radix(s, radix).map_err(|e| e.to_string())?;\n                let modulus = <$ark_type>::MODULUS.to_bytes_le();\n                if n >= BigUint::from_le_bytes(&modulus) {\n                    Err(format!(\"Hexadecimal number \\\"0x{s}\\\" too large for field.\"))\n                } else {\n                    Ok(n.into())\n                }\n            }\n\n            fn checked_from(value: BigUint) -> Option<Self> {\n                let modulus = <$ark_type>::MODULUS.to_bytes_le();\n                if value < BigUint::from_le_bytes(&modulus) {\n                    Some(value.into())\n                } else {\n                    None\n                }\n            }\n\n            fn to_degree(&self) -> DegreeType {\n                let degree: BigUint = self.to_integer().to_arbitrary_integer();\n                degree.try_into().unwrap()\n            }\n\n            fn to_integer(&self) -> Self::Integer {\n                Self::Integer::new(self.value.into_bigint())\n            }\n\n            fn modulus() -> Self::Integer {\n                Self::Integer::new(<$ark_type>::MODULUS)\n            }\n\n            fn pow(self, exponent: Self::Integer) -> Self {\n                Self {\n                    value: self.value.pow(exponent.value),\n                }\n            }\n\n            fn to_bytes_le(&self) -> Vec<u8> {\n                self.value.into_bigint().to_bytes_le()\n            }\n\n            fn from_bytes_le(bytes: &[u8]) -> Self {\n                assert_eq!(\n                    bytes.len(),\n                    <$ark_type as PrimeField>::BigInt::NUM_LIMBS * 8,\n               
     \"wrong number of bytes for field type\"\n                );\n\n                let mut limbs = [0u64; <$ark_type as PrimeField>::BigInt::NUM_LIMBS];\n                for (from, to) in bytes.chunks(8).zip(limbs.iter_mut()) {\n                    *to = u64::from_le_bytes(from.try_into().unwrap());\n                }\n\n                Self {\n                    value: <$ark_type as PrimeField>::BigInt::new(limbs).into(),\n                }\n            }\n\n            fn is_in_lower_half(&self) -> bool {\n                self.to_integer().value <= <$ark_type>::MODULUS_MINUS_ONE_DIV_TWO\n            }\n\n            fn has_direct_repr() -> bool {\n                false\n            }\n        }\n\n        impl From<$ark_type> for $name {\n            #[inline]\n            fn from(value: $ark_type) -> Self {\n                Self { value }\n            }\n        }\n\n        // Add\n\n        impl std::ops::Add for $name {\n            type Output = $name;\n\n            #[inline]\n            fn add(self, rhs: Self) -> Self::Output {\n                (self.value + rhs.value).into()\n            }\n        }\n\n        impl AddAssign for $name {\n            fn add_assign(&mut self, rhs: Self) {\n                self.value.add_assign(rhs.value);\n            }\n        }\n\n        // Sub\n\n        impl std::ops::Sub for $name {\n            type Output = $name;\n\n            fn sub(self, rhs: Self) -> Self::Output {\n                (self.value - rhs.value).into()\n            }\n        }\n\n        impl SubAssign for $name {\n            fn sub_assign(&mut self, rhs: Self) {\n                self.value.sub_assign(rhs.value);\n            }\n        }\n\n        // Mul\n\n        impl std::ops::Mul for $name {\n            type Output = $name;\n\n            fn mul(self, rhs: Self) -> Self::Output {\n                (self.value * rhs.value).into()\n            }\n        }\n\n        impl std::ops::MulAssign for $name {\n            fn mul_assign(&mut 
self, rhs: Self) {\n                self.value.mul_assign(rhs.value);\n            }\n        }\n\n        // Div\n\n        impl std::ops::Div for $name {\n            type Output = $name;\n\n            fn div(self, rhs: Self) -> Self::Output {\n                (self.value / rhs.value).into()\n            }\n        }\n\n        impl std::ops::Neg for $name {\n            type Output = $name;\n\n            #[inline]\n            fn neg(self) -> Self::Output {\n                (-self.value).into()\n            }\n        }\n\n        impl Zero for $name {\n            #[inline]\n            fn zero() -> Self {\n                <$ark_type>::ZERO.into()\n            }\n            #[inline]\n            fn is_zero(&self) -> bool {\n                self.value == <$ark_type>::ZERO\n            }\n        }\n\n        impl ConstZero for $name {\n            const ZERO: Self = Self {\n                value: <$ark_type>::ZERO,\n            };\n        }\n\n        impl One for $name {\n            #[inline]\n            fn one() -> Self {\n                <$ark_type>::ONE.into()\n            }\n            #[inline]\n            fn is_one(&self) -> bool {\n                self.value == <$ark_type>::ONE\n            }\n        }\n\n        impl ConstOne for $name {\n            const ONE: Self = Self {\n                value: <$ark_type>::ONE,\n            };\n        }\n\n        impl fmt::Display for $name {\n            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n                let value = self.to_integer().value;\n                write!(f, \"{value}\")\n            }\n        }\n    };\n}\n"
  },
  {
    "path": "number/src/mersenne31.rs",
    "content": "use p3_mersenne_31::Mersenne31;\n\nuse crate::powdr_field_plonky3;\n\npowdr_field_plonky3!(Mersenne31Field, Mersenne31);\n\n#[cfg(test)]\nmod test {\n    use crate::traits::int_from_hex_str;\n    use test_log::test;\n\n    use super::*;\n\n    #[test]\n    fn bitwise() {\n        let n = int_from_hex_str::<Mersenne31Field>(\"00ff00ff\");\n        let p = int_from_hex_str::<Mersenne31Field>(\"f00ff00f\");\n        let not_n = int_from_hex_str::<Mersenne31Field>(\"ff00ff00\");\n        let n_shr_4 = int_from_hex_str::<Mersenne31Field>(\"000ff00f\");\n        let n_shl_4 = int_from_hex_str::<Mersenne31Field>(\"0ff00ff0\");\n        let n_or_p = int_from_hex_str::<Mersenne31Field>(\"f0fff0ff\");\n        let n_and_p = int_from_hex_str::<Mersenne31Field>(\"000f000f\");\n        let n_xor_p = int_from_hex_str::<Mersenne31Field>(\"f0f0f0f0\");\n\n        assert_eq!(n.not().not(), n);\n        assert_eq!(n.not(), not_n);\n        assert_eq!(n >> 4, n_shr_4);\n        assert_eq!(n << 4, n_shl_4);\n        assert_eq!(n & p, n_and_p);\n        assert_eq!(n | p, n_or_p);\n        assert_eq!(n ^ p, n_xor_p);\n    }\n\n    #[test]\n    fn zero_one() {\n        let x = Mersenne31Field::ZERO;\n        assert_eq!(x, Mersenne31Field::zero());\n        assert_eq!(x.to_canonical_u32(), 0);\n        let y = Mersenne31Field::ONE;\n        assert_eq!(y, Mersenne31Field::one());\n        assert_eq!(y.to_canonical_u32(), 1);\n        let z = x + y + y;\n        assert_eq!(z.to_canonical_u32(), 2);\n    }\n\n    #[test]\n    fn lower_half() {\n        let x = Mersenne31Field::from(0);\n        assert!(x.is_in_lower_half());\n        assert!(!(x - 1.into()).is_in_lower_half());\n\n        let y = Mersenne31Field::from_str_radix(\"3fffffff\", 16).unwrap();\n        assert!(y.is_in_lower_half());\n        assert!(!(y + 1.into()).is_in_lower_half());\n    }\n\n    #[test]\n    #[should_panic]\n    fn integer_div_by_zero() {\n        let _ = 
Mersenne31Field::from(1).to_arbitrary_integer()\n            / Mersenne31Field::from(0).to_arbitrary_integer();\n    }\n\n    #[test]\n    #[should_panic]\n    fn div_by_zero() {\n        let _ = Mersenne31Field::from(1) / Mersenne31Field::from(0);\n    }\n}\n"
  },
  {
    "path": "number/src/plonky3_macros.rs",
    "content": "#[macro_export]\nmacro_rules! powdr_field_plonky3 {\n    ($name:ident, $p3_type:ty) => {\n        use schemars::{\n            schema::{Schema, SchemaObject},\n            JsonSchema,\n        };\n        use serde::{Deserialize, Serialize};\n\n        use num_traits::{ConstOne, ConstZero};\n        use std::ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Not, Sub, SubAssign};\n        use std::str::FromStr;\n        use std::{collections::BTreeSet, fmt::LowerHex};\n\n        use ark_ff::{One, Zero};\n        use $crate::{BigUint, FieldElement, KnownField, LargeInt};\n\n        use core::fmt::{self, Debug, Formatter};\n        use core::hash::Hash;\n\n        use p3_field::{AbstractField, Field, PrimeField32};\n\n        #[derive(\n            Debug,\n            Copy,\n            Clone,\n            Default,\n            Eq,\n            Hash,\n            PartialEq,\n            Ord,\n            PartialOrd,\n            Serialize,\n            Deserialize,\n            derive_more::Display,\n        )]\n        pub struct $name($p3_type);\n\n        impl $name {\n            #[inline(always)]\n            fn from_canonical_u32(n: u32) -> Self {\n                Self(<$p3_type>::from_canonical_u32(n))\n            }\n\n            #[inline]\n            fn to_canonical_u32(self) -> u32 {\n                self.0.as_canonical_u32()\n            }\n\n            pub fn into_inner(self) -> $p3_type {\n                self.0\n            }\n\n            pub fn from_inner(e: $p3_type) -> Self {\n                Self(e)\n            }\n        }\n\n        impl FieldElement for $name {\n            type Integer = BBLargeInt;\n\n            const BITS: u32 = 31;\n\n            fn to_degree(&self) -> $crate::DegreeType {\n                self.to_canonical_u32() as u64\n            }\n\n            fn to_integer(&self) -> Self::Integer {\n                self.to_canonical_u32().into()\n            }\n\n            #[inline]\n            fn modulus() -> 
Self::Integer {\n                let p: u32 = <$p3_type>::order().try_into().unwrap();\n                p.into()\n            }\n\n            fn pow(self, exp: Self::Integer) -> Self {\n                Self(<$p3_type>::exp_u64_generic(\n                    self.0,\n                    exp.try_into_u64().unwrap(),\n                ))\n            }\n\n            fn to_bytes_le(&self) -> Vec<u8> {\n                self.to_canonical_u32().to_le_bytes().to_vec()\n            }\n\n            fn from_bytes_le(bytes: &[u8]) -> Self {\n                let u = u32::from_le_bytes(bytes.try_into().unwrap());\n                Self::from_canonical_u32(u)\n            }\n\n            fn from_str_radix(s: &str, radix: u32) -> Result<Self, String> {\n                u32::from_str_radix(s, radix)\n                    .map(Self::from_canonical_u32)\n                    .map_err(|e| e.to_string())\n            }\n\n            fn checked_from(value: ibig::UBig) -> Option<Self> {\n                if value < Self::modulus().to_arbitrary_integer() {\n                    Some(u32::try_from(value).unwrap().into())\n                } else {\n                    None\n                }\n            }\n\n            fn is_in_lower_half(&self) -> bool {\n                let p: u32 = <$p3_type>::order().try_into().unwrap();\n                self.to_canonical_u32() <= (p - 1) / 2\n            }\n\n            fn known_field() -> Option<$crate::KnownField> {\n                Some(KnownField::$name)\n            }\n\n            fn has_direct_repr() -> bool {\n                // No direct repr, because 'mod' is not always applied.\n                false\n            }\n        }\n\n        impl LowerHex for $name {\n            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n                LowerHex::fmt(&self.to_canonical_u32(), f)\n            }\n        }\n\n        impl From<bool> for $name {\n            fn from(b: bool) -> Self {\n                Self(<$p3_type>::from_bool(b))\n  
          }\n        }\n\n        impl From<i64> for $name {\n            fn from(n: i64) -> Self {\n                Self::from(if n < 0 {\n                    // If n < 0, then this is guaranteed to overflow since\n                    // both arguments have their high bit set, so the result\n                    // is in the canonical range.\n                    Self::modulus()\n                        .try_into_u64()\n                        .unwrap()\n                        .wrapping_add(n as u64)\n                } else {\n                    n as u64\n                })\n            }\n        }\n\n        impl From<i32> for $name {\n            fn from(n: i32) -> Self {\n                From::<i64>::from(n as i64)\n            }\n        }\n\n        impl From<u32> for $name {\n            fn from(n: u32) -> Self {\n                Self(<$p3_type>::from_wrapped_u32(n))\n            }\n        }\n\n        impl From<u64> for $name {\n            #[inline]\n            fn from(n: u64) -> Self {\n                Self(<$p3_type>::from_wrapped_u64(n))\n            }\n        }\n\n        impl From<$crate::BigUint> for $name {\n            fn from(n: $crate::BigUint) -> Self {\n                u64::try_from(n).unwrap().into()\n            }\n        }\n\n        impl From<BBLargeInt> for $name {\n            #[inline]\n            fn from(n: BBLargeInt) -> Self {\n                n.0.into()\n            }\n        }\n\n        impl ConstZero for $name {\n            const ZERO: Self = $name(<$p3_type>::new(0));\n        }\n\n        impl Zero for $name {\n            fn zero() -> Self {\n                Self(<$p3_type>::zero())\n            }\n\n            fn is_zero(&self) -> bool {\n                self.0.is_zero()\n            }\n        }\n\n        impl ConstOne for $name {\n            const ONE: Self = $name(<$p3_type>::new(1));\n        }\n\n        impl One for $name {\n            fn one() -> Self {\n                Self(<$p3_type>::one())\n            
}\n\n            fn is_one(&self) -> bool {\n                self.to_canonical_u32() == 1\n            }\n        }\n\n        impl FromStr for $name {\n            type Err = String;\n            fn from_str(s: &str) -> Result<Self, Self::Err> {\n                let n = BigUint::from_str(s).map_err(|e| e.to_string())?;\n                let modulus = Self::modulus();\n                if n >= modulus.to_arbitrary_integer() {\n                    Err(format!(\"Decimal number \\\"{s}\\\" too large for field.\"))\n                } else {\n                    Ok(n.into())\n                }\n            }\n        }\n\n        impl Neg for $name {\n            type Output = Self;\n\n            #[inline]\n            fn neg(self) -> Self {\n                Self(self.0.neg())\n            }\n        }\n\n        impl Add for $name {\n            type Output = Self;\n\n            #[inline]\n            fn add(self, rhs: Self) -> Self {\n                Self(self.0.add(rhs.0))\n            }\n        }\n\n        impl AddAssign for $name {\n            #[inline]\n            fn add_assign(&mut self, rhs: Self) {\n                self.0.add_assign(rhs.0)\n            }\n        }\n\n        impl Sub for $name {\n            type Output = Self;\n\n            #[inline]\n            fn sub(self, rhs: Self) -> Self {\n                Self(self.0.sub(rhs.0))\n            }\n        }\n\n        impl SubAssign for $name {\n            #[inline]\n            fn sub_assign(&mut self, rhs: Self) {\n                self.0.sub_assign(rhs.0)\n            }\n        }\n\n        impl Mul for $name {\n            type Output = Self;\n\n            fn mul(self, rhs: Self) -> Self {\n                Self(self.0.mul(rhs.0))\n            }\n        }\n\n        impl MulAssign for $name {\n            fn mul_assign(&mut self, rhs: Self) {\n                self.0.mul_assign(rhs.0)\n            }\n        }\n\n        impl Div for $name {\n            type Output = Self;\n\n            fn 
div(self, rhs: Self) -> Self::Output {\n                Self(self.0.div(rhs.0))\n            }\n        }\n\n        impl JsonSchema for $name {\n            fn schema_name() -> String {\n                \"$name\".to_string()\n            }\n\n            fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> Schema {\n                // Since $p3_type is just a wrapper around u32, use the schema for u32\n                let u32_schema = gen.subschema_for::<u32>();\n\n                SchemaObject {\n                    // Define the schema for $name, where field is of type $p3_type (which is u32)\n                    instance_type: Some(schemars::schema::InstanceType::Object.into()),\n                    object: Some(Box::new(schemars::schema::ObjectValidation {\n                        properties: vec![(\"field\".to_string(), u32_schema)]\n                            .into_iter()\n                            .collect(),\n                        required: BTreeSet::from([\"field\".to_string()]), // Convert Vec to BTreeSet\n                        ..Default::default()\n                    })),\n                    ..Default::default()\n                }\n                .into()\n            }\n        }\n\n        #[derive(\n            Clone,\n            Copy,\n            PartialEq,\n            Eq,\n            Debug,\n            Default,\n            PartialOrd,\n            Ord,\n            Hash,\n            derive_more::Display,\n            Serialize,\n            Deserialize,\n            JsonSchema,\n            derive_more::Mul,\n            derive_more::Add,\n            derive_more::Sub,\n            derive_more::AddAssign,\n            derive_more::SubAssign,\n            derive_more::MulAssign,\n            derive_more::Shr,\n            derive_more::Shl,\n            derive_more::BitAnd,\n            derive_more::BitOr,\n            derive_more::BitXor,\n            derive_more::BitAndAssign,\n            derive_more::BitOrAssign,\n           
 derive_more::BitXorAssign,\n        )]\n        pub struct BBLargeInt(u32);\n\n        impl LargeInt for BBLargeInt {\n            const MAX: Self = Self(u32::MAX);\n            const NUM_BITS: usize = 32;\n\n            fn to_arbitrary_integer(self) -> ibig::UBig {\n                self.0.into()\n            }\n\n            fn num_bits(&self) -> usize {\n                Self::NUM_BITS - self.0.leading_zeros() as usize\n            }\n\n            fn one() -> Self {\n                Self(1)\n            }\n\n            fn is_one(&self) -> bool {\n                self.0 == 1\n            }\n\n            fn try_into_u64(&self) -> Option<u64> {\n                Some(self.0 as u64)\n            }\n\n            fn try_into_u32(&self) -> Option<u32> {\n                Some(self.0)\n            }\n\n            fn from_hex(s: &str) -> Self {\n                Self(u32::from_str_radix(s, 16).unwrap())\n            }\n        }\n\n        impl From<u32> for BBLargeInt {\n            fn from(value: u32) -> Self {\n                Self(value)\n            }\n        }\n\n        impl From<u64> for BBLargeInt {\n            fn from(value: u64) -> Self {\n                Self(value as u32)\n            }\n        }\n\n        impl Zero for BBLargeInt {\n            fn zero() -> Self {\n                Self(0)\n            }\n\n            fn is_zero(&self) -> bool {\n                self.0 == 0\n            }\n        }\n\n        impl ConstZero for BBLargeInt {\n            const ZERO: Self = Self(0);\n        }\n\n        impl LowerHex for BBLargeInt {\n            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n                LowerHex::fmt(&self.0, f)\n            }\n        }\n\n        impl Not for BBLargeInt {\n            type Output = Self;\n\n            fn not(self) -> Self::Output {\n                Self(!self.0)\n            }\n        }\n    };\n}\n"
  },
  {
    "path": "number/src/serialize.rs",
    "content": "use std::{\n    fs::File,\n    io::{self, BufWriter, Read, Write},\n    path::Path,\n};\n\nuse ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate};\nuse csv::{Reader, Writer};\nuse serde::{de::DeserializeOwned, Serialize};\nuse serde_with::{DeserializeAs, SerializeAs};\n\nuse crate::FieldElement;\n\n#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]\npub enum CsvRenderMode {\n    SignedBase10,\n    UnsignedBase10,\n    #[default]\n    Hex,\n}\n\nconst ROW_NAME: &str = \"Row\";\n\npub fn write_polys_csv_file<T: FieldElement>(\n    file: impl Write,\n    render_mode: CsvRenderMode,\n    polys: &[(&String, &[T])],\n) {\n    let mut writer = Writer::from_writer(file);\n\n    // Write headers, adding a \"Row\" column\n    let mut headers = vec![ROW_NAME];\n    headers.extend(polys.iter().map(|(name, _)| {\n        assert!(*name != ROW_NAME);\n        name.as_str()\n    }));\n    writer.write_record(&headers).unwrap();\n\n    let max_len = polys.iter().map(|p| p.1.len()).max().unwrap();\n    for row_index in 0..max_len {\n        let mut row = Vec::new();\n        row.push(format!(\"{row_index}\"));\n        for (_, values) in polys {\n            let value = values\n                .get(row_index)\n                .map(|v| match render_mode {\n                    CsvRenderMode::SignedBase10 => format!(\"{v}\"),\n                    CsvRenderMode::UnsignedBase10 => format!(\"{}\", v.to_integer()),\n                    CsvRenderMode::Hex => format!(\"0x{:x}\", v.to_integer()),\n                })\n                .unwrap_or_default();\n            row.push(value);\n        }\n        writer.write_record(&row).unwrap();\n    }\n\n    writer.flush().unwrap();\n}\n\npub fn read_polys_csv_file<T: FieldElement>(file: impl Read) -> Vec<(String, Vec<T>)> {\n    let mut reader = Reader::from_reader(file);\n    let headers = reader.headers().unwrap();\n\n    let mut polys = headers\n        .iter()\n        .map(|name| 
(name.to_string(), Vec::new()))\n        .collect::<Vec<_>>();\n\n    for result in reader.records() {\n        let record = result.unwrap();\n        for (idx, value) in record.iter().enumerate() {\n            // shorter polys/columns end in empty cells\n            if value.trim().is_empty() {\n                continue;\n            }\n            let value = if let Some(value) = value.strip_prefix(\"0x\") {\n                T::from_str_radix(value, 16).unwrap()\n            } else if let Some(value) = value.strip_prefix('-') {\n                -T::from_str(value).unwrap()\n            } else {\n                T::from_str(value).unwrap()\n            };\n            polys[idx].1.push(value);\n        }\n    }\n\n    // Remove \"Row\" column, which was added by write_polys_csv_file()\n    polys\n        .into_iter()\n        .filter(|(name, _)| name != ROW_NAME)\n        .collect()\n}\n\npub fn buffered_write_file<R>(\n    path: &Path,\n    do_write: impl FnOnce(&mut BufWriter<File>) -> R,\n) -> Result<R, io::Error> {\n    let mut writer = BufWriter::new(File::create(path)?);\n    let result = do_write(&mut writer);\n    writer.flush()?;\n\n    Ok(result)\n}\n\npub trait ReadWrite {\n    fn read(file: &mut impl Read) -> Self;\n    fn write(&self, path: &Path) -> Result<(), serde_cbor::Error>;\n}\n\nimpl<T: DeserializeOwned + Serialize> ReadWrite for T {\n    fn read(file: &mut impl Read) -> Self {\n        serde_cbor::from_reader(file).unwrap()\n    }\n    fn write(&self, path: &Path) -> Result<(), serde_cbor::Error> {\n        buffered_write_file(path, |writer| serde_cbor::to_writer(writer, &self))??;\n        Ok(())\n    }\n}\n\n// Serde wrappers for serialize/deserialize\n\npub fn ark_se<S, A: CanonicalSerialize>(a: &A, s: S) -> Result<S::Ok, S::Error>\nwhere\n    S: serde::Serializer,\n{\n    let mut bytes = vec![];\n    a.serialize_with_mode(&mut bytes, Compress::Yes)\n        .map_err(serde::ser::Error::custom)?;\n    
serde_with::Bytes::serialize_as(&bytes, s)\n}\n\npub fn ark_de<'de, D, A: CanonicalDeserialize>(data: D) -> Result<A, D::Error>\nwhere\n    D: serde::de::Deserializer<'de>,\n{\n    let s: Vec<u8> = serde_with::Bytes::deserialize_as(data)?;\n    let a = A::deserialize_with_mode(s.as_slice(), Compress::Yes, Validate::Yes);\n    a.map_err(serde::de::Error::custom)\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::Bn254Field;\n    use std::io::Cursor;\n\n    use super::*;\n    use test_log::test;\n\n    fn test_polys() -> Vec<(String, Vec<Bn254Field>)> {\n        vec![\n            (\"a\".to_string(), (0..16).map(Bn254Field::from).collect()),\n            (\"b\".to_string(), (-16..0).map(Bn254Field::from).collect()),\n        ]\n    }\n\n    #[test]\n    fn write_read() {\n        let mut buf: Vec<u8> = vec![];\n\n        let polys = test_polys();\n\n        serde_cbor::to_writer(&mut buf, &polys).unwrap();\n        let read_polys: Vec<(String, Vec<Bn254Field>)> = ReadWrite::read(&mut Cursor::new(buf));\n\n        assert_eq!(read_polys, polys);\n    }\n\n    #[test]\n    fn write_read_csv() {\n        let polys = test_polys()\n            .into_iter()\n            .map(|(name, values)| (name.to_string(), values))\n            .collect::<Vec<_>>();\n        let polys_ref = polys\n            .iter()\n            .map(|(name, values)| (name, values.as_slice()))\n            .collect::<Vec<_>>();\n\n        for render_mode in &[\n            CsvRenderMode::SignedBase10,\n            CsvRenderMode::UnsignedBase10,\n            CsvRenderMode::Hex,\n        ] {\n            let mut buf: Vec<u8> = vec![];\n            write_polys_csv_file(&mut buf, *render_mode, &polys_ref);\n            let read_polys = read_polys_csv_file::<Bn254Field>(&mut Cursor::new(buf));\n\n            assert_eq!(read_polys, polys);\n        }\n    }\n}\n"
  },
  {
    "path": "number/src/traits.rs",
    "content": "use std::{\n    fmt::{self, Display},\n    hash::Hash,\n    ops::*,\n    str::FromStr,\n};\n\nuse ibig::IBig;\nuse num_traits::{ConstOne, ConstZero, One, Zero};\nuse schemars::JsonSchema;\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\n\nuse crate::{BigUint, DegreeType};\n\n/// A fixed-width integer type\npub trait LargeInt:\n    Copy\n    + Send\n    + Sync\n    + PartialEq\n    + Eq\n    + PartialOrd\n    + Ord\n    + Hash\n    + From<u64>\n    + BitAnd<Output = Self>\n    + BitOr<Output = Self>\n    + BitOrAssign\n    + BitAndAssign\n    + AddAssign\n    + Add<Output = Self>\n    + SubAssign\n    + Sub<Output = Self>\n    + fmt::Display\n    + fmt::Debug\n    + Copy\n    + Not<Output = Self>\n    + Shl<usize, Output = Self>\n    + Shr<usize, Output = Self>\n    + BitXor<Output = Self>\n    + Zero\n    + ConstZero\n    + fmt::LowerHex\n{\n    /// The largest value of this type, i.e. 2**NUM_BITS - 1\n    const MAX: Self;\n    /// Number of bits of this base type. 
Not to be confused with the number of bits\n    /// of the field elements!\n    const NUM_BITS: usize;\n    fn to_arbitrary_integer(self) -> BigUint;\n    /// Number of bits required to encode this particular number.\n    fn num_bits(&self) -> usize;\n\n    /// Returns the constant one.\n    /// We are not implementing num_traits::One because it also requires multiplication.\n    fn one() -> Self;\n\n    /// Checks if the number is one.\n    fn is_one(&self) -> bool;\n\n    /// Tries to convert to u64.\n    ///\n    /// Returns None if value is out of u64 range.\n    fn try_into_u64(&self) -> Option<u64>;\n\n    /// Tries to convert to u32.\n    ///\n    /// Returns None if value is out of u32 range.\n    fn try_into_u32(&self) -> Option<u32>;\n\n    /// Creates a LargeInt from a hex string.\n    /// Panics on failure - intended for testing.\n    fn from_hex(s: &str) -> Self;\n}\n\npub enum FieldSize {\n    /// Fields that fit a 29-Bit number, but not much more.\n    Small,\n    /// Fields that at least fit a product of two 32-Bit numbers\n    /// (Goldilocks and larger)\n    Large,\n}\n\n#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]\npub enum KnownField {\n    BabyBearField,\n    KoalaBearField,\n    Mersenne31Field,\n    GoldilocksField,\n    Bn254Field,\n}\n\nimpl KnownField {\n    pub fn field_size(&self) -> FieldSize {\n        match self {\n            KnownField::BabyBearField\n            | KnownField::KoalaBearField\n            | KnownField::Mersenne31Field => FieldSize::Small,\n            KnownField::GoldilocksField | KnownField::Bn254Field => FieldSize::Large,\n        }\n    }\n}\n\nimpl Display for KnownField {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match self {\n            KnownField::BabyBearField => write!(f, \"BabyBear\"),\n            KnownField::KoalaBearField => write!(f, \"KoalaBear\"),\n            KnownField::Mersenne31Field => write!(f, \"Mersenne31\"),\n            
KnownField::GoldilocksField => write!(f, \"Goldilocks\"),\n            KnownField::Bn254Field => write!(f, \"Bn254\"),\n        }\n    }\n}\n\n/// A field element\npub trait FieldElement:\n    'static\n    + Sync\n    + Send\n    + Default\n    + Copy\n    + PartialEq\n    + Eq\n    + Send\n    + Sync\n    + PartialOrd\n    + Ord\n    + Hash\n    + Add<Output = Self>\n    + AddAssign\n    + Sub<Output = Self>\n    + SubAssign\n    + Mul<Output = Self>\n    + MulAssign\n    + Div<Output = Self>\n    + Neg<Output = Self>\n    + Zero\n    + ConstZero\n    + ConstOne\n    + One\n    + fmt::Display\n    + fmt::Debug\n    + From<Self::Integer>\n    + From<crate::BigUint>\n    + FromStr<Err = String>\n    + From<u32>\n    + From<u64>\n    + From<i32>\n    + From<i64>\n    + From<bool>\n    + fmt::LowerHex\n    + Serialize\n    + DeserializeOwned\n    + JsonSchema\n{\n    /// The underlying fixed-width integer type\n    type Integer: LargeInt;\n    /// Number of bits required to represent elements of this field.\n    const BITS: u32;\n\n    fn to_degree(&self) -> DegreeType;\n\n    fn to_integer(&self) -> Self::Integer;\n\n    fn to_arbitrary_integer(&self) -> BigUint {\n        self.to_integer().to_arbitrary_integer()\n    }\n\n    fn modulus() -> Self::Integer;\n\n    fn pow(self, exponent: Self::Integer) -> Self;\n\n    fn to_bytes_le(&self) -> Vec<u8>;\n\n    fn from_bytes_le(bytes: &[u8]) -> Self;\n\n    fn from_str_radix(s: &str, radix: u32) -> Result<Self, String>;\n\n    /// Only converts the value to a field element if it is less than the modulus.\n    fn checked_from(value: BigUint) -> Option<Self>;\n\n    /// Returns true if the value is in the \"lower half\" of the field,\n    /// i.e. 
the value <= (modulus() - 1) / 2\n    fn is_in_lower_half(&self) -> bool;\n\n    /// If the field is a known field (as listed in the `KnownField` enum), returns the field variant.\n    fn known_field() -> Option<KnownField>;\n\n    /// Converts to a signed integer.\n    ///\n    /// Negative values are in relation to 0 in the field.\n    /// Values up to the modulus / 2 are positive, values above are negative.\n    fn to_signed_integer(&self) -> IBig {\n        if self.is_in_lower_half() {\n            self.to_arbitrary_integer().into()\n        } else {\n            IBig::from(self.to_arbitrary_integer())\n                - IBig::from(Self::modulus().to_arbitrary_integer())\n        }\n    }\n\n    /// Returns `true` if values of this type are directly stored as their integer\n    /// value, i.e\n    /// - montgomery representation is not used\n    /// - values are always canonical (i.e. smaller than the modulus)\n    /// - there are no additional fields and\n    /// - `repr(transparent)` is used.\n    ///\n    /// In other words, the `to_integer` function can be implemented as\n    /// a mem::transmute operation on pointers.\n    fn has_direct_repr() -> bool;\n}\n\n#[cfg(test)]\npub fn int_from_hex_str<T: FieldElement>(s: &str) -> T::Integer {\n    T::Integer::from_hex(s)\n}\n"
  },
  {
    "path": "openvm/Cargo.toml",
    "content": "[package]\nname = \"powdr-openvm\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[features]\ndefault = []\naot = [\"openvm-sdk/aot\"]\ntco = [\"openvm-sdk/tco\"]\nmetrics = [\"openvm-sdk/metrics\", \"openvm-stark-backend/metrics\", \"openvm-stark-sdk/metrics\"]\ncuda = [\"openvm-sdk/cuda\", \"openvm-circuit-primitives/cuda\", \"dep:openvm-cuda-backend\", \"dep:openvm-cuda-common\", \"dep:openvm-cuda-builder\"]\ntest-utils = [\"dep:pretty_assertions\"]\n\n[dependencies]\nitertools.workspace = true\nopenvm-circuit.workspace = true\nopenvm-circuit-primitives.workspace = true\nopenvm-instructions.workspace = true\nopenvm-stark-backend.workspace = true\nopenvm-stark-sdk.workspace = true\npowdr-autoprecompiles.workspace = true\npowdr-expression.workspace = true\npowdr-openvm-bus-interaction-handler.workspace = true\nopenvm-circuit-derive.workspace = true\nopenvm-circuit-primitives-derive.workspace = true\nopenvm-sdk.workspace = true\nopenvm-cuda-backend = { workspace = true, optional = true }\nopenvm-cuda-common = { workspace = true, optional = true }\nopenvm-transpiler.workspace = true\nopenvm-native-circuit.workspace = true\nserde.workspace = true\npowdr-number.workspace = true\nderive_more.workspace = true\npowdr-constraint-solver.workspace = true\ntracing.workspace = true\nrustc-demangle = \"0.1.25\"\nmetrics.workspace = true\nindicatif = \"0.18.3\"\ncfg-if = \"1.0.4\"\npowdr-riscv-elf.workspace = true\npretty_assertions = { workspace = true, optional = true }\n\n[build-dependencies]\nopenvm-cuda-builder = { workspace = true, optional = true }\n"
  },
  {
    "path": "openvm/build.rs",
    "content": "#[cfg(feature = \"cuda\")]\nuse openvm_cuda_builder::{cuda_available, CudaBuilder};\n\nfn main() {\n    #[cfg(feature = \"cuda\")]\n    {\n        if !cuda_available() {\n            return; // Skip CUDA compilation\n        }\n\n        let builder: CudaBuilder = CudaBuilder::new()\n            .include_from_dep(\"DEP_CIRCUIT_PRIMITIVES_CUDA_INCLUDE\") // Point to header file folder of crate with path `DEP_CIRCUIT_PRIMITIVES_CUDA_INCLUDE`\n            .include_from_dep(\"DEP_CUDA_COMMON_INCLUDE\") // The only dependency of our dependency `DEP_CIRCUIT_PRIMITIVES_CUDA_INCLUDE`\n            .include(\"cuda/include\") // Point to header file folder of this crate\n            .watch(\"cuda\") // Watch file changes of this crate for recompilation\n            .library_name(\"powdr_gpu\") // Library name of this crate; doesn't affect import name\n            .files_from_glob(\"cuda/src/**/*.cu\"); // Import all `.cu` files with zero or more nested sub-folders under `cuda/src`of this crate\n\n        builder.emit_link_directives();\n        builder.build();\n    }\n}\n"
  },
  {
    "path": "openvm/cuda/src/apc_apply_bus.cu",
    "content": "#include <stdint.h>\n#include <assert.h>\n#include <stdio.h>\n#include \"primitives/buffer_view.cuh\"\n#include \"primitives/constants.h\"\n#include \"primitives/trace_access.h\"\n#include \"primitives/histogram.cuh\"\n#include \"expr_eval.cuh\"\n\nextern \"C\" {\n  typedef struct {\n    uint32_t bus_id; // Bus id this interaction targets (matches periphery chip bus id)\n    uint32_t num_args; // Number of argument expressions for this interaction\n    uint32_t args_index_off; // Starting index into the `ExprSpan` array for this interaction's args. Layout: [mult, arg0, arg1, ...]\n  } DevInteraction;\n}\n\n// Fixed number of bits for bitwise lookup\nstatic constexpr uint32_t BITWISE_NUM_BITS = 8u;\n\n// Applies bus interactions to periphery histograms for a batch of APC rows\n__global__ void apc_apply_bus_kernel(\n  // APC related\n  const Fp* __restrict__ d_output, // APC trace (column-major)\n  int num_apc_calls, // number of APC calls (rows)\n\n  // Interaction related\n  const uint32_t* __restrict__ d_bytecode, // bytecode for stack-machine expressions\n  size_t bc_len, // bytecode length (u32 words)\n  const DevInteraction* __restrict__ d_interactions, // interactions array\n  size_t n_interactions, // number of interactions\n  const ExprSpan* __restrict__ d_arg_spans, // argument spans array\n  size_t n_arg_spans, // number of arg spans\n\n  // Variable range checker related\n  uint32_t var_range_bus_id, // variable range checker bus id\n  uint32_t* __restrict__ d_var_hist, // variable range histogram buffer\n  size_t var_num_bins, // variable range histogram bin count\n\n  // Tuple range checker related\n  uint32_t tuple2_bus_id, // 2-tuple range checker bus id\n  uint32_t* __restrict__ d_tuple2_hist, // tuple2 histogram buffer\n  uint32_t tuple2_sz0, // tuple2 size dim0\n  uint32_t tuple2_sz1, // tuple2 size dim1\n\n  // Bitwise related\n  uint32_t bitwise_bus_id, // bitwise lookup bus id\n  uint32_t* __restrict__ d_bitwise_hist // bitwise 
lookup histogram buffer\n) {\n  // The warp this thread belongs to, as a CUDA warp is 32 threads\n  const int warp = (threadIdx.x >> 5);\n  // The thread's position within this wrap\n  const int lane = (threadIdx.x & 31);\n  // The number of warps in a block\n  const int warps_per_block = (blockDim.x >> 5);\n\n  // Each bus interaction is processed by one warp\n  for (int i = blockIdx.x * warps_per_block + warp; i < (int)n_interactions; i += gridDim.x * warps_per_block) {\n    DevInteraction intr = d_interactions[i];\n\n    // Each row is processed by one lane\n    for (int r = lane; r < num_apc_calls; r += 32) {\n      // multiplicity is stored as the first ExprSpan for this interaction\n      ExprSpan mult_span = d_arg_spans[intr.args_index_off + 0];\n      Fp mult = eval_arg(mult_span, d_bytecode, d_output, (size_t)r);\n      // Evaluate args and apply based on bus id\n      if (intr.bus_id == var_range_bus_id) {\n        // expect [value, max_bits]\n        ExprSpan s0 = d_arg_spans[intr.args_index_off + 1];\n        ExprSpan s1 = d_arg_spans[intr.args_index_off + 2];\n        Fp v_fp = eval_arg(s0, d_bytecode, d_output, (size_t)r);\n        Fp b_fp = eval_arg(s1, d_bytecode, d_output, (size_t)r);\n        \n        // histogram `num_bins` and index calculation depend on the `VariableRangeCheckerChipGPU` implementation\n        uint32_t value = v_fp.asUInt32();\n        uint32_t max_bits = b_fp.asUInt32();\n        lookup::Histogram hist(d_var_hist, (uint32_t)var_num_bins);\n        uint32_t idx = (1u << max_bits) + value; // `max_bit` \n\n        // apply multiplicity by looping; warp-level dedup in Histogram minimizes contention\n        for (uint32_t k = 0; k < (uint32_t)mult.asUInt32(); ++k) hist.add_count(idx);\n      } else if (intr.bus_id == tuple2_bus_id) {\n        // expect [v0, v1]\n        ExprSpan s0 = d_arg_spans[intr.args_index_off + 1];\n        ExprSpan s1 = d_arg_spans[intr.args_index_off + 2];\n        Fp v0_fp = eval_arg(s0, d_bytecode, 
d_output, (size_t)r);\n        Fp v1_fp = eval_arg(s1, d_bytecode, d_output, (size_t)r);\n        \n        // histogram `num_bins` and index calculation depend on the `RangeTupleCheckerChipGpu<2>` implementation\n        uint32_t v0 = v0_fp.asUInt32();\n        uint32_t v1 = v1_fp.asUInt32();\n        lookup::Histogram hist(d_tuple2_hist, tuple2_sz0 * tuple2_sz1);\n        uint32_t idx = v0 * tuple2_sz1 + v1;\n        \n        for (uint32_t k = 0; k < (uint32_t)mult.asUInt32(); ++k) hist.add_count(idx);\n      } else if (intr.bus_id == bitwise_bus_id) {\n        // expect [x, y, x_xor_y, selector]; we only update histogram if selector==range(0) or xor(1)\n        ExprSpan s0 = d_arg_spans[intr.args_index_off + 1];\n        ExprSpan s1 = d_arg_spans[intr.args_index_off + 2];\n        ExprSpan s2 = d_arg_spans[intr.args_index_off + 3];\n        ExprSpan s3 = d_arg_spans[intr.args_index_off + 4];\n        Fp x_fp = eval_arg(s0, d_bytecode, d_output, (size_t)r);\n        Fp y_fp = eval_arg(s1, d_bytecode, d_output, (size_t)r);\n        Fp xy_fp = eval_arg(s2, d_bytecode, d_output, (size_t)r);\n        Fp sel_fp = eval_arg(s3, d_bytecode, d_output, (size_t)r);\n\n        uint32_t x = x_fp.asUInt32();\n        uint32_t y = y_fp.asUInt32();\n        uint32_t xy = xy_fp.asUInt32();\n        uint32_t selector = sel_fp.asUInt32();\n        BitwiseOperationLookup bl(d_bitwise_hist, BITWISE_NUM_BITS);\n        \n        for (uint32_t k = 0; k < (uint32_t)mult.asUInt32(); ++k) {\n          if (selector == 0u) bl.add_range(x, y);\n          else if (selector == 1u) { bl.add_xor(x, y); /* could assert xy correctness on device if needed */ }\n          else { assert(false && \"Invalid selector\"); }\n        }\n        (void)xy;\n      }\n    }\n  }\n}\n\n// ============================================================================================\n// Host launcher wrapper — callable from Rust FFI or cudarc\n// 
============================================================================================\n\nextern \"C\" int _apc_apply_bus(\n  // APC related\n  const Fp* d_output, // APC trace (column-major), device pointer\n  int num_apc_calls, // number of APC calls (rows)\n\n  // Interaction related\n  const uint32_t* d_bytecode, // bytecode buffer (device)\n  size_t bytecode_len, // length of bytecode (u32 words)\n  const DevInteraction* d_interactions, // interactions array (device)\n  size_t n_interactions, // number of interactions\n  const ExprSpan* d_arg_spans, // argument spans (device)\n  size_t n_arg_spans, // number of arg spans\n\n  // Variable range checker related\n  uint32_t var_range_bus_id, // variable range checker bus id\n  uint32_t* d_var_hist, // variable range histogram (device)\n  size_t var_num_bins, // number of bins in variable range histogram\n\n  // Tuple range checker related\n  uint32_t tuple2_bus_id, // 2-tuple range checker bus id\n  uint32_t* d_tuple2_hist, // tuple2 histogram (device)\n  uint32_t tuple2_sz0, // tuple2 size dim0\n  uint32_t tuple2_sz1, // tuple2 size dim1\n\n  // Bitwise related\n  uint32_t bitwise_bus_id, // bitwise lookup bus id\n  uint32_t* d_bitwise_hist // bitwise lookup histogram (device)\n) {\n  const int block_x = 256; // 8 warps\n  const dim3 block(block_x, 1, 1);\n  const unsigned warps_per_block = (unsigned)(block_x / 32);\n  size_t g_size = (n_interactions + (size_t)warps_per_block - 1) / (size_t)warps_per_block;\n  unsigned g = (unsigned)g_size;\n  if (g == 0u) g = 1u;\n  const dim3 grid(g, 1, 1); // each warp processes an interaction\n\n  apc_apply_bus_kernel<<<grid, block>>>(\n    // APC related\n    d_output, num_apc_calls,\n\n    // Interaction related\n    d_bytecode, bytecode_len, d_interactions, n_interactions, d_arg_spans, n_arg_spans,\n\n    // Variable range checker related\n    var_range_bus_id, d_var_hist, var_num_bins,\n\n    // Tuple range checker related\n    tuple2_bus_id, d_tuple2_hist, 
tuple2_sz0, tuple2_sz1,\n\n    // Bitwise related\n    bitwise_bus_id, d_bitwise_hist\n  );\n  return (int)cudaGetLastError();\n}\n"
  },
  {
    "path": "openvm/cuda/src/apc_tracegen.cu",
    "content": "#include \"primitives/buffer_view.cuh\"\n#include \"primitives/constants.h\"\n#include \"primitives/trace_access.h\"\n#include \"expr_eval.cuh\"\n\n// ============================================================================================\n// Types\n// ============================================================================================\n\nstruct OriginalAir {\n    int width;               // number of columns\n    int height;              // number of rows (Ha)\n    const Fp* buffer;        // column-major base: col*height + row\n    int row_block_size;      // stride between used rows\n};\n\nstruct Subst {\n    int air_index; // index into d_original_airs\n    int col;      // source column within this AIR\n    int row;      // base row offset within the row-block\n    int apc_col;  // destination APC column\n};\n\nextern \"C\" {\n  typedef struct {\n    uint64_t col_base; // precomputed destination base offset = apc_col_index * H\n    ExprSpan span;   // expression span encoding this column's value\n  } DerivedExprSpec;\n}\n\n// ============================================================================================\n// Kernel: each thread iterates rows and processes all substitutions.\n// ============================================================================================\n\n__global__ void apc_tracegen_kernel(\n    Fp* __restrict__ d_output,                         // column-major\n    size_t H,                                          // height of the output\n    const OriginalAir* __restrict__ d_original_airs,   // metadata per AIR\n    const Subst* __restrict__ d_subs,                  // all substitutions\n    size_t n_subs,                                     // number of substitutions\n    int num_apc_calls                                  // number of APC calls\n) {\n    const size_t total_threads = (size_t)gridDim.x * (size_t)blockDim.x;\n    const size_t tid = (size_t)blockIdx.x * (size_t)blockDim.x + 
(size_t)threadIdx.x;\n\n    for (size_t r = tid; r < H; r += total_threads) {\n        const bool row_in_range = r < (size_t)num_apc_calls;\n\n        for (size_t i = 0; i < n_subs; ++i) {\n            const Subst sub = d_subs[i];\n            const size_t dst_idx = (size_t)sub.apc_col * H + r;\n\n            if (!row_in_range) {\n                d_output[dst_idx] = Fp(0);\n                continue;\n            }\n\n            const size_t air_idx = (size_t)sub.air_index;\n            const OriginalAir air = d_original_airs[air_idx];\n            const Fp* __restrict__ src_base = air.buffer;\n            const size_t src_col_base = (size_t)sub.col * (size_t)air.height;\n            const size_t src_r = (size_t)sub.row + r * (size_t)air.row_block_size;\n            d_output[dst_idx] = src_base[src_col_base + src_r];\n        }\n    }\n}\n\n// ============================================================================================\n// Derived expressions: lane-per-row evaluator, sequential over derived columns per row\n// ============================================================================================\n\n__global__ void apc_apply_derived_expr_kernel(\n    Fp* __restrict__ d_output,   // APC trace (column-major)\n    size_t H,                    // rows (height)\n    int num_apc_calls,           // number of valid rows\n    const DerivedExprSpec* __restrict__ d_specs, // derived expression specs\n    size_t n_cols,               // number of derived columns\n    const uint32_t* __restrict__ d_bytecode // shared bytecode buffer\n) {\n    const size_t total_threads = (size_t)gridDim.x * (size_t)blockDim.x;\n    const size_t tid = (size_t)blockIdx.x * (size_t)blockDim.x + (size_t)threadIdx.x;\n\n    for (size_t r = tid; r < H; r += total_threads) {\n        if (r < (size_t)num_apc_calls) {\n            // Compute and write each derived column for this row\n            for (size_t i = 0; i < n_cols; ++i) {\n                const DerivedExprSpec spec = 
d_specs[i];\n                const size_t col_base = (size_t)spec.col_base;\n                const Fp v = eval_arg(spec.span, d_bytecode, d_output, r);\n                d_output[col_base + r] = v;\n            }\n        } else {\n            // Zero-fill non-APC rows\n            for (size_t i = 0; i < n_cols; ++i) {\n                const size_t col_base = (size_t)d_specs[i].col_base;\n                d_output[col_base + r] = Fp(0);\n            }\n        }\n    }\n}\n\n// ============================================================================================\n// Host launcher wrappers — callable from Rust FFI or cudarc\n// ============================================================================================\n\nextern \"C\" int _apc_apply_derived_expr(\n    Fp*                d_output,\n    size_t             H,\n    int                num_apc_calls,\n    const DerivedExprSpec* d_specs,\n    size_t             n_cols,\n    const uint32_t*    d_bytecode\n) {\n    if (n_cols == 0) return 0;\n    const int block_x = 256; // more lanes to cover rows\n    const dim3 block(block_x, 1, 1);\n    unsigned g = (unsigned)((H + block_x - 1) / block_x);\n    if (g == 0u) g = 1u;\n    const dim3 grid(g, 1, 1);\n    apc_apply_derived_expr_kernel<<<grid, block>>>(\n        d_output, H, num_apc_calls, d_specs, n_cols, d_bytecode\n    );\n    return (int)cudaGetLastError();\n}\n\nextern \"C\" int _apc_tracegen(\n    Fp*                      d_output,          // [output_height * output_width], column-major\n    size_t                   output_height,     // H_out\n    const OriginalAir*       d_original_airs,   // device array of AIR metadata\n    const Subst*             d_subs,            // device array of all substitutions\n    size_t                   n_subs,            // number of substitutions\n    int                      num_apc_calls      // number of APC calls\n) {\n    assert((output_height & (output_height - 1)) == 0);  // power-of-two height check\n\n   
 const int block_x = 256;\n    const dim3 block(block_x, 1, 1);\n    unsigned g = (unsigned)((output_height + block_x - 1) / block_x);\n    if (g == 0u) g = 1u;\n    const dim3 grid(g, 1, 1);\n\n    apc_tracegen_kernel<<<grid, block>>>(\n        d_output, output_height, d_original_airs, d_subs, n_subs, num_apc_calls\n    );\n    return (int)cudaGetLastError();\n}\n"
  },
  {
    "path": "openvm/cuda/src/expr_eval.cuh",
    "content": "#pragma once\n\n#include <stdint.h>\n#include <assert.h>\n\n// This header provides a tiny stack-machine evaluator for algebraic expressions\n// used by both bus and derived-expression evaluation kernels.\n//\n// It assumes the including translation unit has included the definitions of `Fp`,\n// `inv`, and any required primitives.\n\nenum OpCode : uint32_t {\n  OP_PUSH_APC = 0, // Push the APC value onto the stack. Must be followed by the index of the value in the APC device buffer.\n  OP_PUSH_CONST = 1, // Push a constant value onto the stack. Must be followed by the constant value.\n  OP_ADD = 2, // Add the top two values on the stack.\n  OP_SUB = 3, // Subtract the top two values on the stack.\n  OP_MUL = 4, // Multiply the top two values on the stack.\n  OP_NEG = 5, // Negate the top value on the stack.\n  OP_INV_OR_ZERO = 6, // Invert the top value on the stack if it is not zero, otherwise pop and push zero.\n};\n\nstatic constexpr int STACK_CAPACITY = 16;\n\n// Inline helpers to safely manipulate the evaluation stack\n__device__ __forceinline__ void stack_push(Fp* stack, int& sp, Fp value) {\n  assert(sp < STACK_CAPACITY && \"Stack overflow\");\n  stack[sp++] = value;\n}\n\n__device__ __forceinline__ Fp stack_pop(Fp* stack, int& sp) {\n  assert(sp > 0 && \"Stack underflow\");\n  return stack[--sp];\n}\n\n// Evaluate expression encoded as u32 bytecode starting at `expr` for length `len` on a given APC row `r` of `apc_trace`.\n__device__ __forceinline__ Fp eval_expr(const uint32_t* expr, uint32_t len,\n                                        const Fp* __restrict__ apc_trace, size_t r) {\n  Fp stack[STACK_CAPACITY];\n  int sp = 0;\n  for (uint32_t ip = 0; ip < len;) {\n    const uint32_t op = expr[ip++];\n    switch (op) {\n      case OP_PUSH_APC: {\n        const uint32_t base = expr[ip++];\n        stack_push(stack, sp, apc_trace[base + r]);\n        break;\n      }\n      case OP_PUSH_CONST: {\n        const uint32_t u = expr[ip++];\n        
stack_push(stack, sp, Fp(u));\n        break;\n      }\n      case OP_ADD: {\n        const Fp b = stack_pop(stack, sp);\n        const Fp a = stack_pop(stack, sp);\n        stack_push(stack, sp, a + b);\n        break;\n      }\n      case OP_SUB: {\n        const Fp b = stack_pop(stack, sp);\n        const Fp a = stack_pop(stack, sp);\n        stack_push(stack, sp, a - b);\n        break;\n      }\n      case OP_MUL: {\n        const Fp b = stack_pop(stack, sp);\n        const Fp a = stack_pop(stack, sp);\n        stack_push(stack, sp, a * b);\n        break;\n      }\n      case OP_NEG: {\n        const Fp a = stack_pop(stack, sp);\n        stack_push(stack, sp, -a);\n        break;\n      }\n      case OP_INV_OR_ZERO: {\n        const Fp a = stack_pop(stack, sp);\n        const Fp out = (a == Fp::zero()) ? Fp::zero() : inv(a);\n        stack_push(stack, sp, out);\n        break;\n      }\n      default: {\n        assert(false && \"Unknown opcode\");\n      }\n    }\n  }\n  assert(sp == 1);\n  return stack[sp - 1];\n}\n\n// Span (offset, length) of a sub-expression within a shared bytecode buffer\nstruct ExprSpan {\n  uint32_t off;\n  uint32_t len;\n};\n\n// Evaluate an argument span from a shared bytecode buffer for APC row `r`\n__device__ __forceinline__ Fp eval_arg(\n  const ExprSpan& span,\n  const uint32_t* __restrict__ d_bytecode,\n  const Fp* __restrict__ apc_trace,\n  size_t r\n) {\n  return eval_expr(d_bytecode + span.off, span.len, apc_trace, r);\n}\n\n"
  },
  {
    "path": "openvm/metrics-viewer/CLAUDE.md",
    "content": "# Metrics Viewer\n\nSingle-page web app for visualizing proof metrics from OpenVM benchmarks. This is a web port of the Python scripts [`basic_metrics.py`](../../openvm-riscv/scripts/basic_metrics.py) and [`plot_trace_cells.py`](../../openvm-riscv/scripts/plot_trace_cells.py), following the same pattern as the [autoprecompile-analyzer](../../autoprecompile-analyzer/index.html).\n\nThe goal is to make benchmark results shareable via URL without needing a Python environment.\n\n## Project Structure\n```\nindex.html          # SPA with embedded JS/CSS (D3.js v7, Bootstrap 5.3)\nspec.py             # Python reference implementation of metric computations (for auditing)\nCLAUDE.md           # This file\n```\n\n## Data Format\n\nInput can be either of these formats:\n\n1. **Combined metrics JSON** — produced by `basic_metrics.py combine`. It maps run names to raw metrics objects:\n\n```json\n{\n  \"<run_name>\": {\n    \"counter\": [\n      { \"labels\": [[\"group\", \"app_proof\"], [\"air_name\", \"SomeAir\"], [\"segment\", \"0\"], ...], \"metric\": \"cells\", \"value\": \"123456\" },\n      ...\n    ],\n    \"gauge\": [\n      { \"labels\": [[\"group\", \"app_proof\"], ...], \"metric\": \"total_proof_time_ms\", \"value\": \"45678\" },\n      ...\n    ]\n  },\n  \"<run_name_2>\": { ... }\n}\n```\n\n2. **Raw metrics JSON** — a single experiment object with top-level `counter` and `gauge` keys:\n\n```json\n{\n  \"counter\": [ ... ],\n  \"gauge\": [ ... ]\n}\n```\n\nIf the top-level object has both `counter` and `gauge`, the viewer treats it as a raw metrics file and renders a single experiment. 
Otherwise it treats the object as combined metrics and validates each experiment entry.\n\nEach entry in `counter` / `gauge` must have:\n- `labels`: Array of `[key, value]` pairs.\n- `metric`: Metric name string.\n- `value`: String-encoded numeric value (or numeric in practice; the UI accepts both).\n\n### OpenVM 1 Schema\n\nProduced by OpenVM 1 (STARK-based prover with FRI).\n\n**Label keys**: `group`, `air_name`, `air_id`, `segment`, `idx`, `trace_height_constraint`.\n\n**Group values**: `app_proof`, `leaf_*` (e.g. `leaf_0`), `internal_*` (e.g. `internal_0`).\n\n**Counter metrics** (with `group`):\n- `cells`, `rows`, `main_cols`, `prep_cols`, `perm_cols` — trace dimensions (per AIR/segment)\n- `total_cells`, `total_cells_used`, `main_cells_used` — cell counts including padding (per segment, no `air_name`)\n- `constraints`, `interactions` — per-AIR constraint/interaction counts (no `group`/`segment` labels)\n- `quotient_deg`, `fri.log_blowup`, `num_children` — FRI/quotient parameters\n- `threshold`, `weighted_sum`, `execute_metered_insns`, `execute_preflight_insns`\n\n**Gauge metrics** (timing, with `group`):\n- `total_proof_time_ms` — total time per group (app/leaf/internal)\n- `stark_prove_excluding_trace_time_ms` — STARK prover time minus trace generation\n- `trace_gen_time_ms`, `system_trace_gen_time_ms`, `single_trace_gen_time_ms`\n- `execute_preflight_time_ms`, `execute_metered_time_ms`\n- `main_trace_commit_time_ms`, `perm_trace_commit_time_ms`\n- `generate_perm_trace_time_ms`, `memory_to_vec_partition_time_ms`\n- `quotient_poly_compute_time_ms`, `quotient_poly_commit_time_ms`, `pcs_opening_time_ms`\n- `single_leaf_agg_time_ms`, `single_internal_agg_time_ms`, `agg_layer_time_ms`\n- `app_prove_time_ms`, `prove_segment_time_ms`\n- `total_apc_gen_time_ms`, `memory_finalize_time_ms`, `compute_user_public_values_proof_time_ms`\n- `dummy_proof_and_keygen_time_ms`\n\n### OpenVM 2 Schema\n\nProduced by OpenVM 2 (uses GKR/LogUp-based prover with WHIR).\n\n**Label 
keys**: All V1 keys plus `air`, `module`, `phase`.\n\n**Group values**: `app_proof`, `leaf`, `compression`, `internal_for_leaf`, `internal_recursive.0`, `internal_recursive.1`, `internal_recursive.2`.\n\nKey differences from V1:\n- `compression` is a new proving phase (not present in V1)\n- `leaf` has no numeric suffix (V1 used `leaf_*`)\n- Internal groups split into `internal_for_leaf` and `internal_recursive.N`\n\n**Counter metrics**:\n- Same as V1: `cells`, `rows`, `main_cols`, `prep_cols`, `perm_cols`, `total_cells`, `constraints`, `interactions`\n- New: `constraint_deg` (replaces V1's `quotient_deg`)\n- Removed: `total_cells_used`, `main_cells_used`, `quotient_deg`, `fri.log_blowup`, `num_children`\n\n**Gauge metrics** — timing breakdown is hierarchical with `prover.*` prefix:\n- Top-level (same as V1): `total_proof_time_ms`, `stark_prove_excluding_trace_time_ms`, `trace_gen_time_ms`, `execute_preflight_time_ms`, `execute_metered_time_ms`\n- New `prover.*` sub-metrics:\n  - `prover.main_trace_commit_time_ms` — trace commitment\n  - `prover.rap_constraints_time_ms` — constraint evaluation (parent)\n    - `prover.rap_constraints.logup_gkr_time_ms` — LogUp GKR\n    - `prover.rap_constraints.logup_gkr.input_evals_time_ms`\n    - `prover.rap_constraints.round0_time_ms`, `prover.rap_constraints.ple_round0_time_ms`, `prover.rap_constraints.mle_rounds_time_ms`\n  - `prover.openings_time_ms` — opening proofs (parent)\n    - `prover.openings.stacked_reduction_time_ms`, `prover.openings.whir_time_ms`\n    - `prover.openings.stacked_reduction.round0_time_ms`, `prover.openings.stacked_reduction.mle_rounds_time_ms`\n- New GPU metrics: `fractional_sumcheck_gpu_time_ms`, `prove_zerocheck_and_logup_gpu_time_ms`, `compute_merkle_precomputation_cuda_time_ms`\n- New GPU memory: `gpu_mem.current_bytes`, `gpu_mem.local_peak_bytes`, `gpu_mem.reserved_bytes`, `gpu_mem.timestamp_ms`\n- New phases: `compression_time_ms`, `generate_cached_trace_time_ms`, `generate_proving_ctxs_time_ms`, 
`generate_blob_time_ms`, `set_initial_memory_time_ms`\n- `module` label: prover sub-module breakdown (e.g. `prover.merkle_tree`, `prover.openings`, `frac_sumcheck.*`, `tracegen.*`)\n- `air` label: used by `generate_cached_trace_time_ms` and `single_trace_gen_time_ms` (contains full Rust type name, distinct from `air_name`)\n- Removed: `dummy_proof_and_keygen_time_ms`, `generate_perm_trace_time_ms`, `perm_trace_commit_time_ms`, `quotient_poly_*_time_ms`, `pcs_opening_time_ms`, `memory_to_vec_partition_time_ms`\n\n### Version Detection\n\nThe viewer auto-detects the OpenVM version by checking for `logup_gkr` in metric names (V2-only). The detected version is displayed as a badge in the navbar.\n\n### Proof Time Hierarchy\n\nIn both V1 and V2, `execute_metered_time_ms` runs *before* segment proving and sits *outside* per-segment `total_proof_time_ms`. The viewer reports metered execution as a separate top-level phase and uses `sum(total_proof_time_ms)` for the app phase.\n\n**V1**:\n```\ntotal = metered + sum(app.total_proof_time_ms) + leaf.total_proof_time_ms + internal.total_proof_time_ms\napp.total_proof_time_ms ≈ sum_per_segment(preflight + trace_gen + stark_excl) + small overhead\n```\n\n**V2**:\n```\ntotal = metered + sum(app.total_proof_time_ms) + leaf.total_proof_time_ms + internal.total_proof_time_ms + compression.total_proof_time_ms\napp.total_proof_time_ms ≈ sum_per_segment(preflight + set_initial_memory + trace_gen + stark_excl) + small overhead\nstark_excl ≈ prover.main_trace_commit + prover.rap_constraints + prover.openings\n```\n\nThe V2 stacked bar chart breaks STARK into three sub-components (constraints, openings, trace commit) plus a small \"STARK other\" residual.\n\nGenerate a combined file with:\n```bash\npython3 openvm-riscv/scripts/basic_metrics.py combine **/metrics.json > combined_metrics.json\n```\n\nExample input files:\n- OpenVM 1 — Keccak: 
https://github.com/powdr-labs/bench-results/blob/gh-pages/results/2026-03-23-0535/keccak/combined_metrics.json\n- OpenVM 1 — Reth (older format, no constraints/interactions): https://github.com/powdr-labs/bench-results/blob/gh-pages/results/2026-03-23-0535/reth/combined_metrics.json\n- OpenVM 2 — Pairing: https://gist.githubusercontent.com/leonardoalt/3074cb729c03470b1116674618b97267/raw/eec5e5a086bf07a57e2215843f0a3f1ada9d0d5c/metrics_v2_pairing_combined.json\n\n## Testing\n\nStart server and open with example data:\n```bash\ncd openvm/metrics-viewer && python3 -m http.server 8000\n```\n\nLoad data via file upload (drag-drop) or URL parameter:\n```\nhttp://localhost:8000/?data=<url>&run=<name>\n```\n\nFor raw metrics JSON loaded from a URL, the viewer infers the experiment name from the path (for example `/apc030/metrics.json` becomes `apc030`).\n\nExample, using the data above and pre-selecting the `apc030` run:\n```\nhttp://localhost:8000/?data=https%3A%2F%2Fgithub.com%2Fpowdr-labs%2Fbench-results%2Fblob%2Fgh-pages%2Fresults%2F2026-03-19-0538%2Fkeccak%2Fcombined_metrics.json&baseline=apc000&run=apc030\n```\n\nVerify:\n- Summary table shows key metrics for all runs\n- Stacked bar chart shows proof time breakdown; \"By Component\" tab shows grouped bars\n- Clicking a run shows experiment details (details table + trace cell pie chart)\n- URL updates with selected run and data source\n- Version badge in navbar shows \"OpenVM 1\" or \"OpenVM 2\"\n- For OpenVM 2: compression time appears in the breakdown, \"App Cells (without padding)\" row is hidden\n\n## URL Parameters\n\n```\n?data=<url>           # Data source (loads raw or combined metrics JSON; GitHub blob URLs auto-converted to raw)\n&run=<name>           # Pre-select a run by name\n```\n\n## Code Structure\n\nThe JavaScript in `index.html` is organized into clearly separated sections:\n\n1. 
**Data Processing** — ports of Python logic, these are the core functions that compute all displayed numbers:\n   - `normalizeMetricsData(json, sourceLabel)` — validates the incoming JSON shape, distinguishes raw-vs-combined input, and wraps raw files as a single experiment.\n   - `detectOpenVmVersion(combinedData)` — returns `1` or `2` by checking for `logup_gkr` in metric names (V2-only).\n   - `loadMetricsDataframes(json)` — port of [`metrics_utils.py:load_metrics_dataframes`](../../openvm-riscv/scripts/metrics_utils.py). Flattens `counter`+`gauge` arrays into entries, splits by `group` prefix into `app`, `leaf`, `internal`, `compression`.\n   - `isNormalInstructionAir(name)` — port of [`metrics_utils.py:is_normal_instruction_air`](../../openvm-riscv/scripts/metrics_utils.py). Classifies AIR names as normal RISC-V instructions vs. precompiles.\n   - `getMetric(entries, name)` — sums `value` for all entries matching a metric name.\n   - `extractMetrics(runName, json)` — port of [`basic_metrics.py:extract_metrics`](../../openvm-riscv/scripts/basic_metrics.py). Computes all summary metrics (proof times, cell counts, ratios) from raw JSON.\n   - `computeCellsByAir(json)` — port of [`plot_trace_cells.py:compute_cells_by_air`](../../openvm-riscv/scripts/plot_trace_cells.py). Aggregates cells by AIR name with 1.5% threshold.\n\n2. **Metric Descriptions** — `METRIC_INFO` object (search for `const METRIC_INFO`). Single source of truth for human-readable descriptions and Python code snippets for every computed metric. Displayed as info-icon tooltips in the detail tables. When adding a new metric to the detail rows, add a corresponding entry here.\n\n3. **Constants** — `COMPONENTS_V1`/`COMPONENTS_V2` (proof time breakdown components with colors), `TABLE_COLUMNS`, detail row arrays (`BASIC_STATS_ROWS_V1`/`V2`, `PROOF_TIME_ROWS_V1`/`V2`). Version-aware getters (`getComponents()`, `getBasicStatsRows()`, `getProofTimeRows()`) return the right variant.\n\n4. 
**Chart Components** — `createBarChart()`, `createGroupedBarChart()`, `createPieChart()`, each rendering into its container.\n\n5. **Table Components** — `createSummaryTable()`, `renderDetails()`.\n\n6. **Data Loading & URL Handling** — file upload, URL fetch, parameter sync.\n"
  },
  {
    "path": "openvm/metrics-viewer/index.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <meta charset=\"UTF-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n    <title>OpenVM Metrics Viewer</title>\n    <link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css\" rel=\"stylesheet\">\n    <script src=\"https://d3js.org/d3.v7.min.js\"></script>\n    <style>\n        body {\n            background-color: #f8f9fa;\n        }\n\n        .navbar {\n            background: white;\n            border-bottom: 1px solid #dee2e6;\n            box-shadow: 0 1px 4px rgba(0,0,0,0.06);\n        }\n        .navbar-brand { color: #333; font-weight: 700; cursor: pointer; text-decoration: none; }\n        .navbar-brand:hover { color: #000; }\n        .data-source {\n            color: #888; font-size: 0.85rem; margin-left: 1rem;\n            flex: 1 1 auto; max-width: calc(100vw - 300px);\n            white-space: nowrap; overflow: hidden; text-overflow: ellipsis;\n            text-align: right; font-family: 'Courier New', monospace;\n        }\n        .data-source a { color: #666; text-decoration: underline; }\n        .data-source a:hover { color: #333; }\n        .copy-icon:hover { color: #333 !important; }\n        .copy-icon .copy-check { display: none; }\n        .copy-icon .copy-default { display: inline; }\n        .copy-icon.copied .copy-check { display: inline; }\n        .copy-icon.copied .copy-default { display: none; }\n        .copy-icon.copied { color: #1a7f37 !important; }\n\n        /* Upload section */\n        #uploadSection {\n            display: flex;\n            justify-content: center;\n            align-items: center;\n            min-height: 80vh;\n        }\n        .upload-container { max-width: 600px; width: 100%; }\n        .dropzone {\n            border: 3px dashed #dee2e6;\n            border-radius: 10px;\n            padding: 40px;\n            text-align: center;\n            cursor: pointer;\n            
transition: all 0.3s;\n            background-color: white;\n        }\n        .dropzone:hover, .dropzone.drag-over {\n            border-color: #0d6efd;\n            background-color: #e7f1ff;\n        }\n        .dropzone h4 { color: #333; }\n        .url-input-group { margin-top: 1.5rem; }\n        .btn-load {\n            background: #0d6efd;\n            border: none;\n            color: white;\n        }\n        .btn-load:hover { background: #0b5ed7; color: white; }\n\n        /* App section */\n        .format-hint {\n            margin-top: 0.75rem;\n            text-align: center;\n            color: #6c757d;\n            font-size: 0.9rem;\n        }\n        #appSection { display: none; }\n        .pane {\n            background-color: white;\n            border-radius: 10px;\n            padding: 20px;\n            margin-bottom: 1rem;\n            box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);\n        }\n        .pane h5 {\n            color: #333;\n            margin-bottom: 0.75rem;\n            font-weight: 600;\n        }\n\n        /* Summary table */\n        .summary-table {\n            font-size: 0.85rem;\n        }\n        .summary-table th {\n            background: #f8f9fa;\n            color: #333;\n            padding: 8px;\n            white-space: nowrap;\n            cursor: pointer;\n            user-select: none;\n            border-bottom: 2px solid #dee2e6;\n        }\n        .summary-table th:hover { background: #e9ecef; }\n        .summary-table td {\n            padding: 6px 8px;\n            border-bottom: 1px solid #dee2e6;\n            white-space: nowrap;\n        }\n        .summary-table tr {\n            cursor: pointer;\n            transition: background 0.15s;\n        }\n        .summary-table tr:hover { background: #e7f1ff; }\n        .summary-table tr.selected { background: #cfe2ff; outline: 2px solid #0d6efd; }\n        .summary-table .baseline-radio { text-align: center; width: 1px; }\n        .summary-table 
.baseline-radio input { cursor: pointer; }\n\n        /* Pie chart placeholder */\n        .pie-placeholder {\n            display: flex;\n            justify-content: center;\n            align-items: center;\n            min-height: 300px;\n            color: #999;\n            font-style: italic;\n        }\n\n        /* Details table */\n        .details-table {\n            width: 100%;\n            font-size: 0.82rem;\n            margin-bottom: 0.25rem;\n        }\n        .details-table th {\n            background: #f8f9fa;\n            color: #666;\n            padding: 4px 8px;\n            font-weight: 600;\n            white-space: nowrap;\n            width: 50%;\n        }\n        .details-table td {\n            padding: 4px 8px;\n            white-space: nowrap;\n        }\n        .details-table tr { border-bottom: 1px solid #eee; }\n\n        /* Page max width */\n        #appSection { max-width: 1600px; margin-left: auto; margin-right: auto; }\n\n        /* Equal height chart row */\n        .chart-row { display: flex; }\n        .chart-row > .chart-col-left { flex: 1; min-width: 0; display: flex; flex-direction: column; }\n        .chart-row > .chart-col-right { flex: 1; min-width: 0; display: flex; flex-direction: column; }\n        .chart-row > .chart-col-right > .pane { flex: 1; }\n        .chart-row .pane { display: flex; flex-direction: column; }\n        .chart-row .pane-body { flex: 1; overflow-y: auto; }\n\n        /* Details layout: table + pie side by side */\n        .details-row { display: flex; gap: 1rem; }\n        .details-row .details-table-col { flex: 1; min-width: 0; }\n        .details-row .details-pie-col { flex-shrink: 0; }\n\n        /* Chart tooltips */\n        .chart-tooltip {\n            position: absolute;\n            pointer-events: none;\n            background: rgba(255, 255, 255, 0.97);\n            border: 1px solid #dee2e6;\n            border-radius: 6px;\n            padding: 8px 12px;\n            
font-size: 0.8rem;\n            color: #333;\n            z-index: 100;\n            white-space: nowrap;\n            box-shadow: 0 2px 8px rgba(0,0,0,0.15);\n        }\n\n        /* Chart tabs */\n        .nav-tabs .nav-link {\n            color: #666;\n            font-size: 0.85rem;\n            padding: 0.3rem 0.75rem;\n        }\n        .nav-tabs .nav-link.active {\n            color: #333;\n            font-weight: 600;\n        }\n\n        /* Stacked bar chart */\n        .bar-group { cursor: pointer; }\n        .bar-group:hover rect { opacity: 0.85; }\n\n        /* Info icon tooltip */\n        .details-table th { position: relative; padding-right: 24px; }\n        .metric-info {\n            position: absolute;\n            right: 4px;\n            top: 50%;\n            transform: translateY(-50%);\n            width: 14px;\n            height: 14px;\n            line-height: 14px;\n            text-align: center;\n            font-size: 9px;\n            font-style: normal;\n            font-weight: 700;\n            color: #bbb;\n            border: 1px solid #ddd;\n            border-radius: 50%;\n            cursor: help;\n        }\n        .metric-info:hover { color: #555; border-color: #999; }\n        .metric-info-tooltip {\n            display: none;\n            position: fixed;\n            z-index: 200;\n            background: #fff;\n            border: 1px solid #ccc;\n            border-radius: 6px;\n            padding: 8px 10px;\n            font-size: 0.78rem;\n            font-weight: 400;\n            color: #333;\n            white-space: normal;\n            width: 500px;\n            max-width: 90vw;\n            box-shadow: 0 2px 8px rgba(0,0,0,0.13);\n            line-height: 1.4;\n        }\n        .metric-info-tooltip code {\n            display: block;\n            margin-top: 5px;\n            padding: 6px 8px;\n            background: #f8f8f8;\n            border: 1px solid #e8e8e8;\n            border-radius: 4px;\n      
      font-size: 0.73rem;\n            white-space: pre-wrap;\n            word-break: break-all;\n            color: #333;\n            line-height: 1.5;\n        }\n        .metric-info-tooltip code .kw { color: #8700af; font-weight: 600; }\n        .metric-info-tooltip code .fn { color: #005f87; }\n        .metric-info-tooltip code .st { color: #4e9a06; }\n        .metric-info-tooltip code .cm { color: #8f8f8f; font-style: italic; }\n        .metric-info-tooltip code .nb { color: #b07800; }\n\n        /* Collapsible detail table rows */\n        .details-table tr.collapsible-parent th {\n            cursor: pointer;\n            user-select: none;\n        }\n        .details-table tr.collapsible-parent th .toggle-arrow {\n            display: inline-block;\n            width: 1em;\n            font-size: 0.7em;\n            transition: transform 0.15s;\n        }\n        .details-table tr.collapsible-parent.collapsed th .toggle-arrow {\n            transform: rotate(-90deg);\n        }\n        .details-table tr.collapsed-child {\n            display: none;\n        }\n\n        /* Loading */\n        .loading-overlay {\n            position: fixed;\n            top: 0; left: 0; right: 0; bottom: 0;\n            background: rgba(248, 249, 250, 0.9);\n            display: flex;\n            justify-content: center;\n            align-items: center;\n            z-index: 1000;\n        }\n        .loading-overlay .spinner-border { color: #0d6efd; }\n    </style>\n</head>\n<body>\n    <nav class=\"navbar\">\n        <div class=\"container-fluid\">\n            <a class=\"navbar-brand\" href=\"#\" onclick=\"window.location.href = window.location.pathname; return false;\">OpenVM Metrics Viewer</a>\n            <span id=\"versionBadge\" style=\"display:none; font-size:0.75rem; padding:2px 8px; border-radius:4px; background:#e9ecef; color:#495057; font-weight:600; margin-left:0.5rem\"></span>\n            <span class=\"data-source\" id=\"dataSourceLabel\"></span>\n   
     </div>\n    </nav>\n\n    <div id=\"uploadSection\">\n        <div class=\"upload-container\">\n            <div class=\"dropzone\" id=\"dropzone\">\n                <h4>Drop metrics JSON here</h4>\n                <p class=\"text-muted\">or click to select a file</p>\n                <input type=\"file\" id=\"fileInput\" accept=\".json\" style=\"display:none\">\n            </div>\n            <div class=\"format-hint\">Supports combined metrics JSON and raw metrics JSON with <code>counter</code> + <code>gauge</code>.</div>\n            <div class=\"url-input-group input-group\">\n                <input type=\"text\" class=\"form-control\" id=\"urlInput\"\n                       placeholder=\"Or paste a URL to metrics JSON...\">\n                <button class=\"btn btn-load\" id=\"urlLoadBtn\">Load</button>\n            </div>\n        </div>\n    </div>\n\n    <div id=\"appSection\" class=\"container-fluid mt-3\">\n        <div class=\"chart-row\" style=\"gap:1rem; align-items:start\">\n            <div class=\"chart-col-left\" style=\"gap:1rem\">\n                <div class=\"pane\">\n                    <h5>Summary Table</h5>\n                    <div style=\"overflow-x:auto\">\n                        <table class=\"summary-table\" style=\"width:100%\" id=\"summaryTable\">\n                            <thead><tr></tr></thead>\n                            <tbody></tbody>\n                        </table>\n                    </div>\n                </div>\n                <div class=\"pane\">\n                    <div style=\"display:flex; align-items:center; gap:0.5rem; margin-bottom:0.75rem\">\n                        <h5 style=\"margin:0\">Proof Time Breakdown</h5>\n                        <ul class=\"nav nav-tabs\" style=\"margin:0; border:none; margin-left:auto\">\n                            <li class=\"nav-item\">\n                                <a class=\"nav-link active\" href=\"#\" id=\"tabStacked\" onclick=\"switchChartTab('stacked'); return 
false;\">Stacked</a>\n                            </li>\n                            <li class=\"nav-item\">\n                                <a class=\"nav-link\" href=\"#\" id=\"tabGrouped\" onclick=\"switchChartTab('grouped'); return false;\">By Component</a>\n                            </li>\n                        </ul>\n                    </div>\n                    <div class=\"pane-body\">\n                        <div id=\"barChart\"></div>\n                    </div>\n                </div>\n            </div>\n            <div class=\"chart-col-right\">\n                <div class=\"pane\">\n                    <h5>Experiment Details <span id=\"pieRunName\" class=\"text-muted\" style=\"font-size:0.8rem\"></span></h5>\n                    <div class=\"pane-body\">\n                        <div id=\"detailsSection\">\n                            <div class=\"pie-placeholder\">Select an experiment to view details</div>\n                        </div>\n                    </div>\n                </div>\n            </div>\n        </div>\n    </div>\n\n    <div class=\"loading-overlay\" id=\"loadingOverlay\" style=\"display:none\">\n        <div class=\"spinner-border\" role=\"status\">\n            <span class=\"visually-hidden\">Loading...</span>\n        </div>\n    </div>\n\n<script>\n// ============================================================\n// State\n// ============================================================\nlet combinedData = null;\nlet allMetrics = [];\nlet selectedRunName = null;\nlet baselineRunName = null;\nlet currentChartTab = 'stacked';\nlet detectedOpenVmVersion = null; // 1 or 2\n\n// ============================================================\n// Data Shape Validation\n// ============================================================\nfunction assert(condition, message) {\n    if (!condition) throw new Error(message);\n}\n\nfunction isObject(value) {\n    return value !== null && typeof value === 'object' && 
!Array.isArray(value);\n}\n\nfunction hasRawMetricsShape(value) {\n    return isObject(value) && Object.prototype.hasOwnProperty.call(value, 'counter')\n        && Object.prototype.hasOwnProperty.call(value, 'gauge');\n}\n\nfunction assertMetricEntries(entries, fieldName, context) {\n    assert(Array.isArray(entries), `${context}.${fieldName} must be an array.`);\n    entries.forEach((entry, idx) => {\n        const entryContext = `${context}.${fieldName}[${idx}]`;\n        assert(isObject(entry), `${entryContext} must be an object.`);\n        assert(Array.isArray(entry.labels), `${entryContext}.labels must be an array.`);\n        entry.labels.forEach((label, labelIdx) => {\n            assert(Array.isArray(label) && label.length === 2, `${entryContext}.labels[${labelIdx}] must be a [key, value] pair.`);\n        });\n        assert(typeof entry.metric === 'string', `${entryContext}.metric must be a string.`);\n        assert(typeof entry.value === 'string' || typeof entry.value === 'number', `${entryContext}.value must be a string or number.`);\n    });\n}\n\nfunction assertRawMetricsShape(metricsJson, context) {\n    assert(isObject(metricsJson), `${context} must be a JSON object.`);\n    assert(hasRawMetricsShape(metricsJson), `${context} must contain both \"counter\" and \"gauge\" fields.`);\n    assertMetricEntries(metricsJson.counter, 'counter', context);\n    assertMetricEntries(metricsJson.gauge, 'gauge', context);\n}\n\nfunction inferRawRunName(source) {\n    if (!source) return 'experiment';\n\n    const sourceName = source.trim().split('?')[0].split('#')[0];\n    if (/^metrics\\.json$/i.test(sourceName)) return 'experiment';\n\n    try {\n        const url = new URL(source, window.location.href);\n        const parts = url.pathname.split('/').filter(Boolean);\n        const fileName = parts[parts.length - 1] || '';\n        const parentName = parts[parts.length - 2] || '';\n        if (fileName === 'metrics.json' && parentName) return parentName;\n     
   if (fileName) return fileName.replace(/\\.json$/i, '') || 'experiment';\n    } catch (_) {\n        const fileName = source.split('/').pop() || source;\n        if (/^metrics\\.json$/i.test(fileName)) return 'experiment';\n        return fileName.replace(/\\.json$/i, '') || 'experiment';\n    }\n\n    return 'experiment';\n}\n\nfunction normalizeMetricsData(data, sourceLabel) {\n    assert(isObject(data), 'Metrics JSON must be a JSON object.');\n\n    if (hasRawMetricsShape(data)) {\n        const runName = inferRawRunName(sourceLabel);\n        assertRawMetricsShape(data, `metrics.${runName}`);\n        return { [runName]: data };\n    }\n\n    const entries = Object.entries(data);\n    assert(entries.length > 0, 'Combined metrics JSON must contain at least one experiment.');\n    entries.forEach(([runName, metricsJson]) => assertRawMetricsShape(metricsJson, `metrics.${runName}`));\n    return data;\n}\n\n// ============================================================\n// OpenVM Version Detection\n// ============================================================\n\n// Detects whether metrics come from OpenVM 1 or OpenVM 2.\n// OpenVM 2 uses LogUp GKR (metric names containing 'logup_gkr').\nfunction detectOpenVmVersion(combinedData) {\n    const firstRun = Object.values(combinedData)[0];\n    if (!firstRun) return 1;\n    const allMetricNames = [\n        ...firstRun.counter.map(e => e.metric),\n        ...firstRun.gauge.map(e => e.metric),\n    ];\n    if (allMetricNames.some(n => n.includes('logup_gkr'))) return 2;\n    return 1;\n}\n\n// ============================================================\n// Data Processing — ports of Python logic\n//\n// These functions compute all displayed numbers from raw JSON.\n// See CLAUDE.md for the mapping to Python source functions.\n// ============================================================\n\n// Port of metrics_utils.py:load_metrics_dataframes\n// Flattens counter+gauge arrays into flat objects keyed by label 
names,\n// then splits into app/leaf/internal/compression by the \"group\" label prefix.\nfunction loadMetricsDataframes(metricsJson) {\n    const entries = [...metricsJson.counter, ...metricsJson.gauge].map(c => {\n        const obj = Object.fromEntries(c.labels);\n        obj.metric = c.metric;\n        obj.value = c.value;\n        return obj;\n    });\n\n    let app = entries.filter(e => (e.group || '').startsWith('app_proof'));\n    if (app.length === 0) {\n        app = entries.filter(e => (e.group || '').startsWith('reth'));\n    }\n    const leaf = entries.filter(e => (e.group || '').startsWith('leaf'));\n    const internal = entries.filter(e => (e.group || '').startsWith('internal'));\n    const compression = entries.filter(e => (e.group || '') === 'compression');\n\n    return { app, leaf, internal, compression };\n}\n\n// Port of metrics_utils.py:is_normal_instruction_air\n// Returns true for standard RISC-V instruction AIRs (VmAirWrapper with 4 limbs),\n// false for precompiles (FieldExpressionCoreAir, non-4-limb, or non-VmAirWrapper).\nfunction isNormalInstructionAir(airName) {\n    const match = (airName || '').match(/^VmAirWrapper<[^,]+,\\s*([^>]+?)(?:<(\\d+)(?:,\\s*\\d+)*>)?\\s*>$/);\n    if (!match) return false;\n    const coreName = match[1];\n    const numLimbs = match[2];\n    if (coreName === 'FieldExpressionCoreAir') return false;\n    if (numLimbs && parseInt(numLimbs) !== 4) return false;\n    return true;\n}\n\n// Sum all values for entries matching a given metric name.\nfunction getMetric(entries, metricName) {\n    return entries.filter(e => e.metric === metricName)\n                  .reduce((sum, e) => sum + parseFloat(e.value), 0);\n}\n\n// Get the value of a metric that must appear exactly once.\nfunction getUniqueMetric(entries, metricName) {\n    const matches = entries.filter(e => e.metric === metricName);\n    if (matches.length !== 1) throw new Error(`Expected exactly 1 entry for \"${metricName}\", found ${matches.length}`);\n   
 return parseFloat(matches[0].value);\n}\n\n// Port of basic_metrics.py:extract_metrics\n// Computes all summary metrics for a single run from its raw JSON.\n// Each metric maps directly to a getMetric() call on a specific group + metric name.\nfunction extractMetrics(runName, metricsJson) {\n    const { app, leaf, internal, compression } = loadMetricsDataframes(metricsJson);\n    const allEntries = [...metricsJson.counter, ...metricsJson.gauge].map(c => {\n        const obj = Object.fromEntries(c.labels);\n        obj.metric = c.metric;\n        obj.value = c.value;\n        return obj;\n    });\n    const m = {};\n\n    m.name = runName;\n\n    // Classify app AIRs into powdr / normal instruction / openvm precompile\n    const powdrAir = app.filter(e => (e.air_name || '').startsWith('PowdrAir'));\n    const nonPowdrAir = app.filter(e => !(e.air_name || '').startsWith('PowdrAir'));\n    const normalInstructionAir = nonPowdrAir.filter(e => isNormalInstructionAir(e.air_name || ''));\n    const openVmPrecompileAir = nonPowdrAir.filter(e => !isNormalInstructionAir(e.air_name || ''));\n\n    // Proof times by phase.\n    // execute_metered runs *before* segment proving and is outside per-segment\n    // total_proof_time_ms. 
We report it as a separate top-level phase.\n    m.execute_metered_time_ms = getMetric(app, 'execute_metered_time_ms');\n    m.app_proof_time_ms = getMetric(app, 'total_proof_time_ms');\n    m.leaf_proof_time_ms = getMetric(leaf, 'total_proof_time_ms');\n    m.inner_recursion_proof_time_ms = getMetric(internal, 'total_proof_time_ms');\n    m.compression_proof_time_ms = getMetric(compression, 'total_proof_time_ms');\n    m.total_proof_time_ms = m.execute_metered_time_ms + m.app_proof_time_ms + m.leaf_proof_time_ms\n        + m.inner_recursion_proof_time_ms + m.compression_proof_time_ms;\n\n    // STARK prove time excluding trace generation\n    m.app_proof_time_excluding_trace_ms = getMetric(app, 'stark_prove_excluding_trace_time_ms');\n    m.leaf_proof_time_excluding_trace_ms = getMetric(leaf, 'stark_prove_excluding_trace_time_ms');\n    m.inner_recursion_proof_time_excluding_trace_ms = getMetric(internal, 'stark_prove_excluding_trace_time_ms');\n    m.compression_proof_time_excluding_trace_ms = getMetric(compression, 'stark_prove_excluding_trace_time_ms');\n    m.total_proof_time_excluding_trace_ms = m.app_proof_time_excluding_trace_ms + m.leaf_proof_time_excluding_trace_ms\n        + m.inner_recursion_proof_time_excluding_trace_ms + m.compression_proof_time_excluding_trace_ms;\n\n    // Column counts (summed over all segments)\n    const mainCols = getMetric(app, 'main_cols');\n    const prepCols = getMetric(app, 'prep_cols');\n    const permCols = getMetric(app, 'perm_cols');\n    m.app_proof_cols = mainCols + prepCols + permCols;\n\n    m.num_segments = app.filter(e => 'segment' in e).reduce((max, e) => Math.max(max, parseInt(e.segment)), -1) + 1;\n    m.num_air_instances = app.filter(e => e.metric === 'rows').length;\n\n    m.app_proof_cells = getMetric(app, 'total_cells');\n    m.app_proof_cells_used = getMetric(app, 'total_cells_used'); // V1 only, null-ish in V2\n    m.app_execute_preflight_time_ms = getMetric(app, 'execute_preflight_time_ms');\n    
m.app_trace_gen_time_ms = getMetric(app, 'trace_gen_time_ms');\n\n    // V2: STARK sub-components (prover.* metrics)\n    m.app_trace_commit_time_ms = getMetric(app, 'prover.main_trace_commit_time_ms');\n    m.app_rap_constraints_time_ms = getMetric(app, 'prover.rap_constraints_time_ms');\n    m.app_openings_time_ms = getMetric(app, 'prover.openings_time_ms');\n    m.app_stark_other_ms = m.app_proof_time_excluding_trace_ms\n        - m.app_trace_commit_time_ms - m.app_rap_constraints_time_ms - m.app_openings_time_ms;\n\n    // V2: rap_constraints sub-components (additive: logup_gkr + round0 + mle_rounds = rap)\n    m.app_rap_logup_gkr_time_ms = getMetric(app, 'prover.rap_constraints.logup_gkr_time_ms');\n    m.app_rap_round0_time_ms = getMetric(app, 'prover.rap_constraints.round0_time_ms');\n    m.app_rap_mle_rounds_time_ms = getMetric(app, 'prover.rap_constraints.mle_rounds_time_ms');\n    m.app_rap_other_ms = m.app_rap_constraints_time_ms\n        - m.app_rap_logup_gkr_time_ms - m.app_rap_round0_time_ms - m.app_rap_mle_rounds_time_ms;\n\n    // V2: openings sub-components (additive: whir + stacked_reduction = openings)\n    m.app_openings_whir_time_ms = getMetric(app, 'prover.openings.whir_time_ms');\n    m.app_openings_stacked_reduction_time_ms = getMetric(app, 'prover.openings.stacked_reduction_time_ms');\n    m.app_openings_other_ms = m.app_openings_time_ms\n        - m.app_openings_whir_time_ms - m.app_openings_stacked_reduction_time_ms;\n\n    // V2: additional per-segment sub-components\n    m.app_set_initial_memory_time_ms = getMetric(app, 'set_initial_memory_time_ms');\n\n    // \"App other\" = app proof time minus all known sub-components.\n    // execute_metered is a separate top-level phase, not inside app_proof_time_ms.\n    // Can be negative when sub-timers overlap (parallelism). 
The raw value\n    // is stored for the detail table; the bar chart clamps to 0.\n    m.app_other_ms = m.app_proof_time_ms\n        - m.app_proof_time_excluding_trace_ms\n        - m.app_execute_preflight_time_ms\n        - m.app_trace_gen_time_ms\n        - m.app_set_initial_memory_time_ms;\n\n    // Cell ratios by AIR classification\n    const normalCells = getMetric(normalInstructionAir, 'cells');\n    const openVmCells = getMetric(openVmPrecompileAir, 'cells');\n    const powdrCells = getMetric(powdrAir, 'cells');\n\n    m.normal_instruction_ratio = m.app_proof_cells > 0 ? normalCells / m.app_proof_cells : 0;\n    m.openvm_precompile_ratio = m.app_proof_cells > 0 ? openVmCells / m.app_proof_cells : 0;\n    m.powdr_ratio = m.app_proof_cells > 0 ? powdrCells / m.app_proof_cells : 0;\n    m.powdr_rows = getMetric(powdrAir, 'rows');\n\n    // Constraints and bus interactions — per-AIR metrics (no group/segment labels).\n    // Since they're per-AIR, we weight by segment count (for totals) or row count (for instances).\n    const hasConstraints = allEntries.some(e => e.metric === 'constraints');\n    const hasInteractions = allEntries.some(e => e.metric === 'interactions');\n\n    // Build per-AIR weights from app rows entries:\n    //   segmentsByAppAir[key] = number of segments the AIR appears in\n    //   rowsByAppAir[key] = total rows across all segments\n    // We key by \"air_id:air_name\" because air_id alone is only unique within a\n    // proving phase — different phases (app, leaf, compression) reuse the same\n    // air_id for unrelated AIRs.\n    const segmentsByAppAir = {};\n    const rowsByAppAir = {};\n    app.filter(e => e.metric === 'rows').forEach(e => {\n        const key = e.air_id + ':' + (e.air_name || '');\n        segmentsByAppAir[key] = (segmentsByAppAir[key] || 0) + 1;\n        rowsByAppAir[key] = (rowsByAppAir[key] || 0) + parseFloat(e.value);\n    });\n\n    // weighted_sum: for each all_entries entry matching metricName, multiply by 
weight\n    function weightedSum(metricName, weights) {\n        return allEntries.filter(e => e.metric === metricName)\n            .reduce((sum, e) => sum + parseFloat(e.value) * (weights[e.air_id + ':' + (e.air_name || '')] || 0), 0);\n    }\n\n    m.constraints = hasConstraints ? weightedSum('constraints', segmentsByAppAir) : null;\n    m.bus_interactions = hasInteractions ? weightedSum('interactions', segmentsByAppAir) : null;\n    m.constraint_instances = hasConstraints ? weightedSum('constraints', rowsByAppAir) : null;\n    m.bus_interaction_messages = hasInteractions ? weightedSum('interactions', rowsByAppAir) : null;\n\n    return m;\n}\n\n// Port of plot_trace_cells.py:compute_cells_by_air\n// Aggregates cells by air_name from the app group, sorts descending,\n// and groups AIRs below 1.5% of total into \"Other\".\nfunction computeCellsByAir(metricsJson) {\n    const { app } = loadMetricsDataframes(metricsJson);\n    const cellEntries = app.filter(e => e.metric === 'cells');\n\n    const cellsByAir = {};\n    cellEntries.forEach(e => {\n        const name = e.air_name;\n        cellsByAir[name] = (cellsByAir[name] || 0) + parseFloat(e.value);\n    });\n\n    const sorted = Object.entries(cellsByAir).sort((a, b) => b[1] - a[1]);\n    const total = sorted.reduce((s, [, v]) => s + v, 0);\n\n    const threshold = total * 0.015;\n    const powdrItems = [];\n    const otherItems = [];\n    let otherSum = 0;\n    for (const [name, val] of sorted) {\n        if (val < threshold) {\n            otherSum += val;\n        } else if (name.startsWith('PowdrAir')) {\n            powdrItems.push({ name, value: val });\n        } else {\n            otherItems.push({ name, value: val });\n        }\n    }\n    if (otherSum > 0) {\n        otherItems.push({ name: 'Other', value: otherSum });\n    }\n    // PowdrAir items first, then the rest sorted by size\n    const items = [...powdrItems, ...otherItems];\n\n    return { items, total };\n}\n\n// 
============================================================\n// Python syntax highlighting for metric info tooltips\n// ============================================================\nfunction highlightPython(code) {\n    if (!code) return '';\n    const esc = s => s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');\n    const KW = new Set(['def','for','in','if','and','or','not','return','else','import','from',\n        'True','False','None','lambda','with','as','class','is','raise','try','except',\n        'finally','while','break','continue','pass','yield','del','assert','elif']);\n    const FN = new Set(['sum','max','min','int','float','str','len','set','dict','list','range',\n        'print','sorted','any','all','map','filter','zip','enumerate','isinstance','getattr',\n        'hasattr','type','abs','round','open','startswith']);\n    // Single-pass tokenizer: match comments, strings, words, numbers, or any other char\n    const TOKEN = /#[^\\n]*|'(?:[^'\\\\]|\\\\.)*'|\"(?:[^\"\\\\]|\\\\.)*\"|\\b[a-zA-Z_]\\w*\\b|\\b\\d+\\.?\\d*\\b|./gs;\n    let out = '';\n    let m;\n    while ((m = TOKEN.exec(code)) !== null) {\n        const t = m[0];\n        if (t.startsWith('#')) { out += '<span class=\"cm\">' + esc(t) + '</span>'; }\n        else if (t.startsWith(\"'\") || t.startsWith('\"')) { out += '<span class=\"st\">' + esc(t) + '</span>'; }\n        else if (KW.has(t)) { out += '<span class=\"kw\">' + esc(t) + '</span>'; }\n        else if (FN.has(t)) { out += '<span class=\"fn\">' + esc(t) + '</span>'; }\n        else if (/^\\d/.test(t)) { out += '<span class=\"nb\">' + esc(t) + '</span>'; }\n        else { out += esc(t); }\n    }\n    return out;\n}\n\n// ============================================================\n// Metric Descriptions (single source of truth)\n//\n// Each key matches a field produced by extractMetrics().\n//   `desc` — human-readable explanation (use `descV1`/`descV2` for version-specific text)\n//   `code` — Python snippet 
showing how this metric is computed\n//            (use `codeV1`/`codeV2` for version-specific code)\n//\n// Code snippets assume pre-filtered entry lists (app, leaf, internal, compression)\n// and a helper:\n//   def sum_metric(entries, metric):\n//       return sum(float(e['value']) for e in entries if e['metric'] == metric)\n// ============================================================\nconst METRIC_INFO = {\n    // --- Basic Stats ---\n    num_segments: {\n        desc: 'Number of proving segments the app proof is split into.',\n        code: `max(int(e['segment']) for e in app_entries) + 1`,\n    },\n    num_air_instances: {\n        desc: 'Total number of AIR instances across all segments (each AIR in each segment counts as one instance).',\n        code: `len([e for e in app if e['metric'] == 'rows'])`,\n    },\n    app_proof_cols: {\n        desc: 'Total column count in the app proof, summed over all AIRs and segments (main + preprocessed + permutation trace columns).',\n        code: `sum_metric(app, 'main_cols')\n+ sum_metric(app, 'prep_cols')\n+ sum_metric(app, 'perm_cols')`,\n    },\n    app_proof_cells: {\n        descV1: 'Total trace cells in the app proof, summed over all segments. Includes main, preprocessed, and permutation cells. Also includes padding.',\n        descV2: 'Total trace cells in the app proof, summed over all segments. Includes main, preprocessed, and permutation cells.',\n        code: \"sum_metric(app, 'total_cells')\",\n    },\n    app_proof_cells_used: {\n        desc: 'Trace cells actually used before padding. Includes main and permutation cells, NOT preprocessed cells. 
The percentage shows utilization (used / total).',\n        code: \"sum_metric(app, 'total_cells_used')\",\n    },\n    constraints: {\n        desc: 'Total constraint polynomials across all app proof AIRs and segments.',\n        code: `segments_by_air = {}  # air -> segment count, from app rows\nweighted_sum(\"constraints\", segments_by_air)`,\n    },\n    constraint_instances: {\n        desc: 'Total constraint evaluations: each AIR\\'s constraint count weighted by its total row count across all segments.',\n        code: `rows_by_air = {}  # air -> total rows, from app rows\nweighted_sum(\"constraints\", rows_by_air)`,\n    },\n    bus_interactions: {\n        desc: 'Total bus interaction definitions across all app proof AIRs and segments.',\n        code: `weighted_sum(\"interactions\", segments_by_air)`,\n    },\n    bus_interaction_messages: {\n        desc: 'Total bus messages: each AIR\\'s interaction count weighted by its total row count across all segments.',\n        code: `weighted_sum(\"interactions\", rows_by_air)`,\n    },\n\n    // --- Proof Time ---\n    execute_metered_time_ms: {\n        desc: 'Metered execution: instrumented run that counts executed instructions per AIR to guide segment splitting. Runs before segment proving, reported as a separate top-level phase.',\n        code: \"sum_metric(app, 'execute_metered_time_ms')\",\n    },\n    app_proof_time_ms: {\n        desc: 'Sum of per-segment proving times. 
Does not include metered execution (which is a separate phase).',\n        code: \"sum_metric(app, 'total_proof_time_ms')\",\n    },\n    app_proof_time_excluding_trace_ms: {\n        descV1: 'STARK prover time excluding trace generation: commitments, quotient computation, FRI opening.',\n        descV2: 'STARK prover time excluding trace generation: trace commit, constraint evaluation, LogUp GKR, and WHIR opening proofs.',\n        code: \"sum_metric(app, 'stark_prove_excluding_trace_time_ms')\",\n    },\n    app_rap_constraints_time_ms: {\n        desc: 'Time for RAP constraint evaluation, including zerocheck and the LogUp GKR protocol for bus interactions.',\n        code: \"sum_metric(app, 'prover.rap_constraints_time_ms')\",\n    },\n    app_rap_logup_gkr_time_ms: {\n        desc: 'Time for the LogUp GKR protocol, which proves bus interaction correctness via a GKR-based sumcheck.',\n        code: \"sum_metric(app, 'prover.rap_constraints.logup_gkr_time_ms')\",\n    },\n    app_rap_round0_time_ms: {\n        desc: 'Time for Round 0 of the constraint evaluation sumcheck (initial round before MLE rounds).',\n        code: \"sum_metric(app, 'prover.rap_constraints.round0_time_ms')\",\n    },\n    app_rap_mle_rounds_time_ms: {\n        desc: 'Time for MLE (multilinear extension) rounds of the constraint evaluation sumcheck.',\n        code: \"sum_metric(app, 'prover.rap_constraints.mle_rounds_time_ms')\",\n    },\n    app_rap_other_ms: {\n        desc: 'Residual: constraint evaluation time minus LogUp GKR, Round 0, and MLE rounds.',\n        code: `rap_constraints - logup_gkr - round0 - mle_rounds`,\n    },\n    app_openings_time_ms: {\n        descV1: 'Time for polynomial opening proofs (FRI-based PCS opening).',\n        descV2: 'Time for polynomial opening proofs using WHIR (a FRI-like protocol) with stacked polynomial reduction.',\n        code: \"sum_metric(app, 'prover.openings_time_ms')\",\n    },\n    app_openings_whir_time_ms: {\n        desc: 'Time for 
the WHIR polynomial commitment opening protocol.',\n        code: \"sum_metric(app, 'prover.openings.whir_time_ms')\",\n    },\n    app_openings_stacked_reduction_time_ms: {\n        desc: 'Time for stacked polynomial reduction, which batches multiple polynomial openings before WHIR.',\n        code: \"sum_metric(app, 'prover.openings.stacked_reduction_time_ms')\",\n    },\n    app_openings_other_ms: {\n        desc: 'Residual: openings time minus WHIR and stacked reduction.',\n        code: `openings - whir - stacked_reduction`,\n    },\n    app_trace_commit_time_ms: {\n        desc: 'Time for committing the main execution trace via Merkle tree construction.',\n        code: \"sum_metric(app, 'prover.main_trace_commit_time_ms')\",\n    },\n    app_stark_other_ms: {\n        desc: 'Residual: STARK time minus constraint evaluation, openings, and trace commit.',\n        code: `stark_excl_trace - constraints - openings - trace_commit`,\n    },\n    app_execute_preflight_time_ms: {\n        desc: 'Preflight execution: a fast initial pass that determines segment boundaries and per-AIR trace heights without generating the full trace.',\n        code: \"sum_metric(app, 'execute_preflight_time_ms')\",\n    },\n    app_set_initial_memory_time_ms: {\n        desc: 'Time to initialize the memory state at the start of each proving segment (V2 only).',\n        code: \"sum_metric(app, 'set_initial_memory_time_ms')\",\n    },\n    app_trace_gen_time_ms: {\n        desc: 'Trace generation: re-executes the program to produce the full execution trace (witness) consumed by the STARK prover.',\n        code: \"sum_metric(app, 'trace_gen_time_ms')\",\n    },\n    app_other_ms: {\n        desc: 'Residual: app proof time minus all known sub-components. Metered execution is a separate top-level phase. 
May be negative when sub-timers overlap due to parallelism.',\n        codeV1: `app_proof - stark_excl_trace - preflight - trace_gen`,\n        codeV2: `app_proof - stark_excl_trace - preflight\n  - trace_gen - set_initial_memory`,\n    },\n    leaf_proof_time_ms: {\n        descV1: 'Total proving time for the leaf aggregation layer (leaf_0, leaf_1, ...). Aggregates app proof segments into a smaller number of proofs.',\n        codeV1: \"sum_metric(leaf, 'total_proof_time_ms')\",\n        descV2: 'Total proving time for the leaf aggregation layer. Aggregates app proof segments into a smaller number of proofs.',\n        codeV2: \"sum_metric(leaf, 'total_proof_time_ms')\",\n    },\n    inner_recursion_proof_time_ms: {\n        descV1: 'Total proving time for inner recursion layers (internal_0, internal_1, ...). Recursively aggregates leaf proofs.',\n        codeV1: \"sum_metric(internal, 'total_proof_time_ms')\",\n        descV2: 'Total proving time for inner recursion layers (internal_for_leaf, internal_recursive.*). Recursively aggregates leaf proofs.',\n        codeV2: \"sum_metric(internal, 'total_proof_time_ms')\",\n    },\n    compression_proof_time_ms: {\n        desc: 'Total proving time for the compression layer (V2 only). Produces a compact final proof from the recursion output.',\n        code: \"sum_metric(compression, 'total_proof_time_ms')\",\n    },\n    total_proof_time_ms: {\n        descV1: 'Sum of all proving phases: metered execution + app proof + leaf recursion + inner recursion.',\n        descV2: 'Sum of all proving phases: metered execution + app proof + leaf recursion + inner recursion + compression.',\n        codeV1: `metered + app + leaf + inner_recursion`,\n        codeV2: `metered + app + leaf + inner_recursion + compression`,\n    },\n\n    // --- Cell Distribution ---\n    powdr_ratio: {\n        desc: 'Fraction of app proof trace cells belonging to powdr autoprecompile AIRs (air_name starts with \"PowdrAir\"). 
These are synthesized circuits that replace sequences of basic instructions.',\n        code: `sum(cells for air if air_name.startswith('PowdrAir'))\n/ total_cells`,\n    },\n    normal_instruction_ratio: {\n        desc: 'Fraction of app proof trace cells belonging to standard RISC-V instruction AIRs: VmAirWrapper types with 4 limbs, excluding FieldExpressionCoreAir.',\n        code: `sum(cells for air if is_normal_instruction_air(air_name))\n/ total_cells`,\n    },\n    openvm_precompile_ratio: {\n        desc: 'Fraction of app proof trace cells belonging to OpenVM built-in precompile AIRs (neither powdr nor normal instruction AIRs). Includes SHA-256, Keccak, ECC, etc.',\n        code: `sum(cells for air if not powdr and not normal)\n/ total_cells`,\n    },\n};\n\n// ============================================================\n// Constants — proof time breakdown components\n// Colors and order match basic_metrics.py:plot()\n// Version-dependent: V2 adds compression phase.\n// ============================================================\nconst COMPONENTS_V1 = [\n    { key: 'inner_recursion_proof_time_ms', label: 'Inner recursion', color: '#9b3e00' },\n    { key: 'leaf_proof_time_ms', label: 'Leaf recursion', color: '#d69600' },\n    { key: 'execute_metered_time_ms', label: 'Metered execution', color: '#c6dbef' },\n    { key: 'app_proof_time_excluding_trace_ms', label: 'App STARK (excl. 
trace)', color: '#1f77b4' },\n    { key: 'app_trace_gen_time_ms', label: 'App trace gen', color: '#6baed6' },\n    { key: 'app_execute_preflight_time_ms', label: 'App preflight', color: '#9ecae1' },\n    { key: 'app_other_ms', label: 'App other', color: '#08519c' },\n];\n\nconst COMPONENTS_V2 = [\n    { key: 'compression_proof_time_ms', label: 'Compression', color: '#7b2d8e' },\n    { key: 'inner_recursion_proof_time_ms', label: 'Inner recursion', color: '#9b3e00' },\n    { key: 'leaf_proof_time_ms', label: 'Leaf recursion', color: '#d69600' },\n    { key: 'execute_metered_time_ms', label: 'Metered execution', color: '#dadaeb' },\n    { key: 'app_proof_time_excluding_trace_ms', label: 'App STARK (excl. trace)', color: '#1f77b4' },\n    { key: 'app_trace_gen_time_ms', label: 'App trace gen', color: '#9ecae1' },\n    { key: 'app_set_initial_memory_time_ms', label: 'App set memory', color: '#a1d99b' },\n    { key: 'app_execute_preflight_time_ms', label: 'App preflight', color: '#c6dbef' },\n    { key: 'app_other_ms', label: 'App other', color: '#969696' },\n];\n\nfunction getComponents() {\n    return detectedOpenVmVersion === 2 ? COMPONENTS_V2 : COMPONENTS_V1;\n}\n\n// ============================================================\n// Format helpers\n// ============================================================\n// Data source display — matches autoprecompile-analyzer pattern\nfunction shortenUrl(urlText) {\n    if (urlText.length <= 70) return urlText;\n    return urlText.slice(0, 35) + '...' 
+ urlText.slice(-35);\n}\n\nfunction updateDataSourceDisplay(source) {\n    const el = document.getElementById('dataSourceLabel');\n    if (source) {\n        const displayText = shortenUrl(source.replace(/^https?:\\/\\//i, ''));\n        el.innerHTML = `Data: <a href=\"${source}\" target=\"_blank\" rel=\"noopener noreferrer\">${displayText}</a>`\n            + ` <svg class=\"copy-icon\" onclick=\"navigator.clipboard.writeText('${source.replace(/'/g, \"\\\\'\")}').then(() => { this.classList.add('copied'); this.setAttribute('title','Copied!'); setTimeout(() => { this.classList.remove('copied'); this.setAttribute('title','Copy data URL'); }, 2000); })\" `\n            + `title=\"Copy data URL\" width=\"16\" height=\"16\" viewBox=\"0 0 16 16\" fill=\"currentColor\" style=\"margin-left:0.4rem; cursor:pointer; color:#888; vertical-align:middle;\">`\n            + `<path class=\"copy-default\" d=\"M0 6.75C0 5.784.784 5 1.75 5h1.5a.75.75 0 010 1.5h-1.5a.25.25 0 00-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 00.25-.25v-1.5a.75.75 0 011.5 0v1.5A1.75 1.75 0 019.25 16h-7.5A1.75 1.75 0 010 14.25z\"/>`\n            + `<path class=\"copy-default\" d=\"M5 1.75C5 .784 5.784 0 6.75 0h7.5C15.216 0 16 .784 16 1.75v7.5A1.75 1.75 0 0114.25 11h-7.5A1.75 1.75 0 015 9.25zm1.75-.25a.25.25 0 00-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 00.25-.25v-7.5a.25.25 0 00-.25-.25z\"/>`\n            + `<path class=\"copy-check\" d=\"M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z\"/>`\n            + `</svg>`;\n        el.style.display = 'inline';\n    } else {\n        el.innerHTML = '';\n        el.style.display = 'none';\n    }\n}\n\nfunction fmtSeconds(ms) { return (ms / 1000).toFixed(1) + 's'; }\nfunction fmtCells(n) {\n    if (n >= 1e9) return (n / 1e9).toFixed(2) + 'B';\n    if (n >= 1e6) return (n / 1e6).toFixed(1) + 'M';\n    return n.toLocaleString();\n}\nfunction fmtPct(ratio) { return (ratio * 100).toFixed(1) 
+ '%'; }\n\n// Format comparison vs baseline. \"Lower is better\" for all numeric stats.\nfunction fmtComparison(value, baselineValue) {\n    if (baselineValue === 0 || baselineValue == null || value == null) return '';\n    const ratio = value / baselineValue;\n    if (Math.abs(ratio - 1) < 0.005) return '<span style=\"color:#999; font-size:0.75rem\">1.00x →</span>';\n    if (ratio < 1) {\n        return `<span style=\"color:#198754; font-size:0.75rem\">${(1/ratio).toFixed(2)}x ↓</span>`;\n    } else {\n        return `<span style=\"color:#dc3545; font-size:0.75rem\">${ratio.toFixed(2)}x ↑</span>`;\n    }\n}\n\n// ============================================================\n// Chart Legend Helper\n// ============================================================\nfunction layoutLegend(svg, COMPONENTS, x, y, availableWidth) {\n    const legendRowHeight = 18;\n    const colWidth = 180;\n    const cols = Math.max(1, Math.floor(availableWidth / colWidth));\n    const rows = Math.ceil(COMPONENTS.length / cols);\n    const legend = svg.append('g').attr('transform', `translate(${x}, ${y})`);\n    COMPONENTS.forEach((c, i) => {\n        const col = i % cols;\n        const row = Math.floor(i / cols);\n        const lg = legend.append('g').attr('transform', `translate(${col * colWidth}, ${row * legendRowHeight})`);\n        lg.append('rect').attr('width', 12).attr('height', 12).attr('fill', c.color).attr('rx', 2);\n        lg.append('text').attr('x', 16).attr('y', 10).text(c.label)\n            .attr('fill', '#666').attr('font-size', '10px');\n    });\n    return rows * legendRowHeight;\n}\n\n// ============================================================\n// Stacked Bar Chart\n// ============================================================\nfunction createBarChart(metrics) {\n    const COMPONENTS = getComponents();\n    const container = d3.select('#barChart');\n    container.selectAll('*').remove();\n\n    const margin = { top: 30, right: 20, bottom: 140, left: 60 };\n    
const width = container.node().getBoundingClientRect().width - margin.left - margin.right;\n    const height = 420 - margin.top - margin.bottom;\n\n    const svg = container.append('svg')\n        .attr('width', width + margin.left + margin.right)\n        .attr('height', height + margin.top + margin.bottom);\n    const g = svg.append('g').attr('transform', `translate(${margin.left},${margin.top})`);\n\n    const runNames = metrics.map(m => m.name);\n\n    // Compute stacked data\n    const stackData = metrics.map(m => {\n        const d = { name: m.name };\n        COMPONENTS.forEach(c => { d[c.key] = m[c.key] / 1000; }); // ms -> s\n        return d;\n    });\n\n    const keys = COMPONENTS.map(c => c.key);\n    const stack = d3.stack().keys(keys);\n    const series = stack(stackData);\n\n    const maxY = d3.max(stackData, d => keys.reduce((s, k) => s + d[k], 0));\n\n    const x = d3.scaleBand().domain(runNames).range([0, width]).padding(0.3);\n    const y = d3.scaleLinear().domain([0, maxY * 1.08]).range([height, 0]);\n\n    // Grid\n    g.append('g').attr('class', 'grid')\n        .call(d3.axisLeft(y).tickSize(-width).tickFormat(''))\n        .selectAll('line').attr('stroke', '#dee2e6').attr('stroke-opacity', 0.7);\n    g.selectAll('.grid .domain').remove();\n\n    // Tooltip\n    const tooltip = d3.select('body').append('div').attr('class', 'chart-tooltip').style('display', 'none');\n\n    // Bars\n    const colorMap = {};\n    COMPONENTS.forEach(c => { colorMap[c.key] = c.color; });\n    const labelMap = {};\n    COMPONENTS.forEach(c => { labelMap[c.key] = c.label; });\n\n    series.forEach(s => {\n        g.selectAll(`.bar-${s.key}`)\n            .data(s)\n            .enter().append('rect')\n            .attr('class', d => `bar-group bar-${d.data.name}`)\n            .attr('x', d => x(d.data.name))\n            .attr('y', d => y(d[1]))\n            .attr('height', d => y(d[0]) - y(d[1]))\n            .attr('width', x.bandwidth())\n            .attr('fill', 
colorMap[s.key])\n            .attr('data-run', d => d.data.name)\n            .on('click', (event, d) => selectRun(d.data.name))\n            .on('mouseover', (event, d) => {\n                const val = (d[1] - d[0]).toFixed(1);\n                const total = keys.reduce((sum, k) => sum + d.data[k], 0);\n                const pct = total > 0 ? ((d[1] - d[0]) / total * 100).toFixed(1) : '0';\n                tooltip.style('display', 'block')\n                    .html(`<strong>${d.data.name}</strong><br>${labelMap[s.key]}: ${val}s (${pct}%)`);\n            })\n            .on('mousemove', event => {\n                tooltip.style('left', (event.pageX + 12) + 'px').style('top', (event.pageY - 20) + 'px');\n            })\n            .on('mouseout', () => tooltip.style('display', 'none'));\n    });\n\n    // Value labels inside segments — only show if segment is tall enough for text\n    const minSegmentPx = 18; // minimum pixel height to show a label\n    series.forEach(s => {\n        s.forEach(d => {\n            const val = d[1] - d[0];\n            const segmentPx = y(d[0]) - y(d[1]);\n            if (segmentPx < minSegmentPx) return;\n            const total = keys.reduce((sum, k) => sum + d.data[k], 0);\n            const pct = total > 0 ? 
(val / total * 100).toFixed(1) : '0';\n            g.append('text')\n                .attr('x', x(d.data.name) + x.bandwidth() / 2)\n                .attr('y', y((d[0] + d[1]) / 2))\n                .attr('text-anchor', 'middle')\n                .attr('dominant-baseline', 'middle')\n                .attr('fill', '#333')\n                .attr('font-size', '10px')\n                .attr('font-weight', 'bold')\n                .attr('pointer-events', 'none')\n                .text(`${val.toFixed(1)} (${pct}%)`);\n        });\n    });\n\n    // Total labels on top\n    stackData.forEach(d => {\n        const total = keys.reduce((s, k) => s + d[k], 0);\n        g.append('text')\n            .attr('x', x(d.name) + x.bandwidth() / 2)\n            .attr('y', y(total) - 5)\n            .attr('text-anchor', 'middle')\n            .attr('fill', '#333')\n            .attr('font-size', '11px')\n            .attr('font-weight', 'bold')\n            .text(`Total: ${total.toFixed(1)}s`);\n    });\n\n    // Axes\n    const xAxis = g.append('g').attr('transform', `translate(0,${height})`)\n        .call(d3.axisBottom(x));\n    xAxis.selectAll('text').attr('fill', '#333')\n        .attr('transform', 'rotate(-35)')\n        .attr('text-anchor', 'end')\n        .attr('dx', '-0.5em')\n        .attr('dy', '0.3em');\n    g.append('g')\n        .call(d3.axisLeft(y).tickFormat(d => d.toFixed(1) + 's'))\n        .selectAll('text').attr('fill', '#333');\n\n    // Style axis lines\n    svg.selectAll('.domain').attr('stroke', '#dee2e6');\n    svg.selectAll('.tick line').attr('stroke', '#dee2e6');\n\n    // Measure rotated x-axis label extent, place legend below\n    const xAxisBBox = xAxis.node().getBBox();\n    const legendY = margin.top + height + xAxisBBox.height + 15;\n    const legendHeight = layoutLegend(svg, COMPONENTS, margin.left, legendY, width);\n    svg.attr('height', legendY + legendHeight + 10);\n\n    updateBarSelection();\n}\n\nfunction updateBarSelection() {\n    
d3.selectAll('#barChart rect[data-run]').each(function() {\n        const el = d3.select(this);\n        if (el.attr('data-run') === selectedRunName) {\n            el.attr('stroke', '#ffc107').attr('stroke-width', 3);\n        } else {\n            el.attr('stroke', 'none');\n        }\n    });\n}\n\n// ============================================================\n// Grouped Bar Chart (By Component)\n// ============================================================\nfunction createGroupedBarChart(metrics) {\n    const COMPONENTS = getComponents();\n    const container = d3.select('#barChart');\n    container.selectAll('*').remove();\n\n    const margin = { top: 30, right: 20, bottom: 140, left: 60 };\n    const width = container.node().getBoundingClientRect().width - margin.left - margin.right;\n    const height = 420 - margin.top - margin.bottom;\n\n    const svg = container.append('svg')\n        .attr('width', width + margin.left + margin.right)\n        .attr('height', height + margin.top + margin.bottom);\n    const g = svg.append('g').attr('transform', `translate(${margin.left},${margin.top})`);\n\n    const runNames = metrics.map(m => m.name);\n    const keys = COMPONENTS.map(c => c.key);\n\n    const stackData = metrics.map(m => {\n        const d = { name: m.name };\n        COMPONENTS.forEach(c => { d[c.key] = m[c.key] / 1000; });\n        return d;\n    });\n\n    const maxY = d3.max(stackData, d => d3.max(keys, k => d[k]));\n\n    const x0 = d3.scaleBand().domain(runNames).range([0, width]).padding(0.2);\n    const x1 = d3.scaleBand().domain(keys).range([0, x0.bandwidth()]).padding(0.05);\n    const y = d3.scaleLinear().domain([0, maxY * 1.08]).range([height, 0]);\n\n    // Grid\n    g.append('g').attr('class', 'grid')\n        .call(d3.axisLeft(y).tickSize(-width).tickFormat(''))\n        .selectAll('line').attr('stroke', '#dee2e6').attr('stroke-opacity', 0.7);\n    g.selectAll('.grid .domain').remove();\n\n    // Tooltip\n    const tooltip = 
d3.select('body').append('div').attr('class', 'chart-tooltip').style('display', 'none');\n    const colorMap = {};\n    COMPONENTS.forEach(c => { colorMap[c.key] = c.color; });\n    const labelMap = {};\n    COMPONENTS.forEach(c => { labelMap[c.key] = c.label; });\n\n    // Bars\n    stackData.forEach(d => {\n        keys.forEach(key => {\n            g.append('rect')\n                .attr('x', x0(d.name) + x1(key))\n                .attr('y', y(d[key]))\n                .attr('width', x1.bandwidth())\n                .attr('height', height - y(d[key]))\n                .attr('fill', colorMap[key])\n                .attr('data-run', d.name)\n                .style('cursor', 'pointer')\n                .on('click', () => selectRun(d.name))\n                .on('mouseover', (event) => {\n                    tooltip.style('display', 'block')\n                        .html(`<strong>${d.name}</strong><br>${labelMap[key]}: ${d[key].toFixed(1)}s`);\n                })\n                .on('mousemove', event => {\n                    tooltip.style('left', (event.pageX + 12) + 'px').style('top', (event.pageY - 20) + 'px');\n                })\n                .on('mouseout', () => tooltip.style('display', 'none'));\n        });\n    });\n\n    // Axes\n    const xAxis = g.append('g').attr('transform', `translate(0,${height})`)\n        .call(d3.axisBottom(x0));\n    xAxis.selectAll('text').attr('fill', '#333')\n        .attr('transform', 'rotate(-35)')\n        .attr('text-anchor', 'end')\n        .attr('dx', '-0.5em')\n        .attr('dy', '0.3em');\n    g.append('g')\n        .call(d3.axisLeft(y).tickFormat(d => d.toFixed(1) + 's'))\n        .selectAll('text').attr('fill', '#333');\n\n    svg.selectAll('.domain').attr('stroke', '#dee2e6');\n    svg.selectAll('.tick line').attr('stroke', '#dee2e6');\n\n    // Measure rotated x-axis label extent, place legend below\n    const xAxisBBox = xAxis.node().getBBox();\n    const legendY = margin.top + height + xAxisBBox.height + 
15;\n    const legendHeight = layoutLegend(svg, COMPONENTS, margin.left, legendY, width);\n    svg.attr('height', legendY + legendHeight + 10);\n\n    updateBarSelection();\n}\n\n// ============================================================\n// Chart Tab Switching\n// ============================================================\nfunction switchChartTab(tab) {\n    currentChartTab = tab;\n    document.getElementById('tabStacked').classList.toggle('active', tab === 'stacked');\n    document.getElementById('tabGrouped').classList.toggle('active', tab === 'grouped');\n    if (allMetrics.length > 0) {\n        if (tab === 'stacked') createBarChart(allMetrics);\n        else createGroupedBarChart(allMetrics);\n    }\n}\n\nfunction renderCurrentChart() {\n    if (currentChartTab === 'stacked') createBarChart(allMetrics);\n    else createGroupedBarChart(allMetrics);\n}\n\n// ============================================================\n// Summary Table\n// ============================================================\nconst TABLE_COLUMNS = [\n    { key: 'name', label: 'Experiment', fmt: v => v },\n    { key: 'num_segments', label: 'Segments', fmt: v => v },\n    { key: 'app_proof_cells', label: 'Cells', fmt: fmtCells },\n    { key: 'total_proof_time_ms', label: 'Total Time', fmt: fmtSeconds },\n];\n\n// Detail tables: three separate tables shown in the experiment details pane.\n\nconst BASIC_STATS_ROWS_V1 = [\n    { key: 'num_segments', label: 'Segments', fmt: v => v },\n    { key: 'num_air_instances', label: 'AIR Instances', fmt: v => v.toLocaleString() },\n    { key: 'app_proof_cols', label: 'Columns', fmt: v => v.toLocaleString() },\n    { key: 'app_proof_cells', label: 'Cells', fmt: fmtCells },\n    { key: 'app_proof_cells_used', label: 'Cells (without padding)', fmt: (v, m) => {\n        const pct = m.app_proof_cells > 0 ? 
(v / m.app_proof_cells * 100) : 0;\n        return `${fmtCells(v)} <span style=\"color:#999\">(${pct.toFixed(1)}%)</span>`;\n    }},\n    { key: 'constraints', label: 'Constraints', fmt: v => v.toLocaleString() },\n    { key: 'constraint_instances', label: 'Constraint Instances', fmt: fmtCells },\n    { key: 'bus_interactions', label: 'Bus Interactions', fmt: v => v.toLocaleString() },\n    { key: 'bus_interaction_messages', label: 'Bus Interaction Messages', fmt: fmtCells },\n];\n\nconst BASIC_STATS_ROWS_V2 = [\n    { key: 'num_segments', label: 'Segments', fmt: v => v },\n    { key: 'num_air_instances', label: 'AIR Instances', fmt: v => v.toLocaleString() },\n    { key: 'app_proof_cols', label: 'Columns', fmt: v => v.toLocaleString() },\n    { key: 'app_proof_cells', label: 'Cells', fmt: fmtCells },\n    { key: 'constraints', label: 'Constraints', fmt: v => v.toLocaleString() },\n    { key: 'constraint_instances', label: 'Constraint Instances', fmt: fmtCells },\n    { key: 'bus_interactions', label: 'Bus Interactions', fmt: v => v.toLocaleString() },\n    { key: 'bus_interaction_messages', label: 'Bus Interaction Messages', fmt: fmtCells },\n];\n\n// Nested proof time breakdown — indent level controls visual nesting.\nconst PROOF_TIME_ROWS_V1 = [\n    { key: 'execute_metered_time_ms', label: 'Metered Execution', fmt: fmtSeconds, indent: 0 },\n    { key: 'app_proof_time_ms', label: 'App Proof Time', fmt: fmtSeconds, indent: 0 },\n    { key: 'app_proof_time_excluding_trace_ms', label: 'STARK (excl. 
trace)', fmt: fmtSeconds, indent: 1 },\n    { key: 'app_execute_preflight_time_ms', label: 'Preflight Execution', fmt: fmtSeconds, indent: 1 },\n    { key: 'app_trace_gen_time_ms', label: 'Trace Gen', fmt: fmtSeconds, indent: 1 },\n    { key: 'app_other_ms', label: 'Other / Overlap', fmt: fmtSeconds, indent: 1, muted: true },\n    { key: 'leaf_proof_time_ms', label: 'Leaf Recursion', fmt: fmtSeconds, indent: 0 },\n    { key: 'inner_recursion_proof_time_ms', label: 'Inner Recursion', fmt: fmtSeconds, indent: 0 },\n    { key: 'total_proof_time_ms', label: 'Total', fmt: fmtSeconds, indent: 0, bold: true },\n];\n\nconst PROOF_TIME_ROWS_V2 = [\n    { key: 'execute_metered_time_ms', label: 'Metered Execution', fmt: fmtSeconds, indent: 0 },\n    { key: 'app_proof_time_ms', label: 'App Proof Time', fmt: fmtSeconds, indent: 0 },\n    { key: 'app_proof_time_excluding_trace_ms', label: 'STARK (excl. trace)', fmt: fmtSeconds, indent: 1 },\n    { key: 'app_rap_constraints_time_ms', label: 'Constraints', fmt: fmtSeconds, indent: 2 },\n    { key: 'app_rap_logup_gkr_time_ms', label: 'LogUp GKR', fmt: fmtSeconds, indent: 3 },\n    { key: 'app_rap_round0_time_ms', label: 'Round 0', fmt: fmtSeconds, indent: 3 },\n    { key: 'app_rap_mle_rounds_time_ms', label: 'MLE Rounds', fmt: fmtSeconds, indent: 3 },\n    { key: 'app_rap_other_ms', label: 'Other', fmt: fmtSeconds, indent: 3, muted: true },\n    { key: 'app_openings_time_ms', label: 'Openings', fmt: fmtSeconds, indent: 2 },\n    { key: 'app_openings_whir_time_ms', label: 'WHIR', fmt: fmtSeconds, indent: 3 },\n    { key: 'app_openings_stacked_reduction_time_ms', label: 'Stacked Reduction', fmt: fmtSeconds, indent: 3 },\n    { key: 'app_openings_other_ms', label: 'Other', fmt: fmtSeconds, indent: 3, muted: true },\n    { key: 'app_trace_commit_time_ms', label: 'Trace Commit', fmt: fmtSeconds, indent: 2 },\n    { key: 'app_stark_other_ms', label: 'Other', fmt: fmtSeconds, indent: 2, muted: true },\n    { key: 
'app_execute_preflight_time_ms', label: 'Preflight Execution', fmt: fmtSeconds, indent: 1 },\n    { key: 'app_set_initial_memory_time_ms', label: 'Set Initial Memory', fmt: fmtSeconds, indent: 1 },\n    { key: 'app_trace_gen_time_ms', label: 'Trace Gen', fmt: fmtSeconds, indent: 1 },\n    { key: 'app_other_ms', label: 'Other', fmt: fmtSeconds, indent: 1, muted: true },\n    { key: 'leaf_proof_time_ms', label: 'Leaf Recursion', fmt: fmtSeconds, indent: 0 },\n    { key: 'inner_recursion_proof_time_ms', label: 'Inner Recursion', fmt: fmtSeconds, indent: 0 },\n    { key: 'compression_proof_time_ms', label: 'Compression', fmt: fmtSeconds, indent: 0 },\n    { key: 'total_proof_time_ms', label: 'Total', fmt: fmtSeconds, indent: 0, bold: true },\n];\n\nfunction getBasicStatsRows() {\n    return detectedOpenVmVersion === 2 ? BASIC_STATS_ROWS_V2 : BASIC_STATS_ROWS_V1;\n}\n\nfunction getProofTimeRows() {\n    return detectedOpenVmVersion === 2 ? PROOF_TIME_ROWS_V2 : PROOF_TIME_ROWS_V1;\n}\n\nconst CELL_DISTRIBUTION_ROWS = [\n    { key: 'powdr_ratio', label: 'Powdr', fmt: fmtPct, noCompare: true },\n    { key: 'normal_instruction_ratio', label: 'Normal Instructions', fmt: fmtPct, noCompare: true },\n    { key: 'openvm_precompile_ratio', label: 'OpenVM Precompiles', fmt: fmtPct, noCompare: true },\n];\n\nlet tableSortKey = 'name';\nlet tableSortAsc = true;\n\nfunction getBaselineMetrics() {\n    return allMetrics.find(m => m.name === baselineRunName) || null;\n}\n\nfunction selectBaseline(runName) {\n    baselineRunName = runName;\n    renderTableBody(allMetrics);\n    if (selectedRunName) renderDetails(selectedRunName);\n    updateUrl();\n}\n\nfunction createSummaryTable(metrics) {\n    const table = document.getElementById('summaryTable');\n    const thead = table.querySelector('thead tr');\n    const tbody = table.querySelector('tbody');\n\n    thead.innerHTML = '';\n    // Baseline radio column\n    const thBaseline = document.createElement('th');\n    
thBaseline.textContent = 'Baseline';\n    thBaseline.classList.add('baseline-radio');\n    thead.appendChild(thBaseline);\n\n    TABLE_COLUMNS.forEach(col => {\n        const th = document.createElement('th');\n        th.textContent = col.label;\n        th.onclick = () => {\n            if (tableSortKey === col.key) {\n                tableSortAsc = !tableSortAsc;\n            } else {\n                tableSortKey = col.key;\n                tableSortAsc = true;\n            }\n            renderTableBody(metrics);\n        };\n        thead.appendChild(th);\n    });\n\n    renderTableBody(metrics);\n}\n\nfunction renderTableBody(metrics) {\n    const tbody = document.querySelector('#summaryTable tbody');\n    tbody.innerHTML = '';\n    const baseline = getBaselineMetrics();\n\n    const sorted = [...metrics].sort((a, b) => {\n        const va = a[tableSortKey], vb = b[tableSortKey];\n        const cmp = typeof va === 'string' ? va.localeCompare(vb) : va - vb;\n        return tableSortAsc ? 
cmp : -cmp;\n    });\n\n    sorted.forEach(m => {\n        const tr = document.createElement('tr');\n        if (m.name === selectedRunName) tr.classList.add('selected');\n        tr.onclick = (e) => { if (e.target.tagName !== 'INPUT') selectRun(m.name); };\n\n        // Baseline radio\n        const tdRadio = document.createElement('td');\n        tdRadio.classList.add('baseline-radio');\n        const radio = document.createElement('input');\n        radio.type = 'radio';\n        radio.name = 'baseline';\n        radio.checked = m.name === baselineRunName;\n        radio.onclick = (e) => { e.stopPropagation(); selectBaseline(m.name); };\n        tdRadio.appendChild(radio);\n        tr.appendChild(tdRadio);\n\n        TABLE_COLUMNS.forEach(col => {\n            const td = document.createElement('td');\n            const raw = m[col.key];\n            const formatted = col.fmt(raw);\n            if (raw != null && typeof raw === 'number') td.title = raw.toLocaleString();\n            if (col.key === 'name' || !baseline || m.name === baselineRunName) {\n                td.textContent = formatted;\n            } else {\n                td.innerHTML = formatted + ' ' + fmtComparison(raw, baseline[col.key]);\n            }\n            tr.appendChild(td);\n        });\n        tbody.appendChild(tr);\n    });\n}\n\nfunction updateTableSelection() {\n    document.querySelectorAll('#summaryTable tbody tr').forEach(tr => {\n        const name = tr.querySelectorAll('td')[1]?.textContent;\n        tr.classList.toggle('selected', name === selectedRunName);\n    });\n}\n\n// ============================================================\n// Experiment Details (details table + pie chart)\n// ============================================================\nfunction renderDetailTable(title, rows, m, opts = {}) {\n    const baseline = getBaselineMetrics();\n    const tableId = 'detail-' + title.replace(/\\s+/g, '-').toLowerCase();\n\n    // Filter out negligible \"other\" rows\n    
const visibleRows = rows.filter(row => {\n        if (row.muted) {\n            const val = m[row.key];\n            return val != null && Math.abs(val) >= 1;\n        }\n        return true;\n    });\n\n    // Determine which rows are parents (have children at a deeper indent)\n    const parentIndices = new Set();\n    for (let i = 0; i < visibleRows.length; i++) {\n        const indent = visibleRows[i].indent || 0;\n        if (i + 1 < visibleRows.length && (visibleRows[i + 1].indent || 0) > indent) {\n            parentIndices.add(i);\n        }\n    }\n\n    // Default expand level: indent 0 parents are expanded (their indent-1 children visible),\n    // deeper parents are collapsed. User overrides via expandState take precedence.\n    const defaultExpandLevel = opts.defaultExpandLevel != null ? opts.defaultExpandLevel : 0;\n\n    // Pre-compute whether each parent is expanded, respecting persistent expandState.\n    const parentExpanded = new Map(); // index -> boolean\n    for (const i of parentIndices) {\n        const stateKey = tableId + ':' + visibleRows[i].key;\n        if (stateKey in expandState) {\n            parentExpanded.set(i, expandState[stateKey]);\n        } else {\n            parentExpanded.set(i, (visibleRows[i].indent || 0) <= defaultExpandLevel);\n        }\n    }\n\n    // Determine visibility: a row is a collapsed-child if any ancestor parent is collapsed.\n    function isHidden(rowIndex) {\n        const indent = visibleRows[rowIndex].indent || 0;\n        if (indent === 0) return false;\n        // Walk backwards to find ancestor parents\n        for (let j = rowIndex - 1; j >= 0; j--) {\n            const pIndent = visibleRows[j].indent || 0;\n            if (pIndent < indent && parentIndices.has(j)) {\n                if (!parentExpanded.get(j)) return true;\n                // Check this parent's ancestors too\n                return isHidden(j);\n            }\n        }\n        return false;\n    }\n\n    let html = `<h6 
style=\"color:#333; font-weight:600; margin-bottom:0.3rem; margin-top:0.75rem\">${title}</h6>`;\n    html += `<table class=\"details-table\" id=\"${tableId}\"><tbody>`;\n    visibleRows.forEach((row, i) => {\n        const indent = row.indent || 0;\n        const val = m[row.key];\n        const isParent = parentIndices.has(i);\n\n        const classes = [];\n        if (isParent) classes.push('collapsible-parent');\n        if (isParent && !parentExpanded.get(i)) classes.push('collapsed');\n        if (isHidden(i)) classes.push('collapsed-child');\n\n        const padding = indent > 0 ? `padding-left:${indent * 1.2 + 0.5}em` : '';\n        const weight = row.bold ? 'font-weight:700' : '';\n        const muted = row.muted ? 'color:#999; font-style:italic' : '';\n        const thStyle = [padding, weight, muted].filter(Boolean).join(';');\n        const tdStyle = [weight, muted].filter(Boolean).join(';');\n        let formatted;\n        if (val == null) {\n            formatted = '<span style=\"color:#999\">N/A</span>';\n        } else if (row.muted && val < 0) {\n            formatted = `<span style=\"color:#c08000\">${fmtSeconds(-val)} overlap</span>`;\n        } else {\n            formatted = row.fmt(val, m);\n        }\n        const comparison = (baseline && m.name !== baselineRunName && !row.noCompare && !row.muted)\n            ? ' ' + fmtComparison(val, baseline[row.key])\n            : '';\n        const arrow = isParent ? 
`<span class=\"toggle-arrow\">&#9660;</span> ` : '';\n        const info = METRIC_INFO[row.key];\n        let infoIcon = '';\n        if (info) {\n            const ver = detectedOpenVmVersion || 1;\n            const desc = (info['descV' + ver] || info.desc || '').replace(/\"/g, '&quot;');\n            const code = (info['codeV' + ver] || info.code || '').replace(/\"/g, '&quot;');\n            infoIcon = `<span class=\"metric-info\" data-desc=\"${desc}\" data-code=\"${code}\">i</span>`;\n        }\n        const classAttr = classes.length ? ` class=\"${classes.join(' ')}\"` : '';\n        const dataKey = isParent ? ` data-key=\"${row.key}\"` : '';\n        html += `<tr${classAttr} data-indent=\"${indent}\"${dataKey}>`;\n        html += `<th${thStyle ? ` style=\"${thStyle}\"` : ''}>${arrow}${row.label}${infoIcon}</th>`;\n        const rawTitle = (val != null && typeof val === 'number') ? ` title=\"${val.toLocaleString()}\"` : '';\n        html += `<td${tdStyle ? ` style=\"${tdStyle}\"` : ''}${rawTitle}>${formatted}${comparison}</td>`;\n        if (opts.pctOfKey) {\n            const total = m[opts.pctOfKey];\n            const pct = (total > 0 && val != null) ? (val / total * 100).toFixed(1) + '%' : '';\n            html += `<td style=\"color:#888; text-align:right${weight ? ';' + weight : ''}\">${pct}</td>`;\n        }\n        html += '</tr>';\n    });\n    html += '</tbody></table>';\n    return html;\n}\n\n// Shared tooltip element for metric info icons.\nconst metricInfoTip = document.createElement('div');\nmetricInfoTip.className = 'metric-info-tooltip';\ndocument.body.appendChild(metricInfoTip);\n\ndocument.addEventListener('mouseenter', function(e) {\n    if (!e.target.classList || !e.target.classList.contains('metric-info')) return;\n    const icon = e.target;\n    const desc = icon.dataset.desc || '';\n    const code = icon.dataset.code || '';\n    metricInfoTip.innerHTML = desc + (code ? 
'<code>' + highlightPython(code) + '</code>' : '');\n    metricInfoTip.style.display = 'block';\n    const rect = icon.getBoundingClientRect();\n    let left = rect.right + 6;\n    let top = rect.top - 4;\n    if (left + 510 > window.innerWidth) left = rect.left - 510;\n    if (top + metricInfoTip.offsetHeight > window.innerHeight) top = window.innerHeight - metricInfoTip.offsetHeight - 8;\n    if (top < 4) top = 4;\n    metricInfoTip.style.left = left + 'px';\n    metricInfoTip.style.top = top + 'px';\n}, true);\ndocument.addEventListener('mouseleave', function(e) {\n    if (!e.target.classList || !e.target.classList.contains('metric-info')) return;\n    metricInfoTip.style.display = 'none';\n}, true);\n\n// Persistent expand/collapse state: maps \"tableId:rowKey\" -> true (expanded) | false (collapsed).\n// null means \"use default\". Once a user clicks, the choice sticks across experiment switches.\nconst expandState = {};\n\n// Toggle collapsible rows: clicking a parent shows/hides its children.\ndocument.addEventListener('click', function(e) {\n    if (e.target.closest('.metric-info')) return;\n    const parentRow = e.target.closest('tr.collapsible-parent');\n    if (!parentRow) return;\n    const tbody = parentRow.closest('tbody');\n    if (!tbody) return;\n\n    const parentIndent = parseInt(parentRow.dataset.indent) || 0;\n    const isCollapsing = !parentRow.classList.contains('collapsed');\n    parentRow.classList.toggle('collapsed');\n\n    // Persist expand/collapse state\n    const tableId = parentRow.closest('table')?.id;\n    const rowKey = parentRow.dataset.key;\n    if (tableId && rowKey) expandState[tableId + ':' + rowKey] = !isCollapsing;\n\n    // Walk subsequent rows: toggle children (indent > parentIndent),\n    // stop when we hit a row at parentIndent or shallower.\n    let sibling = parentRow.nextElementSibling;\n    while (sibling) {\n        const sibIndent = parseInt(sibling.dataset.indent) || 0;\n        if (sibIndent <= parentIndent) 
break;\n        if (isCollapsing) {\n            sibling.classList.add('collapsed-child');\n            // Also collapse any nested parents\n            if (sibling.classList.contains('collapsible-parent')) {\n                sibling.classList.add('collapsed');\n            }\n        } else {\n            // Only show direct children (indent == parentIndent + 1).\n            // Deeper rows stay hidden unless their own parent is expanded.\n            if (sibIndent === parentIndent + 1) {\n                sibling.classList.remove('collapsed-child');\n            }\n        }\n        sibling = sibling.nextElementSibling;\n    }\n});\n\nfunction renderDetails(runName) {\n    const section = document.getElementById('detailsSection');\n    section.innerHTML = '';\n    document.getElementById('pieRunName').textContent = runName ? `(${runName})` : '';\n\n    if (!runName || !combinedData[runName]) {\n        section.innerHTML = '<div class=\"pie-placeholder\">Select an experiment to view details</div>';\n        return;\n    }\n\n    const m = allMetrics.find(m => m.name === runName);\n    if (!m) return;\n\n    // Three detail tables + pie chart side by side\n    let html = '<div class=\"details-row\">';\n    html += '<div class=\"details-table-col\">';\n    html += renderDetailTable('App Proof Basic Stats', getBasicStatsRows(), m);\n    html += renderDetailTable('Proof Time', getProofTimeRows(), m, { pctOfKey: 'total_proof_time_ms', defaultExpandLevel: 0 });\n    html += renderDetailTable('Trace Cell Distribution', CELL_DISTRIBUTION_ROWS, m);\n    html += '</div>';\n    html += '<div class=\"details-pie-col\"><h6 style=\"color:#333; font-weight:600; margin-bottom:0.5rem; margin-top:0.75rem\">Trace Cells by AIR</h6><div id=\"pieChart\"></div></div>';\n    html += '</div>';\n    section.innerHTML = html;\n\n    // Pie chart\n    createPieChart(runName);\n}\n\nfunction createPieChart(runName) {\n    const container = d3.select('#pieChart');\n    
container.selectAll('*').remove();\n\n    const { items, total } = computeCellsByAir(combinedData[runName]);\n    if (items.length === 0) {\n        container.append('div').attr('class', 'pie-placeholder').text('No cell data available');\n        return;\n    }\n\n    const size = 320;\n    const radius = size / 2 - 10;\n    const svg = container.append('svg')\n        .attr('width', size)\n        .attr('height', size + items.length * 18 + 20);\n    const g = svg.append('g').attr('transform', `translate(${size/2},${size/2})`);\n\n    const POWDR_COLOR = '#e03e1a';\n    const nonPowdrItems = items.filter(d => !d.name.startsWith('PowdrAir'));\n    const spectralColors = d3.quantize(t => d3.interpolateSpectral(t * 0.8 + 0.1), Math.max(nonPowdrItems.length, 3));\n    const colorMap = {};\n    let ci = 0;\n    items.forEach(d => {\n        colorMap[d.name] = d.name.startsWith('PowdrAir') ? POWDR_COLOR : spectralColors[ci++];\n    });\n    const colors = name => colorMap[name];\n\n    const pie = d3.pie().value(d => d.value).sort(null);\n    const arc = d3.arc().innerRadius(0).outerRadius(radius);\n\n    const arcs = g.selectAll('arc')\n        .data(pie(items))\n        .enter().append('g');\n\n    arcs.append('path')\n        .attr('d', arc)\n        .attr('fill', d => colors(d.data.name))\n        .attr('stroke', 'white')\n        .attr('stroke-width', 1.5);\n\n    // Labels for big slices\n    const labelArcs = arcs.filter(d => d.data.value / total > 0.05);\n    labelArcs.append('text')\n        .attr('transform', d => `translate(${arc.centroid(d)})`)\n        .attr('text-anchor', 'middle')\n        .attr('fill', '#333')\n        .attr('font-size', '10px')\n        .attr('font-weight', 'bold')\n        .each(function(d) {\n            const pct = (d.data.value / total * 100).toFixed(1);\n            const billions = (d.data.value / 1e9).toFixed(2);\n            d3.select(this).append('tspan').attr('x', 0).attr('dy', '-0.4em').text(`${pct}%`);\n            
d3.select(this).append('tspan').attr('x', 0).attr('dy', '1.2em').text(`${billions}B`);\n        });\n\n    // Legend below pie\n    const legendG = svg.append('g')\n        .attr('transform', `translate(10, ${size + 10})`);\n\n    items.forEach((item, i) => {\n        const lg = legendG.append('g').attr('transform', `translate(0, ${i * 18})`);\n        lg.append('rect').attr('width', 12).attr('height', 12).attr('fill', colors(item.name)).attr('rx', 2);\n        const pct = (item.value / total * 100).toFixed(1);\n        lg.append('text').attr('x', 18).attr('y', 10)\n            .attr('fill', '#333').attr('font-size', '11px')\n            .text(`${pct}% - ${item.name}`);\n    });\n}\n\n// ============================================================\n// Selection\n// ============================================================\nfunction selectRun(runName) {\n    if (selectedRunName === runName) {\n        selectedRunName = null;\n    } else {\n        selectedRunName = runName;\n    }\n    updateBarSelection();\n    updateTableSelection();\n    renderDetails(selectedRunName);\n    updateUrl();\n}\n\n// ============================================================\n// Data Loading\n// ============================================================\nfunction processAndRender(data, sourceLabel = '') {\n    combinedData = normalizeMetricsData(data, sourceLabel);\n    detectedOpenVmVersion = detectOpenVmVersion(combinedData);\n    allMetrics = Object.entries(combinedData).map(([name, json]) => extractMetrics(name, json));\n\n    if (allMetrics.length === 1) {\n        selectedRunName = allMetrics[0].name;\n    } else if (selectedRunName && !allMetrics.find(m => m.name === selectedRunName)) {\n        selectedRunName = null;\n    }\n\n    // Set default baseline if not already set from URL\n    if (!baselineRunName || !allMetrics.find(m => m.name === baselineRunName)) {\n        const apc000 = allMetrics.find(m => m.name === 'apc000');\n        baselineRunName = apc000 ? 
'apc000' : (allMetrics[0]?.name || null);\n    }\n\n    const badge = document.getElementById('versionBadge');\n    badge.textContent = `OpenVM ${detectedOpenVmVersion}`;\n    badge.style.display = 'inline';\n\n    document.getElementById('uploadSection').style.display = 'none';\n    document.getElementById('appSection').style.display = 'block';\n\n    createSummaryTable(allMetrics);\n    renderCurrentChart();\n    renderDetails(selectedRunName);\n    updateUrl();\n}\n\nfunction handleFile(file) {\n    const reader = new FileReader();\n    reader.onload = e => {\n        try {\n            const data = JSON.parse(e.target.result);\n            updateDataSourceDisplay(file.name);\n            processAndRender(data, file.name);\n        } catch (err) {\n            alert('Failed to parse JSON: ' + err.message);\n        }\n    };\n    reader.readAsText(file);\n}\n\nasync function loadFromUrl(url, updateUrlParam = true) {\n    // Convert GitHub blob URLs to raw\n    const rawUrl = url.replace(\n        /github\\.com\\/([^/]+)\\/([^/]+)\\/blob\\/([^/]+)\\/(.*)/,\n        'raw.githubusercontent.com/$1/$2/$3/$4'\n    );\n\n    document.getElementById('loadingOverlay').style.display = 'flex';\n    try {\n        const resp = await fetch(rawUrl);\n        if (!resp.ok) throw new Error(`HTTP ${resp.status}`);\n        const data = await resp.json();\n        updateDataSourceDisplay(url);\n        if (updateUrlParam) {\n            const params = new URLSearchParams(window.location.search);\n            params.set('data', url);\n            history.replaceState(null, '', window.location.pathname + '?' 
+ params.toString());\n        }\n        processAndRender(data, url);\n    } catch (err) {\n        alert('Failed to load: ' + err.message);\n    } finally {\n        document.getElementById('loadingOverlay').style.display = 'none';\n    }\n}\n\n// ============================================================\n// URL Parameters\n// ============================================================\nfunction updateUrl() {\n    const params = new URLSearchParams(window.location.search);\n    if (selectedRunName) {\n        params.set('run', selectedRunName);\n    } else {\n        params.delete('run');\n    }\n    if (baselineRunName) {\n        params.set('baseline', baselineRunName);\n    } else {\n        params.delete('baseline');\n    }\n    const newUrl = window.location.pathname + '?' + params.toString();\n    history.replaceState(null, '', newUrl);\n}\n\n// ============================================================\n// Event Handlers\n// ============================================================\nconst dropzone = document.getElementById('dropzone');\nconst fileInput = document.getElementById('fileInput');\n\ndropzone.addEventListener('click', () => fileInput.click());\ndropzone.addEventListener('dragover', e => { e.preventDefault(); dropzone.classList.add('drag-over'); });\ndropzone.addEventListener('dragleave', () => dropzone.classList.remove('drag-over'));\ndropzone.addEventListener('drop', e => {\n    e.preventDefault();\n    dropzone.classList.remove('drag-over');\n    if (e.dataTransfer.files.length) handleFile(e.dataTransfer.files[0]);\n});\nfileInput.addEventListener('change', () => { if (fileInput.files.length) handleFile(fileInput.files[0]); });\n\ndocument.getElementById('urlLoadBtn').addEventListener('click', () => {\n    const url = document.getElementById('urlInput').value.trim();\n    if (url) loadFromUrl(url);\n});\ndocument.getElementById('urlInput').addEventListener('keydown', e => {\n    if (e.key === 'Enter') {\n        const url = 
e.target.value.trim();\n        if (url) loadFromUrl(url);\n    }\n});\n\n// Responsive chart redraw\nwindow.addEventListener('resize', () => {\n    if (allMetrics.length > 0) {\n        renderCurrentChart();\n        if (selectedRunName) renderDetails(selectedRunName);\n    }\n});\n\n// Auto-load from URL params\n(function init() {\n    const params = new URLSearchParams(window.location.search);\n    const dataUrl = params.get('data') || params.get('url');\n    const run = params.get('run');\n    const baseline = params.get('baseline');\n    if (run) selectedRunName = run;\n    if (baseline) baselineRunName = baseline;\n    if (dataUrl) loadFromUrl(dataUrl, false);\n})();\n</script>\n</body>\n</html>\n"
  },
  {
    "path": "openvm/metrics-viewer/spec.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nAudit script for OpenVM metrics viewer.\n\nRecomputes the experiment details table from a metrics JSON file,\nprinting all computed values so they can be verified against the web UI.\n\nUsage:\n    python3 spec.py <metrics_file_or_url> [experiment_name]\n\nThe source can be a local file path or an HTTP(S) URL.\nGitHub blob URLs are auto-converted to raw URLs.\n\nIf the input is a combined metrics file and no experiment is given,\nlists available experiments.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport re\nimport sys\nimport urllib.request\nfrom typing import Any, Callable, Literal\n\n# A single flattened metric entry: {\"group\": ..., \"air_name\": ..., \"air_id\": ..., \"metric\": ..., \"value\": ..., ...}\nEntry = dict[str, str]\n# Raw metrics JSON with \"counter\" and \"gauge\" arrays\nMetricsJson = dict[str, Any]\n# Computed metrics dict returned by extract_metrics\nMetrics = dict[str, Any]\n\n\n# ============================================================\n# Computation — this is the code that needs to be audited.\n# It must match the JS in index.html exactly.\n# ============================================================\n\ndef load_metrics_dataframes(\n    metrics_json: MetricsJson,\n) -> tuple[list[Entry], list[Entry], list[Entry], list[Entry], list[Entry]]:\n    \"\"\"Port of loadMetricsDataframes: flatten entries, split by group prefix.\"\"\"\n    entries: list[Entry] = []\n    for c in metrics_json[\"counter\"] + metrics_json[\"gauge\"]:\n        obj = dict(c[\"labels\"])\n        obj[\"metric\"] = c[\"metric\"]\n        obj[\"value\"] = c[\"value\"]\n        entries.append(obj)\n\n    app = [e for e in entries if e.get(\"group\", \"\").startswith(\"app_proof\")]\n    if not app:\n        app = [e for e in entries if e.get(\"group\", \"\").startswith(\"reth\")]\n    leaf = [e for e in entries if e.get(\"group\", \"\").startswith(\"leaf\")]\n    internal = [e for e in 
entries if e.get(\"group\", \"\").startswith(\"internal\")]\n    compression = [e for e in entries if e.get(\"group\", \"\") == \"compression\"]\n\n    return entries, app, leaf, internal, compression\n\n\ndef is_normal_instruction_air(name: str) -> bool:\n    \"\"\"Port of isNormalInstructionAir.\"\"\"\n    m = re.match(r\"^VmAirWrapper<[^,]+,\\s*([^>]+?)(?:<(\\d+)(?:,\\s*\\d+)*>)?\\s*>$\", name or \"\")\n    if not m:\n        return False\n    if m.group(1) == \"FieldExpressionCoreAir\":\n        return False\n    if m.group(2) and int(m.group(2)) != 4:\n        return False\n    return True\n\n\ndef sum_metric(entries: list[Entry], metric_name: str) -> float:\n    \"\"\"Sum values for entries matching metric_name.\"\"\"\n    return sum(float(e[\"value\"]) for e in entries if e[\"metric\"] == metric_name)\n\n\ndef unique_metric(entries: list[Entry], metric_name: str) -> float:\n    \"\"\"Get the value of a metric that must appear exactly once.\"\"\"\n    matches = [e for e in entries if e[\"metric\"] == metric_name]\n    assert len(matches) == 1, f\"Expected exactly 1 entry for '{metric_name}', found {len(matches)}\"\n    return float(matches[0][\"value\"])\n\n\ndef detect_version(metrics_json: MetricsJson) -> Literal[1, 2]:\n    \"\"\"Returns 2 if any metric name contains 'logup_gkr' (V2-only), else 1.\"\"\"\n    names = {e[\"metric\"] for e in metrics_json[\"counter\"] + metrics_json[\"gauge\"]}\n    return 2 if any(\"logup_gkr\" in n for n in names) else 1\n\n\ndef extract_metrics(run_name: str, metrics_json: MetricsJson) -> Metrics:\n    \"\"\"Port of extractMetrics from index.html. 
Returns dict of all computed values.\"\"\"\n    all_entries, app, leaf, internal, compression = load_metrics_dataframes(metrics_json)\n    m: Metrics = {}\n    m[\"name\"] = run_name\n\n    # --- Classify app AIRs ---\n    powdr_air = [e for e in app if (e.get(\"air_name\") or \"\").startswith(\"PowdrAir\")]\n    non_powdr = [e for e in app if not (e.get(\"air_name\") or \"\").startswith(\"PowdrAir\")]\n    normal_air = [e for e in non_powdr if is_normal_instruction_air(e.get(\"air_name\", \"\"))]\n    precompile_air = [e for e in non_powdr if not is_normal_instruction_air(e.get(\"air_name\", \"\"))]\n\n    # --- Basic stats ---\n    m[\"app_proof_cols\"] = sum_metric(app, \"main_cols\") + sum_metric(app, \"prep_cols\") + sum_metric(app, \"perm_cols\")\n    segments = [int(e[\"segment\"]) for e in app if \"segment\" in e]\n    m[\"num_segments\"] = max(segments, default=-1) + 1\n    m[\"num_air_instances\"] = len([e for e in app if e[\"metric\"] == \"rows\"])\n    m[\"app_proof_cells\"] = sum_metric(app, \"total_cells\")\n    m[\"app_proof_cells_used\"] = sum_metric(app, \"total_cells_used\")  # V1 only\n\n    # --- Constraints & bus interactions ---\n    has_constraints = any(e[\"metric\"] == \"constraints\" for e in all_entries)\n    has_interactions = any(e[\"metric\"] == \"interactions\" for e in all_entries)\n\n    # Rows & segments by AIR, summed over all segments.\n    # We key by (air_id, air_name) because air_id alone is only unique within a proving\n    # phase — different phases (app, leaf, compression) reuse the same air_id for\n    # unrelated AIRs. 
Keying by the pair is a pragmatic fix: it would break if the same\n    # (air_id, air_name) tuple appeared in two different phases, but that is unlikely\n    # since each phase uses a distinct AIR set.\n    segments_by_app_air: dict[str, float] = {}\n    rows_by_app_air: dict[str, float] = {}\n    for e in app:\n        # Rows are indicated per segment and AIR\n        if e[\"metric\"] == \"rows\":\n            key = f\"{e['air_id']}:{e.get('air_name', '')}\"\n            segments_by_app_air[key] = segments_by_app_air.get(key, 0) + 1\n            rows_by_app_air[key] = rows_by_app_air.get(key, 0) + float(e[\"value\"])\n\n    # Constraints and interactions are listed per AIR.\n    # For the number of constraints and interactions, we weight by the number of segments for that AIR;\n    # for the number of instances and messages, we weight by the number of rows (across all segments).\n    def weighted_sum(metric_name: str, weights: dict[str, float]) -> float:\n        return sum(\n            float(e[\"value\"]) * weights.get(f\"{e['air_id']}:{e.get('air_name', '')}\", 0)\n            for e in all_entries if e[\"metric\"] == metric_name\n        )\n\n    m[\"constraints\"] = weighted_sum(\"constraints\", segments_by_app_air) if has_constraints else None\n    m[\"bus_interactions\"] = weighted_sum(\"interactions\", segments_by_app_air) if has_interactions else None\n    m[\"constraint_instances\"] = weighted_sum(\"constraints\", rows_by_app_air) if has_constraints else None\n    m[\"bus_interaction_messages\"] = weighted_sum(\"interactions\", rows_by_app_air) if has_interactions else None\n\n    # --- Proof times by phase ---\n    # execute_metered runs *before* segment proving and is outside per-segment\n    # total_proof_time_ms. 
We report it as a separate top-level phase.\n    m[\"execute_metered_time_ms\"] = sum_metric(app, \"execute_metered_time_ms\")\n    m[\"app_proof_time_ms\"] = sum_metric(app, \"total_proof_time_ms\")\n    m[\"leaf_proof_time_ms\"] = sum_metric(leaf, \"total_proof_time_ms\")\n    m[\"inner_recursion_proof_time_ms\"] = sum_metric(internal, \"total_proof_time_ms\")\n    m[\"compression_proof_time_ms\"] = sum_metric(compression, \"total_proof_time_ms\")\n    m[\"total_proof_time_ms\"] = (m[\"execute_metered_time_ms\"] + m[\"app_proof_time_ms\"]\n        + m[\"leaf_proof_time_ms\"] + m[\"inner_recursion_proof_time_ms\"]\n        + m[\"compression_proof_time_ms\"])\n\n    # --- STARK time excluding trace ---\n    m[\"app_proof_time_excluding_trace_ms\"] = sum_metric(app, \"stark_prove_excluding_trace_time_ms\")\n\n    # --- App time sub-components ---\n    m[\"app_execute_preflight_time_ms\"] = sum_metric(app, \"execute_preflight_time_ms\")\n    m[\"app_trace_gen_time_ms\"] = sum_metric(app, \"trace_gen_time_ms\")\n    m[\"app_set_initial_memory_time_ms\"] = sum_metric(app, \"set_initial_memory_time_ms\")  # V2 only\n\n    # --- V2: STARK sub-components (prover.*) ---\n    m[\"app_trace_commit_time_ms\"] = sum_metric(app, \"prover.main_trace_commit_time_ms\")\n    m[\"app_rap_constraints_time_ms\"] = sum_metric(app, \"prover.rap_constraints_time_ms\")\n    m[\"app_openings_time_ms\"] = sum_metric(app, \"prover.openings_time_ms\")\n    m[\"app_stark_other_ms\"] = (m[\"app_proof_time_excluding_trace_ms\"]\n        - m[\"app_trace_commit_time_ms\"] - m[\"app_rap_constraints_time_ms\"] - m[\"app_openings_time_ms\"])\n\n    # --- V2: rap_constraints sub-components ---\n    m[\"app_rap_logup_gkr_time_ms\"] = sum_metric(app, \"prover.rap_constraints.logup_gkr_time_ms\")\n    m[\"app_rap_round0_time_ms\"] = sum_metric(app, \"prover.rap_constraints.round0_time_ms\")\n    m[\"app_rap_mle_rounds_time_ms\"] = sum_metric(app, \"prover.rap_constraints.mle_rounds_time_ms\")\n    
m[\"app_rap_other_ms\"] = (m[\"app_rap_constraints_time_ms\"]\n        - m[\"app_rap_logup_gkr_time_ms\"] - m[\"app_rap_round0_time_ms\"] - m[\"app_rap_mle_rounds_time_ms\"])\n\n    # --- V2: openings sub-components ---\n    m[\"app_openings_whir_time_ms\"] = sum_metric(app, \"prover.openings.whir_time_ms\")\n    m[\"app_openings_stacked_reduction_time_ms\"] = sum_metric(app, \"prover.openings.stacked_reduction_time_ms\")\n    m[\"app_openings_other_ms\"] = (m[\"app_openings_time_ms\"]\n        - m[\"app_openings_whir_time_ms\"] - m[\"app_openings_stacked_reduction_time_ms\"])\n\n    # --- App other (residual) ---\n    # execute_metered is a separate top-level phase, not inside app_proof_time_ms.\n    m[\"app_other_ms\"] = (m[\"app_proof_time_ms\"]\n        - m[\"app_proof_time_excluding_trace_ms\"]\n        - m[\"app_execute_preflight_time_ms\"]\n        - m[\"app_trace_gen_time_ms\"] - m[\"app_set_initial_memory_time_ms\"])\n\n    # --- Cell ratios ---\n    total = m[\"app_proof_cells\"]\n    m[\"powdr_ratio\"] = sum_metric(powdr_air, \"cells\") / total if total > 0 else 0\n    m[\"normal_instruction_ratio\"] = sum_metric(normal_air, \"cells\") / total if total > 0 else 0\n    m[\"openvm_precompile_ratio\"] = sum_metric(precompile_air, \"cells\") / total if total > 0 else 0\n\n    return m\n\n\n# ============================================================\n# Presentation — formatting and printing (not part of audit)\n# ============================================================\n\nFormatter = Callable[[float], str]\n\n# Basic stats row: (key, label, formatter)\nBasicRow = tuple[str, str, Formatter]\n# Proof time row: (key, label, indent, flags)  — flags: b=bold, r=residual\nProofRow = tuple[str, str, int, str]\n# Union of row types used by print_section\nRow = BasicRow | ProofRow\n\n\ndef fmt_ms(ms: float) -> str:\n    return f\"{ms / 1000:.2f}s ({ms:.0f} ms)\"\n\ndef fmt_cells(v: float) -> str:\n    for threshold, suffix in [(1e9, \"B\"), (1e6, \"M\"), (1e3, 
\"K\")]:\n        if v >= threshold:\n            return f\"{v / threshold:.2f}{suffix} ({v:,.0f})\"\n    return f\"{v:,.0f}\"\n\ndef fmt_int(v: float) -> str:\n    return f\"{v:,.0f}\"\n\ndef fmt_pct(v: float) -> str:\n    return f\"{v * 100:.1f}%\"\n\n\nBASIC_STATS_V1: list[BasicRow] = [\n    (\"num_segments\",            \"Segments\",                     lambda v: str(int(v))),\n    (\"num_air_instances\",       \"AIR Instances\",                fmt_int),\n    (\"app_proof_cols\",          \"Columns\",                  fmt_int),\n    (\"app_proof_cells\",         \"Cells\",                    fmt_cells),\n    (\"app_proof_cells_used\",    \"Cells (without padding)\",  fmt_cells),\n    (\"constraints\",             \"Constraints\",                  fmt_int),\n    (\"constraint_instances\",    \"Constraint Instances\",         fmt_cells),\n    (\"bus_interactions\",        \"Bus Interactions\",              fmt_int),\n    (\"bus_interaction_messages\", \"Bus Interaction Messages\",     fmt_cells),\n]\n\nBASIC_STATS_V2: list[BasicRow] = [r for r in BASIC_STATS_V1 if r[0] != \"app_proof_cells_used\"]\n\nPROOF_TIME_V1: list[ProofRow] = [\n    (\"execute_metered_time_ms\",          \"Metered Execution\",     0, \"\"),\n    (\"app_proof_time_ms\",                \"App Proof Time\",        0, \"\"),\n    (\"app_proof_time_excluding_trace_ms\",\"  STARK (excl. 
trace)\", 1, \"\"),\n    (\"app_execute_preflight_time_ms\",    \"  Preflight Execution\", 1, \"\"),\n    (\"app_trace_gen_time_ms\",            \"  Trace Gen\",           1, \"\"),\n    (\"app_other_ms\",                     \"  Other / Overlap\",     1, \"r\"),\n    (\"leaf_proof_time_ms\",               \"Leaf Recursion\",        0, \"\"),\n    (\"inner_recursion_proof_time_ms\",    \"Inner Recursion\",       0, \"\"),\n    (\"total_proof_time_ms\",              \"Total\",                 0, \"\"),\n]\n\nPROOF_TIME_V2: list[ProofRow] = [\n    (\"execute_metered_time_ms\",              \"Metered Execution\",     0, \"\"),\n    (\"app_proof_time_ms\",                    \"App Proof Time\",        0, \"\"),\n    (\"app_proof_time_excluding_trace_ms\",    \"  STARK (excl. trace)\", 1, \"\"),\n    (\"app_rap_constraints_time_ms\",          \"    Constraints\",       2, \"\"),\n    (\"app_rap_logup_gkr_time_ms\",           \"      LogUp GKR\",       3, \"\"),\n    (\"app_rap_round0_time_ms\",              \"      Round 0\",         3, \"\"),\n    (\"app_rap_mle_rounds_time_ms\",          \"      MLE Rounds\",      3, \"\"),\n    (\"app_rap_other_ms\",                    \"      Other\",           3, \"r\"),\n    (\"app_openings_time_ms\",                \"    Openings\",          2, \"\"),\n    (\"app_openings_whir_time_ms\",           \"      WHIR\",            3, \"\"),\n    (\"app_openings_stacked_reduction_time_ms\",\"      Stacked Reduction\", 3, \"\"),\n    (\"app_openings_other_ms\",               \"      Other\",           3, \"r\"),\n    (\"app_trace_commit_time_ms\",            \"    Trace Commit\",      2, \"\"),\n    (\"app_stark_other_ms\",                  \"    Other\",             2, \"r\"),\n    (\"app_execute_preflight_time_ms\",       \"  Preflight Execution\", 1, \"\"),\n    (\"app_set_initial_memory_time_ms\",      \"  Set Initial Memory\",  1, \"\"),\n    (\"app_trace_gen_time_ms\",               \"  Trace Gen\",           1, \"\"),\n    
(\"app_other_ms\",                        \"  Other\",               1, \"r\"),\n    (\"leaf_proof_time_ms\",                  \"Leaf Recursion\",        0, \"\"),\n    (\"inner_recursion_proof_time_ms\",       \"Inner Recursion\",       0, \"\"),\n    (\"compression_proof_time_ms\",           \"Compression\",           0, \"\"),\n    (\"total_proof_time_ms\",                 \"Total\",                 0, \"\"),\n]\n\nCELL_DISTRIBUTION: list[BasicRow] = [\n    (\"powdr_ratio\",               \"Powdr\",              fmt_pct),\n    (\"normal_instruction_ratio\",  \"Normal Instructions\", fmt_pct),\n    (\"openvm_precompile_ratio\",   \"OpenVM Precompiles\",  fmt_pct),\n]\n\n\ndef print_section(\n    title: str, rows: list[Row], m: Metrics, *, pct_of_key: str | None = None\n) -> None:\n    print(f\"\\n  {title}\")\n    print(f\"  {'─' * 58}\")\n    width = max(len(r[1]) for r in rows)\n    total = m.get(pct_of_key, 0) if pct_of_key else 0\n\n    for row in rows:\n        key, label = row[0], row[1]\n        val: float | None = m.get(key)\n\n        if key == \"total_proof_time_ms\":\n            print(f\"  {'─' * 58}\")\n\n        if val is None:\n            print(f\"  {label:<{width}}  N/A\")\n            continue\n\n        # Determine formatter and flags\n        if len(row) == 3:\n            fmt: Formatter = row[2]  # type: ignore[assignment]\n            flags = \"\"\n        else:\n            fmt = fmt_ms\n            flags: str = row[3]  # type: ignore[no-redef]\n\n        suffix = \" (residual)\" if \"r\" in flags else \"\"\n\n        pct = f\"  ({val / total * 100:5.1f}%)\" if total > 0 else \"\"\n        print(f\"  {label:<{width}}  {fmt(val)}{pct}{suffix}\")\n\n\n# ============================================================\n# Data loading (IO)\n# ============================================================\n\ndef load_data(source: str) -> tuple[dict[str, Any], str]:\n    \"\"\"Load JSON from a file path or URL. 
Returns (data, source_label).\"\"\"\n    if source.startswith(\"http://\") or source.startswith(\"https://\"):\n        url = re.sub(r\"github\\.com/(.+)/blob/\", r\"raw.githubusercontent.com/\\1/\", source)\n        with urllib.request.urlopen(url) as resp:\n            return json.loads(resp.read()), url.split(\"/\")[-1]\n    else:\n        with open(source) as f:\n            return json.load(f), source.split(\"/\")[-1]\n\n\ndef resolve_experiments(\n    data: dict[str, Any], source_label: str, experiment: str | None\n) -> dict[str, MetricsJson]:\n    \"\"\"Normalize raw/combined input and select experiment(s). Returns dict of {name: json}.\"\"\"\n    if \"counter\" in data and \"gauge\" in data:\n        name = experiment or source_label.replace(\".json\", \"\")\n        return {name: data}\n\n    if experiment:\n        if experiment not in data:\n            sys.exit(f\"Error: '{experiment}' not found. Available: {', '.join(sorted(data))}\")\n        return {experiment: data[experiment]}\n\n    if len(data) == 1:\n        return data\n\n    print(f\"Combined file with {len(data)} experiments:\")\n    for name in sorted(data):\n        print(f\"  - {name}\")\n    print(f\"\\nUsage: {sys.argv[0]} {sys.argv[1]} <experiment_name>\")\n    sys.exit(0)\n\n\n# ============================================================\n# Main\n# ============================================================\n\ndef main() -> None:\n    if len(sys.argv) < 2:\n        print(__doc__.strip())\n        sys.exit(1)\n\n    data, source_label = load_data(sys.argv[1])\n    experiment = sys.argv[2] if len(sys.argv) > 2 else None\n    runs = resolve_experiments(data, source_label, experiment)\n\n    for run_name, metrics_json in runs.items():\n        version = detect_version(metrics_json)\n        m = extract_metrics(run_name, metrics_json)\n\n        print(f\"\\nExperiment: {run_name}  (OpenVM {version})\")\n\n        basic = BASIC_STATS_V2 if version == 2 else BASIC_STATS_V1\n        proof = 
PROOF_TIME_V2 if version == 2 else PROOF_TIME_V1\n\n        print_section(\"App Proof Basic Stats\", basic, m)\n        print_section(\"Proof Time\", proof, m, pct_of_key=\"total_proof_time_ms\")\n        print_section(\"Trace Cell Distribution\", CELL_DISTRIBUTION, m)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "openvm/src/air_builder.rs",
    "content": "use std::sync::Arc;\n\nuse openvm_stark_backend::air_builders::symbolic::get_symbolic_builder;\nuse openvm_stark_backend::air_builders::symbolic::SymbolicRapBuilder;\nuse openvm_stark_backend::config::Com;\nuse openvm_stark_backend::config::StarkGenericConfig;\nuse openvm_stark_backend::config::Val;\nuse openvm_stark_backend::interaction::RapPhaseSeqKind;\nuse openvm_stark_backend::keygen::types::ProverOnlySinglePreprocessedData;\nuse openvm_stark_backend::keygen::types::TraceWidth;\nuse openvm_stark_backend::keygen::types::VerifierSinglePreprocessedData;\nuse openvm_stark_backend::p3_commit::Pcs;\nuse openvm_stark_backend::p3_matrix::Matrix;\nuse openvm_stark_backend::rap::AnyRap;\n\npub struct PrepKeygenData<SC: StarkGenericConfig> {\n    pub _verifier_data: Option<VerifierSinglePreprocessedData<Com<SC>>>,\n    pub prover_data: Option<ProverOnlySinglePreprocessedData<SC>>,\n}\n\npub struct AirKeygenBuilder<SC: StarkGenericConfig> {\n    air: Arc<dyn AnyRap<SC>>,\n    prep_keygen_data: PrepKeygenData<SC>,\n}\n\nfn compute_prep_data_for_air<SC: StarkGenericConfig>(\n    pcs: &SC::Pcs,\n    air: &dyn AnyRap<SC>,\n) -> PrepKeygenData<SC> {\n    let preprocessed_trace = air.preprocessed_trace();\n    let vpdata_opt = preprocessed_trace.map(|trace| {\n        let domain = pcs.natural_domain_for_degree(trace.height());\n        let (commit, data) = pcs.commit(vec![(domain, trace.clone())]);\n        let vdata = VerifierSinglePreprocessedData { commit };\n        let pdata = ProverOnlySinglePreprocessedData {\n            trace: Arc::new(trace),\n            data: Arc::new(data),\n        };\n        (vdata, pdata)\n    });\n    if let Some((vdata, pdata)) = vpdata_opt {\n        PrepKeygenData {\n            prover_data: Some(pdata),\n            _verifier_data: Some(vdata),\n        }\n    } else {\n        PrepKeygenData {\n            prover_data: None,\n            _verifier_data: None,\n        }\n    }\n}\n\nimpl<SC: StarkGenericConfig> 
AirKeygenBuilder<SC> {\n    pub fn new(pcs: &SC::Pcs, air: Arc<dyn AnyRap<SC>>) -> Self {\n        let prep_keygen_data = compute_prep_data_for_air(pcs, air.as_ref());\n        AirKeygenBuilder {\n            air,\n            prep_keygen_data,\n        }\n    }\n\n    pub fn get_symbolic_builder(\n        &self,\n        max_constraint_degree: Option<usize>,\n    ) -> SymbolicRapBuilder<Val<SC>> {\n        let width = TraceWidth {\n            preprocessed: self.prep_keygen_data.width(),\n            cached_mains: self.air.cached_main_widths(),\n            common_main: self.air.common_main_width(),\n            after_challenge: vec![],\n        };\n        get_symbolic_builder(\n            self.air.as_ref(),\n            &width,\n            &[],\n            &[],\n            RapPhaseSeqKind::None,\n            max_constraint_degree.unwrap_or(0),\n        )\n    }\n}\n\nimpl<SC: StarkGenericConfig> PrepKeygenData<SC> {\n    pub fn width(&self) -> Option<usize> {\n        self.prover_data.as_ref().map(|d| d.trace.width())\n    }\n}\n"
  },
  {
    "path": "openvm/src/cuda_abi.rs",
    "content": "#![cfg(feature = \"cuda\")]\n\nuse openvm_cuda_backend::base::DeviceMatrix;\nuse openvm_cuda_common::{d_buffer::DeviceBuffer, error::CudaError};\nuse openvm_stark_backend::prover::hal::MatrixDimensions;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\n\nextern \"C\" {\n    /// Launches the GPU kernel that maps original AIR traces into the APC trace buffer.\n    ///\n    /// Safety: All pointers must be valid device pointers for the specified lengths.\n    pub fn _apc_tracegen(\n        d_output: *mut BabyBear,             // column-major\n        output_height: usize,                // H_out\n        d_original_airs: *const OriginalAir, // device array of AIR metadata\n        d_subs: *const Subst,                // device array of all substitutions\n        n_subs: usize,                       // number of substitutions\n        num_apc_calls: i32,                  // number of APC calls\n    ) -> i32;\n\n    /// Applies derived expression columns on the GPU.\n    /// Each thread processes rows; for rows >= num_apc_calls, writes zeros.\n    /// Safety: All device pointers must be valid for the specified lengths.\n    pub fn _apc_apply_derived_expr(\n        d_output: *mut BabyBear,         // APC trace matrix (column-major)\n        output_height: usize,            // rows (height)\n        num_apc_calls: i32,              // number of valid rows\n        d_specs: *const DerivedExprSpec, // device array of derived expression specs\n        n_cols: usize,                   // number of derived columns\n        d_bytecode: *const u32,          // device bytecode buffer\n    ) -> i32;\n\n    /// Launches the GPU kernel that applies bus interactions to periphery histograms.\n    ///\n    /// Safety: All pointers must be valid device pointers for the specified lengths.\n    pub fn _apc_apply_bus(\n        // APC related\n        d_output: *const BabyBear, // APC trace buffer (column-major), device pointer\n        num_apc_calls: i32,        // number of 
APC calls (rows to process)\n\n        // Interaction related\n        d_bytecode: *const u32, // device bytecode buffer for stack-machine expressions\n        bytecode_len: usize,    // length of bytecode buffer (u32 words)\n        d_interactions: *const DevInteraction, // device array of interactions\n        n_interactions: usize,  // number of interactions\n        d_arg_spans: *const ExprSpan, // device array of arg spans into `d_bytecode`\n        n_arg_spans: usize,     // number of arg spans\n\n        // Variable range checker related\n        var_range_bus_id: u32, // bus id for the variable range checker\n        d_var_hist: *mut u32,  // device histogram for variable range checker\n        var_num_bins: usize,   // number of bins in variable range histogram\n\n        // Tuple range checker related\n        tuple2_bus_id: u32,      // bus id for the 2-tuple range checker\n        d_tuple2_hist: *mut u32, // device histogram for tuple2 checker\n        tuple2_sz0: u32,         // tuple2 dimension 0 size\n        tuple2_sz1: u32,         // tuple2 dimension 1 size\n\n        // Bitwise related\n        bitwise_bus_id: u32,      // bus id for the bitwise lookup\n        d_bitwise_hist: *mut u32, // device histogram for bitwise lookup\n    ) -> i32;\n}\n\n#[repr(C)]\n#[derive(Clone, Copy, Debug)]\npub struct OriginalAir {\n    pub width: i32,              // number of columns\n    pub height: i32,             // number of rows (Ha)\n    pub buffer: *const BabyBear, // column-major base: col*height + row (device ptr)\n    pub row_block_size: i32,     // stride between used rows\n}\n\n#[repr(C)]\n#[derive(Clone, Copy, Debug)]\npub struct Subst {\n    /// Index of the source AIR in `d_original_airs`\n    pub air_index: i32,\n    /// Source column within this AIR\n    pub col: i32,\n    /// Base row offset within the row-block\n    pub row: i32,\n    /// Destination APC column\n    pub apc_col: i32,\n}\n\n#[repr(C)]\n#[derive(Clone, Copy)]\npub struct 
DerivedExprSpec {\n    /// Precomputed destination APC column base = (apc_col_index * H)\n    pub col_base: u64,\n    /// Expression span inside the shared bytecode buffer\n    pub span: ExprSpan,\n}\n\npub fn apc_tracegen(\n    output: &mut DeviceMatrix<BabyBear>,      // column-major\n    original_airs: DeviceBuffer<OriginalAir>, // device array of AIR metadata\n    substitutions: DeviceBuffer<Subst>,       // device array of all substitutions\n    num_apc_calls: usize,\n) -> Result<(), CudaError> {\n    let output_height = output.height();\n\n    unsafe {\n        CudaError::from_result(_apc_tracegen(\n            output.buffer().as_mut_ptr(),\n            output_height,\n            original_airs.as_ptr(),\n            substitutions.as_ptr(),\n            substitutions.len(),\n            num_apc_calls as i32,\n        ))\n    }\n}\n\n/// High-level wrapper for `_apc_apply_derived_expr`.\n/// Applies derived arbitrary expression columns using the GPU stack machine.\npub fn apc_apply_derived_expr(\n    output: &mut DeviceMatrix<BabyBear>,\n    specs: DeviceBuffer<DerivedExprSpec>,\n    bytecode: DeviceBuffer<u32>,\n    num_apc_calls: usize,\n) -> Result<(), CudaError> {\n    unsafe {\n        CudaError::from_result(_apc_apply_derived_expr(\n            output.buffer().as_mut_ptr(),\n            output.height(),\n            num_apc_calls as i32,\n            specs.as_ptr(),\n            specs.len(),\n            bytecode.as_ptr(),\n        ))\n    }\n}\n\n/// OpCode enum for the GPU stack machine bus evaluator.\n#[repr(u32)]\npub enum OpCode {\n    PushApc = 0, // Push the APC value onto the stack. Must be followed by the index of the value in the APC device buffer.\n    PushConst = 1, // Push a constant value onto the stack. 
Must be followed by the constant value.\n    Add = 2,     // Add the top two values on the stack.\n    Sub = 3,     // Subtract the top two values on the stack.\n    Mul = 4,     // Multiply the top two values on the stack.\n    Neg = 5,     // Negate the top value on the stack.\n    InvOrZero = 6, // Invert the top value on the stack if it is not zero, otherwise pop and push zero.\n}\n\n/// GPU device representation of a bus interaction.\n#[repr(C)]\n#[derive(Clone, Copy)]\npub struct DevInteraction {\n    /// Bus id this interaction targets (matches periphery chip bus id)\n    pub bus_id: u32,\n    /// Number of argument expressions for this interaction\n    pub num_args: u32,\n    /// Starting index into the `ExprSpan` array for this interaction's args\n    /// Layout: [ multiplicity span, arg0, arg1, ... ]\n    pub args_index_off: u32,\n}\n\n#[repr(C)]\n#[derive(Clone, Copy)]\npub struct ExprSpan {\n    /// Offset (in u32 words) into `bytecode` where this arg expression starts\n    pub off: u32,\n    /// Length (instruction count) of this arg expression\n    pub len: u32,\n}\n\n/// High-level safe wrapper for `_apc_apply_bus`. 
Applies bus interactions on the GPU,\n/// updating periphery histograms in-place.\n#[allow(clippy::too_many_arguments)]\npub fn apc_apply_bus(\n    // APC related\n    output: &DeviceMatrix<BabyBear>, // APC trace matrix (column-major) on device\n    num_apc_calls: usize,            // number of APC calls (rows to process)\n\n    // Interaction related\n    bytecode: DeviceBuffer<u32>,                // device bytecode buffer\n    interactions: DeviceBuffer<DevInteraction>, // device array of interactions\n    arg_spans: DeviceBuffer<ExprSpan>,          // device array of arg spans\n\n    // Variable range checker related\n    var_range_bus_id: u32, // bus id for variable range checker\n    var_range_count: &DeviceBuffer<BabyBear>, // device histogram for variable range\n\n    // Tuple range checker related\n    tuple2_bus_id: u32, // bus id for tuple range checker (2-ary)\n    tuple2_count: &DeviceBuffer<BabyBear>, // device histogram for tuple2\n    tuple2_sizes: [u32; 2], // tuple2 sizes (dim0, dim1)\n\n    // Bitwise related\n    bitwise_bus_id: u32,                    // bus id for bitwise lookup\n    bitwise_count: &DeviceBuffer<BabyBear>, // device histogram for bitwise lookup\n) -> Result<(), CudaError> {\n    unsafe {\n        CudaError::from_result(_apc_apply_bus(\n            // APC related\n            output.buffer().as_ptr(),\n            num_apc_calls as i32,\n            // Interaction related\n            bytecode.as_ptr(),\n            bytecode.len(),\n            interactions.as_ptr(),\n            interactions.len(),\n            arg_spans.as_ptr(),\n            arg_spans.len(),\n            // Variable range checker related\n            var_range_bus_id,\n            var_range_count.as_mut_ptr() as *mut u32,\n            var_range_count.len(),\n            // Tuple range checker related\n            tuple2_bus_id,\n            tuple2_count.as_mut_ptr() as *mut u32,\n            tuple2_sizes[0],\n            tuple2_sizes[1],\n            // 
Bitwise related\n            bitwise_bus_id,\n            bitwise_count.as_mut_ptr() as *mut u32,\n        ))\n    }\n}\n"
  },
  {
    "path": "openvm/src/customize_exe.rs",
    "content": "use std::fmt::Display;\nuse std::hash::Hash;\nuse std::iter::once;\nuse std::marker::PhantomData;\nuse std::sync::Arc;\n\nuse crate::extraction_utils::{get_air_metrics, AirWidthsDiff, OriginalAirs};\nuse crate::isa::OpenVmISA;\nuse crate::powdr_extension::chip::PowdrAir;\nuse crate::program::Prog;\nuse crate::OriginalCompiledProgram;\nuse crate::{CompiledProgram, SpecializedConfig};\nuse itertools::Itertools;\nuse openvm_circuit::arch::VmState;\nuse openvm_circuit::system::memory::online::GuestMemory;\nuse openvm_instructions::instruction::Instruction as OpenVmInstruction;\nuse openvm_instructions::program::DEFAULT_PC_STEP;\nuse openvm_instructions::VmOpcode;\nuse openvm_stark_backend::p3_field::{FieldAlgebra, PrimeField32};\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::adapter::{\n    Adapter, AdapterApc, AdapterApcWithStats, ApcWithStats, PgoAdapter,\n};\nuse powdr_autoprecompiles::blocks::{Instruction, PcStep};\nuse powdr_autoprecompiles::empirical_constraints::EmpiricalConstraints;\nuse powdr_autoprecompiles::execution::ExecutionState;\nuse powdr_autoprecompiles::pgo::ApcCandidate;\nuse powdr_autoprecompiles::PowdrConfig;\nuse powdr_autoprecompiles::{InstructionHandler, VmConfig};\nuse powdr_number::{BabyBearField, FieldElement, LargeInt};\nuse powdr_openvm_bus_interaction_handler::bus_map::OpenVmBusType;\nuse serde::{Deserialize, Serialize};\n\nuse crate::powdr_extension::{PowdrOpcode, PowdrPrecompile};\n\npub use powdr_openvm_bus_interaction_handler::{\n    memory_bus_interaction::OpenVmMemoryBusInteraction, OpenVmBusInteractionHandler,\n};\n\npub const POWDR_OPCODE: usize = 0x10ff;\n\n/// An adapter for the BabyBear OpenVM precompiles.\n/// Note: This could be made generic over the field, but the implementation of `Candidate` is BabyBear-specific.\n/// The lifetime parameter is used because we use a reference to the `OpenVmProgram` in the `Prog` type.\npub struct BabyBearOpenVmApcAdapter<'a, ISA> {\n    _marker: 
std::marker::PhantomData<&'a ISA>,\n}\n\n/// The openvm execution state, used for execution constraint checking\npub struct OpenVmExecutionState<'a, F, ISA> {\n    inner: &'a VmState<F, GuestMemory>,\n    _marker: PhantomData<ISA>,\n}\n\nimpl<'a, F: PrimeField32, ISA> From<&'a VmState<F, GuestMemory>>\n    for OpenVmExecutionState<'a, F, ISA>\n{\n    fn from(inner: &'a VmState<F, GuestMemory>) -> Self {\n        Self {\n            inner,\n            _marker: PhantomData,\n        }\n    }\n}\n// TODO: This is not tested yet as apc compilation does not currently output any optimistic constraints\nimpl<'a, F: PrimeField32, ISA: OpenVmISA> ExecutionState for OpenVmExecutionState<'a, F, ISA> {\n    type RegisterAddress = ();\n    type Value = u32;\n\n    fn pc(&self) -> Self::Value {\n        self.inner.pc()\n    }\n\n    fn reg(&self, _addr: &Self::RegisterAddress) -> Self::Value {\n        unimplemented!(\"optimistic constraints are currently unused\")\n    }\n\n    fn value_limb(_value: Self::Value, _limb_index: usize) -> Self::Value {\n        unimplemented!(\"optimistic constraints are currently unused\")\n    }\n\n    fn global_clk(&self) -> usize {\n        unimplemented!(\"optimistic constraints are currently unused\")\n    }\n}\n\nimpl<'a, ISA: OpenVmISA> Adapter for BabyBearOpenVmApcAdapter<'a, ISA> {\n    type PowdrField = BabyBearField;\n    type Field = BabyBear;\n    type InstructionHandler = OriginalAirs<Self::Field, ISA>;\n    type BusInteractionHandler = OpenVmBusInteractionHandler<Self::PowdrField>;\n    type Program = Prog<'a, Self::Field>;\n    type Instruction = Instr<Self::Field, ISA>;\n    type MemoryBusInteraction<V: Ord + Clone + Eq + Display + Hash> =\n        OpenVmMemoryBusInteraction<Self::PowdrField, V>;\n    type CustomBusTypes = OpenVmBusType;\n    type ApcStats = OvmApcStats;\n    type AirId = String;\n    type ExecutionState = OpenVmExecutionState<'a, BabyBear, ISA>;\n\n    fn into_field(e: Self::PowdrField) -> Self::Field {\n        
openvm_stark_sdk::p3_baby_bear::BabyBear::from_canonical_u32(\n            e.to_integer().try_into_u32().unwrap(),\n        )\n    }\n\n    fn from_field(e: Self::Field) -> Self::PowdrField {\n        BabyBearField::from(e.as_canonical_u32())\n    }\n\n    fn apc_stats(\n        apc: Arc<AdapterApc<Self>>,\n        instruction_handler: &Self::InstructionHandler,\n    ) -> Self::ApcStats {\n        // Get the metrics for the apc using the same degree bound as the one used for the instruction chips\n        let apc_metrics = get_air_metrics(\n            Arc::new(PowdrAir::new(apc.machine.clone())),\n            instruction_handler.degree_bound().identities,\n        );\n        let width_after = apc_metrics.widths;\n\n        // Sum up the metrics for each instruction\n        let width_before = apc\n            .instructions()\n            .map(|instr| {\n                instruction_handler\n                    .get_instruction_metrics(instr.inner.opcode)\n                    .unwrap()\n                    .widths\n            })\n            .sum();\n\n        OvmApcStats::new(AirWidthsDiff::new(width_before, width_after))\n    }\n\n    fn is_allowed(instruction: &Self::Instruction) -> bool {\n        ISA::allowed_opcodes().contains(&instruction.inner.opcode)\n    }\n\n    fn is_branching(instruction: &Self::Instruction) -> bool {\n        ISA::branching_opcodes().contains(&instruction.inner.opcode)\n    }\n}\n\n#[derive(Serialize, Deserialize)]\npub struct Instr<F, ISA> {\n    pub inner: OpenVmInstruction<F>,\n    _marker: PhantomData<ISA>,\n}\n\nimpl<F, ISA> From<OpenVmInstruction<F>> for Instr<F, ISA> {\n    fn from(value: OpenVmInstruction<F>) -> Self {\n        Self {\n            inner: value,\n            _marker: PhantomData,\n        }\n    }\n}\n\n// TODO: derive, probably the compiler being too conservative here\nimpl<F, ISA> Clone for Instr<F, ISA>\nwhere\n    OpenVmInstruction<F>: Clone,\n{\n    fn clone(&self) -> Self {\n        Self {\n            
inner: self.inner.clone(),\n            _marker: PhantomData,\n        }\n    }\n}\n\nimpl<F: PrimeField32, ISA: OpenVmISA> Display for Instr<F, ISA> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", ISA::format(&self.inner))\n    }\n}\n\nimpl<F, ISA: OpenVmISA> PcStep for Instr<F, ISA> {\n    fn pc_step() -> u32 {\n        DEFAULT_PC_STEP\n    }\n}\n\nimpl<F: PrimeField32, ISA: OpenVmISA> Instruction<F> for Instr<F, ISA> {\n    fn pc_lookup_row(&self, pc: u64) -> Vec<F> {\n        let args = [\n            self.inner.opcode.to_field(),\n            self.inner.a,\n            self.inner.b,\n            self.inner.c,\n            self.inner.d,\n            self.inner.e,\n            self.inner.f,\n            self.inner.g,\n        ];\n        // The PC lookup row has the format:\n        // [pc, opcode, a, b, c, d, e, f, g]\n        let pc = F::from_canonical_u32(pc.try_into().unwrap());\n        once(pc).chain(args).collect()\n    }\n}\n\npub fn customize<'a, ISA: OpenVmISA, P: PgoAdapter<Adapter = BabyBearOpenVmApcAdapter<'a, ISA>>>(\n    original_program: OriginalCompiledProgram<ISA>,\n    config: PowdrConfig,\n    pgo: P,\n    empirical_constraints: EmpiricalConstraints,\n) -> CompiledProgram<ISA> {\n    let original_config = original_program.vm_config.clone();\n    let airs = original_config.airs(config.degree_bound).expect(\"Failed to convert the AIR of an OpenVM instruction, even after filtering by the blacklist!\");\n    let bus_map = original_config.bus_map();\n\n    let vm_config = VmConfig {\n        instruction_handler: &airs,\n        bus_interaction_handler: OpenVmBusInteractionHandler::new(bus_map.clone()),\n        bus_map: bus_map.clone(),\n    };\n\n    let symbols = ISA::get_symbol_table(&original_program.linked_program);\n    let blocks = original_program.collect_basic_blocks();\n    tracing::info!(\n        \"Got {} basic blocks from `collect_basic_blocks`\",\n        blocks.len()\n    );\n   
 if tracing::enabled!(tracing::Level::DEBUG) {\n        tracing::debug!(\"Basic blocks sorted by execution count (top 10):\");\n        for (count, block) in blocks\n            .iter()\n            .filter_map(|block| Some((pgo.pc_execution_count(block.start_pc)?, block)))\n            .sorted_by_key(|(count, _)| *count)\n            .rev()\n            .take(10)\n        {\n            let name = symbols\n                .try_get_one_or_preceding(block.start_pc)\n                .map(|(symbol, offset)| format!(\"{} + {offset}\", symbol))\n                .unwrap_or_default();\n            tracing::debug!(\"Basic block (executed {count} times), {name}:\\n{block}\",);\n        }\n    }\n\n    let symbols = symbols\n        .into_table()\n        .into_iter()\n        .map(|(key, values)| (key.into(), values))\n        .collect();\n\n    let exe = original_program.exe;\n    let start = std::time::Instant::now();\n    let apcs = pgo.filter_blocks_and_create_apcs_with_pgo(\n        blocks,\n        &config,\n        vm_config,\n        symbols,\n        empirical_constraints.apply_pc_threshold(),\n    );\n    metrics::gauge!(\"total_apc_gen_time_ms\").set(start.elapsed().as_millis() as f64);\n\n    let pc_base = exe.program.pc_base;\n    let pc_step = DEFAULT_PC_STEP;\n    // We need to clone the program because we need to modify it to add the apc instructions.\n    let mut exe = (*exe).clone();\n    let program = &mut exe.program;\n\n    tracing::info!(\"Adjust the program with the autoprecompiles\");\n\n    let extensions = apcs\n        .into_iter()\n        .map(ApcWithStats::into_parts)\n        .enumerate()\n        .map(|(i, (apc, apc_stats, _))| {\n            let opcode = POWDR_OPCODE + i;\n            let start_pc = apc\n                .block\n                .try_as_basic_block()\n                .expect(\"Superblocks not yet supported in OpenVM\")\n                .start_pc;\n            let start_index = ((start_pc - pc_base as u64) / pc_step as u64)\n   
             .try_into()\n                .unwrap();\n\n            // We encode in the program that the prover should execute the apc instruction instead of the original software version.\n            // This is only for witgen: the program in the program chip is left unchanged.\n            program.add_apc_instruction_at_pc_index(start_index, VmOpcode::from_usize(opcode));\n\n            PowdrPrecompile::new(\n                format!(\"PowdrAutoprecompile_{}\", start_pc),\n                PowdrOpcode {\n                    class_offset: opcode,\n                },\n                apc,\n                apc_stats,\n            )\n        })\n        .collect();\n\n    CompiledProgram {\n        exe: Arc::new(exe),\n        vm_config: SpecializedConfig::new(original_config, extensions, config.degree_bound),\n    }\n}\n\n#[derive(Clone, Serialize, Deserialize)]\npub struct OvmApcStats {\n    pub widths: AirWidthsDiff,\n}\n\nimpl OvmApcStats {\n    pub fn new(widths: AirWidthsDiff) -> Self {\n        Self { widths }\n    }\n}\n\n#[derive(Serialize, Deserialize)]\npub struct OpenVmApcCandidate<ISA: OpenVmISA>(\n    ApcWithStats<BabyBear, Instr<BabyBear, ISA>, (), u32, OvmApcStats>,\n);\n\nimpl<'a, ISA: OpenVmISA> ApcCandidate<BabyBearOpenVmApcAdapter<'a, ISA>>\n    for OpenVmApcCandidate<ISA>\n{\n    fn create(apc_with_stats: AdapterApcWithStats<BabyBearOpenVmApcAdapter<'a, ISA>>) -> Self {\n        Self(apc_with_stats)\n    }\n\n    fn inner(&self) -> &AdapterApcWithStats<BabyBearOpenVmApcAdapter<'a, ISA>> {\n        &self.0\n    }\n\n    fn into_inner(self) -> AdapterApcWithStats<BabyBearOpenVmApcAdapter<'a, ISA>> {\n        self.0\n    }\n\n    fn cost_before_opt(&self) -> usize {\n        self.0.stats().widths.before.total()\n    }\n\n    fn cost_after_opt(&self) -> usize {\n        self.0.stats().widths.after.total()\n    }\n\n    fn value_per_use(&self) -> usize {\n        self.cost_before_opt() - self.cost_after_opt()\n    }\n}\n"
  },
  {
    "path": "openvm/src/empirical_constraints.rs",
    "content": "use crate::isa::OpenVmISA;\nuse crate::program::CompiledProgram;\nuse crate::trace_generation::do_with_cpu_trace;\nuse indicatif::ProgressBar;\nuse indicatif::ProgressStyle;\nuse itertools::Itertools;\nuse openvm_circuit::arch::VmCircuitConfig;\nuse openvm_sdk::StdIn;\nuse openvm_stark_backend::p3_field::FieldAlgebra;\nuse openvm_stark_backend::p3_maybe_rayon::prelude::IntoParallelIterator;\nuse openvm_stark_backend::p3_maybe_rayon::prelude::ParallelIterator;\nuse openvm_stark_sdk::openvm_stark_backend::p3_field::PrimeField32;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::bus_map::BusType;\nuse powdr_autoprecompiles::empirical_constraints::BlockCell;\nuse powdr_autoprecompiles::empirical_constraints::Partition;\nuse powdr_autoprecompiles::empirical_constraints::{DebugInfo, EmpiricalConstraints};\nuse powdr_autoprecompiles::expression::AlgebraicEvaluator;\nuse powdr_autoprecompiles::expression::RowEvaluator;\nuse powdr_autoprecompiles::optimistic::config::optimistic_precompile_config;\nuse powdr_autoprecompiles::DegreeBound;\nuse powdr_openvm_bus_interaction_handler::bus_map::default_openvm_bus_map;\nuse std::collections::btree_map::Entry;\nuse std::collections::BTreeMap;\nuse std::collections::HashMap;\nuse std::iter::once;\n\nuse crate::OriginalCompiledProgram;\n\n#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]\nstruct Timestamp {\n    // Note that the order of the fields matters for correct ordering.\n    segment_idx: usize,\n    value: u32,\n}\n\n/// A single row in the execution trace\n#[derive(Debug)]\nstruct Row {\n    /// The program counter value for this row\n    pc: u32,\n    /// The timestamp for this row (segment index, row index within segment)\n    timestamp: Timestamp,\n    /// The values of the cells in this row\n    cells: Vec<u32>,\n}\n\n/// Materialized execution trace\n#[derive(Default)]\nstruct Trace {\n    /// The raw rows, in any order\n    rows: Vec<Row>,\n}\n\nimpl Trace {\n    /// Groups 
rows by their program counter value. The order of rows within each PC group is arbitrary.\n    fn rows_by_pc(&self) -> BTreeMap<u32, Vec<&Row>> {\n        self.rows.iter().fold(BTreeMap::new(), |mut acc, row| {\n            acc.entry(row.pc).or_insert(Vec::new()).push(row);\n            acc\n        })\n    }\n\n    /// Returns all rows sorted by their timestamp\n    fn rows_sorted_by_time(&self) -> impl Iterator<Item = &Row> {\n        self.rows.iter().sorted_by_key(|row| &row.timestamp)\n    }\n\n    fn take(&mut self) -> Self {\n        Self {\n            rows: std::mem::take(&mut self.rows),\n        }\n    }\n}\n\npub fn detect_empirical_constraints<ISA: OpenVmISA>(\n    program: &OriginalCompiledProgram<ISA>,\n    degree_bound: DegreeBound,\n    inputs: Vec<StdIn>,\n) -> EmpiricalConstraints {\n    tracing::info!(\"Collecting empirical constraints...\");\n    let blocks = program.collect_basic_blocks();\n    let instruction_counts = blocks\n        .iter()\n        .map(|block| (block.start_pc, block.instructions.len()))\n        .collect();\n\n    // Collect trace, without any autoprecompiles.\n    let program = program.compiled_program(degree_bound);\n\n    let mut constraint_detector = ConstraintDetector::new(instruction_counts);\n\n    let num_inputs = inputs.len();\n    for (i, input) in inputs.into_iter().enumerate() {\n        tracing::info!(\"  Processing input {} / {}\", i + 1, num_inputs);\n        detect_empirical_constraints_from_input(\n            &program,\n            i,\n            input,\n            degree_bound,\n            &mut constraint_detector,\n        );\n    }\n    tracing::info!(\"Done collecting empirical constraints.\");\n\n    constraint_detector.finalize()\n}\n\nfn detect_empirical_constraints_from_input<ISA: OpenVmISA>(\n    program: &CompiledProgram<ISA>,\n    input_index: usize,\n    inputs: StdIn,\n    degree_bound: DegreeBound,\n    constraint_detector: &mut ConstraintDetector,\n) {\n    let mut trace = 
Trace::default();\n    let mut debug_info = DebugInfo::default();\n\n    let max_segments = optimistic_precompile_config().max_segments;\n\n    do_with_cpu_trace(program, inputs, |seg_idx, vm, _pk, ctx| {\n        let airs = program.vm_config.original.airs(degree_bound).unwrap();\n        let global_airs = vm\n            .config()\n            .create_airs()\n            .unwrap()\n            .into_airs()\n            .enumerate()\n            .collect::<HashMap<_, _>>();\n\n        for (air_id, proving_context) in &ctx.per_air {\n            let main = proving_context.common_main.as_ref().unwrap();\n            let air_name = global_airs[air_id].name();\n            let Some(machine) = &airs.get_air_machine(&air_name) else {\n                // air_name_to_machine only contains instruction AIRs, and we are only\n                // interested in those here.\n                continue;\n            };\n            assert!(\n                proving_context.cached_mains.is_empty(),\n                \"Unexpected cached main in {air_name}.\"\n            );\n\n            // Find the execution bus interaction\n            // This assumes there is exactly one, which is the case for instruction chips\n            let execution_bus_interaction = machine\n                .bus_interactions\n                .iter()\n                .find(|interaction| {\n                    interaction.id\n                        == default_openvm_bus_map()\n                            .get_bus_id(&BusType::ExecutionBridge)\n                            .unwrap()\n                })\n                .unwrap();\n\n            if !debug_info.column_names_by_air_id.contains_key(air_id) {\n                debug_info.column_names_by_air_id.insert(\n                    *air_id,\n                    machine.main_columns().map(|r| (*r.name).clone()).collect(),\n                );\n            }\n\n            for row in main.row_slices() {\n                // Create an evaluator over this row\n       
         let evaluator = RowEvaluator::new(row);\n\n                // Evaluate the execution bus interaction\n                let execution = evaluator.eval_bus_interaction(execution_bus_interaction);\n\n                // `is_valid` is the multiplicity\n                let is_valid = execution.mult;\n                if is_valid == BabyBear::ZERO {\n                    // If `is_valid` is zero, this is a padding row\n                    continue;\n                }\n\n                // Recover the values of the pc and timestamp\n                let [pc, timestamp] = execution\n                    .args\n                    .map(|v| v.as_canonical_u32())\n                    .collect_vec()\n                    .try_into()\n                    .unwrap();\n\n                // Convert the row to u32s\n                let row = row.iter().map(|v| v.as_canonical_u32()).collect();\n\n                let row = Row {\n                    cells: row,\n                    pc,\n                    timestamp: Timestamp {\n                        segment_idx: seg_idx,\n                        value: timestamp,\n                    },\n                };\n                trace.rows.push(row);\n\n                match debug_info.air_id_by_pc.entry(pc) {\n                    Entry::Vacant(entry) => {\n                        entry.insert(*air_id);\n                    }\n                    Entry::Occupied(existing) => {\n                        assert_eq!(*existing.get(), *air_id);\n                    }\n                }\n            }\n        }\n\n        if (seg_idx + 1) % max_segments == 0 {\n            tracing::info!(\n                \"    Reached segment {} of input {}, processing trace so far...\",\n                seg_idx + 1,\n                input_index + 1\n            );\n            let (trace_to_process, remaining_trace) =\n                take_complete_blocks(constraint_detector, trace.take());\n            trace = remaining_trace;\n            
constraint_detector.process_trace(trace_to_process, debug_info.take());\n        }\n    })\n    .unwrap();\n    tracing::info!(\n        \"    Finished execution of input {}, processing (remaining) trace...\",\n        input_index + 1\n    );\n    constraint_detector.process_trace(trace, debug_info);\n}\n\n/// Takes as many complete basic blocks from the trace as possible,\n/// returning the taken trace and the remaining trace.\n/// This is needed because ConstraintDetector::process_trace requires complete basic blocks,\n/// but segmentation might happen within a basic block.\nfn take_complete_blocks(constraint_detector: &ConstraintDetector, trace: Trace) -> (Trace, Trace) {\n    // Find the latest timestamp that begins a basic block\n    let latest_basic_block_beginning = trace\n        .rows\n        .iter()\n        .filter(|row| constraint_detector.is_basic_block_start(row.pc as u64))\n        .map(|row| &row.timestamp)\n        .max()\n        .unwrap()\n        .clone();\n    // Process all rows before that timestamp\n    let (rows_to_process, remaining_rows): (Vec<Row>, Vec<Row>) = trace\n        .rows\n        .into_iter()\n        .partition(|row| row.timestamp < latest_basic_block_beginning);\n\n    let trace_to_process = Trace {\n        rows: rows_to_process,\n    };\n    let remaining_trace = Trace {\n        rows: remaining_rows,\n    };\n\n    (trace_to_process, remaining_trace)\n}\n\nstruct ConstraintDetector {\n    /// Mapping from a basic block ID (= PC of the first instruction) to number\n    /// of instructions in that block\n    block_instruction_counts: HashMap<u64, usize>,\n    empirical_constraints: EmpiricalConstraints,\n}\n\n/// An instance of a basic block in the trace\nstruct ConcreteBlock<'a> {\n    rows: Vec<&'a Row>,\n}\n\nimpl<'a> ConcreteBlock<'a> {\n    fn equivalence_classes(&self) -> Partition<BlockCell> {\n        self.rows\n            .iter()\n            .enumerate()\n            // Map each cell to a (value, 
(instruction_index, col_index)) pair\n            .flat_map(|(instruction_index, row)| {\n                row.cells\n                    .iter()\n                    .enumerate()\n                    .map(|(col_index, v)| (*v, BlockCell::new(instruction_index, col_index)))\n                    .collect::<Vec<_>>()\n            })\n            // Group by value\n            .into_group_map()\n            .into_values()\n            .collect()\n    }\n}\n\nimpl ConstraintDetector {\n    pub fn new(block_instruction_counts: HashMap<u64, usize>) -> Self {\n        Self {\n            block_instruction_counts,\n            empirical_constraints: EmpiricalConstraints::default(),\n        }\n    }\n\n    pub fn is_basic_block_start(&self, pc: u64) -> bool {\n        self.block_instruction_counts.contains_key(&pc)\n    }\n\n    pub fn finalize(self) -> EmpiricalConstraints {\n        self.empirical_constraints\n    }\n\n    pub fn process_trace(&mut self, trace: Trace, debug_info: DebugInfo) {\n        let pc_counts = trace\n            .rows_by_pc()\n            .into_iter()\n            .map(|(pc, rows)| (pc, rows.len() as u64))\n            .collect();\n        // Compute empirical constraints from the current trace\n        tracing::info!(\"      Detecting equivalence classes by block...\");\n        let equivalence_classes_by_block = self.generate_equivalence_classes_by_block(&trace);\n        tracing::info!(\"      Detecting column ranges by PC...\");\n        let column_ranges_by_pc = self.detect_column_ranges_by_pc(trace);\n        let new_empirical_constraints = EmpiricalConstraints {\n            column_ranges_by_pc,\n            equivalence_classes_by_block,\n            debug_info,\n            pc_counts,\n        };\n\n        // Combine the new empirical constraints and debug info with the existing ones\n        self.empirical_constraints\n            .combine_with(new_empirical_constraints);\n    }\n\n    fn detect_column_ranges_by_pc(&self, trace: Trace) -> 
BTreeMap<u32, Vec<(u32, u32)>> {\n        // Map all column values to their range (1st and 99th percentile) for each pc\n        trace\n            .rows_by_pc()\n            .into_par_iter()\n            .map(|(pc, rows)| (pc, self.detect_column_ranges(&rows)))\n            .collect()\n    }\n\n    fn detect_column_ranges(&self, rows: &[&Row]) -> Vec<(u32, u32)> {\n        for row in rows {\n            // All rows for a given PC should be in the same chip\n            assert_eq!(row.cells.len(), rows[0].cells.len());\n        }\n\n        (0..rows[0].cells.len())\n            .map(|col_index| {\n                let mut values = rows\n                    .iter()\n                    .map(|row| row.cells[col_index])\n                    .collect::<Vec<_>>();\n                values.sort_unstable();\n                let len = values.len();\n                let p1_index = len / 100; // 1st percentile\n                let p99_index = len * 99 / 100; // 99th percentile\n                (values[p1_index], values[p99_index])\n            })\n            .collect()\n    }\n\n    fn generate_equivalence_classes_by_block(\n        &self,\n        trace: &Trace,\n    ) -> BTreeMap<u64, Partition<BlockCell>> {\n        tracing::info!(\"        Segmenting trace into blocks...\");\n        let blocks = self.get_blocks(trace);\n        tracing::info!(\"        Finding equivalence classes...\");\n        let num_blocks = blocks.len();\n        let pb = ProgressBar::new(num_blocks as u64).with_style(\n            ProgressStyle::with_template(\"[{elapsed_precise}] [{bar:50}] {wide_msg}\").unwrap(),\n        );\n        let partition = blocks\n            .into_iter()\n            .enumerate()\n            .map(|(i, (block_id, block_instances))| {\n                pb.set_message(format!(\n                    \"Block {} / {} ({} instances)\",\n                    i + 1,\n                    num_blocks,\n                    block_instances.len()\n                ));\n\n                
// Build partitions for each block instance in parallel\n                let partition_by_block_instance = block_instances\n                    .into_par_iter()\n                    .map(|block| block.equivalence_classes());\n\n                // Intersect the equivalence classes across all instances in parallel\n                let intersected = Partition::parallel_intersect(partition_by_block_instance);\n                pb.inc(1);\n\n                (block_id, intersected)\n            })\n            .collect();\n        pb.finish_with_message(\"Done\");\n        partition\n    }\n\n    /// Segments a trace into basic blocks.\n    /// Returns a mapping from block ID to all instances of that block in the trace.\n    fn get_blocks<'a>(&self, trace: &'a Trace) -> BTreeMap<u64, Vec<ConcreteBlock<'a>>> {\n        trace\n            .rows_sorted_by_time()\n            // take entire blocks from the rows\n            .batching(|it| {\n                let first = it.next()?;\n                let block_id = first.pc as u64;\n\n                if let Some(&count) = self.block_instruction_counts.get(&block_id) {\n                    let rows = once(first).chain(it.take(count - 1)).collect_vec();\n\n                    for (r1, r2) in rows.iter().tuple_windows() {\n                        assert_eq!(r2.pc, r1.pc + 4);\n                    }\n\n                    Some(Some((block_id, ConcreteBlock { rows })))\n                } else {\n                    // Single instruction block, yield `None` to be filtered.\n                    Some(None)\n                }\n            })\n            // filter out single instruction blocks\n            .flatten()\n            // collect by start_pc\n            .fold(Default::default(), |mut block_rows, (block_id, chunk)| {\n                block_rows.entry(block_id).or_insert(Vec::new()).push(chunk);\n                block_rows\n            })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use 
powdr_autoprecompiles::equivalence_classes::EquivalenceClass;\n\n    use super::*;\n\n    fn make_trace(rows_by_time_with_pc: Vec<(u32, Vec<u32>)>) -> Trace {\n        Trace {\n            rows: rows_by_time_with_pc\n                .into_iter()\n                .enumerate()\n                .map(|(clk, (pc, cells))| Row {\n                    cells,\n                    pc,\n                    timestamp: Timestamp {\n                        segment_idx: 0,\n                        value: clk as u32,\n                    },\n                })\n                .collect(),\n        }\n    }\n\n    #[test]\n    fn test_constraint_detector() {\n        // Assume the following test program:\n        // ADDI x1, x1, 1    // note how the second operand is always 1\n        // BLT x1, x2, -4    // Note how the first operand is always equal to the result of the previous ADDI\n\n        let instruction_counts = vec![(0, 2)].into_iter().collect();\n        let mut detector = ConstraintDetector::new(instruction_counts);\n\n        let trace1 = make_trace(vec![\n            (0, vec![1, 0, 1]),  // ADDI: 0 + 1 = 1\n            (4, vec![0, 1, 2]),  // BLT: 1 < 2 => PC = 0\n            (0, vec![2, 1, 1]),  // ADDI: 1 + 1 = 2\n            (4, vec![12, 2, 2]), // BLT: 2 >= 2 => PC = 8\n        ]);\n        detector.process_trace(trace1, DebugInfo::default());\n\n        let empirical_constraints = detector.finalize();\n\n        assert_eq!(\n            empirical_constraints.column_ranges_by_pc.get(&0),\n            // For the ADDI instruction, the second operand (col 2) is always 1; the other columns vary\n            Some(&vec![(1, 2), (0, 1), (1, 1)])\n        );\n        assert_eq!(\n            empirical_constraints.column_ranges_by_pc.get(&4),\n            // For the BLT instruction, second operand (col 2) is always 2; the other columns vary\n            Some(&vec![(0, 12), (1, 2), (2, 2)])\n        );\n\n        let equivalence_classes = empirical_constraints\n            
.equivalence_classes_by_block\n            .get(&0)\n            .unwrap();\n        println!(\"Equivalence classes: {:?}\", equivalence_classes);\n        let expected: Partition<_> = once(\n            // The result of the first instruction (col 0) is always equal to the\n            // first operand of the second instruction (col 1)\n            [BlockCell::new(0, 0), BlockCell::new(1, 1)]\n                .into_iter()\n                .collect::<EquivalenceClass<_>>(),\n        )\n        .collect();\n        assert_eq!(*equivalence_classes, expected,);\n    }\n}\n"
  },
  {
    "path": "openvm/src/extraction_utils.rs",
    "content": "use std::collections::{BTreeMap, HashMap};\nuse std::marker::PhantomData;\nuse std::sync::{Arc, Mutex};\n\nuse itertools::Itertools;\nuse openvm_circuit::arch::{\n    AirInventory, AirInventoryError, ExecutorInventory, ExecutorInventoryError, SystemConfig,\n    VmCircuitConfig, VmExecutionConfig,\n};\nuse openvm_circuit::system::memory::interface::MemoryInterfaceAirs;\nuse openvm_circuit_primitives::bitwise_op_lookup::SharedBitwiseOperationLookupChip;\nuse openvm_circuit_primitives::range_tuple::SharedRangeTupleCheckerChip;\nuse openvm_instructions::VmOpcode;\nuse openvm_stark_backend::air_builders::symbolic::SymbolicRapBuilder;\nuse openvm_stark_backend::interaction::fri_log_up::find_interaction_chunks;\nuse openvm_stark_backend::{\n    air_builders::symbolic::SymbolicConstraints, config::StarkGenericConfig, rap::AnyRap,\n};\nuse openvm_stark_sdk::config::{\n    baby_bear_poseidon2::{config_from_perm, default_perm},\n    fri_params::SecurityParameters,\n};\nuse openvm_stark_sdk::p3_baby_bear::{self, BabyBear};\nuse powdr_autoprecompiles::bus_map::BusType;\nuse powdr_autoprecompiles::evaluation::AirStats;\nuse powdr_autoprecompiles::expression::try_convert;\nuse powdr_autoprecompiles::symbolic_machine::SymbolicMachine;\nuse powdr_autoprecompiles::{Apc, DegreeBound, InstructionHandler};\nuse powdr_openvm_bus_interaction_handler::bus_map::{BusMap, OpenVmBusType};\nuse serde::{Deserialize, Serialize};\nuse std::iter::Sum;\nuse std::ops::Deref;\nuse std::ops::{Add, Sub};\nuse std::sync::MutexGuard;\n\nuse crate::customize_exe::Instr;\nuse crate::isa::{OpenVmISA, OriginalCpuChipComplex};\nuse crate::powdr_extension::executor::RecordArenaDimension;\nuse crate::utils::openvm_bus_interaction_to_powdr;\nuse crate::utils::symbolic_to_algebraic;\nuse crate::utils::UnsupportedOpenVmReferenceError;\nuse crate::AirMetrics;\nuse crate::{air_builder::AirKeygenBuilder, BabyBearSC};\n\n// TODO: Use `<PackedChallenge<BabyBearSC> as 
FieldExtensionAlgebra<Val<BabyBearSC>>>::D` instead after fixing p3 dependency\nconst EXT_DEGREE: usize = 4;\n\n#[derive(Clone, Serialize, Deserialize)]\npub struct OriginalAirs<F, ISA> {\n    /// The degree bound used when building the airs\n    pub(crate) degree_bound: DegreeBound,\n    /// Maps a VM opcode to the name of the (unique) AIR that implements it.\n    pub(crate) opcode_to_air: HashMap<VmOpcode, String>,\n    /// Maps an AIR name to its symbolic machine and metrics.\n    /// Note that this map only contains AIRs that implement instructions.\n    pub(crate) air_name_to_machine: BTreeMap<String, (SymbolicMachine<F>, AirMetrics)>,\n    _marker: PhantomData<ISA>,\n}\n\nimpl<F, ISA> InstructionHandler for OriginalAirs<F, ISA> {\n    type Field = F;\n    type Instruction = Instr<F, ISA>;\n    type AirId = String;\n\n    fn get_instruction_air_and_id(\n        &self,\n        instruction: &Self::Instruction,\n    ) -> (Self::AirId, &SymbolicMachine<Self::Field>) {\n        let id = self\n            .opcode_to_air\n            .get(&instruction.inner.opcode)\n            .unwrap()\n            .clone();\n        let air = &self.air_name_to_machine.get(&id).unwrap().0;\n        (id, air)\n    }\n\n    fn get_instruction_air_stats(&self, instruction: &Self::Instruction) -> AirStats {\n        self.get_instruction_metrics(instruction.inner.opcode)\n            .map(|metrics| metrics.clone().into())\n            .unwrap()\n    }\n\n    fn degree_bound(&self) -> DegreeBound {\n        self.degree_bound\n    }\n}\n\nimpl<F, ISA> OriginalAirs<F, ISA> {\n    pub fn insert_opcode(\n        &mut self,\n        opcode: VmOpcode,\n        air_name: String,\n        machine: impl Fn(\n            DegreeBound,\n        )\n            -> Result<(SymbolicMachine<F>, AirMetrics), UnsupportedOpenVmReferenceError>,\n    ) -> Result<(), UnsupportedOpenVmReferenceError> {\n        if self.opcode_to_air.contains_key(&opcode) {\n            panic!(\"Opcode {opcode} already 
exists\");\n        }\n        if !self.air_name_to_machine.contains_key(&air_name) {\n            let machine_instance = machine(self.degree_bound)?;\n            self.air_name_to_machine\n                .insert(air_name.clone(), machine_instance);\n        }\n\n        self.opcode_to_air.insert(opcode, air_name);\n        Ok(())\n    }\n\n    pub fn get_instruction_metrics(&self, opcode: VmOpcode) -> Option<&AirMetrics> {\n        self.opcode_to_air.get(&opcode).and_then(|air_name| {\n            self.air_name_to_machine\n                .get(air_name)\n                .map(|(_, metrics)| metrics)\n        })\n    }\n\n    pub fn allow_list(&self) -> Vec<VmOpcode> {\n        self.opcode_to_air.keys().cloned().collect()\n    }\n\n    pub fn airs_by_name(&self) -> impl Iterator<Item = (&String, &SymbolicMachine<F>)> {\n        self.air_name_to_machine\n            .iter()\n            .map(|(name, (machine, _))| (name, machine))\n    }\n\n    fn with_degree_bound(degree_bound: DegreeBound) -> Self {\n        Self {\n            degree_bound,\n            opcode_to_air: Default::default(),\n            air_name_to_machine: Default::default(),\n            _marker: PhantomData,\n        }\n    }\n\n    pub fn get_air_machine(&self, air_name: &str) -> Option<&SymbolicMachine<F>> {\n        self.air_name_to_machine\n            .get(air_name)\n            .map(|(machine, _)| machine)\n    }\n}\n\npub fn record_arena_dimension_by_air_name_per_apc_call<F, ISA: OpenVmISA>(\n    apc: &Apc<F, Instr<F, ISA>, (), u32>,\n    air_by_opcode_id: &OriginalAirs<F, ISA>,\n) -> BTreeMap<String, RecordArenaDimension> {\n    apc.instructions()\n        .map(|instr| &instr.inner.opcode)\n        .zip_eq(apc.subs.iter().map(|sub| sub.is_empty()))\n        .fold(\n            BTreeMap::new(),\n            |mut acc, (opcode, should_use_dummy_arena)| {\n                let air_name = air_by_opcode_id.opcode_to_air.get(opcode).unwrap();\n\n                let entry = 
acc.entry(air_name.clone()).or_insert_with(|| {\n                    let (_, air_metrics) =\n                        air_by_opcode_id.air_name_to_machine.get(air_name).unwrap();\n\n                    RecordArenaDimension {\n                        real_height: 0,\n                        width: air_metrics.widths.main,\n                        dummy_height: 0,\n                    }\n                });\n                if should_use_dummy_arena {\n                    entry.dummy_height += 1;\n                } else {\n                    entry.real_height += 1;\n                }\n                acc\n            },\n        )\n}\n\ntype ChipComplex = OriginalCpuChipComplex;\n\ntype LazyChipComplex = Option<ChipComplex>;\ntype CachedChipComplex = Arc<Mutex<LazyChipComplex>>;\n\npub struct ChipComplexGuard<'a> {\n    guard: MutexGuard<'a, LazyChipComplex>,\n}\n\nimpl<'a> Deref for ChipComplexGuard<'a> {\n    type Target = ChipComplex;\n\n    fn deref(&self) -> &Self::Target {\n        self.guard\n            .as_ref()\n            .expect(\"Chip complex should be initialized\")\n    }\n}\n\n#[derive(Serialize, Deserialize, Clone)]\npub struct OriginalVmConfig<ISA: OpenVmISA> {\n    pub config: ISA::Config,\n    #[serde(skip)]\n    pub chip_complex: CachedChipComplex,\n}\n\nimpl<ISA: OpenVmISA> VmCircuitConfig<BabyBearSC> for OriginalVmConfig<ISA> {\n    fn create_airs(&self) -> Result<AirInventory<BabyBearSC>, AirInventoryError> {\n        self.config.create_airs()\n    }\n}\n\nimpl<ISA: OpenVmISA> VmExecutionConfig<BabyBear> for OriginalVmConfig<ISA> {\n    type Executor = <ISA::Config as VmExecutionConfig<BabyBear>>::Executor;\n\n    fn create_executors(\n        &self,\n    ) -> Result<ExecutorInventory<Self::Executor>, ExecutorInventoryError> {\n        self.config.create_executors()\n    }\n}\n\nimpl<ISA: OpenVmISA> AsRef<SystemConfig> for OriginalVmConfig<ISA> {\n    fn as_ref(&self) -> &SystemConfig {\n        self.config.as_ref()\n    }\n}\n\nimpl<ISA: 
OpenVmISA> AsMut<SystemConfig> for OriginalVmConfig<ISA> {\n    fn as_mut(&mut self) -> &mut SystemConfig {\n        self.config.as_mut()\n    }\n}\n\nimpl<ISA: OpenVmISA> OriginalVmConfig<ISA> {\n    pub fn new(config: ISA::Config) -> Self {\n        Self {\n            config,\n            chip_complex: Default::default(),\n        }\n    }\n\n    pub fn config(&self) -> &ISA::Config {\n        &self.config\n    }\n\n    pub fn config_mut(&mut self) -> &mut ISA::Config {\n        let mut guard = self.chip_complex.lock().expect(\"Mutex poisoned\");\n        *guard = None;\n        &mut self.config\n    }\n\n    pub fn chip_complex(&self) -> ChipComplexGuard<'_> {\n        let mut guard = self.chip_complex.lock().expect(\"Mutex poisoned\");\n\n        if guard.is_none() {\n            let airs = self\n                .config\n                .create_airs()\n                .expect(\"Failed to create air inventory\");\n            let complex = ISA::create_original_chip_complex(&self.config, airs)\n                .expect(\"Failed to create chip complex\");\n            *guard = Some(complex);\n        }\n\n        ChipComplexGuard { guard }\n    }\n\n    pub fn airs(\n        &self,\n        degree_bound: DegreeBound,\n    ) -> Result<OriginalAirs<BabyBear, ISA>, UnsupportedOpenVmReferenceError> {\n        let chip_complex = &self.chip_complex();\n        let chip_inventory = &chip_complex.inventory;\n\n        let executor_inventory = self.create_executors().unwrap();\n        let instruction_allowlist = ISA::allowed_opcodes();\n\n        instruction_allowlist\n            .into_iter()\n            .filter_map(|op| {\n                executor_inventory\n                    .instruction_lookup\n                    .get(&op)\n                    .map(|id| (op, *id as usize))\n            })\n            .map(|(op, executor_id)| {\n                let insertion_index = chip_inventory.executor_idx_to_insertion_idx[executor_id];\n                let air_ref = 
&chip_inventory.airs().ext_airs()[insertion_index];\n                (op, air_ref)\n            })\n            .try_fold(\n                OriginalAirs::with_degree_bound(degree_bound),\n                |mut airs, (op, air_ref)| {\n                    airs.insert_opcode(op, air_ref.name(), |degree_bound| {\n                        let columns = get_columns(air_ref.clone());\n                        let constraints = get_constraints(air_ref.clone());\n                        let metrics = get_air_metrics(air_ref.clone(), degree_bound.identities);\n\n                        let powdr_exprs = constraints\n                            .constraints\n                            .iter()\n                            .map(|expr| try_convert(symbolic_to_algebraic(expr, &columns)))\n                            .collect::<Result<Vec<_>, _>>()?;\n\n                        let powdr_bus_interactions = constraints\n                            .interactions\n                            .iter()\n                            .map(|expr| openvm_bus_interaction_to_powdr(expr, &columns))\n                            .collect::<Result<_, _>>()?;\n\n                        Ok((\n                            SymbolicMachine {\n                                constraints: powdr_exprs.into_iter().map(Into::into).collect(),\n                                bus_interactions: powdr_bus_interactions,\n                                derived_columns: vec![],\n                            },\n                            metrics,\n                        ))\n                    })?;\n\n                    Ok(airs)\n                },\n            )\n    }\n\n    pub fn bus_map(&self) -> BusMap {\n        let chip_complex = self.chip_complex();\n        let inventory = &chip_complex.inventory;\n\n        let shared_bitwise_lookup = inventory\n            .find_chip::<SharedBitwiseOperationLookupChip<8>>()\n            .next();\n        let shared_range_tuple_checker = inventory\n            
.find_chip::<SharedRangeTupleCheckerChip<2>>()\n            .next();\n\n        let system_air_inventory = inventory.airs().system();\n        let connector_air = system_air_inventory.connector;\n        let memory_air = &system_air_inventory.memory;\n\n        BusMap::from_id_type_pairs(\n            {\n                [\n                    (\n                        connector_air.execution_bus.index(),\n                        BusType::ExecutionBridge,\n                    ),\n                    (\n                        // TODO: make getting memory bus index a helper function\n                        match &memory_air.interface {\n                            MemoryInterfaceAirs::Volatile { boundary } => {\n                                boundary.memory_bus.inner.index\n                            }\n                            MemoryInterfaceAirs::Persistent { boundary, .. } => {\n                                boundary.memory_bus.inner.index\n                            }\n                        },\n                        BusType::Memory,\n                    ),\n                    (connector_air.program_bus.index(), BusType::PcLookup),\n                    (\n                        connector_air.range_bus.index(),\n                        BusType::Other(OpenVmBusType::VariableRangeChecker),\n                    ),\n                ]\n                .into_iter()\n            }\n            .chain(shared_bitwise_lookup.into_iter().map(|chip| {\n                (\n                    chip.bus().inner.index,\n                    BusType::Other(OpenVmBusType::BitwiseLookup),\n                )\n            }))\n            .chain(shared_range_tuple_checker.into_iter().map(|chip| {\n                (\n                    chip.bus().inner.index,\n                    BusType::Other(OpenVmBusType::TupleRangeChecker(chip.bus().sizes)),\n                )\n            }))\n            .map(|(id, bus_type)| (id as u64, bus_type)),\n        )\n    }\n\n    pub fn 
chip_inventory_air_metrics(&self, max_degree: usize) -> HashMap<String, AirMetrics> {\n        let inventory = &self.chip_complex().inventory;\n\n        inventory\n            .airs()\n            .ext_airs()\n            .iter()\n            .map(|air| {\n                let name = air.name();\n                let metrics = get_air_metrics(air.clone(), max_degree);\n                (name, metrics)\n            })\n            .collect()\n    }\n}\n\npub fn get_columns(air: Arc<dyn AnyRap<BabyBearSC>>) -> Vec<Arc<String>> {\n    let width = air.width();\n    air.columns()\n        .inspect(|columns| {\n            assert_eq!(columns.len(), width);\n        })\n        .unwrap_or_else(|| (0..width).map(|i| format!(\"unknown_{i}\")).collect())\n        .into_iter()\n        .map(Arc::new)\n        .collect()\n}\n\npub fn get_name<SC: StarkGenericConfig>(air: Arc<dyn AnyRap<SC>>) -> String {\n    air.name()\n}\n\npub fn get_constraints(\n    air: Arc<dyn AnyRap<BabyBearSC>>,\n) -> SymbolicConstraints<p3_baby_bear::BabyBear> {\n    let builder = symbolic_builder_with_degree(air, None);\n    builder.constraints()\n}\n\npub fn get_air_metrics(air: Arc<dyn AnyRap<BabyBearSC>>, max_degree: usize) -> AirMetrics {\n    let main = air.width();\n\n    let symbolic_rap_builder = symbolic_builder_with_degree(air, Some(max_degree));\n    let preprocessed = symbolic_rap_builder.width().preprocessed.unwrap_or(0);\n\n    let SymbolicConstraints {\n        constraints,\n        interactions,\n    } = symbolic_rap_builder.constraints();\n\n    let log_up = (find_interaction_chunks(&interactions, max_degree)\n        .interaction_partitions()\n        .len()\n        + 1)\n        * EXT_DEGREE;\n\n    AirMetrics {\n        widths: AirWidths {\n            preprocessed,\n            main,\n            log_up,\n        },\n        constraints: constraints.len(),\n        bus_interactions: interactions.len(),\n    }\n}\n\npub fn symbolic_builder_with_degree(\n    air: Arc<dyn 
AnyRap<BabyBearSC>>,\n    max_constraint_degree: Option<usize>,\n) -> SymbolicRapBuilder<p3_baby_bear::BabyBear> {\n    let perm = default_perm();\n    let security_params = SecurityParameters::standard_fast();\n    let config = config_from_perm(&perm, security_params);\n    let air_keygen_builder = AirKeygenBuilder::new(config.pcs(), air);\n    air_keygen_builder.get_symbolic_builder(max_constraint_degree)\n}\n\n#[derive(Clone, Copy, Serialize, Deserialize, Default, PartialEq, Eq, Debug)]\npub struct AirWidths {\n    pub preprocessed: usize,\n    pub main: usize,\n    pub log_up: usize,\n}\n\nimpl Add for AirWidths {\n    type Output = AirWidths;\n    fn add(self, rhs: AirWidths) -> AirWidths {\n        AirWidths {\n            preprocessed: self.preprocessed + rhs.preprocessed,\n            main: self.main + rhs.main,\n            log_up: self.log_up + rhs.log_up,\n        }\n    }\n}\n\nimpl Sub for AirWidths {\n    type Output = AirWidths;\n    fn sub(self, rhs: AirWidths) -> AirWidths {\n        AirWidths {\n            preprocessed: self.preprocessed - rhs.preprocessed,\n            main: self.main - rhs.main,\n            log_up: self.log_up - rhs.log_up,\n        }\n    }\n}\n\nimpl Sum<AirWidths> for AirWidths {\n    fn sum<I: Iterator<Item = AirWidths>>(iter: I) -> AirWidths {\n        iter.fold(AirWidths::default(), Add::add)\n    }\n}\n\nimpl AirWidths {\n    pub fn total(&self) -> usize {\n        self.preprocessed + self.main + self.log_up\n    }\n}\n\nimpl std::fmt::Display for AirWidths {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"Total Width: {} (Preprocessed: {} Main: {}, Log Up: {})\",\n            self.preprocessed + self.main + self.log_up,\n            self.preprocessed,\n            self.main,\n            self.log_up\n        )\n    }\n}\n\n#[derive(Clone, Copy, Serialize, Deserialize, Default, PartialEq, Eq, Debug)]\npub struct AirWidthsDiff {\n    pub before: 
AirWidths,\n    pub after: AirWidths,\n}\n\nimpl AirWidthsDiff {\n    pub fn new(before: AirWidths, after: AirWidths) -> Self {\n        Self { before, after }\n    }\n\n    pub fn columns_saved(&self) -> AirWidths {\n        self.before - self.after\n    }\n}\n\nimpl Add for AirWidthsDiff {\n    type Output = AirWidthsDiff;\n\n    fn add(self, rhs: AirWidthsDiff) -> AirWidthsDiff {\n        AirWidthsDiff {\n            before: self.before + rhs.before,\n            after: self.after + rhs.after,\n        }\n    }\n}\n\nimpl Sum<AirWidthsDiff> for AirWidthsDiff {\n    fn sum<I: Iterator<Item = AirWidthsDiff>>(iter: I) -> AirWidthsDiff {\n        let zero = AirWidthsDiff::new(AirWidths::default(), AirWidths::default());\n        iter.fold(zero, Add::add)\n    }\n}\n"
  },
  {
    "path": "openvm/src/isa.rs",
    "content": "use std::collections::{BTreeSet, HashSet};\nuse std::sync::Arc;\n\nuse openvm_circuit::arch::{\n    AirInventory, AirInventoryError, AnyEnum, ChipInventory, ChipInventoryError, DenseRecordArena,\n    Executor, InterpreterExecutor, MatrixRecordArena, MeteredExecutor, PreflightExecutor,\n    VmBuilder, VmChipComplex, VmCircuitExtension, VmConfig, VmExecutionConfig,\n};\n#[cfg(feature = \"cuda\")]\nuse openvm_circuit::system::cuda::SystemChipInventoryGPU;\nuse openvm_circuit::system::SystemChipInventory;\n#[cfg(feature = \"cuda\")]\nuse openvm_cuda_backend::engine::GpuBabyBearPoseidon2Engine;\n#[cfg(feature = \"cuda\")]\nuse openvm_cuda_backend::prover_backend::GpuBackend;\nuse openvm_instructions::{instruction::Instruction, VmOpcode};\nuse openvm_sdk::config::TranspilerConfig;\nuse openvm_stark_backend::{config::Val, p3_field::PrimeField32, prover::cpu::CpuBackend};\nuse openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Engine;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_riscv_elf::debug_info::SymbolTable;\n\nuse crate::powdr_extension::trace_generator::cpu::SharedPeripheryChipsCpu;\n#[cfg(feature = \"cuda\")]\nuse crate::powdr_extension::trace_generator::SharedPeripheryChipsGpu;\nuse crate::program::OriginalCompiledProgram;\nuse crate::{BabyBearSC, Instr, SpecializedExecutor};\n\npub type OriginalCpuChipComplex = VmChipComplex<\n    BabyBearSC,\n    MatrixRecordArena<Val<BabyBearSC>>,\n    CpuBackend<BabyBearSC>,\n    SystemChipInventory<BabyBearSC>,\n>;\npub type OriginalCpuChipInventory =\n    ChipInventory<BabyBearSC, MatrixRecordArena<Val<BabyBearSC>>, CpuBackend<BabyBearSC>>;\n\n#[cfg(feature = \"cuda\")]\npub type OriginalGpuChipComplex =\n    VmChipComplex<BabyBearSC, DenseRecordArena, GpuBackend, SystemChipInventoryGPU>;\n#[cfg(feature = \"cuda\")]\npub type OriginalGpuChipInventory = ChipInventory<BabyBearSC, DenseRecordArena, GpuBackend>;\n\npub type IsaApc<F, ISA> = Arc<powdr_autoprecompiles::Apc<F, Instr<F, 
ISA>, (), u32>>;\n\npub trait OpenVmISA: Send + Sync + Clone + 'static + Default {\n    /// The original linked program, for example, an elf for riscv. It must allow recovering the jump destinations.\n    type LinkedProgram<'a>;\n\n    type Executor<F: PrimeField32>: AnyEnum\n        + InterpreterExecutor<F>\n        + Executor<F>\n        + MeteredExecutor<F>\n        + PreflightExecutor<F, MatrixRecordArena<F>>\n        + PreflightExecutor<F, DenseRecordArena>\n        + Send\n        + Sync\n        + Into<SpecializedExecutor<F, Self>>;\n\n    type Config: VmConfig<BabyBearSC>\n        + VmExecutionConfig<BabyBear, Executor = Self::Executor<BabyBear>>\n        + TranspilerConfig<BabyBear>;\n\n    type CpuBuilder: Clone\n        + Default\n        + VmBuilder<\n            BabyBearPoseidon2Engine,\n            VmConfig = Self::Config,\n            SystemChipInventory = SystemChipInventory<BabyBearSC>,\n            RecordArena = MatrixRecordArena<Val<BabyBearSC>>,\n        >;\n\n    #[cfg(feature = \"cuda\")]\n    type GpuBuilder: Clone\n        + Default\n        + VmBuilder<\n            GpuBabyBearPoseidon2Engine,\n            VmConfig = Self::Config,\n            SystemChipInventory = SystemChipInventoryGPU,\n            RecordArena = DenseRecordArena,\n        >;\n\n    fn create_dummy_airs<E: VmCircuitExtension<BabyBearSC>>(\n        config: &Self::Config,\n        shared_chips: E,\n    ) -> Result<AirInventory<BabyBearSC>, AirInventoryError>;\n\n    fn create_original_chip_complex(\n        config: &Self::Config,\n        airs: AirInventory<BabyBearSC>,\n    ) -> Result<OriginalCpuChipComplex, ChipInventoryError>;\n\n    fn create_dummy_chip_complex_cpu(\n        config: &Self::Config,\n        circuit: AirInventory<BabyBearSC>,\n        shared_chips: SharedPeripheryChipsCpu<Self>,\n    ) -> Result<OriginalCpuChipComplex, ChipInventoryError>;\n\n    #[cfg(feature = \"cuda\")]\n    fn create_dummy_chip_complex_gpu(\n        config: &Self::Config,\n        
circuit: AirInventory<BabyBearSC>,\n        shared_chips: SharedPeripheryChipsGpu<Self>,\n    ) -> Result<OriginalGpuChipComplex, ChipInventoryError>;\n\n    /// The set of branching opcodes\n    fn branching_opcodes() -> HashSet<VmOpcode>;\n\n    /// The set of opcodes which are allowed to be put into autoprecompiles\n    fn allowed_opcodes() -> HashSet<VmOpcode>;\n\n    /// Format an instruction of this ISA\n    fn format<F: PrimeField32>(instruction: &Instruction<F>) -> String;\n\n    fn get_symbol_table<'a>(program: &Self::LinkedProgram<'a>) -> SymbolTable;\n\n    /// Given an original program, return the pcs which correspond to jump destinations\n    fn get_jump_destinations(original_program: &OriginalCompiledProgram<Self>) -> BTreeSet<u64>;\n}\n"
  },
  {
    "path": "openvm/src/lib.rs",
    "content": "#![cfg_attr(feature = \"tco\", allow(internal_features))]\n#![cfg_attr(feature = \"tco\", allow(incomplete_features))]\n#![cfg_attr(feature = \"tco\", feature(explicit_tail_calls))]\n#![cfg_attr(feature = \"tco\", feature(core_intrinsics))]\n\nuse openvm_circuit::arch::{\n    AirInventory, AirInventoryError, ChipInventory, ChipInventoryError, ExecutorInventory,\n    ExecutorInventoryError, InitFileGenerator, MatrixRecordArena, RowMajorMatrixArena,\n    SystemConfig, VmBuilder, VmChipComplex, VmCircuitConfig, VmCircuitExtension, VmExecutionConfig,\n    VmProverExtension,\n};\nuse openvm_circuit::system::SystemChipInventory;\nuse openvm_circuit::{circuit_derive::Chip, derive::AnyEnum};\nuse openvm_circuit_derive::{\n    AotExecutor, AotMeteredExecutor, Executor, MeteredExecutor, PreflightExecutor,\n};\n\nuse openvm_sdk::config::TranspilerConfig;\nuse openvm_sdk::GenericSdk;\nuse openvm_sdk::{\n    config::{AppConfig, DEFAULT_APP_LOG_BLOWUP},\n    StdIn,\n};\nuse openvm_stark_backend::config::{StarkGenericConfig, Val};\nuse openvm_stark_backend::engine::StarkEngine;\nuse openvm_stark_backend::prover::cpu::{CpuBackend, CpuDevice};\nuse openvm_stark_backend::prover::hal::ProverBackend;\nuse openvm_stark_sdk::config::{\n    baby_bear_poseidon2::{BabyBearPoseidon2Config, BabyBearPoseidon2Engine},\n    FriParameters,\n};\nuse openvm_stark_sdk::openvm_stark_backend::p3_field::PrimeField32;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse openvm_transpiler::transpiler::Transpiler;\nuse powdr_autoprecompiles::evaluation::AirStats;\nuse powdr_autoprecompiles::execution_profile::ExecutionProfile;\nuse powdr_autoprecompiles::DegreeBound;\nuse powdr_autoprecompiles::{execution_profile::execution_profile, PowdrConfig};\nuse powdr_extension::PowdrExtension;\nuse serde::{Deserialize, Serialize};\nuse std::iter::Sum;\nuse std::marker::PhantomData;\nuse std::ops::Add;\nuse std::path::Path;\n\nuse crate::isa::OpenVmISA;\nuse 
crate::powdr_extension::chip::PowdrAir;\npub use crate::program::Prog;\npub use crate::program::{CompiledProgram, OriginalCompiledProgram};\n\nuse crate::extraction_utils::AirWidthsDiff;\nuse crate::extraction_utils::{get_air_metrics, AirWidths, OriginalVmConfig};\nuse crate::powdr_extension::{PowdrExtensionExecutor, PowdrPrecompile};\n\nmod air_builder;\npub mod cuda_abi;\npub mod empirical_constraints;\npub mod extraction_utils;\npub mod program;\npub mod trace_generation;\npub mod utils;\npub use powdr_openvm_bus_interaction_handler::bus_map;\n\n#[cfg(feature = \"test-utils\")]\npub mod test_utils;\n\npub use crate::empirical_constraints::detect_empirical_constraints;\n\npub type BabyBearSC = BabyBearPoseidon2Config;\n\ncfg_if::cfg_if! {\n    if #[cfg(feature = \"cuda\")] {\n        pub use openvm_cuda_backend::engine::GpuBabyBearPoseidon2Engine;\n        pub use openvm_native_circuit::NativeGpuBuilder;\n        pub type PowdrSdkGpu<ISA> = GenericSdk<GpuBabyBearPoseidon2Engine, SpecializedConfigGpuBuilder<ISA>, NativeGpuBuilder>;\n        pub type PowdrExecutionProfileSdkGpu<ISA> = GenericSdk<GpuBabyBearPoseidon2Engine, <ISA as OpenVmISA>::GpuBuilder, NativeGpuBuilder>;\n\n        pub use openvm_circuit::system::cuda::{extensions::SystemGpuBuilder, SystemChipInventoryGPU};\n        pub use openvm_sdk::config::SdkVmGpuBuilder;\n        pub use openvm_cuda_backend::prover_backend::GpuBackend;\n        pub use openvm_circuit_primitives::bitwise_op_lookup::BitwiseOperationLookupChipGPU;\n        pub use openvm_circuit_primitives::range_tuple::RangeTupleCheckerChipGPU;\n        pub use openvm_circuit_primitives::var_range::VariableRangeCheckerChipGPU;\n        pub use openvm_cuda_backend::base::DeviceMatrix;\n        pub use openvm_circuit::arch::DenseRecordArena;\n    }\n}\n\nuse openvm_circuit_primitives::bitwise_op_lookup::{\n    BitwiseOperationLookupAir, SharedBitwiseOperationLookupChip,\n};\nuse openvm_circuit_primitives::range_tuple::{RangeTupleCheckerAir, 
SharedRangeTupleCheckerChip};\nuse openvm_circuit_primitives::var_range::{\n    SharedVariableRangeCheckerChip, VariableRangeCheckerAir,\n};\nuse openvm_native_circuit::NativeCpuBuilder;\npub type PowdrSdkCpu<ISA> =\n    GenericSdk<BabyBearPoseidon2Engine, SpecializedConfigCpuBuilder<ISA>, NativeCpuBuilder>;\npub type PowdrExecutionProfileSdkCpu<ISA> =\n    GenericSdk<BabyBearPoseidon2Engine, <ISA as OpenVmISA>::CpuBuilder, NativeCpuBuilder>;\n\npub const DEFAULT_OPENVM_DEGREE_BOUND: usize = 2 * DEFAULT_APP_LOG_BLOWUP + 1;\npub const DEFAULT_DEGREE_BOUND: DegreeBound = DegreeBound {\n    identities: DEFAULT_OPENVM_DEGREE_BOUND,\n    bus_interactions: DEFAULT_OPENVM_DEGREE_BOUND - 1,\n};\n\npub fn default_powdr_openvm_config(apc: u64, skip: u64) -> PowdrConfig {\n    PowdrConfig::new(apc, skip, DEFAULT_DEGREE_BOUND)\n}\n\npub fn format_fe<F: PrimeField32>(v: F) -> String {\n    let v = v.as_canonical_u32();\n    if v < F::ORDER_U32 / 2 {\n        format!(\"{v}\")\n    } else {\n        format!(\"-{}\", F::ORDER_U32 - v)\n    }\n}\n\n/// We do not use the transpiler, instead we customize an already transpiled program\npub mod customize_exe;\n\npub use customize_exe::{customize, BabyBearOpenVmApcAdapter, Instr, POWDR_OPCODE};\n\n// A module for our extension\npub mod isa;\npub mod powdr_extension;\n\n/// A custom VmConfig that wraps the SdkVmConfig, adding our custom extension.\n#[derive(Serialize, Deserialize, Clone)]\n#[serde(bound = \"\")]\npub struct SpecializedConfig<ISA: OpenVmISA> {\n    pub original: OriginalVmConfig<ISA>,\n    pub powdr: PowdrExtension<BabyBear, ISA>,\n}\n\n#[cfg(feature = \"cuda\")]\n#[derive(Default, Clone)]\npub struct SpecializedConfigGpuBuilder<ISA> {\n    _marker: PhantomData<ISA>,\n}\n\n#[cfg(feature = \"cuda\")]\nimpl<ISA: OpenVmISA> VmBuilder<GpuBabyBearPoseidon2Engine> for SpecializedConfigGpuBuilder<ISA> {\n    type VmConfig = SpecializedConfig<ISA>;\n    type SystemChipInventory = SystemChipInventoryGPU;\n    type RecordArena = 
DenseRecordArena;\n\n    fn create_chip_complex(\n        &self,\n        config: &SpecializedConfig<ISA>,\n        circuit: AirInventory<BabyBearSC>,\n    ) -> Result<\n        VmChipComplex<BabyBearSC, Self::RecordArena, GpuBackend, Self::SystemChipInventory>,\n        ChipInventoryError,\n    > {\n        let mut chip_complex = VmBuilder::<GpuBabyBearPoseidon2Engine>::create_chip_complex(\n            &<ISA as OpenVmISA>::GpuBuilder::default(),\n            &config.original.config,\n            circuit,\n        )?;\n        let inventory = &mut chip_complex.inventory;\n        VmProverExtension::<GpuBabyBearPoseidon2Engine, _, _>::extend_prover(\n            &PowdrGpuProverExt::<ISA>::default(),\n            &config.powdr,\n            inventory,\n        )?;\n        Ok(chip_complex)\n    }\n}\n\n#[derive(Default, Clone)]\npub struct SpecializedConfigCpuBuilder<ISA> {\n    _marker: PhantomData<ISA>,\n}\n\nimpl<E, ISA: OpenVmISA> VmBuilder<E> for SpecializedConfigCpuBuilder<ISA>\nwhere\n    E: StarkEngine<SC = BabyBearSC, PB = CpuBackend<BabyBearSC>, PD = CpuDevice<BabyBearSC>>,\n    ISA::CpuBuilder: VmBuilder<\n        E,\n        VmConfig = ISA::Config,\n        SystemChipInventory = SystemChipInventory<BabyBearSC>,\n        RecordArena = MatrixRecordArena<Val<BabyBearSC>>,\n    >,\n{\n    type VmConfig = SpecializedConfig<ISA>;\n    type SystemChipInventory = SystemChipInventory<BabyBearSC>;\n    type RecordArena = MatrixRecordArena<Val<BabyBearSC>>;\n\n    fn create_chip_complex(\n        &self,\n        config: &SpecializedConfig<ISA>,\n        circuit: AirInventory<BabyBearSC>,\n    ) -> Result<\n        VmChipComplex<BabyBearSC, Self::RecordArena, E::PB, Self::SystemChipInventory>,\n        ChipInventoryError,\n    > {\n        let mut chip_complex = VmBuilder::<E>::create_chip_complex(\n            &<ISA as OpenVmISA>::CpuBuilder::default(),\n            &config.original.config,\n            circuit,\n        )?;\n        let inventory = &mut 
chip_complex.inventory;\n        VmProverExtension::<E, _, _>::extend_prover(\n            &PowdrCpuProverExt::<ISA>::default(),\n            &config.powdr,\n            inventory,\n        )?;\n        Ok(chip_complex)\n    }\n}\n\n#[cfg(feature = \"cuda\")]\n#[derive(Default)]\nstruct PowdrGpuProverExt<ISA> {\n    _marker: PhantomData<ISA>,\n}\n\n#[cfg(feature = \"cuda\")]\nimpl<ISA: OpenVmISA>\n    VmProverExtension<GpuBabyBearPoseidon2Engine, DenseRecordArena, PowdrExtension<BabyBear, ISA>>\n    for PowdrGpuProverExt<ISA>\n{\n    fn extend_prover(\n        &self,\n        extension: &PowdrExtension<BabyBear, ISA>,\n        inventory: &mut ChipInventory<BabyBearSC, DenseRecordArena, GpuBackend>,\n    ) -> Result<(), ChipInventoryError> {\n        use std::sync::Arc;\n        // TODO: here we make assumptions about the existence of some chips in the periphery. Make this more flexible\n\n        use crate::powdr_extension::trace_generator::cuda::PowdrPeripheryInstancesGpu;\n        let bitwise_lookup = inventory\n            .find_chip::<Arc<BitwiseOperationLookupChipGPU<8>>>()\n            .next()\n            .cloned();\n        let range_checker = inventory\n            .find_chip::<Arc<VariableRangeCheckerChipGPU>>()\n            .next()\n            .unwrap();\n        let tuple_range_checker = inventory\n            .find_chip::<Arc<RangeTupleCheckerChipGPU<2>>>()\n            .next()\n            .cloned();\n\n        // Create the shared chips and the dummy shared chips\n        let shared_chips_pair = PowdrPeripheryInstancesGpu::new(\n            range_checker.clone(),\n            bitwise_lookup,\n            tuple_range_checker,\n            get_periphery_bus_ids(inventory),\n        );\n\n        for precompile in &extension.precompiles {\n            use crate::powdr_extension::chip::PowdrChipGpu;\n\n            inventory.next_air::<PowdrAir<BabyBear>>()?;\n            let chip = PowdrChipGpu::new(\n                precompile.clone(),\n                
extension.airs.clone(),\n                extension.base_config.clone(),\n                shared_chips_pair.clone(),\n            );\n            inventory.add_executor_chip(chip);\n        }\n\n        Ok(())\n    }\n}\n\n#[derive(Clone)]\npub struct PeripheryBusIds {\n    pub range_checker: u16,\n    pub bitwise_lookup: Option<u16>,\n    pub tuple_range_checker: Option<u16>,\n}\n\n#[derive(Clone, Default)]\npub struct PowdrCpuProverExt<ISA> {\n    _marker: PhantomData<ISA>,\n}\n\nimpl<E, RA, ISA: OpenVmISA> VmProverExtension<E, RA, PowdrExtension<BabyBear, ISA>>\n    for PowdrCpuProverExt<ISA>\nwhere\n    E: StarkEngine<SC = BabyBearSC, PB = CpuBackend<BabyBearSC>, PD = CpuDevice<BabyBearSC>>,\n    RA: RowMajorMatrixArena<BabyBear>,\n{\n    fn extend_prover(\n        &self,\n        extension: &PowdrExtension<BabyBear, ISA>,\n        inventory: &mut ChipInventory<<E as StarkEngine>::SC, RA, <E as StarkEngine>::PB>,\n    ) -> Result<(), ChipInventoryError> {\n        // TODO: here we make assumptions about the existence of some chips in the periphery. 
Make this more flexible\n\n        use crate::powdr_extension::trace_generator::cpu::PowdrPeripheryInstancesCpu;\n        let bitwise_lookup = inventory\n            .find_chip::<SharedBitwiseOperationLookupChip<8>>()\n            .next()\n            .cloned();\n        let range_checker = inventory\n            .find_chip::<SharedVariableRangeCheckerChip>()\n            .next()\n            .unwrap();\n        let tuple_range_checker = inventory\n            .find_chip::<SharedRangeTupleCheckerChip<2>>()\n            .next()\n            .cloned();\n\n        // Create the shared chips and the dummy shared chips\n        let shared_chips_pair = PowdrPeripheryInstancesCpu::new(\n            range_checker.clone(),\n            bitwise_lookup,\n            tuple_range_checker,\n            get_periphery_bus_ids(inventory),\n        );\n\n        for precompile in &extension.precompiles {\n            use crate::powdr_extension::chip::PowdrChipCpu;\n\n            inventory.next_air::<PowdrAir<BabyBear>>()?;\n            let chip = PowdrChipCpu::new(\n                precompile.clone(),\n                extension.airs.clone(),\n                extension.base_config.clone(),\n                shared_chips_pair.clone(),\n            );\n            inventory.add_executor_chip(chip);\n        }\n\n        Ok(())\n    }\n}\n\n// Helper function to get the periphery bus ids from the `AirInventory`.\n// This is the most robust method because bus ids are assigned at air creation time.\nfn get_periphery_bus_ids<SC, RA, PB>(inventory: &ChipInventory<SC, RA, PB>) -> PeripheryBusIds\nwhere\n    SC: StarkGenericConfig,\n    PB: ProverBackend,\n{\n    let air_inventory = inventory.airs();\n    let range_checker_bus_id = air_inventory\n        .find_air::<VariableRangeCheckerAir>()\n        .next()\n        .unwrap()\n        .bus\n        .inner\n        .index;\n    let bitwise_lookup_bus_id = air_inventory\n        .find_air::<BitwiseOperationLookupAir<8>>()\n        .next()\n    
    .map(|air| air.bus.inner.index);\n    let tuple_range_checker_bus_id = air_inventory\n        .find_air::<RangeTupleCheckerAir<2>>()\n        .next()\n        .map(|air| air.bus.inner.index);\n\n    PeripheryBusIds {\n        range_checker: range_checker_bus_id,\n        bitwise_lookup: bitwise_lookup_bus_id,\n        tuple_range_checker: tuple_range_checker_bus_id,\n    }\n}\n\nimpl<ISA: OpenVmISA> TranspilerConfig<BabyBear> for SpecializedConfig<ISA> {\n    fn transpiler(&self) -> Transpiler<BabyBear> {\n        self.original.config().transpiler()\n    }\n}\n\n// For generation of the init file, we delegate to the underlying SdkVmConfig.\nimpl<ISA: OpenVmISA> InitFileGenerator for SpecializedConfig<ISA> {\n    fn generate_init_file_contents(&self) -> Option<String> {\n        self.original.config().generate_init_file_contents()\n    }\n\n    fn write_to_init_file(\n        &self,\n        manifest_dir: &Path,\n        init_file_name: Option<&str>,\n    ) -> std::io::Result<()> {\n        self.original\n            .config()\n            .write_to_init_file(manifest_dir, init_file_name)\n    }\n}\n\nimpl<ISA: OpenVmISA> AsRef<SystemConfig> for SpecializedConfig<ISA> {\n    fn as_ref(&self) -> &SystemConfig {\n        self.original.as_ref()\n    }\n}\n\nimpl<ISA: OpenVmISA> AsMut<SystemConfig> for SpecializedConfig<ISA> {\n    fn as_mut(&mut self) -> &mut SystemConfig {\n        self.original.as_mut()\n    }\n}\n\n#[allow(clippy::large_enum_variant)]\n#[derive(\n    AnyEnum, Chip, Executor, MeteredExecutor, AotExecutor, AotMeteredExecutor, PreflightExecutor,\n)]\npub enum SpecializedExecutor<F: PrimeField32, ISA: OpenVmISA> {\n    #[any_enum]\n    OriginalExecutor(ISA::Executor<F>),\n    #[any_enum]\n    PowdrExecutor(PowdrExtensionExecutor<ISA>),\n}\n\n// We implement `From` by hand because we cannot prove that `ISA::Executor != PowdrExtensionExecutor`\nimpl<F: PrimeField32, ISA: OpenVmISA> From<PowdrExtensionExecutor<ISA>>\n    for SpecializedExecutor<F, 
ISA>\n{\n    fn from(value: PowdrExtensionExecutor<ISA>) -> Self {\n        Self::PowdrExecutor(value)\n    }\n}\n\n// TODO: derive VmCircuitConfig, currently not possible because we don't have SC/F everywhere\n// Also `start_new_extension` is normally only used in derive\nimpl<ISA: OpenVmISA> VmCircuitConfig<BabyBearSC> for SpecializedConfig<ISA> {\n    fn create_airs(&self) -> Result<AirInventory<BabyBearSC>, AirInventoryError> {\n        let mut inventory = self.original.create_airs()?;\n        inventory.start_new_extension();\n        self.powdr.extend_circuit(&mut inventory)?;\n        Ok(inventory)\n    }\n}\n\nimpl<ISA: OpenVmISA> VmExecutionConfig<BabyBear> for SpecializedConfig<ISA> {\n    type Executor = SpecializedExecutor<BabyBear, ISA>;\n\n    fn create_executors(\n        &self,\n    ) -> Result<ExecutorInventory<Self::Executor>, ExecutorInventoryError> {\n        let mut inventory = self.original.create_executors()?.transmute();\n        inventory = inventory.extend(&self.powdr)?;\n        Ok(inventory)\n    }\n}\n\nimpl<ISA: OpenVmISA> SpecializedConfig<ISA> {\n    pub fn new(\n        base_config: OriginalVmConfig<ISA>,\n        precompiles: Vec<PowdrPrecompile<BabyBear, ISA>>,\n        degree_bound: DegreeBound,\n    ) -> Self {\n        let airs = base_config.airs(degree_bound).expect(\"Failed to convert the AIR of an OpenVM instruction, even after filtering by the blacklist!\");\n        let bus_map = base_config.bus_map();\n        let powdr_extension = PowdrExtension::new(precompiles, base_config.clone(), bus_map, airs);\n        Self {\n            original: base_config,\n            powdr: powdr_extension,\n        }\n    }\n}\n\n#[derive(Clone, Serialize, Deserialize, Default, Debug, Eq, PartialEq)]\npub struct AirMetrics {\n    pub widths: AirWidths,\n    pub constraints: usize,\n    pub bus_interactions: usize,\n}\n\nimpl From<AirMetrics> for AirStats {\n    fn from(metrics: AirMetrics) -> Self {\n        AirStats {\n            
main_columns: metrics.widths.main,\n            constraints: metrics.constraints,\n            bus_interactions: metrics.bus_interactions,\n        }\n    }\n}\n\nimpl Add for AirMetrics {\n    type Output = AirMetrics;\n\n    fn add(self, rhs: AirMetrics) -> AirMetrics {\n        AirMetrics {\n            widths: self.widths + rhs.widths,\n            constraints: self.constraints + rhs.constraints,\n            bus_interactions: self.bus_interactions + rhs.bus_interactions,\n        }\n    }\n}\n\nimpl Sum<AirMetrics> for AirMetrics {\n    fn sum<I: Iterator<Item = AirMetrics>>(iter: I) -> AirMetrics {\n        iter.fold(AirMetrics::default(), Add::add)\n    }\n}\n\nimpl AirMetrics {\n    pub fn total_width(&self) -> usize {\n        self.widths.total()\n    }\n}\n\nimpl<ISA: OpenVmISA> CompiledProgram<ISA> {\n    // Return a tuple of (powdr AirMetrics, non-powdr AirMetrics)\n    pub fn air_metrics(\n        &self,\n        max_degree: usize,\n    ) -> (Vec<(AirMetrics, Option<AirWidthsDiff>)>, Vec<AirMetrics>) {\n        let air_inventory = self.vm_config.create_airs().unwrap();\n\n        let chip_complex = <SpecializedConfigCpuBuilder<ISA> as VmBuilder<\n            BabyBearPoseidon2Engine,\n        >>::create_chip_complex(\n            &SpecializedConfigCpuBuilder::default(),\n            &self.vm_config,\n            air_inventory,\n        )\n        .unwrap();\n\n        let inventory = chip_complex.inventory;\n\n        // Order of precompile is the same as that of Powdr executors in chip inventory\n        let mut apc_stats = self\n            .vm_config\n            .powdr\n            .precompiles\n            .iter()\n            .map(|precompile| precompile.apc_stats.clone());\n\n        inventory.airs().ext_airs().iter().fold(\n            (Vec::new(), Vec::new()),\n            |(mut powdr_air_metrics, mut non_powdr_air_metrics), air| {\n                let name = air.name();\n                // We actually give name 
\"powdr_air_for_opcode_<opcode>\" to the AIRs,\n                // but OpenVM uses the actual Rust type (PowdrAir) as the name in this method.\n                // TODO this is hacky but not sure how to do it better rn.\n                if name.starts_with(\"PowdrAir\") {\n                    powdr_air_metrics.push((\n                        get_air_metrics(air.clone(), max_degree),\n                        Some(apc_stats.next().unwrap().widths),\n                    ));\n                } else {\n                    non_powdr_air_metrics.push(get_air_metrics(air.clone(), max_degree));\n                }\n\n                (powdr_air_metrics, non_powdr_air_metrics)\n            },\n        )\n    }\n}\n\npub fn execute<ISA: OpenVmISA>(\n    program: CompiledProgram<ISA>,\n    inputs: StdIn,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let CompiledProgram { exe, vm_config } = program;\n\n    // Set app configuration\n    let app_fri_params =\n        FriParameters::standard_with_100_bits_conjectured_security(DEFAULT_APP_LOG_BLOWUP);\n    let app_config = AppConfig::new(app_fri_params, vm_config.clone());\n\n    // prepare for execute\n    #[cfg(feature = \"cuda\")]\n    let sdk = PowdrSdkGpu::new(app_config).unwrap();\n    #[cfg(not(feature = \"cuda\"))]\n    let sdk = PowdrSdkCpu::new(app_config).unwrap();\n\n    let output = sdk.execute(exe.clone(), inputs.clone()).unwrap();\n\n    tracing::info!(\"Public values output: {:?}\", output);\n\n    Ok(())\n}\n\n// Generate execution profile for a guest program\npub fn execution_profile_from_guest<ISA: OpenVmISA>(\n    program: &OriginalCompiledProgram<ISA>,\n    inputs: StdIn,\n) -> ExecutionProfile {\n    let OriginalCompiledProgram { exe, vm_config, .. 
} = program;\n    let program = Prog::from(&exe.program);\n\n    // Set app configuration\n    let app_fri_params =\n        FriParameters::standard_with_100_bits_conjectured_security(DEFAULT_APP_LOG_BLOWUP);\n    let app_config = AppConfig::new(app_fri_params, vm_config.clone().config);\n\n    // prepare for execute\n    let sdk = PowdrExecutionProfileSdkCpu::<ISA>::new(app_config).unwrap();\n\n    execution_profile::<BabyBearOpenVmApcAdapter<ISA>>(&program, || {\n        sdk.execute_interpreted(exe.clone(), inputs.clone())\n            .unwrap();\n    })\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/chip.rs",
    "content": "// Mostly taken from [this openvm extension](https://github.com/openvm-org/openvm/blob/1b76fd5a900a7d69850ee9173969f70ef79c4c76/extensions/rv32im/circuit/src/auipc/core.rs#L1)\n\nuse std::{cell::RefCell, collections::BTreeMap, rc::Rc};\n\nuse crate::{\n    extraction_utils::{OriginalAirs, OriginalVmConfig},\n    isa::OpenVmISA,\n    powdr_extension::{\n        executor::OriginalArenas,\n        trace_generator::cpu::{PowdrPeripheryInstancesCpu, PowdrTraceGeneratorCpu},\n        PowdrPrecompile,\n    },\n};\n\nuse itertools::Itertools;\nuse openvm_circuit::arch::MatrixRecordArena;\nuse openvm_stark_backend::{\n    p3_air::{Air, BaseAir},\n    rap::ColumnsAir,\n};\n\nuse openvm_stark_backend::{\n    interaction::InteractionBuilder,\n    p3_field::PrimeField32,\n    p3_matrix::Matrix,\n    rap::{BaseAirWithPublicValues, PartitionedBaseAir},\n};\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::{\n    expression::{AlgebraicEvaluator, AlgebraicReference, WitnessEvaluator},\n    symbolic_machine::SymbolicMachine,\n};\n\npub struct PowdrChipCpu<ISA: OpenVmISA> {\n    pub name: String,\n    pub record_arena_by_air_name: Rc<RefCell<OriginalArenas<MatrixRecordArena<BabyBear>>>>,\n    pub trace_generator: PowdrTraceGeneratorCpu<ISA>,\n}\n\nimpl<ISA: OpenVmISA> PowdrChipCpu<ISA> {\n    pub(crate) fn new(\n        precompile: PowdrPrecompile<BabyBear, ISA>,\n        original_airs: OriginalAirs<BabyBear, ISA>,\n        base_config: OriginalVmConfig<ISA>,\n        periphery: PowdrPeripheryInstancesCpu<ISA>,\n    ) -> Self {\n        let PowdrPrecompile {\n            name,\n            apc,\n            apc_record_arena_cpu: apc_record_arena,\n            ..\n        } = precompile;\n        let trace_generator =\n            PowdrTraceGeneratorCpu::new(apc, original_airs, base_config, periphery);\n\n        Self {\n            name,\n            record_arena_by_air_name: apc_record_arena,\n            trace_generator,\n        }\n    
}\n}\n\npub struct PowdrAir<F> {\n    /// The columns in arbitrary order\n    columns: Vec<AlgebraicReference>,\n    machine: SymbolicMachine<F>,\n}\n\nimpl<F: PrimeField32> ColumnsAir<F> for PowdrAir<F> {\n    fn columns(&self) -> Option<Vec<String>> {\n        Some(self.columns.iter().map(|c| (*c.name).clone()).collect())\n    }\n}\n\nimpl<F: PrimeField32> PowdrAir<F> {\n    pub fn new(machine: SymbolicMachine<F>) -> Self {\n        Self {\n            columns: machine.main_columns().collect(),\n            machine,\n        }\n    }\n}\n\nimpl<F: PrimeField32> BaseAir<F> for PowdrAir<F> {\n    fn width(&self) -> usize {\n        let res = self.columns.len();\n        assert!(res > 0);\n        res\n    }\n}\n\n// No public values, but the trait is implemented\nimpl<F: PrimeField32> BaseAirWithPublicValues<F> for PowdrAir<F> {}\n\nimpl<AB: InteractionBuilder> Air<AB> for PowdrAir<AB::F>\nwhere\n    AB::F: PrimeField32,\n{\n    fn eval(&self, builder: &mut AB) {\n        let main = builder.main();\n        let witnesses = main.row_slice(0);\n        // TODO: cache?\n        let witness_values: BTreeMap<u64, AB::Var> = self\n            .columns\n            .iter()\n            .map(|c| c.id)\n            .zip_eq(witnesses.iter().cloned())\n            .collect();\n\n        let witness_evaluator = WitnessEvaluator::new(&witness_values);\n\n        for constraint in &self.machine.constraints {\n            let constraint = witness_evaluator.eval_constraint(constraint);\n            builder.assert_zero(constraint.expr);\n        }\n\n        for interaction in &self.machine.bus_interactions {\n            let interaction = witness_evaluator.eval_bus_interaction(interaction);\n            // TODO: is this correct?\n            let count_weight = 1;\n\n            builder.push_interaction(\n                interaction.id as u16,\n                interaction.args,\n                interaction.mult,\n                count_weight,\n            );\n        }\n    
}\n}\n\nimpl<F: PrimeField32> PartitionedBaseAir<F> for PowdrAir<F> {}\n\n#[cfg(feature = \"cuda\")]\nmod cuda {\n    use std::{cell::RefCell, rc::Rc};\n\n    use openvm_circuit::arch::DenseRecordArena;\n    use openvm_stark_sdk::p3_baby_bear::BabyBear;\n\n    use crate::{\n        extraction_utils::{OriginalAirs, OriginalVmConfig},\n        isa::OpenVmISA,\n        powdr_extension::{\n            executor::OriginalArenas,\n            trace_generator::cuda::{PowdrPeripheryInstancesGpu, PowdrTraceGeneratorGpu},\n            PowdrPrecompile,\n        },\n    };\n\n    pub struct PowdrChipGpu<ISA: OpenVmISA> {\n        pub name: String,\n        pub record_arena_by_air_name: Rc<RefCell<OriginalArenas<DenseRecordArena>>>,\n        pub trace_generator: PowdrTraceGeneratorGpu<ISA>,\n    }\n\n    impl<ISA: OpenVmISA> PowdrChipGpu<ISA> {\n        pub(crate) fn new(\n            precompile: PowdrPrecompile<BabyBear, ISA>,\n            original_airs: OriginalAirs<BabyBear, ISA>,\n            base_config: OriginalVmConfig<ISA>,\n            periphery: PowdrPeripheryInstancesGpu<ISA>,\n        ) -> Self {\n            let PowdrPrecompile {\n                name,\n                apc,\n                apc_record_arena_gpu: apc_record_arena,\n                ..\n            } = precompile;\n            let trace_generator =\n                PowdrTraceGeneratorGpu::new(apc, original_airs, base_config, periphery);\n\n            Self {\n                name,\n                record_arena_by_air_name: apc_record_arena,\n                trace_generator,\n            }\n        }\n    }\n}\n#[cfg(feature = \"cuda\")]\npub use cuda::*;\n"
  },
  {
    "path": "openvm/src/powdr_extension/executor/mod.rs",
    "content": "use std::{\n    borrow::{Borrow, BorrowMut},\n    cell::RefCell,\n    collections::HashMap,\n    rc::Rc,\n};\n\nuse crate::{\n    customize_exe::OpenVmExecutionState,\n    extraction_utils::{\n        record_arena_dimension_by_air_name_per_apc_call, OriginalAirs, OriginalVmConfig,\n    },\n    isa::{IsaApc, OpenVmISA},\n};\n\nuse itertools::Itertools;\nuse openvm_circuit::arch::InterpreterMeteredExecutor;\nuse openvm_circuit::arch::{\n    execution_mode::{ExecutionCtx, MeteredCtx},\n    Arena, DenseRecordArena, E2PreCompute, InterpreterExecutor, MatrixRecordArena,\n    PreflightExecutor,\n};\n#[cfg(feature = \"aot\")]\nuse openvm_circuit::arch::{AotExecutor, AotMeteredExecutor};\nuse openvm_circuit_derive::create_handler;\nuse openvm_circuit_primitives::AlignedBytesBorrow;\nuse openvm_instructions::instruction::Instruction;\nuse openvm_stark_backend::p3_field::PrimeField32;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::{\n    execution::{OptimisticConstraintEvaluator, OptimisticConstraints},\n    InstructionHandler,\n};\n\nuse openvm_circuit::{\n    arch::{\n        ExecuteFunc, ExecutionCtxTrait, ExecutionError, ExecutorInventory,\n        MeteredExecutionCtxTrait, StaticProgramError, VmExecState, VmExecutionConfig, VmStateMut,\n    },\n    system::memory::online::{GuestMemory, TracingMemory},\n};\n\n/// A struct which holds the state of the execution based on the original instructions in this block and a dummy inventory.\n/// It holds arenas for each original use for both cpu and gpu execution, so that this struct can be agnostic to the execution backend.\n/// When using the cpu backend, only `original_arenas_cpu` is used, and vice versa for gpu execution.\npub struct PowdrExecutor<ISA: OpenVmISA> {\n    pub air_by_opcode_id: OriginalAirs<BabyBear, ISA>,\n    pub executor_inventory: ExecutorInventory<ISA::Executor<BabyBear>>,\n    pub apc: IsaApc<BabyBear, ISA>,\n    pub original_arenas_cpu: 
Rc<RefCell<OriginalArenas<MatrixRecordArena<BabyBear>>>>,\n    pub original_arenas_gpu: Rc<RefCell<OriginalArenas<DenseRecordArena>>>,\n    pub height_change: u32,\n    cached_instructions_meta: Vec<CachedInstructionMeta>,\n}\n\n/// A shared mutable reference to the arenas used to store the traces of the original instructions, accessed during preflight execution and trace generation.\n/// The same reference is reused for all segments, under the assumption that segments are executed sequentially: preflight_0, tracegen_0, preflight_1, tracegen_1, ...\n/// It goes through the following cycle for each segment:\n/// - initialized at the beginning of preflight execution with the correct sizes for this segment\n/// - written to during preflight execution\n/// - read from during trace generation\n/// - reset to uninitialized after trace generation\n#[derive(Default)]\npub enum OriginalArenas<A> {\n    #[default]\n    Uninitialized,\n    Initialized(InitializedOriginalArenas<A>),\n}\n\nimpl<A: Arena> OriginalArenas<A> {\n    /// Given an estimate of how many times the APC is called in this segment, and the original airs and apc,\n    /// initializes the arenas iff not already initialized.\n    fn ensure_initialized<ISA: OpenVmISA>(\n        &mut self,\n        apc_call_count_estimate: impl Fn() -> usize,\n        original_airs: &OriginalAirs<BabyBear, ISA>,\n        apc: &IsaApc<BabyBear, ISA>,\n    ) -> &mut InitializedOriginalArenas<A> {\n        match self {\n            OriginalArenas::Uninitialized => {\n                *self = OriginalArenas::Initialized(InitializedOriginalArenas::new(\n                    apc_call_count_estimate(),\n                    original_airs,\n                    apc,\n                ));\n                match self {\n                    OriginalArenas::Initialized(i) => i,\n                    _ => unreachable!(),\n                }\n            }\n            OriginalArenas::Initialized(i) => i,\n        }\n    }\n}\n\n/// A collection of 
arenas used to store the records of the original instructions, one per air name.\n/// Each arena is initialized with a capacity based on an estimate of how many times the APC is called in this segment\n/// and how many calls to each air are made per APC call.\n#[derive(Default)]\npub struct InitializedOriginalArenas<A> {\n    arenas: Vec<Option<ArenaPair<A>>>,\n    air_name_to_arena_index: HashMap<String, usize>,\n    pub number_of_calls: usize,\n}\n\nimpl<A: Arena> InitializedOriginalArenas<A> {\n    /// Creates a new instance of `InitializedOriginalArenas`.\n    pub fn new<ISA: OpenVmISA>(\n        apc_call_count_estimate: usize,\n        original_airs: &OriginalAirs<BabyBear, ISA>,\n        apc: &IsaApc<BabyBear, ISA>,\n    ) -> Self {\n        let record_arena_dimensions =\n            record_arena_dimension_by_air_name_per_apc_call(apc, original_airs);\n        let (air_name_to_arena_index, arenas) =\n            record_arena_dimensions.into_iter().enumerate().fold(\n                (HashMap::new(), Vec::new()),\n                |(mut air_name_to_arena_index, mut arenas),\n                 (\n                    idx,\n                    (\n                        air_name,\n                        RecordArenaDimension {\n                            real_height,\n                            width: air_width,\n                            dummy_height,\n                        },\n                    ),\n                )| {\n                    air_name_to_arena_index.insert(air_name, idx);\n                    arenas.push(Some(ArenaPair {\n                        real: A::with_capacity(real_height * apc_call_count_estimate, air_width),\n                        dummy: A::with_capacity(dummy_height * apc_call_count_estimate, air_width),\n                    }));\n                    (air_name_to_arena_index, arenas)\n                },\n            );\n\n        Self {\n            arenas,\n            air_name_to_arena_index,\n            // This is the actual 
number of calls, which we don't know yet. It will be updated during preflight execution.\n            number_of_calls: 0,\n        }\n    }\n\n    #[inline]\n    fn arena_mut_by_index(&mut self, index: usize) -> &mut ArenaPair<A> {\n        self.arenas\n            .get_mut(index)\n            .and_then(|arena| arena.as_mut())\n            .expect(\"arena missing for index\")\n    }\n\n    #[inline]\n    fn real_arena_mut_by_index(&mut self, index: usize) -> &mut A {\n        &mut self.arena_mut_by_index(index).real\n    }\n\n    #[inline]\n    fn dummy_arena_mut_by_index(&mut self, index: usize) -> &mut A {\n        &mut self.arena_mut_by_index(index).dummy\n    }\n\n    pub fn take_real_arena(&mut self, air_name: &str) -> Option<A> {\n        let index = *self.air_name_to_arena_index.get(air_name)?;\n        self.arenas[index].take().map(|arena_pair| arena_pair.real)\n    }\n}\n\npub struct ArenaPair<A> {\n    pub real: A,\n    pub dummy: A,\n}\n\n/// The dimensions of a record arena for a given air name, used to initialize the arenas.\npub struct RecordArenaDimension {\n    pub real_height: usize,\n    pub width: usize,\n    pub dummy_height: usize,\n}\n\n#[derive(Clone, Copy)]\nstruct CachedInstructionMeta {\n    executor_index: usize,\n    arena_index: usize,\n    should_use_real_arena: bool,\n}\n\n/// A struct to interpret the pre-compute data as for PowdrExecutor.\n#[derive(AlignedBytesBorrow, Clone)]\n#[repr(C)]\nstruct PowdrPreCompute<F, Ctx> {\n    height_change: u32,\n    original_instructions: Vec<(ExecuteFunc<F, Ctx>, Vec<u8>)>,\n    optimistic_constraints: OptimisticConstraints<(), u32>,\n}\n\nimpl<ISA: OpenVmISA> InterpreterExecutor<BabyBear> for PowdrExecutor<ISA> {\n    fn pre_compute_size(&self) -> usize {\n        // TODO: do we know `ExecutionCtx` is correct? 
It's only one implementation of `ExecutionCtxTrait`.\n        // A clean fix would be to add `Ctx` as a generic parameter to this method in the `Executor` trait, but that would be a breaking change.\n        size_of::<PowdrPreCompute<BabyBear, ExecutionCtx>>()\n    }\n\n    #[cfg(not(feature = \"tco\"))]\n    fn pre_compute<Ctx>(\n        &self,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<ExecuteFunc<BabyBear, Ctx>, StaticProgramError>\n    where\n        Ctx: ExecutionCtxTrait,\n    {\n        let pre_compute: &mut PowdrPreCompute<BabyBear, Ctx> = data.borrow_mut();\n\n        self.pre_compute_impl::<Ctx>(pc, inst, pre_compute)?;\n\n        Ok(execute_e1_impl::<BabyBear, Ctx, ISA>)\n    }\n\n    #[cfg(feature = \"tco\")]\n    fn handler<Ctx>(\n        &self,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<openvm_circuit::arch::Handler<BabyBear, Ctx>, StaticProgramError>\n    where\n        Ctx: ExecutionCtxTrait,\n    {\n        let pre_compute: &mut PowdrPreCompute<BabyBear, Ctx> = data.borrow_mut();\n        self.pre_compute_impl::<Ctx>(pc, inst, pre_compute)?;\n        Ok(execute_e1_handler::<BabyBear, Ctx>)\n    }\n}\n\nimpl<ISA: OpenVmISA> InterpreterMeteredExecutor<BabyBear> for PowdrExecutor<ISA> {\n    fn metered_pre_compute_size(&self) -> usize {\n        // TODO: do we know `MeteredCtx` is correct? 
It's only one implementation of `MeteredExecutionCtxTrait`.\n        // A clean fix would be to add `Ctx` as a generic parameter to this method in the `MeteredExecutor` trait, but that would be a breaking change.\n        size_of::<E2PreCompute<PowdrPreCompute<BabyBear, MeteredCtx>>>()\n    }\n\n    #[cfg(not(feature = \"tco\"))]\n    fn metered_pre_compute<Ctx>(\n        &self,\n        chip_idx: usize,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<ExecuteFunc<BabyBear, Ctx>, StaticProgramError>\n    where\n        Ctx: MeteredExecutionCtxTrait,\n    {\n        let pre_compute: &mut E2PreCompute<PowdrPreCompute<BabyBear, Ctx>> = data.borrow_mut();\n        pre_compute.chip_idx = chip_idx as u32;\n\n        self.pre_compute_impl::<Ctx>(pc, inst, &mut pre_compute.data)?;\n\n        Ok(execute_e2_impl::<BabyBear, Ctx, ISA>)\n    }\n\n    #[cfg(feature = \"tco\")]\n    fn metered_handler<Ctx>(\n        &self,\n        chip_idx: usize,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<openvm_circuit::arch::Handler<BabyBear, Ctx>, StaticProgramError>\n    where\n        Ctx: MeteredExecutionCtxTrait,\n    {\n        let pre_compute: &mut E2PreCompute<PowdrPreCompute<BabyBear, Ctx>> = data.borrow_mut();\n        pre_compute.chip_idx = chip_idx as u32;\n\n        self.pre_compute_impl::<Ctx>(pc, inst, &mut pre_compute.data)?;\n\n        Ok(execute_e2_handler::<BabyBear, Ctx>)\n    }\n}\n\n#[cfg(feature = \"aot\")]\nimpl<ISA: OpenVmISA> AotExecutor<BabyBear> for PowdrExecutor<ISA> {\n    fn is_aot_supported(&self, _inst: &Instruction<BabyBear>) -> bool {\n        false\n    }\n\n    fn generate_x86_asm(\n        &self,\n        _inst: &Instruction<BabyBear>,\n        _pc: u32,\n    ) -> Result<String, openvm_circuit::arch::AotError> {\n        std::unimplemented!()\n    }\n}\n\n#[cfg(feature = \"aot\")]\nimpl<ISA: OpenVmISA> AotMeteredExecutor<BabyBear> for 
PowdrExecutor<ISA> {\n    fn is_aot_metered_supported(&self, _inst: &Instruction<BabyBear>) -> bool {\n        false\n    }\n\n    fn generate_x86_metered_asm(\n        &self,\n        _inst: &Instruction<BabyBear>,\n        _pc: u32,\n        _chip_idx: usize,\n        _config: &openvm_circuit::arch::SystemConfig,\n    ) -> Result<String, openvm_circuit::arch::AotError> {\n        std::unimplemented!()\n    }\n}\n\nimpl<ISA: OpenVmISA> PowdrExecutor<ISA> {\n    #[cfg(not(feature = \"tco\"))]\n    /// The implementation of pre_compute, shared between Executor and MeteredExecutor.\n    #[inline]\n    fn pre_compute_impl<Ctx>(\n        &self,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut PowdrPreCompute<BabyBear, Ctx>,\n    ) -> Result<(), StaticProgramError>\n    where\n        Ctx: ExecutionCtxTrait,\n    {\n        use openvm_stark_backend::{p3_field::Field, p3_maybe_rayon::prelude::ParallelIterator};\n\n        let &Instruction {\n            a,\n            b,\n            c,\n            d,\n            e,\n            f,\n            g,\n            ..\n        } = inst;\n\n        // TODO: debug_assert that the opcode is the one we expect\n\n        if !a.is_zero()\n            || !b.is_zero()\n            || !c.is_zero()\n            || !d.is_zero()\n            || !e.is_zero()\n            || !f.is_zero()\n            || !g.is_zero()\n        {\n            return Err(StaticProgramError::InvalidInstruction(pc));\n        }\n\n        let executor_inventory = &self.executor_inventory;\n        // Set the data using the original instructions\n        let new_data = PowdrPreCompute {\n            height_change: self.height_change,\n            original_instructions: self\n                .apc\n                .block\n                .par_instructions()\n                .map(|(pc, instruction)| {\n                    let executor = executor_inventory\n                        .get_executor(instruction.inner.opcode)\n                
        .ok_or(StaticProgramError::ExecutorNotFound {\n                            opcode: instruction.inner.opcode,\n                        })?;\n                    let pre_compute_size = executor.pre_compute_size();\n                    let mut pre_compute_data = vec![0u8; pre_compute_size];\n                    let execute_func = executor.pre_compute::<Ctx>(\n                        pc as u32,\n                        &instruction.inner,\n                        &mut pre_compute_data,\n                    )?;\n                    Ok((execute_func, pre_compute_data.to_vec()))\n                })\n                .collect::<Result<Vec<_>, StaticProgramError>>()?,\n            optimistic_constraints: self.apc.optimistic_constraints.clone(),\n        };\n        unsafe {\n            std::ptr::write(data, new_data);\n        }\n\n        Ok(())\n    }\n\n    #[cfg(feature = \"tco\")]\n    /// The implementation of pre_compute, shared between Executor and MeteredExecutor.\n    #[inline]\n    fn pre_compute_impl<Ctx>(\n        &self,\n        _pc: u32,\n        _inst: &Instruction<BabyBear>,\n        _data: &mut PowdrPreCompute<BabyBear, Ctx>,\n    ) -> Result<(), StaticProgramError> {\n        unimplemented!(\"tco is not implemented yet\")\n    }\n}\n\n/// The implementation of the execute function, shared between Executor and MeteredExecutor.\n#[inline(always)]\nunsafe fn execute_e12_impl<F: PrimeField32, CTX: ExecutionCtxTrait, ISA: OpenVmISA>(\n    pre_compute: &PowdrPreCompute<F, CTX>,\n    exec_state: &mut VmExecState<F, GuestMemory, CTX>,\n) {\n    let mut optimistic_constraint_evalutator = OptimisticConstraintEvaluator::new();\n    // Check the state before execution\n    assert!(optimistic_constraint_evalutator\n        .try_next_execution_step::<OpenVmExecutionState<'_, F, ISA>>(\n            &OpenVmExecutionState::from(&exec_state.vm_state),\n            &pre_compute.optimistic_constraints\n        )\n        .is_ok());\n    for (executor, data) in 
&pre_compute.original_instructions {\n        executor(data.as_ptr(), exec_state);\n        // Check the state after each original instruction\n        assert!(optimistic_constraint_evalutator\n            .try_next_execution_step::<OpenVmExecutionState<'_, F, ISA>>(\n                &OpenVmExecutionState::from(&exec_state.vm_state),\n                &pre_compute.optimistic_constraints\n            )\n            .is_ok());\n    }\n}\n\n#[create_handler]\nunsafe fn execute_e1_impl<F: PrimeField32, CTX: ExecutionCtxTrait, ISA: OpenVmISA>(\n    pre_compute: *const u8,\n    exec_state: &mut VmExecState<F, GuestMemory, CTX>,\n) {\n    let pre_compute: &PowdrPreCompute<F, CTX> =\n        std::slice::from_raw_parts(pre_compute, size_of::<PowdrPreCompute<F, CTX>>()).borrow();\n    execute_e12_impl::<F, CTX, ISA>(pre_compute, exec_state);\n}\n\n#[create_handler]\nunsafe fn execute_e2_impl<F: PrimeField32, CTX: MeteredExecutionCtxTrait, ISA: OpenVmISA>(\n    pre_compute: *const u8,\n    exec_state: &mut VmExecState<F, GuestMemory, CTX>,\n) {\n    let pre_compute: &E2PreCompute<PowdrPreCompute<F, CTX>> = std::slice::from_raw_parts(\n        pre_compute,\n        size_of::<E2PreCompute<PowdrPreCompute<F, CTX>>>(),\n    )\n    .borrow();\n    exec_state.ctx.on_height_change(\n        pre_compute.chip_idx as usize,\n        pre_compute.data.height_change,\n    );\n    execute_e12_impl::<F, CTX, ISA>(&pre_compute.data, exec_state);\n}\n\n// Preflight execution is implemented separately for CPU and GPU backends, because they use a different arena from `self`\n// TODO: reduce code duplication between the two implementations. 
The main issue now is we need to use the concrete arena types.\nimpl<ISA: OpenVmISA> PreflightExecutor<BabyBear, MatrixRecordArena<BabyBear>>\n    for PowdrExecutor<ISA>\n{\n    fn execute(\n        &self,\n        state: VmStateMut<BabyBear, TracingMemory, MatrixRecordArena<BabyBear>>,\n        _: &Instruction<BabyBear>,\n    ) -> Result<(), ExecutionError> {\n        // Extract the state components, since `execute` consumes the state but we need to pass it to each instruction execution\n        let VmStateMut {\n            pc,\n            memory,\n            streams,\n            rng,\n            custom_pvs,\n            ctx,\n            #[cfg(feature = \"metrics\")]\n            metrics,\n        } = state;\n\n        // We use the arena for metrics, so this line ensures that the number of rows is reported correctly.\n        // It does not allocate any extra memory, because the memory is already allocated at initialization.\n        #[cfg(feature = \"metrics\")]\n        ctx.alloc_single_row();\n\n        // Initialize the original arenas if not already initialized\n        let mut original_arenas = self.original_arenas_cpu.as_ref().borrow_mut();\n\n        // Recover an estimate of how many times the APC is called in this segment based on the current ctx height and width\n        let apc_call_count = || ctx.trace_buffer.len() / ctx.width;\n\n        let original_arenas =\n            original_arenas.ensure_initialized(apc_call_count, &self.air_by_opcode_id, &self.apc);\n\n        // execute the original instructions one by one\n        for (instruction, cached_meta) in self\n            .apc\n            .instructions()\n            .zip_eq(&self.cached_instructions_meta)\n        {\n            let executor = &self.executor_inventory.executors[cached_meta.executor_index];\n\n            let ctx_arena = if cached_meta.should_use_real_arena {\n                original_arenas.real_arena_mut_by_index(cached_meta.arena_index)\n            } else {\n                
original_arenas.dummy_arena_mut_by_index(cached_meta.arena_index)\n            };\n\n            let state = VmStateMut {\n                pc,\n                memory,\n                streams,\n                rng,\n                custom_pvs,\n                // We execute in the context of the relevant original table\n                ctx: ctx_arena,\n                // TODO: should we pass around the same metrics object, or snapshot it at the beginning of this method and apply a single update at the end?\n                #[cfg(feature = \"metrics\")]\n                metrics,\n            };\n\n            executor.execute(state, &instruction.inner)?;\n        }\n\n        // Update the real number of calls to the APC\n        original_arenas.number_of_calls += 1;\n\n        Ok(())\n    }\n\n    fn get_opcode_name(&self, opcode: usize) -> String {\n        format!(\"APC_{opcode}\")\n    }\n}\n\n// The GPU preflight executor implementation\nimpl<ISA: OpenVmISA> PreflightExecutor<BabyBear, DenseRecordArena> for PowdrExecutor<ISA> {\n    fn execute(\n        &self,\n        state: VmStateMut<BabyBear, TracingMemory, DenseRecordArena>,\n        _: &Instruction<BabyBear>,\n    ) -> Result<(), ExecutionError> {\n        // Extract the state components, since `execute` consumes the state but we need to pass it to each instruction execution\n        let VmStateMut {\n            pc,\n            memory,\n            streams,\n            rng,\n            custom_pvs,\n            ctx,\n            #[cfg(feature = \"metrics\")]\n            metrics,\n        } = state;\n\n        // Initialize the original arenas if not already initialized\n        let mut original_arenas = self.original_arenas_gpu.as_ref().borrow_mut();\n\n        // Recover an (over)estimate of how many times the APC is called in this segment\n        // Overestimate is fine because we can initialize dummy arenas with some extra space\n        // Exact apc call count from execution is used in final 
tracegen regardless\n        let apc_call_count = || {\n            let apc_width = self.apc.machine().main_columns().count();\n            let bytes_per_row = apc_width * std::mem::size_of::<u32>();\n            let buf = ctx.records_buffer.get_ref();\n            buf.len() / bytes_per_row\n        };\n\n        let original_arenas =\n            original_arenas.ensure_initialized(apc_call_count, &self.air_by_opcode_id, &self.apc);\n\n        // execute the original instructions one by one\n        for (instruction, cached_meta) in\n            self.apc.instructions().zip(&self.cached_instructions_meta)\n        {\n            let executor = &self.executor_inventory.executors[cached_meta.executor_index];\n\n            let ctx_arena = if cached_meta.should_use_real_arena {\n                original_arenas.real_arena_mut_by_index(cached_meta.arena_index)\n            } else {\n                original_arenas.dummy_arena_mut_by_index(cached_meta.arena_index)\n            };\n\n            let state = VmStateMut {\n                pc,\n                memory,\n                streams,\n                rng,\n                custom_pvs,\n                // We execute in the context of the relevant original table\n                ctx: ctx_arena,\n                // TODO: should we pass around the same metrics object, or snapshot it at the beginning of this method and apply a single update at the end?\n                #[cfg(feature = \"metrics\")]\n                metrics,\n            };\n\n            executor.execute(state, &instruction.inner)?;\n        }\n\n        // Update the real number of calls to the APC\n        original_arenas.number_of_calls += 1;\n\n        Ok(())\n    }\n\n    fn get_opcode_name(&self, opcode: usize) -> String {\n        format!(\"APC_{opcode}\")\n    }\n}\n\nimpl<ISA: OpenVmISA> PowdrExecutor<ISA> {\n    pub fn new(\n        air_by_opcode_id: OriginalAirs<BabyBear, ISA>,\n        base_config: OriginalVmConfig<ISA>,\n        apc: 
IsaApc<BabyBear, ISA>,\n        record_arena_by_air_name_cpu: Rc<RefCell<OriginalArenas<MatrixRecordArena<BabyBear>>>>,\n        record_arena_by_air_name_gpu: Rc<RefCell<OriginalArenas<DenseRecordArena>>>,\n        height_change: u32,\n    ) -> Self {\n        let executor_inventory = base_config.config.create_executors().unwrap();\n\n        let arena_index_by_name =\n            record_arena_dimension_by_air_name_per_apc_call(apc.as_ref(), &air_by_opcode_id)\n                .iter()\n                .enumerate()\n                .map(|(idx, (name, _))| (name.clone(), idx))\n                .collect::<HashMap<_, _>>();\n\n        let cached_instructions_meta = apc\n            .instructions()\n            .zip_eq(apc.subs.iter())\n            .map(|(instruction, sub)| {\n                let executor_index = *executor_inventory\n                    .instruction_lookup\n                    .get(&instruction.inner.opcode)\n                    .expect(\"missing executor for opcode\")\n                    as usize;\n                let (air_name, _) = air_by_opcode_id.get_instruction_air_and_id(instruction);\n                let arena_index = *arena_index_by_name\n                    .get(&air_name)\n                    .expect(\"missing arena for air\");\n                CachedInstructionMeta {\n                    executor_index,\n                    arena_index,\n                    should_use_real_arena: !sub.is_empty(),\n                }\n            })\n            .collect();\n\n        Self {\n            air_by_opcode_id,\n            executor_inventory,\n            apc,\n            original_arenas_cpu: record_arena_by_air_name_cpu,\n            original_arenas_gpu: record_arena_by_air_name_gpu,\n            height_change,\n            cached_instructions_meta,\n        }\n    }\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/mod.rs",
    "content": "/// The core logic of our extension\npub mod chip;\n/// The executor for the powdr instructions\npub mod executor;\n/// The trace generator for the powdr instructions\npub mod trace_generator;\n\n/// The opcodes for the powdr instructions, which is used in the chip implementation and contains the opcode ID\npub mod opcode;\n/// The integration of our extension with the VM\nmod vm;\n\npub use opcode::PowdrOpcode;\npub use vm::{PowdrExtension, PowdrExtensionExecutor, PowdrPrecompile};\n"
  },
  {
    "path": "openvm/src/powdr_extension/opcode.rs",
    "content": "use openvm_instructions::LocalOpcode;\nuse serde::{Deserialize, Serialize};\n\n#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]\npub struct PowdrOpcode {\n    pub class_offset: usize,\n}\n\nimpl LocalOpcode for PowdrOpcode {\n    // This offset must not be accessed, since we want many opcodes of the same type to have different class_offsets.\n    // This is because each opcode has its own air.\n    const CLASS_OFFSET: usize = unreachable!();\n\n    fn from_usize(value: usize) -> Self {\n        Self {\n            class_offset: value,\n        }\n    }\n\n    // The local offset is always 0, since we want to have many opcodes over the same air.\n    fn local_usize(&self) -> usize {\n        0\n    }\n\n    // The global opcode is based on `class_offset`, *NOT* on the static `CLASS_OFFSET`.\n    fn global_opcode(&self) -> openvm_instructions::VmOpcode {\n        openvm_instructions::VmOpcode::from_usize(self.class_offset)\n    }\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/common.rs",
    "content": "use derive_more::From;\nuse openvm_circuit::system::phantom::PhantomExecutor;\nuse openvm_circuit_derive::{AnyEnum, Executor, MeteredExecutor, PreflightExecutor};\nuse openvm_circuit_primitives::Chip;\nuse openvm_stark_backend::p3_field::PrimeField32;\n\nuse crate::isa::OpenVmISA;\n\n#[allow(clippy::large_enum_variant)]\n#[derive(Chip, PreflightExecutor, Executor, MeteredExecutor, AnyEnum)]\npub enum DummyExecutor<F: PrimeField32, ISA: OpenVmISA> {\n    #[any_enum]\n    Base(ISA::Executor<F>),\n    #[any_enum]\n    Shared(SharedExecutor<F>),\n}\n\n#[derive(Chip, PreflightExecutor, Executor, MeteredExecutor, From, AnyEnum)]\npub enum SharedExecutor<F: PrimeField32> {\n    Phantom(PhantomExecutor<F>),\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/cpu/inventory.rs",
    "content": "use openvm_circuit::{\n    arch::{MatrixRecordArena, VmChipComplex},\n    system::SystemChipInventory,\n};\nuse openvm_stark_backend::{config::Val, prover::cpu::CpuBackend};\n\n/// A dummy inventory used for execution of autoprecompiles\n/// It extends the `SdkVmConfigExecutor` and `SdkVmConfigPeriphery`, providing them with shared, pre-loaded periphery chips to avoid memory allocations by each SDK chip\npub type DummyChipComplex<SC> =\n    VmChipComplex<SC, MatrixRecordArena<Val<SC>>, CpuBackend<SC>, SystemChipInventory<SC>>;\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/cpu/mod.rs",
    "content": "use std::{collections::HashMap, sync::Arc};\n\nuse itertools::Itertools;\nuse openvm_circuit::{arch::MatrixRecordArena, utils::next_power_of_two_or_zero};\nuse openvm_stark_backend::{\n    p3_field::{Field, FieldAlgebra, PrimeField32},\n    p3_matrix::dense::{DenseMatrix, RowMajorMatrix},\n    prover::{hal::ProverBackend, types::AirProvingContext},\n    Chip,\n};\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::trace_handler::TraceTrait;\nuse powdr_constraint_solver::constraint_system::ComputationMethod;\n\nuse crate::{\n    extraction_utils::{OriginalAirs, OriginalVmConfig},\n    isa::IsaApc,\n    isa::OpenVmISA,\n    powdr_extension::{chip::PowdrChipCpu, executor::OriginalArenas},\n};\n\n/// The inventory of the PowdrExecutor, which contains the executors for each opcode.\nmod inventory;\n/// The shared periphery chips used by the PowdrTraceGenerator\nmod periphery;\n\npub use inventory::DummyChipComplex;\npub use periphery::{\n    PowdrPeripheryInstancesCpu, SharedPeripheryChipsCpu, SharedPeripheryChipsCpuProverExt,\n};\n\n/// A wrapper around a DenseMatrix to implement `TraceTrait` which is required for `generate_trace`.\npub struct SharedCpuTrace<F> {\n    pub matrix: Arc<RowMajorMatrix<F>>,\n}\n\nimpl<F: Send + Sync> TraceTrait<F> for SharedCpuTrace<F> {\n    type Values = Vec<F>;\n\n    fn width(&self) -> usize {\n        self.matrix.width\n    }\n\n    fn values(&self) -> &Self::Values {\n        &self.matrix.values\n    }\n}\n\nimpl<F> From<Arc<RowMajorMatrix<F>>> for SharedCpuTrace<F> {\n    fn from(matrix: Arc<RowMajorMatrix<F>>) -> Self {\n        Self { matrix }\n    }\n}\n\nimpl<R, PB: ProverBackend<Matrix = Arc<RowMajorMatrix<BabyBear>>>, ISA: OpenVmISA> Chip<R, PB>\n    for PowdrChipCpu<ISA>\n{\n    fn generate_proving_ctx(&self, _: R) -> AirProvingContext<PB> {\n        tracing::trace!(\"Generating air proof input for PowdrChip {}\", self.name);\n\n        let trace = self\n            .trace_generator\n    
        .generate_witness(self.record_arena_by_air_name.take());\n\n        AirProvingContext::simple(Arc::new(trace), vec![])\n    }\n}\n\npub struct PowdrTraceGeneratorCpu<ISA: OpenVmISA> {\n    pub apc: IsaApc<BabyBear, ISA>,\n    pub original_airs: OriginalAirs<BabyBear, ISA>,\n    pub config: OriginalVmConfig<ISA>,\n    pub periphery: PowdrPeripheryInstancesCpu<ISA>,\n}\n\nimpl<ISA: OpenVmISA> PowdrTraceGeneratorCpu<ISA> {\n    pub fn new(\n        apc: IsaApc<BabyBear, ISA>,\n        original_airs: OriginalAirs<BabyBear, ISA>,\n        config: OriginalVmConfig<ISA>,\n        periphery: PowdrPeripheryInstancesCpu<ISA>,\n    ) -> Self {\n        Self {\n            apc,\n            original_airs,\n            config,\n            periphery,\n        }\n    }\n\n    pub fn generate_witness(\n        &self,\n        original_arenas: OriginalArenas<MatrixRecordArena<BabyBear>>,\n    ) -> DenseMatrix<BabyBear> {\n        use powdr_autoprecompiles::trace_handler::{generate_trace, TraceData};\n\n        let width = self.apc.machine().main_columns().count();\n\n        let mut original_arenas = match original_arenas {\n            OriginalArenas::Initialized(arenas) => arenas,\n            OriginalArenas::Uninitialized => {\n                // if the arenas are uninitialized, the apc was not called, so we return an empty trace\n                return RowMajorMatrix::new(vec![], width);\n            }\n        };\n\n        let num_apc_calls = original_arenas.number_of_calls;\n\n        let chip_inventory = {\n            let airs = ISA::create_dummy_airs(self.config.config(), self.periphery.dummy.clone())\n                .expect(\"Failed to create dummy airs\");\n\n            ISA::create_dummy_chip_complex_cpu(\n                self.config.config(),\n                airs,\n                self.periphery.dummy.clone(),\n            )\n            .expect(\"Failed to create chip complex\")\n            .inventory\n        };\n\n        let dummy_trace_by_air_name: 
HashMap<String, SharedCpuTrace<BabyBear>> = chip_inventory\n            .chips()\n            .iter()\n            .enumerate()\n            .rev()\n            .filter_map(|(insertion_idx, chip)| {\n                let air_name = chip_inventory.airs().ext_airs()[insertion_idx].name();\n\n                let record_arena = {\n                    match original_arenas.take_real_arena(&air_name) {\n                        Some(ra) => ra,\n                        None => return None, // skip this iteration, because we only have record arena for chips that are used\n                    }\n                };\n\n                let shared_trace = chip.generate_proving_ctx(record_arena).common_main.unwrap();\n\n                Some((air_name, SharedCpuTrace::from(shared_trace)))\n            })\n            .collect();\n\n        let TraceData {\n            dummy_values,\n            dummy_trace_index_to_apc_index_by_instruction,\n            apc_poly_id_to_index,\n            columns_to_compute,\n        } = generate_trace(\n            &dummy_trace_by_air_name,\n            &self.original_airs,\n            num_apc_calls,\n            &self.apc,\n        );\n\n        // allocate for apc trace\n        let width = apc_poly_id_to_index.len();\n        let height = next_power_of_two_or_zero(num_apc_calls);\n        let mut values = <BabyBear as FieldAlgebra>::zero_vec(height * width);\n\n        // go through the final table and fill in the values\n        values\n            // a record is `width` values\n            // TODO: optimize by parallelizing on chunks of rows, currently fails because `dyn AnyChip<MatrixRecordArena<Val<SC>>>` is not `Send`\n            .chunks_mut(width)\n            .zip(dummy_values)\n            .for_each(|(row_slice, dummy_values)| {\n                // map the dummy rows to the autoprecompile row\n\n                use powdr_autoprecompiles::expression::MappingRowEvaluator;\n                for (dummy_row, dummy_trace_index_to_apc_index) 
in dummy_values\n                    .iter()\n                    .map(|r| &r.data[r.start..r.start + r.length])\n                    .zip_eq(&dummy_trace_index_to_apc_index_by_instruction)\n                {\n                    for (dummy_trace_index, apc_index) in dummy_trace_index_to_apc_index {\n                        row_slice[*apc_index] = dummy_row[*dummy_trace_index];\n                    }\n                }\n\n                // Fill in the columns we have to compute from other columns\n                // (these are either new columns or for example the \"is_valid\" column).\n                for derived_column in columns_to_compute {\n                    let col_index = apc_poly_id_to_index[&derived_column.variable.id];\n                    row_slice[col_index] = match &derived_column.computation_method {\n                        ComputationMethod::Constant(c) => *c,\n                        ComputationMethod::QuotientOrZero(e1, e2) => {\n                            use powdr_number::ExpressionConvertible;\n\n                            let divisor_val = e2.to_expression(&|n| *n, &|column_ref| {\n                                row_slice[apc_poly_id_to_index[&column_ref.id]]\n                            });\n                            if divisor_val.is_zero() {\n                                BabyBear::ZERO\n                            } else {\n                                divisor_val.inverse()\n                                    * e1.to_expression(&|n| *n, &|column_ref| {\n                                        row_slice[apc_poly_id_to_index[&column_ref.id]]\n                                    })\n                            }\n                        }\n                    };\n                }\n\n                let evaluator = MappingRowEvaluator::new(row_slice, &apc_poly_id_to_index);\n\n                // replay the side effects of this row on the main periphery\n                self.apc\n                    .machine()\n                   
 .bus_interactions\n                    .iter()\n                    .for_each(|interaction| {\n                        use powdr_autoprecompiles::expression::{\n                            AlgebraicEvaluator, ConcreteBusInteraction,\n                        };\n\n                        let ConcreteBusInteraction { id, mult, args } =\n                            evaluator.eval_bus_interaction(interaction);\n                        self.periphery.real.apply(\n                            id as u16,\n                            mult.as_canonical_u32(),\n                            args.map(|arg| arg.as_canonical_u32()),\n                            &self.periphery.bus_ids,\n                        );\n                    });\n            });\n\n        RowMajorMatrix::new(values, width)\n    }\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/cpu/periphery.rs",
    "content": "use std::marker::PhantomData;\n\nuse crate::powdr_extension::trace_generator::common::DummyExecutor;\nuse openvm_circuit::arch::{\n    AirInventory, AirInventoryError, ChipInventory, ChipInventoryError, ExecutorInventoryBuilder,\n    ExecutorInventoryError, VmCircuitExtension, VmExecutionExtension, VmProverExtension,\n};\nuse openvm_circuit_primitives::{\n    bitwise_op_lookup::{\n        BitwiseOperationLookupAir, BitwiseOperationLookupChip, SharedBitwiseOperationLookupChip,\n    },\n    range_tuple::{RangeTupleCheckerAir, RangeTupleCheckerChip, SharedRangeTupleCheckerChip},\n    var_range::{SharedVariableRangeCheckerChip, VariableRangeCheckerAir},\n};\nuse openvm_stark_backend::{config::StarkGenericConfig, p3_field::PrimeField32};\n\nuse itertools::Itertools;\nuse openvm_circuit::arch::RowMajorMatrixArena;\nuse openvm_stark_backend::config::Val;\nuse openvm_stark_backend::engine::StarkEngine;\nuse openvm_stark_backend::prover::cpu::{CpuBackend, CpuDevice};\n\nuse crate::{isa::OpenVmISA, PeripheryBusIds};\n\n/// The shared chips which can be used by the PowdrChip.\n#[derive(Clone)]\npub struct PowdrPeripheryInstancesCpu<ISA> {\n    /// The real chips used for the main execution.\n    pub real: SharedPeripheryChipsCpu<ISA>,\n    /// The dummy chips used for all APCs. 
They share the range checker but create new instances of the bitwise lookup chip and the tuple range checker.\n    pub dummy: SharedPeripheryChipsCpu<ISA>,\n    /// The bus ids of the periphery\n    pub bus_ids: PeripheryBusIds,\n}\n\n#[derive(Clone)]\npub struct SharedPeripheryChipsCpu<ISA> {\n    pub bitwise_lookup_8: Option<SharedBitwiseOperationLookupChip<8>>,\n    pub range_checker: SharedVariableRangeCheckerChip,\n    pub tuple_range_checker: Option<SharedRangeTupleCheckerChip<2>>,\n    _marker: PhantomData<ISA>,\n}\n\nimpl<ISA> PowdrPeripheryInstancesCpu<ISA> {\n    pub fn new(\n        range_checker: SharedVariableRangeCheckerChip,\n        bitwise_8: Option<SharedBitwiseOperationLookupChip<8>>,\n        tuple_range_checker: Option<SharedRangeTupleCheckerChip<2>>,\n        bus_ids: PeripheryBusIds,\n    ) -> Self {\n        Self {\n            real: SharedPeripheryChipsCpu {\n                bitwise_lookup_8: bitwise_8.clone(),\n                range_checker: range_checker.clone(),\n                tuple_range_checker: tuple_range_checker.clone(),\n                _marker: PhantomData,\n            },\n            // Bitwise lookup and tuple range checker do not need to be shared with the main execution:\n            // If we did share, we'd have to roll back the side effects of execution and apply the side effects from the apc air onto the main periphery.\n            // By not sharing them, we can throw away the dummy ones after execution and only apply the side effects from the apc air onto the main periphery.\n            dummy: SharedPeripheryChipsCpu {\n                bitwise_lookup_8: bitwise_8.map(|bitwise_8| {\n                    SharedBitwiseOperationLookupChip::new(BitwiseOperationLookupChip::new(\n                        bitwise_8.bus(),\n                    ))\n                }),\n                range_checker: range_checker.clone(),\n                tuple_range_checker: tuple_range_checker.map(|tuple_range_checker| {\n                    
SharedRangeTupleCheckerChip::new(RangeTupleCheckerChip::new(\n                        *tuple_range_checker.bus(),\n                    ))\n                }),\n                _marker: PhantomData,\n            },\n            bus_ids,\n        }\n    }\n}\n\nimpl<F: PrimeField32, ISA: OpenVmISA> VmExecutionExtension<F> for SharedPeripheryChipsCpu<ISA> {\n    type Executor = DummyExecutor<F, ISA>;\n\n    fn extend_execution(\n        &self,\n        _: &mut ExecutorInventoryBuilder<F, Self::Executor>,\n    ) -> Result<(), ExecutorInventoryError> {\n        // No executor to add for periphery chips\n        Ok(())\n    }\n}\n\nimpl<SC: StarkGenericConfig, ISA: OpenVmISA> VmCircuitExtension<SC>\n    for SharedPeripheryChipsCpu<ISA>\n{\n    fn extend_circuit(&self, inventory: &mut AirInventory<SC>) -> Result<(), AirInventoryError> {\n        // create dummy airs\n        if let Some(bitwise_lookup_8) = &self.bitwise_lookup_8 {\n            assert!(inventory\n                .find_air::<BitwiseOperationLookupAir<8>>()\n                .next()\n                .is_none());\n            inventory.add_air(BitwiseOperationLookupAir::<8>::new(\n                bitwise_lookup_8.air.bus,\n            ));\n        }\n\n        if let Some(tuple_range_checker) = &self.tuple_range_checker {\n            assert!(inventory\n                .find_air::<RangeTupleCheckerAir<2>>()\n                .next()\n                .is_none());\n            inventory.add_air(RangeTupleCheckerAir::<2> {\n                bus: tuple_range_checker.air.bus,\n            });\n        }\n\n        // The range checker is already present in the builder because it is used by the system, so we don't add it again.\n        assert!(inventory\n            .find_air::<VariableRangeCheckerAir>()\n            .next()\n            .is_some());\n\n        Ok(())\n    }\n}\n\npub struct SharedPeripheryChipsCpuProverExt;\n\n// We implement an extension to make it easy to pre-load the shared chips into the VM 
inventory.\n// This implementation is specific to CpuBackend because the lookup chips (VariableRangeChecker,\n// BitwiseOperationLookupChip) are specific to CpuBackend.\nimpl<E, SC, RA, ISA: OpenVmISA> VmProverExtension<E, RA, SharedPeripheryChipsCpu<ISA>>\n    for SharedPeripheryChipsCpuProverExt\nwhere\n    SC: StarkGenericConfig,\n    E: StarkEngine<SC = SC, PB = CpuBackend<SC>, PD = CpuDevice<SC>>,\n    RA: RowMajorMatrixArena<Val<SC>>,\n    Val<SC>: PrimeField32,\n{\n    fn extend_prover(\n        &self,\n        extension: &SharedPeripheryChipsCpu<ISA>,\n        inventory: &mut ChipInventory<SC, RA, CpuBackend<SC>>,\n    ) -> Result<(), ChipInventoryError> {\n        // Sanity check that the shared chips are not already present in the builder.\n        if let Some(bitwise_lookup_8) = &extension.bitwise_lookup_8 {\n            assert!(inventory\n                .find_chip::<SharedBitwiseOperationLookupChip<8>>()\n                .next()\n                .is_none());\n            inventory.add_periphery_chip(bitwise_lookup_8.clone());\n        }\n\n        if let Some(tuple_checker) = &extension.tuple_range_checker {\n            assert!(inventory\n                .find_chip::<SharedRangeTupleCheckerChip<2>>()\n                .next()\n                .is_none());\n            inventory.add_periphery_chip(tuple_checker.clone());\n        }\n\n        // The range checker is already present in the builder because it is used by the system, so we don't add it again.\n        assert!(inventory\n            .find_chip::<SharedVariableRangeCheckerChip>()\n            .next()\n            .is_some());\n\n        Ok(())\n    }\n}\n\nimpl<ISA> SharedPeripheryChipsCpu<ISA> {\n    /// Sends concrete values to the shared chips using a given bus id.\n    /// Panics if the bus id doesn't match any of the chips' bus ids.\n    pub fn apply(\n        &self,\n        bus_id: u16,\n        mult: u32,\n        mut args: impl Iterator<Item = u32>,\n        periphery_bus_ids: 
&PeripheryBusIds,\n    ) {\n        match bus_id {\n            id if Some(id) == periphery_bus_ids.bitwise_lookup => {\n                // bitwise operation lookup\n                // interpret the arguments, see `Air<AB> for BitwiseOperationLookupAir<NUM_BITS>`\n                let [x, y, x_xor_y, selector] = [\n                    args.next().unwrap(),\n                    args.next().unwrap(),\n                    args.next().unwrap(),\n                    args.next().unwrap(),\n                ];\n\n                for _ in 0..mult {\n                    match selector {\n                        0 => {\n                            self.bitwise_lookup_8.as_ref().unwrap().request_range(x, y);\n                        }\n                        1 => {\n                            let res = self.bitwise_lookup_8.as_ref().unwrap().request_xor(x, y);\n                            debug_assert_eq!(res, x_xor_y);\n                        }\n                        _ => {\n                            unreachable!(\"Invalid selector\");\n                        }\n                    }\n                }\n            }\n            id if id == periphery_bus_ids.range_checker => {\n                // interpret the arguments, see `Air<AB> for VariableRangeCheckerAir`\n                let [value, max_bits] = [args.next().unwrap(), args.next().unwrap()];\n\n                for _ in 0..mult {\n                    self.range_checker.add_count(value, max_bits as usize);\n                }\n            }\n            id if Some(id) == periphery_bus_ids.tuple_range_checker => {\n                // tuple range checker\n                // We pass a slice. 
It is checked inside `add_count`.\n                let args = args.collect_vec();\n                for _ in 0..mult {\n                    self.tuple_range_checker.as_ref().unwrap().add_count(&args);\n                }\n            }\n            0..=2 => {\n                // execution bridge, memory, pc lookup\n                // do nothing\n            }\n            _ => {\n                unreachable!(\"Bus interaction {} not implemented\", bus_id);\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/cuda/inventory.rs",
    "content": "use openvm_circuit::{\n    arch::{DenseRecordArena, VmChipComplex},\n    system::cuda::SystemChipInventoryGPU,\n};\nuse openvm_cuda_backend::prover_backend::GpuBackend;\n\npub type GpuDummyChipComplex<SC> =\n    VmChipComplex<SC, DenseRecordArena, GpuBackend, SystemChipInventoryGPU>;\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/cuda/mod.rs",
    "content": "use std::collections::{BTreeMap, HashMap};\n\nuse itertools::Itertools;\nuse openvm_circuit::{\n    arch::{ChipInventory, DenseRecordArena},\n    utils::next_power_of_two_or_zero,\n};\nuse openvm_cuda_backend::base::DeviceMatrix;\nuse openvm_cuda_common::copy::MemCopyH2D;\nuse openvm_stark_backend::{\n    p3_field::PrimeField32,\n    prover::{hal::ProverBackend, types::AirProvingContext},\n    Chip,\n};\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::{\n    expression::{AlgebraicExpression, AlgebraicReference},\n    symbolic_machine::SymbolicBusInteraction,\n};\nuse powdr_constraint_solver::constraint_system::{ComputationMethod, DerivedVariable};\nuse powdr_expression::{AlgebraicBinaryOperator, AlgebraicUnaryOperator};\n\nuse crate::{\n    cuda_abi::{self, DerivedExprSpec, DevInteraction, ExprSpan, OpCode, OriginalAir, Subst},\n    extraction_utils::{OriginalAirs, OriginalVmConfig},\n    isa::{IsaApc, OpenVmISA},\n    powdr_extension::{chip::PowdrChipGpu, executor::OriginalArenas},\n    BabyBearSC, GpuBackend,\n};\n\nmod inventory;\nmod periphery;\n\npub use inventory::GpuDummyChipComplex;\npub use periphery::{\n    PowdrPeripheryInstancesGpu, SharedPeripheryChipsGpu, SharedPeripheryChipsGpuProverExt,\n};\n\n/// Encodes an algebraic expression into GPU stack-machine bytecode.\n///\n/// Appends instructions to `bc` representing `expr` using the opcodes in `OpCode`.\n/// References are encoded as `PushApc` with a column-major offset computed from\n/// `id_to_apc_index` and `apc_height` (offset = apc_col_index * apc_height).\n/// Constants are encoded as `PushConst` followed by the field element as `u32`.\n/// Unary minus and binary operations map to `Neg`, `Add`, `Sub`, and `Mul`.\n///\n/// Note: This function does not track or enforce the evaluation stack depth,\n/// which is done in device code.\nfn emit_expr(\n    bc: &mut Vec<u32>,\n    expr: &AlgebraicExpression<BabyBear>,\n    id_to_apc_index: &BTreeMap<u64, usize>,\n  
  apc_height: usize,\n) {\n    match expr {\n        AlgebraicExpression::Number(c) => {\n            bc.push(OpCode::PushConst as u32);\n            bc.push(c.as_canonical_u32());\n        }\n        AlgebraicExpression::Reference(r) => {\n            let idx = (id_to_apc_index[&r.id] * apc_height) as u32;\n            bc.push(OpCode::PushApc as u32);\n            bc.push(idx);\n        }\n        AlgebraicExpression::UnaryOperation(u) => {\n            emit_expr(bc, &u.expr, id_to_apc_index, apc_height);\n            match u.op {\n                AlgebraicUnaryOperator::Minus => bc.push(OpCode::Neg as u32),\n            }\n        }\n        AlgebraicExpression::BinaryOperation(b) => {\n            emit_expr(bc, &b.left, id_to_apc_index, apc_height);\n            emit_expr(bc, &b.right, id_to_apc_index, apc_height);\n            match b.op {\n                AlgebraicBinaryOperator::Add => bc.push(OpCode::Add as u32),\n                AlgebraicBinaryOperator::Sub => bc.push(OpCode::Sub as u32),\n                AlgebraicBinaryOperator::Mul => bc.push(OpCode::Mul as u32),\n            }\n        }\n    }\n}\n\n/// Given the current bytecode, appends bytecode for the expression `expr` and returns the associated span\nfn emit_expr_span(\n    bc: &mut Vec<u32>,\n    expr: &AlgebraicExpression<BabyBear>,\n    id_to_apc_index: &BTreeMap<u64, usize>,\n    apc_height: usize,\n) -> ExprSpan {\n    // The span starts where the bytecode currently ends\n    let off = bc.len() as u32;\n    // Append the bytecode for `expr`\n    emit_expr(bc, expr, id_to_apc_index, apc_height);\n    // Calculate the length of the span\n    let len = (bc.len() as u32) - off;\n    ExprSpan { off, len }\n}\n\n/// Compile derived columns to GPU bytecode according to input order.\nfn compile_derived_to_gpu(\n    derived_columns: &[DerivedVariable<\n        BabyBear,\n        AlgebraicReference,\n        AlgebraicExpression<BabyBear>,\n    >],\n    apc_poly_id_to_index: &BTreeMap<u64, usize>,\n    
apc_height: usize,\n) -> (Vec<DerivedExprSpec>, Vec<u32>) {\n    let mut specs = Vec::with_capacity(derived_columns.len());\n    let mut bytecode = Vec::new();\n\n    for DerivedVariable {\n        variable,\n        computation_method,\n    } in derived_columns\n    {\n        let apc_col_index = apc_poly_id_to_index[&variable.id];\n        let off = bytecode.len() as u32;\n        match computation_method {\n            ComputationMethod::Constant(c) => {\n                // Encode constant as an expression\n                bytecode.push(OpCode::PushConst as u32);\n                bytecode.push(c.as_canonical_u32());\n            }\n            ComputationMethod::QuotientOrZero(e1, e2) => {\n                // Invert denominator (or use zero), then multiply with numerator.\n                emit_expr(&mut bytecode, e2, apc_poly_id_to_index, apc_height);\n                bytecode.push(OpCode::InvOrZero as u32);\n                emit_expr(&mut bytecode, e1, apc_poly_id_to_index, apc_height);\n                bytecode.push(OpCode::Mul as u32);\n            }\n        }\n        let len = (bytecode.len() as u32) - off;\n        specs.push(DerivedExprSpec {\n            col_base: (apc_col_index * apc_height) as u64,\n            span: ExprSpan { off, len },\n        });\n    }\n\n    (specs, bytecode)\n}\n\npub fn compile_bus_to_gpu(\n    bus_interactions: &[SymbolicBusInteraction<BabyBear>],\n    apc_poly_id_to_index: &BTreeMap<u64, usize>,\n    apc_height: usize,\n) -> (Vec<DevInteraction>, Vec<ExprSpan>, Vec<u32>) {\n    let mut interactions = Vec::with_capacity(bus_interactions.len());\n    let mut arg_spans = Vec::new();\n    let mut bytecode = Vec::new();\n\n    for bus_interaction in bus_interactions {\n        // multiplicity as first arg span\n        let args_index_off = arg_spans.len() as u32;\n        let mult_span = emit_expr_span(\n            &mut bytecode,\n            &bus_interaction.mult,\n            apc_poly_id_to_index,\n            apc_height,\n  
      );\n        arg_spans.push(mult_span);\n\n        // args\n        for arg in &bus_interaction.args {\n            let span = emit_expr_span(&mut bytecode, arg, apc_poly_id_to_index, apc_height);\n            arg_spans.push(span);\n        }\n\n        interactions.push(DevInteraction {\n            bus_id: (bus_interaction.id as u32),\n            num_args: bus_interaction.args.len() as u32,\n            args_index_off,\n        });\n    }\n\n    (interactions, arg_spans, bytecode)\n}\n\npub struct PowdrTraceGeneratorGpu<ISA: OpenVmISA> {\n    pub apc: IsaApc<BabyBear, ISA>,\n    pub original_airs: OriginalAirs<BabyBear, ISA>,\n    pub config: OriginalVmConfig<ISA>,\n    pub periphery: PowdrPeripheryInstancesGpu<ISA>,\n}\n\nimpl<ISA: OpenVmISA> PowdrTraceGeneratorGpu<ISA> {\n    pub fn new(\n        apc: IsaApc<BabyBear, ISA>,\n        original_airs: OriginalAirs<BabyBear, ISA>,\n        config: OriginalVmConfig<ISA>,\n        periphery: PowdrPeripheryInstancesGpu<ISA>,\n    ) -> Self {\n        Self {\n            apc,\n            original_airs,\n            config,\n            periphery,\n        }\n    }\n\n    fn try_generate_witness(\n        &self,\n        original_arenas: OriginalArenas<DenseRecordArena>,\n    ) -> Option<DeviceMatrix<BabyBear>> {\n        let mut original_arenas = match original_arenas {\n            OriginalArenas::Initialized(arenas) => arenas,\n            OriginalArenas::Uninitialized => {\n                // if the arenas are uninitialized, the apc was not called, so we return early\n                return None;\n            }\n        };\n\n        let num_apc_calls = original_arenas.number_of_calls;\n\n        let chip_inventory: ChipInventory<BabyBearSC, DenseRecordArena, GpuBackend> = {\n            let airs = ISA::create_dummy_airs(self.config.config(), self.periphery.dummy.clone())\n                .expect(\"Failed to create dummy airs\");\n\n            ISA::create_dummy_chip_complex_gpu(\n                
self.config.config(),\n                airs,\n                self.periphery.dummy.clone(),\n            )\n            .expect(\"Failed to create chip complex\")\n            .inventory\n        };\n\n        let dummy_trace_by_air_name: HashMap<String, DeviceMatrix<BabyBear>> = chip_inventory\n            .chips()\n            .iter()\n            .enumerate()\n            .rev()\n            .filter_map(|(insertion_idx, chip)| {\n                let air_name = chip_inventory.airs().ext_airs()[insertion_idx].name();\n\n                let record_arena = {\n                    match original_arenas.take_real_arena(&air_name) {\n                        Some(ra) => ra,\n                        None => return None, // skip this iteration, because we only have record arena for chips that are used\n                    }\n                };\n\n                // We might have initialized an arena for an AIR which ends up having no real records. It gets filtered out here.\n                chip.generate_proving_ctx(record_arena)\n                    .common_main\n                    .map(|m| (air_name, m))\n            })\n            .collect();\n\n        // Map from apc poly id to its index in the final apc trace\n        let apc_poly_id_to_index: BTreeMap<u64, usize> = self\n            .apc\n            .machine\n            .main_columns()\n            .enumerate()\n            .map(|(index, c)| (c.id, index))\n            .collect();\n\n        // allocate for apc trace\n        let width = apc_poly_id_to_index.len();\n        let height = next_power_of_two_or_zero(num_apc_calls);\n        let mut output = DeviceMatrix::<BabyBear>::with_capacity(height, width);\n\n        // Prepare `OriginalAir` and `Subst` arrays\n        let (airs, substitutions) = {\n            self.apc\n                // go through original instructions\n                .instructions()\n                // along with their substitutions\n                .zip_eq(self.apc.subs())\n              
  // map to `(air_name, substitutions)`\n                .filter_map(|(instr, subs)| {\n                    if subs.is_empty() {\n                        None\n                    } else {\n                        Some((&self.original_airs.opcode_to_air[&instr.inner.opcode], subs))\n                    }\n                })\n                // group by air name. This results in `HashMap<air_name, Vec<subs>>` where the length of the vector is the number of rows which are created in this air, per apc call\n                .into_group_map()\n                // go through each air and its substitutions\n                .iter()\n                .enumerate()\n                .fold(\n                    (Vec::new(), Vec::new()),\n                    |(mut airs, mut substitutions), (air_index, (air_name, subs_by_row))| {\n                        // Find the substitutions that map to an apc column\n                        let new_substitutions: Vec<Subst> = subs_by_row\n                            .iter()\n                            // enumerate over them to get the row index inside the air block\n                            .enumerate()\n                            .flat_map(|(row, subs)| {\n                                // for each substitution, map to `Subst` struct\n                                subs.iter()\n                                    .map(move |sub| (row, sub))\n                                    .map(|(row, sub)| Subst {\n                                        air_index: air_index as i32,\n                                        col: sub.original_poly_index as i32,\n                                        row: row as i32,\n                                        apc_col: apc_poly_id_to_index[&sub.apc_poly_id] as i32,\n                                    })\n                            })\n                            .collect();\n\n                        // get the device dummy trace for this air\n                        let dummy_trace = 
&dummy_trace_by_air_name[*air_name];\n\n                        use openvm_stark_backend::prover::hal::MatrixDimensions;\n                        airs.push(OriginalAir {\n                            width: dummy_trace.width() as i32,\n                            height: dummy_trace.height() as i32,\n                            buffer: dummy_trace.buffer().as_ptr(),\n                            row_block_size: subs_by_row.len() as i32,\n                        });\n\n                        substitutions.extend(new_substitutions);\n\n                        (airs, substitutions)\n                    },\n                )\n        };\n\n        // Send the airs and substitutions to device\n        let airs = airs.to_device().unwrap();\n        let substitutions = substitutions.to_device().unwrap();\n\n        cuda_abi::apc_tracegen(&mut output, airs, substitutions, num_apc_calls).unwrap();\n\n        // Apply derived columns using the GPU expression evaluator\n        let (derived_specs, derived_bc) = compile_derived_to_gpu(\n            &self.apc.machine.derived_columns,\n            &apc_poly_id_to_index,\n            height,\n        );\n        // In practice `d_specs` is never empty, because we will always have `is_valid`\n        let d_specs = derived_specs.to_device().unwrap();\n        let d_bc = derived_bc.to_device().unwrap();\n        cuda_abi::apc_apply_derived_expr(&mut output, d_specs, d_bc, num_apc_calls).unwrap();\n\n        // Encode bus interactions for GPU consumption\n        let (bus_interactions, arg_spans, bytecode) = compile_bus_to_gpu(\n            &self.apc.machine.bus_interactions,\n            &apc_poly_id_to_index,\n            height,\n        );\n        let bus_interactions = bus_interactions.to_device().unwrap();\n        let arg_spans = arg_spans.to_device().unwrap();\n        let bytecode = bytecode.to_device().unwrap();\n\n        // Gather GPU inputs for periphery (bus ids, count device buffers)\n        let periphery = 
&self.periphery.real;\n\n        // Range checker\n        let var_range_bus_id = self.periphery.bus_ids.range_checker as u32;\n        let var_range_count = &periphery.range_checker.count;\n\n        // Tuple checker\n        let tuple_range_checker_chip = periphery.tuple_range_checker.as_ref().unwrap();\n        let tuple2_bus_id = self.periphery.bus_ids.tuple_range_checker.unwrap() as u32;\n        let tuple2_sizes = tuple_range_checker_chip.sizes;\n        let tuple2_count_u32 = tuple_range_checker_chip.count.as_ref();\n\n        // Bitwise lookup; NUM_BITS is fixed at 8 in CUDA\n        let bitwise_bus_id = self.periphery.bus_ids.bitwise_lookup.unwrap() as u32;\n        let bitwise_count_u32 = periphery.bitwise_lookup_8.as_ref().unwrap().count.as_ref();\n\n        // Launch GPU apply-bus to update periphery histograms on device\n        // Note that this is implicitly serialized after `apc_tracegen`,\n        // because we use the default host to device stream, which only launches\n        // the next kernel function after the prior (`apc_tracegen`) returns.\n        // This is important because bus evaluation depends on trace results.\n        cuda_abi::apc_apply_bus(\n            // APC related\n            &output,\n            num_apc_calls,\n            // Interaction related\n            bytecode,\n            bus_interactions,\n            arg_spans,\n            // Variable range checker related\n            var_range_bus_id,\n            var_range_count,\n            // Tuple range checker related\n            tuple2_bus_id,\n            tuple2_count_u32,\n            tuple2_sizes,\n            // Bitwise related\n            bitwise_bus_id,\n            bitwise_count_u32,\n        )\n        .unwrap();\n\n        Some(output)\n    }\n}\n\nimpl<R, PB: ProverBackend<Matrix = DeviceMatrix<BabyBear>>, ISA: OpenVmISA> Chip<R, PB>\n    for PowdrChipGpu<ISA>\n{\n    fn generate_proving_ctx(&self, _: R) -> AirProvingContext<PB> {\n        
tracing::trace!(\"Generating air proof input for PowdrChip {}\", self.name);\n\n        let trace = self\n            .trace_generator\n            .try_generate_witness(self.record_arena_by_air_name.take());\n\n        AirProvingContext::new(vec![], trace, vec![])\n    }\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/cuda/periphery.rs",
    "content": "use openvm_circuit::arch::{\n    AirInventory, AirInventoryError, ChipInventory, ChipInventoryError, DenseRecordArena,\n    ExecutorInventoryBuilder, ExecutorInventoryError, VmCircuitExtension, VmExecutionExtension,\n    VmProverExtension,\n};\nuse openvm_circuit_primitives::{\n    bitwise_op_lookup::{\n        BitwiseOperationLookupAir, BitwiseOperationLookupChip, BitwiseOperationLookupChipGPU,\n    },\n    range_tuple::{RangeTupleCheckerAir, RangeTupleCheckerChip, RangeTupleCheckerChipGPU},\n    var_range::{VariableRangeCheckerAir, VariableRangeCheckerChipGPU},\n};\nuse openvm_cuda_backend::engine::GpuBabyBearPoseidon2Engine;\nuse openvm_cuda_backend::prover_backend::GpuBackend;\nuse openvm_stark_backend::{config::StarkGenericConfig, p3_field::PrimeField32};\n\nuse crate::{\n    isa::OpenVmISA, powdr_extension::trace_generator::common::DummyExecutor, BabyBearSC,\n    PeripheryBusIds,\n};\nuse std::{marker::PhantomData, sync::Arc};\n\n/// The shared chips which can be used by the PowdrChipGpu.\n#[derive(Clone)]\npub struct PowdrPeripheryInstancesGpu<ISA> {\n    /// The real chips used for the main execution.\n    pub real: SharedPeripheryChipsGpu<ISA>,\n    /// The dummy chips used for all APCs. 
They share the range checker but create new instances of the bitwise lookup chip and the tuple range checker.\n    pub dummy: SharedPeripheryChipsGpu<ISA>,\n    /// The bus ids of the periphery\n    pub bus_ids: PeripheryBusIds,\n}\n\n#[derive(Clone)]\npub struct SharedPeripheryChipsGpu<ISA> {\n    pub bitwise_lookup_8: Option<std::sync::Arc<BitwiseOperationLookupChipGPU<8>>>,\n    pub range_checker: std::sync::Arc<VariableRangeCheckerChipGPU>,\n    pub tuple_range_checker: Option<std::sync::Arc<RangeTupleCheckerChipGPU<2>>>,\n    _marker: PhantomData<ISA>,\n}\n\nimpl<ISA> PowdrPeripheryInstancesGpu<ISA> {\n    pub fn new(\n        range_checker: Arc<VariableRangeCheckerChipGPU>,\n        bitwise_8: Option<Arc<BitwiseOperationLookupChipGPU<8>>>,\n        tuple_range_checker: Option<Arc<RangeTupleCheckerChipGPU<2>>>,\n        bus_ids: PeripheryBusIds,\n    ) -> Self {\n        Self {\n            real: SharedPeripheryChipsGpu {\n                bitwise_lookup_8: bitwise_8.clone(),\n                range_checker: range_checker.clone(),\n                tuple_range_checker: tuple_range_checker.clone(),\n                _marker: PhantomData,\n            },\n            dummy: SharedPeripheryChipsGpu {\n                // BitwiseLookupChipGPU is always initialized via `hybrid()` with a CPU chip in all available extensions of `SdkVmGpuBuilder::create_chip_complex()`.\n                // In case this changes in the future, `cpu_chip.unwrap()` will panic, and we can fix the code.\n                bitwise_lookup_8: bitwise_8.map(|bitwise_8| {\n                    Arc::new(BitwiseOperationLookupChipGPU::hybrid(Arc::new(\n                        BitwiseOperationLookupChip::new(\n                            bitwise_8.as_ref().cpu_chip.as_ref().unwrap().bus(),\n                        ),\n                    )))\n                }),\n                range_checker: range_checker.clone(),\n                // RangeTupleCheckerGPU is always initialized via `new()` without a CPU 
chip in all available extensions of `SdkVmGpuBuilder::create_chip_complex()`.\n                // In case this changes in the future the `Some` matching arm below will catch it.\n                tuple_range_checker: tuple_range_checker.map(|tuple_range_checker| {\n                    Arc::new({\n                        match tuple_range_checker.cpu_chip.as_ref() {\n                            // None is the expected case\n                            None => RangeTupleCheckerChipGPU::new(tuple_range_checker.sizes),\n                            Some(cpu_chip) => RangeTupleCheckerChipGPU::hybrid(Arc::new(\n                                RangeTupleCheckerChip::new(*cpu_chip.bus()),\n                            )),\n                        }\n                    })\n                }),\n                _marker: PhantomData,\n            },\n            bus_ids,\n        }\n    }\n}\n\nimpl<F: PrimeField32, ISA: OpenVmISA> VmExecutionExtension<F> for SharedPeripheryChipsGpu<ISA> {\n    type Executor = DummyExecutor<F, ISA>;\n\n    fn extend_execution(\n        &self,\n        _: &mut ExecutorInventoryBuilder<F, Self::Executor>,\n    ) -> Result<(), ExecutorInventoryError> {\n        // No executor to add for periphery chips\n        Ok(())\n    }\n}\n\nimpl<SC: StarkGenericConfig, ISA: OpenVmISA> VmCircuitExtension<SC>\n    for SharedPeripheryChipsGpu<ISA>\n{\n    fn extend_circuit(&self, inventory: &mut AirInventory<SC>) -> Result<(), AirInventoryError> {\n        // create dummy airs\n        if let Some(bitwise_lookup_8) = &self.bitwise_lookup_8 {\n            assert!(inventory\n                .find_air::<BitwiseOperationLookupAir<8>>()\n                .next()\n                .is_none());\n            inventory.add_air(BitwiseOperationLookupAir::<8>::new(\n                bitwise_lookup_8.cpu_chip.as_ref().unwrap().bus(),\n            ));\n        }\n\n        if let Some(tuple_range_checker) = &self.tuple_range_checker {\n            use 
openvm_circuit_primitives::range_tuple::RangeTupleCheckerBus;\n\n            use crate::bus_map::DEFAULT_TUPLE_RANGE_CHECKER;\n\n            assert!(inventory\n                .find_air::<RangeTupleCheckerAir<2>>()\n                .next()\n                .is_none());\n            // RangeTupleCheckerGPU is always initialized via `new()` without a CPU chip in all available extensions of `SdkVmGpuBuilder::create_chip_complex()`.\n            // Therefore we create a new bus index, following a similar scenario in `Rv32M::extend_circuit`.\n            // The bus id is hardcoded to the default and isn't guaranteed to be correct, because it depends on chip insertion order,\n            // but this won't matter because the dummy chips are thrown away anyway.\n            let bus = match tuple_range_checker.cpu_chip.as_ref() {\n                // None is the expected case\n                None => RangeTupleCheckerBus::new(\n                    DEFAULT_TUPLE_RANGE_CHECKER as u16,\n                    tuple_range_checker.sizes,\n                ),\n                Some(cpu_chip) => *cpu_chip.bus(),\n            };\n            inventory.add_air(RangeTupleCheckerAir::<2> { bus });\n        }\n\n        // The range checker is already present in the builder because it is used by the system, so we don't add it again.\n        assert!(inventory\n            .find_air::<VariableRangeCheckerAir>()\n            .nth(1)\n            .is_none());\n\n        Ok(())\n    }\n}\n\npub struct SharedPeripheryChipsGpuProverExt;\n\nimpl<ISA: OpenVmISA>\n    VmProverExtension<GpuBabyBearPoseidon2Engine, DenseRecordArena, SharedPeripheryChipsGpu<ISA>>\n    for SharedPeripheryChipsGpuProverExt\n{\n    fn extend_prover(\n        &self,\n        extension: &SharedPeripheryChipsGpu<ISA>,\n        inventory: &mut ChipInventory<BabyBearSC, DenseRecordArena, GpuBackend>,\n    ) -> Result<(), ChipInventoryError> {\n        // Sanity check that the shared chips are not already present in the 
builder.\n        if let Some(bitwise_lookup_8) = &extension.bitwise_lookup_8 {\n            assert!(inventory\n                .find_chip::<Arc<BitwiseOperationLookupChipGPU<8>>>()\n                .next()\n                .is_none());\n            inventory.add_periphery_chip(bitwise_lookup_8.clone());\n        }\n\n        if let Some(tuple_checker) = &extension.tuple_range_checker {\n            assert!(inventory\n                .find_chip::<Arc<RangeTupleCheckerChipGPU<2>>>()\n                .next()\n                .is_none());\n            inventory.add_periphery_chip(tuple_checker.clone());\n        }\n\n        // The range checker is already present in the builder because it is used by the system, so we don't add it again.\n        assert!(inventory\n            .find_chip::<Arc<VariableRangeCheckerChipGPU>>()\n            .next()\n            .is_some());\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "openvm/src/powdr_extension/trace_generator/mod.rs",
    "content": "pub mod cpu;\n#[cfg(feature = \"cuda\")]\npub mod cuda;\n\nmod common;\n\npub use cpu::{DummyChipComplex, SharedPeripheryChipsCpu};\n\n#[cfg(feature = \"cuda\")]\npub use cuda::{GpuDummyChipComplex, SharedPeripheryChipsGpu};\n"
  },
  {
    "path": "openvm/src/powdr_extension/vm.rs",
    "content": "// Mostly taken from [this openvm extension](https://github.com/openvm-org/openvm/blob/1b76fd5a900a7d69850ee9173969f70ef79c4c76/extensions/rv32im/circuit/src/extension.rs#L185) and simplified to only handle a single opcode with its necessary dependencies\n\nuse std::cell::RefCell;\nuse std::iter::once;\nuse std::rc::Rc;\n\nuse derive_more::From;\nuse openvm_circuit::arch::{DenseRecordArena, MatrixRecordArena};\n#[cfg(not(feature = \"tco\"))]\nuse openvm_instructions::instruction::Instruction;\nuse openvm_instructions::LocalOpcode;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_openvm_bus_interaction_handler::bus_map::BusMap;\n\nuse crate::customize_exe::OvmApcStats;\nuse crate::extraction_utils::{OriginalAirs, OriginalVmConfig};\nuse crate::isa::{IsaApc, OpenVmISA};\nuse crate::powdr_extension::chip::PowdrAir;\nuse crate::powdr_extension::executor::{OriginalArenas, PowdrExecutor};\nuse crate::powdr_extension::PowdrOpcode;\nuse openvm_circuit::{\n    arch::{AirInventory, AirInventoryError, VmCircuitExtension, VmExecutionExtension},\n    circuit_derive::Chip,\n};\nuse openvm_stark_backend::{\n    config::{StarkGenericConfig, Val},\n    p3_field::{Field, PrimeField32},\n};\nuse serde::{Deserialize, Serialize};\n\n#[derive(Clone, Deserialize, Serialize)]\n#[serde(bound = \"F: Field\")]\npub struct PowdrExtension<F, ISA: OpenVmISA> {\n    pub precompiles: Vec<PowdrPrecompile<F, ISA>>,\n    pub base_config: OriginalVmConfig<ISA>,\n    pub bus_map: BusMap,\n    pub airs: OriginalAirs<F, ISA>,\n}\n\n#[derive(Clone, Serialize, Deserialize)]\n#[serde(bound = \"F: Field\")]\npub struct PowdrPrecompile<F, ISA: OpenVmISA> {\n    pub name: String,\n    pub opcode: PowdrOpcode,\n    pub apc: IsaApc<F, ISA>,\n    pub apc_stats: OvmApcStats,\n    #[serde(skip)]\n    pub apc_record_arena_cpu: Rc<RefCell<OriginalArenas<MatrixRecordArena<F>>>>,\n    #[serde(skip)]\n    pub apc_record_arena_gpu: Rc<RefCell<OriginalArenas<DenseRecordArena>>>,\n}\n\nimpl<F, 
ISA: OpenVmISA> PowdrPrecompile<F, ISA> {\n    pub fn new(\n        name: String,\n        opcode: PowdrOpcode,\n        apc: IsaApc<F, ISA>,\n        apc_stats: OvmApcStats,\n    ) -> Self {\n        Self {\n            name,\n            opcode,\n            apc,\n            apc_stats,\n            // Initialize with empty Rc (default to OriginalArenas::Uninitialized) for each APC\n            apc_record_arena_cpu: Default::default(),\n            apc_record_arena_gpu: Default::default(),\n        }\n    }\n}\n\nimpl<F, ISA: OpenVmISA> PowdrExtension<F, ISA> {\n    pub fn new(\n        precompiles: Vec<PowdrPrecompile<F, ISA>>,\n        base_config: OriginalVmConfig<ISA>,\n        bus_map: BusMap,\n        airs: OriginalAirs<F, ISA>,\n    ) -> Self {\n        Self {\n            precompiles,\n            base_config,\n            bus_map,\n            airs,\n        }\n    }\n}\n\n#[derive(From, Chip)]\n#[allow(clippy::large_enum_variant)]\npub enum PowdrExtensionExecutor<ISA: OpenVmISA> {\n    Powdr(PowdrExecutor<ISA>),\n}\n\nimpl<ISA: OpenVmISA> VmExecutionExtension<BabyBear> for PowdrExtension<BabyBear, ISA> {\n    type Executor = PowdrExtensionExecutor<ISA>;\n\n    fn extend_execution(\n        &self,\n        inventory: &mut openvm_circuit::arch::ExecutorInventoryBuilder<BabyBear, Self::Executor>,\n    ) -> Result<(), openvm_circuit::arch::ExecutorInventoryError> {\n        for precompile in &self.precompiles {\n            // The apc chip uses a single row per call\n            let height_change = 1;\n\n            let powdr_executor = PowdrExtensionExecutor::Powdr(PowdrExecutor::new(\n                self.airs.clone(),\n                self.base_config.clone(),\n                precompile.apc.clone(),\n                precompile.apc_record_arena_cpu.clone(),\n                precompile.apc_record_arena_gpu.clone(),\n                height_change,\n            ));\n            inventory.add_executor(powdr_executor, 
once(precompile.opcode.global_opcode()))?;\n        }\n\n        Ok(())\n    }\n}\n\nimpl<SC, ISA: OpenVmISA> VmCircuitExtension<SC> for PowdrExtension<Val<SC>, ISA>\nwhere\n    SC: StarkGenericConfig,\n    Val<SC>: PrimeField32,\n{\n    fn extend_circuit(&self, inventory: &mut AirInventory<SC>) -> Result<(), AirInventoryError> {\n        for precompile in &self.precompiles {\n            inventory.add_air(PowdrAir::new(precompile.apc.machine.clone()));\n        }\n        Ok(())\n    }\n}\n\n// We cannot derive the implementations below due to limitations in the openvm derives\nimpl<ISA: OpenVmISA> openvm_circuit::arch::AnyEnum for PowdrExtensionExecutor<ISA> {\n    fn as_any_kind(&self) -> &dyn std::any::Any {\n        match self {\n            Self::Powdr(x) => x,\n        }\n    }\n\n    fn as_any_kind_mut(&mut self) -> &mut dyn std::any::Any {\n        match self {\n            Self::Powdr(x) => x,\n        }\n    }\n}\n\nimpl<ISA: OpenVmISA> openvm_circuit::arch::InterpreterExecutor<BabyBear>\n    for PowdrExtensionExecutor<ISA>\n{\n    fn pre_compute_size(&self) -> usize {\n        match self {\n            Self::Powdr(x) => x.pre_compute_size(),\n        }\n    }\n\n    #[cfg(not(feature = \"tco\"))]\n    fn pre_compute<Ctx>(\n        &self,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<\n        openvm_circuit::arch::ExecuteFunc<BabyBear, Ctx>,\n        openvm_circuit::arch::StaticProgramError,\n    >\n    where\n        Ctx: openvm_circuit::arch::execution_mode::ExecutionCtxTrait,\n    {\n        match self {\n            Self::Powdr(x) => x.pre_compute(pc, inst, data),\n        }\n    }\n\n    #[cfg(feature = \"tco\")]\n    fn handler<Ctx>(\n        &self,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<\n        openvm_circuit::arch::Handler<BabyBear, Ctx>,\n        openvm_circuit::arch::StaticProgramError,\n    >\n    where\n        Ctx: 
openvm_circuit::arch::execution_mode::ExecutionCtxTrait,\n    {\n        match self {\n            Self::Powdr(x) => x.handler(pc, inst, data),\n        }\n    }\n}\n\nimpl<ISA: OpenVmISA> openvm_circuit::arch::InterpreterMeteredExecutor<BabyBear>\n    for PowdrExtensionExecutor<ISA>\n{\n    fn metered_pre_compute_size(&self) -> usize {\n        match self {\n            Self::Powdr(x) => x.metered_pre_compute_size(),\n        }\n    }\n\n    #[cfg(not(feature = \"tco\"))]\n    fn metered_pre_compute<Ctx>(\n        &self,\n        chip_idx: usize,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<\n        openvm_circuit::arch::ExecuteFunc<BabyBear, Ctx>,\n        openvm_circuit::arch::StaticProgramError,\n    >\n    where\n        Ctx: openvm_circuit::arch::execution_mode::MeteredExecutionCtxTrait,\n    {\n        match self {\n            Self::Powdr(x) => x.metered_pre_compute(chip_idx, pc, inst, data),\n        }\n    }\n\n    #[cfg(feature = \"tco\")]\n    fn metered_handler<Ctx>(\n        &self,\n        chip_idx: usize,\n        pc: u32,\n        inst: &Instruction<BabyBear>,\n        data: &mut [u8],\n    ) -> Result<\n        openvm_circuit::arch::Handler<BabyBear, Ctx>,\n        openvm_circuit::arch::StaticProgramError,\n    >\n    where\n        Ctx: openvm_circuit::arch::execution_mode::MeteredExecutionCtxTrait,\n    {\n        match self {\n            Self::Powdr(x) => x.metered_handler(chip_idx, pc, inst, data),\n        }\n    }\n}\n\n#[cfg(feature = \"aot\")]\nimpl<ISA: OpenVmISA> openvm_circuit::arch::AotExecutor<BabyBear> for PowdrExtensionExecutor<ISA>\nwhere\n    PowdrExecutor<ISA>: openvm_circuit::arch::AotExecutor<BabyBear>,\n{\n    fn is_aot_supported(&self, inst: &Instruction<BabyBear>) -> bool {\n        match self {\n            Self::Powdr(x) => x.is_aot_supported(inst),\n        }\n    }\n\n    fn generate_x86_asm(\n        &self,\n        inst: &Instruction<BabyBear>,\n        pc: 
u32,\n    ) -> Result<String, openvm_circuit::arch::AotError> {\n        match self {\n            Self::Powdr(x) => x.generate_x86_asm(inst, pc),\n        }\n    }\n}\n\n#[cfg(feature = \"aot\")]\nimpl<ISA: OpenVmISA> openvm_circuit::arch::AotMeteredExecutor<BabyBear>\n    for PowdrExtensionExecutor<ISA>\nwhere\n    PowdrExecutor<ISA>: openvm_circuit::arch::AotMeteredExecutor<BabyBear>,\n{\n    fn is_aot_metered_supported(&self, inst: &Instruction<BabyBear>) -> bool {\n        match self {\n            Self::Powdr(x) => x.is_aot_metered_supported(inst),\n        }\n    }\n\n    fn generate_x86_metered_asm(\n        &self,\n        inst: &Instruction<BabyBear>,\n        pc: u32,\n        chip_idx: usize,\n        config: &openvm_circuit::arch::SystemConfig,\n    ) -> Result<String, openvm_circuit::arch::AotError> {\n        match self {\n            Self::Powdr(x) => x.generate_x86_metered_asm(inst, pc, chip_idx, config),\n        }\n    }\n}\n\nimpl<ISA: OpenVmISA, RA> openvm_circuit::arch::PreflightExecutor<BabyBear, RA>\n    for PowdrExtensionExecutor<ISA>\nwhere\n    PowdrExecutor<ISA>: openvm_circuit::arch::PreflightExecutor<BabyBear, RA>,\n{\n    fn execute(\n        &self,\n        state: openvm_circuit::arch::VmStateMut<\n            BabyBear,\n            openvm_circuit::system::memory::online::TracingMemory,\n            RA,\n        >,\n        instruction: &Instruction<BabyBear>,\n    ) -> Result<(), openvm_circuit::arch::ExecutionError> {\n        match self {\n            Self::Powdr(x) => x.execute(state, instruction),\n        }\n    }\n\n    fn get_opcode_name(&self, opcode: usize) -> String {\n        match self {\n            Self::Powdr(x) => <PowdrExecutor<ISA> as openvm_circuit::arch::PreflightExecutor<\n                BabyBear,\n                RA,\n            >>::get_opcode_name(x, opcode),\n        }\n    }\n}\n"
  },
  {
    "path": "openvm/src/program.rs",
    "content": "use std::sync::Arc;\n\nuse openvm_instructions::exe::VmExe;\nuse openvm_instructions::program::Program as OpenVmProgram;\nuse openvm_stark_backend::p3_field::PrimeField32;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::blocks::{collect_basic_blocks, BasicBlock, Program};\nuse powdr_autoprecompiles::DegreeBound;\nuse serde::{Deserialize, Serialize};\n\nuse crate::customize_exe::Instr;\nuse crate::extraction_utils::OriginalVmConfig;\nuse crate::isa::OpenVmISA;\nuse crate::{BabyBearOpenVmApcAdapter, SpecializedConfig};\n#[derive(Serialize, Deserialize, Clone)]\n#[serde(bound = \"\")]\npub struct CompiledProgram<ISA: OpenVmISA> {\n    pub exe: Arc<VmExe<BabyBear>>,\n    pub vm_config: SpecializedConfig<ISA>,\n}\n\n// the original openvm program and config without powdr extension, along with the elf\npub struct OriginalCompiledProgram<'a, ISA: OpenVmISA> {\n    pub exe: Arc<VmExe<BabyBear>>,\n    pub vm_config: OriginalVmConfig<ISA>,\n    pub linked_program: ISA::LinkedProgram<'a>,\n}\n\nimpl<'a, ISA: OpenVmISA> OriginalCompiledProgram<'a, ISA> {\n    pub fn new(\n        exe: Arc<VmExe<BabyBear>>,\n        vm_config: OriginalVmConfig<ISA>,\n        linked_program: ISA::LinkedProgram<'a>,\n    ) -> Self {\n        Self {\n            exe,\n            vm_config,\n            linked_program,\n        }\n    }\n\n    /// Segments the program into basic blocks\n    pub fn collect_basic_blocks(&self) -> Vec<BasicBlock<Instr<BabyBear, ISA>>> {\n        let jumpdest_set = ISA::get_jump_destinations(self);\n\n        let program = Prog::from(&self.exe.program);\n\n        collect_basic_blocks::<BabyBearOpenVmApcAdapter<ISA>>(&program, &jumpdest_set)\n    }\n\n    /// Converts to a `CompiledProgram` with the original vm config (without autoprecompiles).\n    pub fn compiled_program(&self, degree_bound: DegreeBound) -> CompiledProgram<ISA> {\n        CompiledProgram {\n            exe: self.exe.clone(),\n            vm_config: 
SpecializedConfig::new(self.vm_config.clone(), Vec::new(), degree_bound),\n        }\n    }\n}\n\n/// A newtype wrapper around `OpenVmProgram` to implement the `Program` trait.\n/// This is necessary because we cannot implement a foreign trait for a foreign type.\npub struct Prog<'a, F>(&'a OpenVmProgram<F>);\n\nimpl<'a, F> From<&'a OpenVmProgram<F>> for Prog<'a, F> {\n    fn from(program: &'a OpenVmProgram<F>) -> Self {\n        Prog(program)\n    }\n}\n\nimpl<'a, F: PrimeField32, ISA: OpenVmISA> Program<Instr<F, ISA>> for Prog<'a, F> {\n    fn base_pc(&self) -> u64 {\n        self.0.pc_base as u64\n    }\n\n    fn instructions(&self) -> Box<dyn Iterator<Item = Instr<F, ISA>> + '_> {\n        Box::new(\n            self.0\n                .instructions_and_debug_infos\n                .iter()\n                .filter_map(|x| x.as_ref().map(|i| Instr::from(i.0.clone()))),\n        )\n    }\n\n    fn length(&self) -> u32 {\n        self.0.instructions_and_debug_infos.len() as u32\n    }\n}\n"
  },
  {
    "path": "openvm/src/test_utils.rs",
    "content": "use itertools::Itertools;\nuse openvm_instructions::instruction::Instruction;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::blocks::SuperBlock;\nuse powdr_autoprecompiles::empirical_constraints::EmpiricalConstraints;\nuse powdr_autoprecompiles::evaluation::evaluate_apc;\nuse powdr_autoprecompiles::export::ExportOptions;\nuse powdr_autoprecompiles::{build, VmConfig};\nuse powdr_number::BabyBearField;\nuse std::fs;\nuse std::path::Path;\n\nuse crate::extraction_utils::OriginalVmConfig;\nuse crate::isa::OpenVmISA;\nuse crate::{BabyBearOpenVmApcAdapter, Instr, DEFAULT_DEGREE_BOUND};\nuse powdr_openvm_bus_interaction_handler::OpenVmBusInteractionHandler;\n\n/// Compile a superblock into an APC snapshot string.\n///\n/// This builds the APC, evaluates it, and returns a formatted string containing\n/// the instructions, evaluation stats, and machine rendering.\npub fn compile_apc<ISA: OpenVmISA>(\n    original_config: &OriginalVmConfig<ISA>,\n    superblock: SuperBlock<Instruction<BabyBear>>,\n) -> String {\n    let degree_bound = DEFAULT_DEGREE_BOUND;\n    let airs = original_config.airs(degree_bound).unwrap();\n    let bus_map = original_config.bus_map();\n\n    let vm_config = VmConfig {\n        instruction_handler: &airs,\n        bus_interaction_handler: OpenVmBusInteractionHandler::<BabyBearField>::default(),\n        bus_map: bus_map.clone(),\n    };\n\n    let superblock = superblock.map_instructions(Instr::<BabyBear, ISA>::from);\n    // for aligning the output\n    let max_pc_digits = superblock.pcs().max().unwrap().max(1).ilog10() as usize + 1;\n    let superblock_str = superblock\n        .instructions()\n        .map(|(pc, inst)| format!(\"  {pc:>max_pc_digits$}: {}\", ISA::format(&inst.inner)))\n        .join(\"\\n\");\n\n    let export_path = std::env::var(\"APC_EXPORT_PATH\").ok();\n    let export_level = std::env::var(\"APC_EXPORT_LEVEL\").ok();\n\n    let apc = build::<BabyBearOpenVmApcAdapter<ISA>>(\n        
superblock.clone(),\n        vm_config.clone(),\n        degree_bound,\n        ExportOptions::from_env_vars(export_path, export_level, &superblock.start_pcs()),\n        &EmpiricalConstraints::default(),\n    )\n    .unwrap();\n\n    let apc_with_stats =\n        evaluate_apc::<BabyBearOpenVmApcAdapter<ISA>>(vm_config.instruction_handler, apc);\n\n    let evaluation = apc_with_stats.evaluation_result();\n    let apc = &apc_with_stats.apc().machine;\n\n    format!(\n        \"Instructions:\\n{superblock_str}\\n\\n{evaluation}\\n\\n{}\",\n        apc.render(&bus_map)\n    )\n}\n\n/// Assert that the APC output for a superblock matches the expected snapshot.\n///\n/// - `snapshot_base_dir`: The base directory for snapshot files (typically\n///   `Path::new(env!(\"CARGO_MANIFEST_DIR\")).join(\"tests\").join(\"apc_snapshots\")`).\n/// - `module_name`: Subdirectory within the snapshot dir (e.g., \"single_instructions\").\n/// - `test_name`: Name of the test, used as the snapshot filename (without extension).\n///\n/// Set the `UPDATE_EXPECT=1` environment variable to update snapshot files.\npub fn assert_apc_snapshot(\n    actual: &str,\n    snapshot_base_dir: &Path,\n    module_name: &str,\n    test_name: &str,\n) {\n    let expected_path = snapshot_base_dir\n        .join(module_name)\n        .join(format!(\"{test_name}.txt\"));\n\n    let should_update_expectation = std::env::var(\"UPDATE_EXPECT\")\n        .map(|v| v.as_str() == \"1\")\n        .unwrap_or(false);\n\n    let expected = expected_path\n        .exists()\n        .then(|| fs::read_to_string(&expected_path).unwrap());\n\n    match (expected, should_update_expectation) {\n        (Some(expected), _) if expected == actual => {\n            // Test succeeded.\n        }\n        (Some(expected), false) => {\n            // The expectation file exists, is different from \"actual\" and we are\n            // not allowed to update it.\n            pretty_assertions::assert_eq!(\n                
expected.trim(),\n                actual.trim(),\n                \"The output of `{test_name}` does not match the expected output. \\\n                 To overwrite the expected output with the currently generated one, \\\n                 re-run the test with the environment variable `UPDATE_EXPECT=1` or \\\n                 delete the file `{test_name}.txt`.\",\n            );\n        }\n        _ => {\n            // Expectation file does not exist or is different from \"actual\" and we are allowed to update it.\n            fs::create_dir_all(expected_path.parent().unwrap()).unwrap();\n            fs::write(&expected_path, actual).unwrap();\n            println!(\"Expected output for `{test_name}` was created. Re-run the test to confirm.\");\n        }\n    }\n}\n\n/// Convenience function combining [`compile_apc`] and [`assert_apc_snapshot`].\npub fn assert_apc_machine_output<ISA: OpenVmISA>(\n    original_config: &OriginalVmConfig<ISA>,\n    program: SuperBlock<Instruction<BabyBear>>,\n    snapshot_base_dir: &Path,\n    module_name: &str,\n    test_name: &str,\n) {\n    let actual = compile_apc::<ISA>(original_config, program);\n    assert_apc_snapshot(&actual, snapshot_base_dir, module_name, test_name);\n}\n"
  },
  {
    "path": "openvm/src/trace_generation.rs",
    "content": "use crate::PowdrSdkCpu;\nuse crate::SpecializedConfigCpuBuilder;\nuse crate::{isa::OpenVmISA, program::CompiledProgram, SpecializedConfig};\nuse openvm_circuit::arch::{\n    execution_mode::Segment, Executor, MeteredExecutor, PreflightExecutionOutput,\n    PreflightExecutor, VirtualMachine, VmBuilder, VmCircuitConfig, VmExecutionConfig, VmInstance,\n};\nuse openvm_native_circuit::NativeConfig;\nuse openvm_sdk::{\n    config::{AppConfig, DEFAULT_APP_LOG_BLOWUP},\n    prover::vm::new_local_prover,\n    GenericSdk, StdIn,\n};\nuse openvm_stark_backend::config::Val;\nuse openvm_stark_backend::{keygen::types::MultiStarkProvingKey, prover::types::ProvingContext};\nuse openvm_stark_sdk::{\n    config::{\n        baby_bear_poseidon2::BabyBearPoseidon2Engine as CpuBabyBearPoseidon2Engine, FriParameters,\n    },\n    engine::{StarkEngine, StarkFriEngine},\n};\nuse tracing::info_span;\n\nuse crate::BabyBearSC;\n\n#[cfg(not(feature = \"cuda\"))]\nuse crate::PowdrSdkCpu as PowdrSdk;\n#[cfg(feature = \"cuda\")]\nuse crate::PowdrSdkGpu as PowdrSdk;\n\n#[cfg(not(feature = \"cuda\"))]\nuse crate::SpecializedConfigCpuBuilder as SpecializedConfigBuilder;\n#[cfg(feature = \"cuda\")]\nuse crate::SpecializedConfigGpuBuilder as SpecializedConfigBuilder;\n\n#[cfg(feature = \"cuda\")]\nuse openvm_cuda_backend::engine::GpuBabyBearPoseidon2Engine as BabyBearPoseidon2Engine;\n#[cfg(not(feature = \"cuda\"))]\nuse openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Engine;\n\n/// Given a program and input, generates the trace segment by segment and calls the provided\n/// callback with the VM, proving key, and proving context (containing the trace) for each segment.\npub fn do_with_trace<ISA: OpenVmISA>(\n    program: &CompiledProgram<ISA>,\n    inputs: StdIn,\n    callback: impl FnMut(\n        usize,\n        &VirtualMachine<BabyBearPoseidon2Engine, SpecializedConfigBuilder<ISA>>,\n        &MultiStarkProvingKey<BabyBearSC>,\n        
ProvingContext<<BabyBearPoseidon2Engine as StarkEngine>::PB>,\n    ),\n) -> Result<(), Box<dyn std::error::Error>> {\n    let sdk = PowdrSdk::new(create_app_config(program))?;\n    do_with_trace_with_sdk::<ISA, BabyBearPoseidon2Engine, SpecializedConfigBuilder<ISA>, _>(\n        program, inputs, sdk, callback,\n    )\n}\n\n/// Like [`do_with_trace`], but always uses the CPU engine and CPU VM config builder.\npub fn do_with_cpu_trace<ISA: OpenVmISA>(\n    program: &CompiledProgram<ISA>,\n    inputs: StdIn,\n    callback: impl FnMut(\n        usize,\n        &VirtualMachine<CpuBabyBearPoseidon2Engine, SpecializedConfigCpuBuilder<ISA>>,\n        &MultiStarkProvingKey<BabyBearSC>,\n        ProvingContext<<CpuBabyBearPoseidon2Engine as StarkEngine>::PB>,\n    ),\n) -> Result<(), Box<dyn std::error::Error>> {\n    let sdk = PowdrSdkCpu::new(create_app_config(program))?;\n    do_with_trace_with_sdk::<ISA, CpuBabyBearPoseidon2Engine, SpecializedConfigCpuBuilder<ISA>, _>(\n        program, inputs, sdk, callback,\n    )\n}\n\nfn do_with_trace_with_sdk<ISA: OpenVmISA, E, VB, NB>(\n    program: &CompiledProgram<ISA>,\n    inputs: StdIn,\n    sdk: GenericSdk<E, VB, NB>,\n    mut callback: impl FnMut(\n        usize,\n        &VirtualMachine<E, VB>,\n        &MultiStarkProvingKey<BabyBearSC>,\n        ProvingContext<<E as StarkEngine>::PB>,\n    ),\n) -> Result<(), Box<dyn std::error::Error>>\nwhere\n    E: StarkFriEngine<SC = BabyBearSC>,\n    VB: VmBuilder<E> + Clone,\n    <VB::VmConfig as VmExecutionConfig<Val<E::SC>>>::Executor: Executor<Val<E::SC>>\n        + MeteredExecutor<Val<E::SC>>\n        + PreflightExecutor<Val<E::SC>, VB::RecordArena>,\n    NB: VmBuilder<E, VmConfig = NativeConfig> + Clone,\n    <NativeConfig as VmExecutionConfig<Val<E::SC>>>::Executor:\n        PreflightExecutor<Val<E::SC>, NB::RecordArena>,\n{\n    let exe = sdk.convert_to_exe(program.exe.clone())?;\n    // Build owned vm instance, so we can mutate it later\n    let vm_builder = 
sdk.app_vm_builder().clone();\n    let vm_pk = sdk.app_pk().app_vm_pk.clone();\n    let mut vm_instance: VmInstance<_, _> = new_local_prover(vm_builder, &vm_pk, exe.clone())?;\n\n    vm_instance.reset_state(inputs.clone());\n    let metered_ctx = vm_instance.vm.build_metered_ctx(&exe);\n    let metered_interpreter = vm_instance.vm.metered_interpreter(vm_instance.exe())?;\n    let (segments, _) = metered_interpreter.execute_metered(inputs.clone(), metered_ctx)?;\n    let mut state = vm_instance.state_mut().take();\n\n    // Move `vm` and `interpreter` out of `vm_instance`\n    // (after this, you can't use `vm_instance` anymore).\n    let mut vm = vm_instance.vm;\n    let mut interpreter = vm_instance.interpreter;\n\n    // Get reusable inputs for `debug_proving_ctx`, the mock prover API from OVM.\n    let air_inv = vm.config().create_airs()?;\n    let pk = air_inv.keygen::<E>(&vm.engine);\n\n    for (seg_idx, segment) in segments.into_iter().enumerate() {\n        let _segment_span = info_span!(\"prove_segment\", segment = seg_idx).entered();\n        // We need a separate span so the metric label includes \"segment\" from _segment_span\n        let _prove_span = info_span!(\"total_proof\").entered();\n        let Segment {\n            num_insns,\n            trace_heights,\n            ..\n        } = segment;\n        let from_state = Option::take(&mut state).unwrap();\n        vm.transport_init_memory_to_device(&from_state.memory);\n        let PreflightExecutionOutput {\n            system_records,\n            record_arenas,\n            to_state,\n        } = vm.execute_preflight(\n            &mut interpreter,\n            from_state,\n            Some(num_insns),\n            &trace_heights,\n        )?;\n        state = Some(to_state);\n\n        let ctx = vm.generate_proving_ctx(system_records, record_arenas)?;\n\n        callback(seg_idx, &vm, &pk, ctx);\n    }\n    Ok(())\n}\n\nfn create_app_config<ISA: OpenVmISA>(\n    program: 
&CompiledProgram<ISA>,\n) -> AppConfig<SpecializedConfig<ISA>> {\n    let app_fri_params =\n        FriParameters::standard_with_100_bits_conjectured_security(DEFAULT_APP_LOG_BLOWUP);\n    AppConfig::new(app_fri_params, program.vm_config.clone())\n}\n"
  },
  {
    "path": "openvm/src/utils.rs",
    "content": "use core::fmt;\nuse std::{collections::BTreeMap, sync::Arc};\n\nuse itertools::Itertools;\nuse openvm_stark_backend::{\n    air_builders::symbolic::{\n        symbolic_expression::SymbolicExpression,\n        symbolic_variable::{Entry, SymbolicVariable},\n        SymbolicConstraints,\n    },\n    interaction::{Interaction, SymbolicInteraction},\n    p3_field::PrimeField32,\n};\nuse powdr_autoprecompiles::{\n    expression::{try_convert, AlgebraicReference},\n    symbolic_machine::SymbolicBusInteraction,\n};\nuse powdr_expression::AlgebraicExpression;\n\nuse crate::bus_map::BusMap;\n\npub enum OpenVmReference {\n    /// Reference to a witness column. The boolean indicates if the reference is to the next row.\n    WitnessColumn(AlgebraicReference, bool),\n    IsFirstRow,\n    IsLastRow,\n    IsTransition,\n}\n\nimpl fmt::Display for OpenVmReference {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match self {\n            OpenVmReference::WitnessColumn(reference, next) => {\n                write!(f, \"{}{}\", reference.name, if *next { \"'\" } else { \"\" })\n            }\n            OpenVmReference::IsFirstRow => write!(f, \"is_first_row\"),\n            OpenVmReference::IsLastRow => write!(f, \"is_last_row\"),\n            OpenVmReference::IsTransition => write!(f, \"is_transition\"),\n        }\n    }\n}\n\n/// An unsupported OpenVM reference appeared, i.e., a non-zero offset or a reference to\n/// is_first_row, is_last_row, or is_transition.\n#[derive(Debug)]\npub struct UnsupportedOpenVmReferenceError;\n\nimpl TryFrom<OpenVmReference> for AlgebraicReference {\n    type Error = UnsupportedOpenVmReferenceError;\n\n    fn try_from(value: OpenVmReference) -> Result<Self, Self::Error> {\n        match value {\n            OpenVmReference::WitnessColumn(reference, false) => Ok(reference),\n            _ => Err(UnsupportedOpenVmReferenceError),\n        }\n    }\n}\n\npub fn symbolic_to_algebraic<F: PrimeField32>(\n    expr: 
&SymbolicExpression<F>,\n    columns: &[Arc<String>],\n) -> AlgebraicExpression<F, OpenVmReference> {\n    match expr {\n        SymbolicExpression::Constant(c) => AlgebraicExpression::Number(*c),\n        SymbolicExpression::Add { x, y, .. } => {\n            symbolic_to_algebraic(x, columns) + symbolic_to_algebraic(y, columns)\n        }\n        SymbolicExpression::Sub { x, y, .. } => {\n            symbolic_to_algebraic(x, columns) - symbolic_to_algebraic(y, columns)\n        }\n        SymbolicExpression::Mul { x, y, .. } => {\n            symbolic_to_algebraic(x, columns) * symbolic_to_algebraic(y, columns)\n        }\n        SymbolicExpression::Neg { x, .. } => -symbolic_to_algebraic(x, columns),\n        SymbolicExpression::Variable(SymbolicVariable { entry, index, .. }) => match entry {\n            Entry::Main { offset, part_index } => {\n                assert_eq!(*part_index, 0);\n                let next = match *offset {\n                    0 => false,\n                    1 => true,\n                    _ => panic!(\"Unexpected offset: {offset}\"),\n                };\n                let name = columns.get(*index).unwrap_or_else(|| {\n                    panic!(\"Column index out of bounds: {index}\\nColumns: {columns:?}\");\n                });\n                AlgebraicExpression::Reference(OpenVmReference::WitnessColumn(\n                    AlgebraicReference {\n                        name: name.clone(),\n                        id: *index as u64,\n                    },\n                    next,\n                ))\n            }\n            _ => unimplemented!(),\n        },\n        SymbolicExpression::IsFirstRow => {\n            AlgebraicExpression::Reference(OpenVmReference::IsFirstRow)\n        }\n        SymbolicExpression::IsLastRow => AlgebraicExpression::Reference(OpenVmReference::IsLastRow),\n        SymbolicExpression::IsTransition => {\n            AlgebraicExpression::Reference(OpenVmReference::IsTransition)\n        }\n    
}\n}\n\npub fn openvm_bus_interaction_to_powdr<F: PrimeField32>(\n    interaction: &SymbolicInteraction<F>,\n    columns: &[Arc<String>],\n) -> Result<SymbolicBusInteraction<F>, UnsupportedOpenVmReferenceError> {\n    let id = interaction.bus_index as u64;\n\n    let mult = try_convert(symbolic_to_algebraic(&interaction.count, columns))?;\n    let args = interaction\n        .message\n        .iter()\n        .map(|e| try_convert(symbolic_to_algebraic(e, columns)))\n        .collect::<Result<_, _>>()?;\n\n    Ok(SymbolicBusInteraction { id, mult, args })\n}\n\npub fn get_pil<F: PrimeField32>(\n    name: &str,\n    constraints: &SymbolicConstraints<F>,\n    columns: &Vec<Arc<String>>,\n    public_values: Vec<String>,\n    bus_map: &BusMap,\n) -> String {\n    let mut pil = format!(\n        \"\nnamespace {name};\n    // Preamble\n    col fixed is_first_row = [1] + [0]*;\n    col fixed is_last_row = [0] + [1]*;\n    col fixed is_transition = [0] + [1]* + [0];\n\n\"\n    );\n\n    pil.push_str(\n        &bus_map\n            .all_types_by_id()\n            .iter()\n            .map(|(id, bus_type)| format!(\"    let {bus_type} = {id};\"))\n            .join(\"\\n\"),\n    );\n\n    pil.push_str(\n        \"\n\n    // Witness columns\n\",\n    );\n\n    // Declare witness columns\n    for column in columns {\n        pil.push_str(&format!(\"    col witness {column};\\n\"));\n    }\n\n    let (bus_interactions_by_bus, new_buses): (BTreeMap<_, _>, BTreeMap<_, _>) = constraints\n        .interactions\n        .iter()\n        .map(|interaction| (interaction.bus_index, interaction))\n        .into_group_map()\n        .into_iter()\n        .partition::<BTreeMap<_, _>, _>(|(bus_index, _)| {\n            bus_map.all_types_by_id().contains_key(&(*bus_index as u64))\n        });\n\n    pil.push_str(\n        \"\n    // Bus interactions (bus_index, fields, count)\\n\",\n    );\n\n    for (bus_index, interactions) in bus_interactions_by_bus {\n        let bus_name = 
bus_map.bus_type(bus_index as u64).to_string();\n\n        for interaction in interactions {\n            format_bus_interaction(&mut pil, interaction, columns, &public_values, &bus_name);\n        }\n        pil.push('\\n');\n    }\n\n    for (bus_index, interactions) in new_buses {\n        let bus_name = format!(\"bus_{bus_index}\");\n        for interaction in interactions {\n            format_bus_interaction(&mut pil, interaction, columns, &public_values, &bus_name);\n        }\n        pil.push('\\n');\n    }\n\n    pil.push_str(\"    // Constraints\\n\");\n\n    for constraint in &constraints.constraints {\n        pil.push_str(&format!(\n            \"    {} = 0;\\n\",\n            format_expr(constraint, columns, &public_values)\n        ));\n    }\n    pil\n}\n\nfn format_bus_interaction<F: PrimeField32>(\n    pil: &mut String,\n    interaction: &Interaction<SymbolicExpression<F>>,\n    columns: &[Arc<String>],\n    public_values: &[String],\n    bus_name: &str,\n) {\n    let Interaction { message, count, .. } = interaction;\n    // We do not know what is a send or a receive\n    let function_name = \"bus_interaction\";\n\n    pil.push_str(&format!(\n        \"    std::protocols::bus::{}({bus_name}, [{}], {});\\n\",\n        function_name,\n        message\n            .iter()\n            .map(|value| format_expr(value, columns, public_values))\n            .collect::<Vec<String>>()\n            .join(\", \"),\n        format_expr(count, columns, public_values)\n    ));\n}\n\nfn format_expr<F: PrimeField32>(\n    expr: &SymbolicExpression<F>,\n    columns: &[Arc<String>],\n    // TODO: Implement public references\n    _public_values: &[String],\n) -> String {\n    symbolic_to_algebraic(expr, columns).to_string()\n}\n"
  },
  {
    "path": "openvm-bus-interaction-handler/Cargo.toml",
    "content": "[package]\nname = \"powdr-openvm-bus-interaction-handler\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[dependencies]\npowdr-autoprecompiles.workspace = true\npowdr-expression.workspace = true\npowdr-number.workspace = true\npowdr-constraint-solver.workspace = true\n\nitertools.workspace = true\nserde.workspace = true\n\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "openvm-bus-interaction-handler/src/bitwise_lookup.rs",
    "content": "use powdr_autoprecompiles::range_constraint_optimizer::RangeConstraints;\nuse powdr_constraint_solver::{\n    grouped_expression::GroupedExpression, range_constraint::RangeConstraint,\n};\nuse powdr_number::{FieldElement, LargeInt};\n\nuse super::byte_constraint;\n\npub fn handle_bitwise_lookup<T: FieldElement>(\n    payload: &[RangeConstraint<T>],\n) -> Vec<RangeConstraint<T>> {\n    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/bitwise_op_lookup/bus.rs\n    // Expects (x, y, z, op), where:\n    // - if op == 0, x & y are bytes, z = 0\n    // - if op == 1, x & y are bytes, z = x ^ y\n\n    let [x, y, _z, op] = payload else {\n        panic!(\"Expected arguments (x, y, z, op)\");\n    };\n    match op\n        .try_to_single_value()\n        .map(|v| v.to_integer().try_into_u64().unwrap())\n    {\n        // Range constraint on x & y, z = 0\n        Some(0) => vec![\n            byte_constraint(),\n            byte_constraint(),\n            RangeConstraint::from_value(T::zero()),\n            RangeConstraint::from_value(T::zero()),\n        ],\n        // z = x ^ y\n        Some(1) => {\n            if let (Some(x), Some(y)) = (x.try_to_single_value(), y.try_to_single_value()) {\n                // Both inputs are known, can compute result concretely\n                let z = T::from(\n                    x.to_integer().try_into_u64().unwrap() ^ y.to_integer().try_into_u64().unwrap(),\n                );\n                vec![\n                    RangeConstraint::from_value(x),\n                    RangeConstraint::from_value(y),\n                    RangeConstraint::from_value(z),\n                    RangeConstraint::from_value(T::one()),\n                ]\n            } else {\n                // The result of an XOR can only be a byte and have bits set that are set in either x or y\n                let z_constraint = RangeConstraint::from_mask(*x.mask() | *y.mask())\n                    
.conjunction(&byte_constraint());\n                vec![\n                    byte_constraint(),\n                    byte_constraint(),\n                    z_constraint,\n                    RangeConstraint::from_value(T::one()),\n                ]\n            }\n        }\n        // Operation is unknown, but we know that x, y, and z are bytes\n        // and that op is 0 or 1\n        None => vec![\n            byte_constraint(),\n            byte_constraint(),\n            byte_constraint(),\n            RangeConstraint::from_mask(0x1u64),\n        ],\n        _ => panic!(\"Invalid operation\"),\n    }\n}\n\npub fn bitwise_lookup_pure_range_constraints<T: FieldElement, V: Ord + Clone + Eq>(\n    payload: &[GroupedExpression<T, V>],\n) -> Option<RangeConstraints<T, V>> {\n    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/bitwise_op_lookup/bus.rs\n    // Expects (x, y, z, op), where:\n    // - if op == 0, x & y are bytes, z = 0\n    // - if op == 1, x & y are bytes, z = x ^ y\n    let [x, y, z, op] = payload else {\n        panic!(\"Expected arguments (x, y, z, op)\");\n    };\n    let byte_rc = RangeConstraint::from_mask(0xffu64);\n    let zero_rc = RangeConstraint::from_value(T::zero());\n    if op.try_to_number() == Some(T::from(0u64)) {\n        Some(\n            [\n                (x.clone(), byte_rc),\n                (y.clone(), byte_rc),\n                (z.clone(), zero_rc),\n            ]\n            .into(),\n        )\n    } else if x == y {\n        // This is a common pattern, because the `BaseAluCoreChip` range-constraints\n        // the output of an addition by sending each limb as both operands to the XOR table:\n        // https://github.com/openvm-org/openvm/blob/v1.0.0/extensions/rv32im/circuit/src/base_alu/core.rs#L131-L138\n        // Note that this block also gets executed if `op` is unknown (but we know that `op` can only be 0 or 1).\n        Some(\n            [\n                (x.clone(), 
byte_rc),\n                (z.clone(), zero_rc),\n                (op.clone(), RangeConstraint::from_mask(1)),\n            ]\n            .into(),\n        )\n    } else {\n        None\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bus_map::DEFAULT_BITWISE_LOOKUP, test_utils::*, OpenVmBusInteractionHandler};\n\n    use super::*;\n    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};\n    use powdr_number::BabyBearField;\n\n    fn run(\n        x: RangeConstraint<BabyBearField>,\n        y: RangeConstraint<BabyBearField>,\n        z: RangeConstraint<BabyBearField>,\n        op: RangeConstraint<BabyBearField>,\n    ) -> Vec<RangeConstraint<BabyBearField>> {\n        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();\n\n        let bus_interaction = BusInteraction {\n            bus_id: RangeConstraint::from_value(DEFAULT_BITWISE_LOOKUP.into()),\n            multiplicity: value(1),\n            payload: vec![x, y, z, op],\n        };\n        let result = handler.handle_bus_interaction(bus_interaction);\n        result.payload\n    }\n\n    #[test]\n    fn test_byte_constraint() {\n        let result = run(\n            Default::default(),\n            Default::default(),\n            Default::default(),\n            value(0),\n        );\n\n        assert_eq!(result.len(), 4);\n        assert_eq!(result[0], mask(0xff));\n        assert_eq!(result[1], mask(0xff));\n        assert_eq!(result[2], value(0));\n        assert_eq!(result[3], value(0));\n    }\n\n    #[test]\n    fn test_xor_known() {\n        let result = run(\n            value(0b10101010),\n            value(0b11001100),\n            Default::default(),\n            value(1),\n        );\n\n        assert_eq!(result.len(), 4);\n        assert_eq!(result[0], value(0b10101010));\n        assert_eq!(result[1], value(0b11001100));\n        assert_eq!(result[2], value(0b01100110));\n        assert_eq!(result[3], value(1));\n    }\n\n    
#[test]\n    fn test_xor_unknown() {\n        let result = run(\n            Default::default(),\n            Default::default(),\n            Default::default(),\n            value(1),\n        );\n\n        assert_eq!(result.len(), 4);\n        assert_eq!(result[0], mask(0xff));\n        assert_eq!(result[1], mask(0xff));\n        assert_eq!(result[2], mask(0xff));\n        assert_eq!(result[3], value(1));\n    }\n\n    #[test]\n    fn test_xor_one_unknown() {\n        let result = run(mask(0xabcd), value(0), Default::default(), value(1));\n\n        assert_eq!(result.len(), 4);\n        // Note that this constraint could be tighter (0xcd), but the solver\n        // will get to this by intersecting the result with the input\n        // constraints.\n        assert_eq!(result[0], mask(0xff));\n        // Same here\n        assert_eq!(result[1], mask(0xff));\n        // We won't be able to compute the result, but we know that the range\n        // constraint of `x` also applies to `z`.\n        assert_eq!(result[2], mask(0xcd));\n        assert_eq!(result[3], value(1));\n    }\n\n    #[test]\n    fn test_unknown_operation() {\n        let result = run(\n            Default::default(),\n            Default::default(),\n            Default::default(),\n            Default::default(),\n        );\n\n        assert_eq!(result.len(), 4);\n        assert_eq!(result[0], mask(0xff));\n        assert_eq!(result[1], mask(0xff));\n        assert_eq!(result[2], mask(0xff));\n        assert_eq!(result[3], mask(0x1));\n    }\n}\n"
  },
  {
    "path": "openvm-bus-interaction-handler/src/bus_map.rs",
    "content": "//! To support an abstracted autoprecompile layer, this module stores type implementations specific to OpenVM\nuse std::fmt::Display;\n\nuse powdr_autoprecompiles::bus_map::BusType;\nuse serde::{Deserialize, Serialize};\n\nuse crate::DEFAULT_RANGE_TUPLE_CHECKER_SIZES;\n\npub const DEFAULT_EXECUTION_BRIDGE: u64 = 0;\npub const DEFAULT_MEMORY: u64 = 1;\npub const DEFAULT_PC_LOOKUP: u64 = 2;\npub const DEFAULT_VARIABLE_RANGE_CHECKER: u64 = 3;\npub const DEFAULT_BITWISE_LOOKUP: u64 = 6;\npub const DEFAULT_TUPLE_RANGE_CHECKER: u64 = 7;\n\n#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub enum OpenVmBusType {\n    VariableRangeChecker,\n    TupleRangeChecker([u32; 2]),\n    BitwiseLookup,\n}\n\npub type BusMap = powdr_autoprecompiles::bus_map::BusMap<OpenVmBusType>;\n\nimpl Display for OpenVmBusType {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            OpenVmBusType::VariableRangeChecker => write!(f, \"VARIABLE_RANGE_CHECKER\"),\n            OpenVmBusType::TupleRangeChecker(sizes) => {\n                write!(f, \"TUPLE_RANGE_CHECKER_{}_{}\", sizes[0], sizes[1])\n            }\n            OpenVmBusType::BitwiseLookup => write!(f, \"BITWISE_LOOKUP\"),\n        }\n    }\n}\n\npub fn default_openvm_bus_map() -> BusMap {\n    let bus_ids = [\n        (DEFAULT_EXECUTION_BRIDGE, BusType::ExecutionBridge),\n        (DEFAULT_MEMORY, BusType::Memory),\n        (DEFAULT_PC_LOOKUP, BusType::PcLookup),\n        (\n            DEFAULT_VARIABLE_RANGE_CHECKER,\n            BusType::Other(OpenVmBusType::VariableRangeChecker),\n        ),\n        (\n            DEFAULT_BITWISE_LOOKUP,\n            BusType::Other(OpenVmBusType::BitwiseLookup),\n        ),\n        (\n            DEFAULT_TUPLE_RANGE_CHECKER,\n            BusType::Other(OpenVmBusType::TupleRangeChecker(\n                DEFAULT_RANGE_TUPLE_CHECKER_SIZES,\n            )),\n        ),\n    ];\n    
BusMap::from_id_type_pairs(bus_ids)\n}\n"
  },
  {
    "path": "openvm-bus-interaction-handler/src/lib.rs",
    "content": "use std::fmt::Display;\n\nuse bitwise_lookup::handle_bitwise_lookup;\nuse itertools::Itertools;\nuse memory::handle_memory;\nuse powdr_autoprecompiles::{\n    bus_map::BusType,\n    constraint_optimizer::IsBusStateful,\n    range_constraint_optimizer::{\n        utils::{filter_byte_constraints, range_constraint_to_num_bits},\n        MakeRangeConstraintsError, RangeConstraintHandler, RangeConstraints,\n    },\n};\nuse powdr_constraint_solver::{\n    constraint_system::{BusInteraction, BusInteractionHandler},\n    grouped_expression::GroupedExpression,\n    range_constraint::RangeConstraint,\n};\nuse powdr_number::{FieldElement, LargeInt};\nuse std::hash::Hash;\nuse variable_range_checker::handle_variable_range_checker;\n\nuse crate::{\n    bitwise_lookup::bitwise_lookup_pure_range_constraints,\n    bus_map::{default_openvm_bus_map, BusMap, OpenVmBusType},\n    tuple_range_checker::TupleRangeCheckerHandler,\n    variable_range_checker::variable_range_checker_pure_range_constraints,\n};\n\nmod bitwise_lookup;\npub mod bus_map;\nmod memory;\npub mod memory_bus_interaction;\nmod tuple_range_checker;\nmod variable_range_checker;\n\n#[derive(Clone)]\npub struct OpenVmBusInteractionHandler<T: FieldElement> {\n    bus_map: BusMap,\n    _phantom: std::marker::PhantomData<T>,\n}\n\n/// Taken from openvm implementation, should be kept in sync.\nconst DEFAULT_RANGE_TUPLE_CHECKER_SIZES: [u32; 2] = [1 << 8, 8 * (1 << 8)];\n\nimpl<T: FieldElement> Default for OpenVmBusInteractionHandler<T> {\n    fn default() -> Self {\n        Self::new(default_openvm_bus_map())\n    }\n}\n\nimpl<T: FieldElement> OpenVmBusInteractionHandler<T> {\n    pub fn new(bus_map: BusMap) -> Self {\n        Self {\n            bus_map,\n            _phantom: std::marker::PhantomData,\n        }\n    }\n\n    pub fn tuple_range_checker_sizes(&self) -> [u32; 2] {\n        self.bus_map\n            .all_types_by_id()\n            .values()\n            .find_map(|ty| {\n                if let 
BusType::Other(OpenVmBusType::TupleRangeChecker(sizes)) = ty {\n                    Some(*sizes)\n                } else {\n                    None\n                }\n            })\n            .unwrap()\n    }\n}\n\nimpl<T: FieldElement> BusInteractionHandler<T> for OpenVmBusInteractionHandler<T> {\n    fn handle_bus_interaction(\n        &self,\n        bus_interaction: BusInteraction<RangeConstraint<T>>,\n    ) -> BusInteraction<RangeConstraint<T>> {\n        let (Some(bus_id), Some(multiplicity)) = (\n            bus_interaction.bus_id.try_to_single_value(),\n            bus_interaction.multiplicity.try_to_single_value(),\n        ) else {\n            return bus_interaction;\n        };\n\n        if multiplicity.is_zero() {\n            return bus_interaction;\n        }\n\n        let payload_constraints = match self\n            .bus_map\n            .bus_type(bus_id.to_integer().try_into_u64().unwrap())\n        {\n            // Sends / receives (pc, timestamp) pairs. They could have any value.\n            BusType::ExecutionBridge => bus_interaction.payload,\n            // Sends a (pc, opcode, args..) tuple. 
In theory, we could refine the range constraints\n            // of the args here, but for auto-precompiles, only the PC will be unknown, which could\n            // have any value.\n            BusType::PcLookup => bus_interaction.payload,\n            BusType::Other(OpenVmBusType::BitwiseLookup) => {\n                handle_bitwise_lookup(&bus_interaction.payload)\n            }\n            BusType::Memory => handle_memory(&bus_interaction.payload, multiplicity),\n            BusType::Other(OpenVmBusType::VariableRangeChecker) => {\n                handle_variable_range_checker(&bus_interaction.payload)\n            }\n            BusType::Other(OpenVmBusType::TupleRangeChecker(sizes)) => {\n                TupleRangeCheckerHandler::new(sizes)\n                    .handle_bus_interaction(&bus_interaction.payload)\n            }\n        };\n        BusInteraction {\n            payload: payload_constraints,\n            ..bus_interaction\n        }\n    }\n}\n\nfn byte_constraint<T: FieldElement>() -> RangeConstraint<T> {\n    RangeConstraint::from_mask(0xffu64)\n}\n\nimpl<T: FieldElement> IsBusStateful<T> for OpenVmBusInteractionHandler<T> {\n    fn is_stateful(&self, bus_id: T) -> bool {\n        let bus_id = bus_id.to_integer().try_into_u64().unwrap();\n        match self.bus_map.bus_type(bus_id) {\n            BusType::ExecutionBridge => true,\n            BusType::Memory => true,\n            BusType::PcLookup => false,\n            BusType::Other(OpenVmBusType::BitwiseLookup) => false,\n            BusType::Other(OpenVmBusType::VariableRangeChecker) => false,\n            BusType::Other(OpenVmBusType::TupleRangeChecker(_)) => false,\n        }\n    }\n}\n\nimpl<T: FieldElement> RangeConstraintHandler<T> for OpenVmBusInteractionHandler<T> {\n    fn pure_range_constraints<V: Ord + Clone + Eq>(\n        &self,\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n    ) -> Option<RangeConstraints<T, V>> {\n        let bus_id = bus_interaction\n  
          .bus_id\n            .try_to_number()\n            .unwrap()\n            .to_integer()\n            .try_into_u64()\n            .unwrap();\n        match self.bus_map.bus_type(bus_id) {\n            BusType::ExecutionBridge | BusType::Memory | BusType::PcLookup => None,\n            BusType::Other(OpenVmBusType::BitwiseLookup) => {\n                bitwise_lookup_pure_range_constraints(&bus_interaction.payload)\n            }\n            BusType::Other(OpenVmBusType::VariableRangeChecker) => {\n                variable_range_checker_pure_range_constraints(&bus_interaction.payload)\n            }\n            BusType::Other(OpenVmBusType::TupleRangeChecker(sizes)) => {\n                TupleRangeCheckerHandler::new(sizes)\n                    .pure_range_constraints(&bus_interaction.payload)\n            }\n        }\n    }\n\n    fn batch_make_range_constraints<V: Ord + Clone + Eq + Display + Hash>(\n        &self,\n        mut range_constraints: RangeConstraints<T, V>,\n    ) -> Result<Vec<BusInteraction<GroupedExpression<T, V>>>, MakeRangeConstraintsError> {\n        let mut byte_constraints = filter_byte_constraints(&mut range_constraints);\n        let tuple_range_checker_sizes = self.tuple_range_checker_sizes();\n        let tuple_range_checker_ranges =\n            TupleRangeCheckerHandler::new(tuple_range_checker_sizes).tuple_range_checker_ranges();\n        assert_eq!(\n            tuple_range_checker_ranges.0,\n            RangeConstraint::from_mask(0xffu64),\n        );\n\n        // The tuple range checker bus can range-check two expressions at the same time.\n        // We assume the first range is a byte range (see assertion above). 
From the remaining\n        // range constraints, we find all that happen to require the second range and zip them\n        // with the byte constraints.\n        let (mut tuple_range_checker_second_args, mut range_constraints): (Vec<_>, Vec<_>) =\n            range_constraints\n                .into_iter()\n                .partition(|(_expr, rc)| rc == &tuple_range_checker_ranges.1);\n        if tuple_range_checker_second_args.len() > byte_constraints.len() {\n            range_constraints\n                .extend(tuple_range_checker_second_args.drain(byte_constraints.len()..));\n        }\n        let num_variable_range_checker_interactions = tuple_range_checker_second_args.len();\n\n        let tuple_range_checker_constraints = byte_constraints\n            .drain(..num_variable_range_checker_interactions)\n            .zip_eq(tuple_range_checker_second_args)\n            .map(|(byte_expr, (expr2, _rc))| {\n                // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/range_tuple/bus.rs\n                // Expects (x, y), where `x` is in the range [0, MAX_0] and `y` is in the range [0, MAX_1]\n                let bus_id = self\n                    .bus_map\n                    .get_bus_id(&BusType::Other(OpenVmBusType::TupleRangeChecker(\n                        tuple_range_checker_sizes,\n                    )))\n                    .unwrap();\n                BusInteraction {\n                    bus_id: GroupedExpression::from_number(T::from(bus_id)),\n                    multiplicity: GroupedExpression::from_number(T::one()),\n                    payload: vec![byte_expr.clone(), expr2.clone()],\n                }\n            })\n            .collect::<Vec<_>>();\n\n        let byte_constraints = byte_constraints\n            .into_iter()\n            .chunks(2)\n            .into_iter()\n            .map(|mut bytes| {\n                // Use the bitwise lookup to range-check two bytes at the same time:\n            
    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/bitwise_op_lookup/bus.rs\n                // Expects (x, y, z, op), where:\n                // - if op == 0, x & y are bytes, z = 0\n                // - if op == 1, x & y are bytes, z = x ^ y\n                let byte1 = bytes.next().unwrap();\n                let byte2 = bytes\n                    .next()\n                    .unwrap_or(GroupedExpression::from_number(T::zero()));\n\n                let bus_id = self\n                    .bus_map\n                    .get_bus_id(&BusType::Other(OpenVmBusType::BitwiseLookup))\n                    .unwrap();\n                BusInteraction {\n                    bus_id: GroupedExpression::from_number(T::from(bus_id)),\n                    multiplicity: GroupedExpression::from_number(T::one()),\n                    payload: vec![\n                        byte1.clone(),\n                        byte2.clone(),\n                        GroupedExpression::from_number(T::zero()),\n                        GroupedExpression::from_number(T::zero()),\n                    ],\n                }\n            })\n            .collect::<Vec<_>>();\n        let other_constraints = range_constraints\n            .into_iter()\n            .map(|(expr, rc)| {\n                // Use the variable range checker to range-check expressions:\n                // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/var_range/bus.rs\n                // Expects (x, bits), where `x` is in the range [0, 2^bits - 1]\n                let Some(num_bits) = range_constraint_to_num_bits(&rc) else {\n                    return Err(MakeRangeConstraintsError(format!(\n                        \"Failed to get number of bits from range constraint: {rc:?}\"\n                    )));\n                };\n                let bus_id = self\n                    .bus_map\n                    
.get_bus_id(&BusType::Other(OpenVmBusType::VariableRangeChecker))\n                    .unwrap();\n                Ok(BusInteraction {\n                    bus_id: GroupedExpression::from_number(T::from(bus_id)),\n                    multiplicity: GroupedExpression::from_number(T::one()),\n                    payload: vec![\n                        expr,\n                        GroupedExpression::from_number(T::from(num_bits as u64)),\n                    ],\n                })\n            })\n            .collect::<Result<Vec<_>, _>>()?;\n        Ok(tuple_range_checker_constraints\n            .into_iter()\n            .chain(byte_constraints)\n            .chain(other_constraints)\n            .collect::<Vec<_>>())\n    }\n}\n\n#[cfg(test)]\nmod test_utils {\n\n    use super::*;\n    use powdr_number::BabyBearField;\n\n    pub fn value(value: u64) -> RangeConstraint<BabyBearField> {\n        RangeConstraint::from_value(BabyBearField::from(value))\n    }\n\n    pub fn mask(mask: u64) -> RangeConstraint<BabyBearField> {\n        RangeConstraint::from_mask(mask)\n    }\n\n    pub fn range(start: u64, end: u64) -> RangeConstraint<BabyBearField> {\n        RangeConstraint::from_range(BabyBearField::from(start), BabyBearField::from(end))\n    }\n}\n"
  },
  {
    "path": "openvm-bus-interaction-handler/src/memory.rs",
    "content": "use powdr_constraint_solver::range_constraint::RangeConstraint;\nuse powdr_number::{FieldElement, LargeInt};\n\nuse super::byte_constraint;\n\n/// Taken from the openvm implementation, should be kept in sync.\npub const RV32_REGISTER_AS: u32 = 1;\n/// Taken from the openvm implementation, should be kept in sync.\npub const RV32_MEMORY_AS: u32 = 2;\n\npub fn handle_memory<T: FieldElement>(\n    payload: &[RangeConstraint<T>],\n    multiplicity: T,\n) -> Vec<RangeConstraint<T>> {\n    // See: https://github.com/openvm-org/openvm/blob/main/crates/vm/src/system/memory/offline_checker/bus.rs\n    // Expects (address_space, pointer, data, timestamp).\n    let [address_space, pointer, data @ .., timestamp] = payload else {\n        panic!();\n    };\n    assert!(!data.is_empty(), \"Data must contain at least one element\");\n\n    if multiplicity != -T::one() {\n        // The interaction is not a receive, we can't make assumptions about the ranges.\n        return payload.to_vec();\n    }\n\n    let address_space_value = address_space\n        .try_to_single_value()\n        .map(|v| v.to_integer().try_into_u32().unwrap());\n\n    match address_space_value {\n        Some(RV32_REGISTER_AS | RV32_MEMORY_AS) => {\n            let data = if address_space_value == Some(RV32_REGISTER_AS)\n                && pointer.try_to_single_value() == Some(T::zero())\n            {\n                // By the assumption that x0 is never written to, we know the result.\n                data.iter()\n                    .map(|_| RangeConstraint::from_value(T::zero()))\n                    .collect::<Vec<_>>()\n            } else {\n                // By the assumption that all data written to registers or memory are range-checked,\n                // we can return a byte range constraint for the data.\n                data.iter().map(|_| byte_constraint()).collect::<Vec<_>>()\n            };\n\n            [*address_space, *pointer]\n                .into_iter()\n             
   .chain(data)\n                .chain(std::iter::once(*timestamp))\n                .collect()\n        }\n        // Otherwise, we can't improve the constraints\n        _ => payload.to_vec(),\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{bus_map::DEFAULT_MEMORY, test_utils::*, OpenVmBusInteractionHandler};\n\n    use super::*;\n    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};\n    use powdr_number::BabyBearField;\n\n    fn run(\n        address_space: RangeConstraint<BabyBearField>,\n        pointer: RangeConstraint<BabyBearField>,\n        data: Vec<RangeConstraint<BabyBearField>>,\n        timestamp: RangeConstraint<BabyBearField>,\n        multiplicity: BabyBearField,\n    ) -> Vec<RangeConstraint<BabyBearField>> {\n        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();\n\n        let bus_interaction = BusInteraction {\n            bus_id: RangeConstraint::from_value(DEFAULT_MEMORY.into()),\n            multiplicity: RangeConstraint::from_value(multiplicity),\n            payload: std::iter::once(address_space)\n                .chain(std::iter::once(pointer))\n                .chain(data)\n                .chain(std::iter::once(timestamp))\n                .collect(),\n        };\n        let result = handler.handle_bus_interaction(bus_interaction);\n        result.payload\n    }\n\n    #[test]\n    fn test_receive() {\n        let address_space = value(RV32_MEMORY_AS as u64);\n        let pointer = value(0x1234);\n        let data = vec![Default::default(); 4];\n        let timestamp = value(0x5678);\n\n        let result = run(\n            address_space,\n            pointer,\n            data,\n            timestamp,\n            -(BabyBearField::from(1)),\n        );\n\n        assert_eq!(result.len(), 7);\n        assert_eq!(result[0], value(RV32_MEMORY_AS as u64));\n        assert_eq!(result[1], value(0x1234));\n        assert_eq!(result[2], byte_constraint());\n        
assert_eq!(result[3], byte_constraint());\n        assert_eq!(result[4], byte_constraint());\n        assert_eq!(result[5], byte_constraint());\n        assert_eq!(result[6], value(0x5678));\n    }\n\n    #[test]\n    fn test_send() {\n        let address_space = value(RV32_MEMORY_AS as u64);\n        let pointer = value(0x1234);\n        let data = vec![Default::default(); 4];\n        let timestamp = value(0x5678);\n\n        let result = run(address_space, pointer, data, timestamp, 1.into());\n\n        assert_eq!(result.len(), 7);\n        assert_eq!(result[0], value(RV32_MEMORY_AS as u64));\n        assert_eq!(result[1], value(0x1234));\n        // For sends, the range constraints should not be modified.\n        assert_eq!(result[2], Default::default());\n        assert_eq!(result[3], Default::default());\n        assert_eq!(result[4], Default::default());\n        assert_eq!(result[5], Default::default());\n        assert_eq!(result[6], value(0x5678));\n    }\n}\n"
  },
  {
    "path": "openvm-bus-interaction-handler/src/memory_bus_interaction.rs",
    "content": "use std::hash::Hash;\nuse std::{array::IntoIter, fmt::Display};\n\nuse powdr_autoprecompiles::memory_optimizer::{\n    MemoryBusInteraction, MemoryBusInteractionConversionError, MemoryOp,\n};\nuse powdr_constraint_solver::{\n    constraint_system::BusInteraction, grouped_expression::GroupedExpression,\n};\nuse powdr_number::FieldElement;\n\n/// The memory address space for register memory operations.\npub const REGISTER_ADDRESS_SPACE: u32 = 1;\n\n#[derive(Clone, Debug)]\npub struct OpenVmMemoryBusInteraction<T: FieldElement, V> {\n    op: MemoryOp,\n    address: OpenVmAddress<T, V>,\n    data: Vec<GroupedExpression<T, V>>,\n    timestamp: Vec<GroupedExpression<T, V>>,\n}\n\n#[derive(Clone, Hash, Eq, PartialEq, Debug)]\npub struct OpenVmAddress<T, V> {\n    /// The address space (e.g. register, memory, native, etc.), always a concrete number.\n    address_space: T,\n    /// The address expression.\n    local_address: GroupedExpression<T, V>,\n}\n\nimpl<T: FieldElement, V> IntoIterator for OpenVmAddress<T, V> {\n    type Item = GroupedExpression<T, V>;\n    type IntoIter = IntoIter<GroupedExpression<T, V>, 2>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        [\n            GroupedExpression::from_number(self.address_space),\n            self.local_address,\n        ]\n        .into_iter()\n    }\n}\n\nimpl<T: FieldElement, V: Ord + Clone + Eq + Display + Hash> MemoryBusInteraction<T, V>\n    for OpenVmMemoryBusInteraction<T, V>\n{\n    type Address = OpenVmAddress<T, V>;\n\n    fn try_from_bus_interaction(\n        bus_interaction: &BusInteraction<GroupedExpression<T, V>>,\n        memory_bus_id: u64,\n    ) -> Result<Option<Self>, MemoryBusInteractionConversionError> {\n        match bus_interaction.bus_id.try_to_number() {\n            None => return Err(MemoryBusInteractionConversionError),\n            Some(id) if id == memory_bus_id.into() => {}\n            Some(_) => return Ok(None),\n        }\n\n        let op = match 
bus_interaction.multiplicity.try_to_number() {\n            Some(n) if n == 1.into() => MemoryOp::SetNew,\n            Some(n) if n == (-1).into() => MemoryOp::GetPrevious,\n            _ => return Err(MemoryBusInteractionConversionError),\n        };\n\n        let [address_space, addr, data @ .., timestamp] = &bus_interaction.payload[..] else {\n            panic!();\n        };\n        let Some(address_space) = address_space.try_to_number() else {\n            panic!(\"Address space must be known!\");\n        };\n        let address = OpenVmAddress {\n            address_space,\n            local_address: addr.clone(),\n        };\n        Ok(Some(OpenVmMemoryBusInteraction {\n            op,\n            address,\n            data: data.to_vec(),\n            timestamp: vec![timestamp.clone()],\n        }))\n    }\n\n    fn addr(&self) -> Self::Address {\n        self.address.clone()\n    }\n\n    fn data(&self) -> &[GroupedExpression<T, V>] {\n        &self.data\n    }\n\n    fn timestamp_limbs(&self) -> &[GroupedExpression<T, V>] {\n        &self.timestamp\n    }\n\n    fn op(&self) -> MemoryOp {\n        self.op\n    }\n}\n"
  },
  {
    "path": "openvm-bus-interaction-handler/src/tuple_range_checker.rs",
    "content": "use powdr_autoprecompiles::range_constraint_optimizer::RangeConstraints;\nuse powdr_constraint_solver::{\n    grouped_expression::GroupedExpression, range_constraint::RangeConstraint,\n};\nuse powdr_number::FieldElement;\n\n#[derive(Clone)]\npub struct TupleRangeCheckerHandler {\n    range_tuple_checker_sizes: [u32; 2],\n}\n\nimpl TupleRangeCheckerHandler {\n    pub fn new(range_tuple_checker_sizes: [u32; 2]) -> Self {\n        Self {\n            range_tuple_checker_sizes,\n        }\n    }\n\n    pub fn tuple_range_checker_ranges<T: FieldElement>(\n        &self,\n    ) -> (RangeConstraint<T>, RangeConstraint<T>) {\n        (\n            RangeConstraint::from_range(T::zero(), T::from(self.range_tuple_checker_sizes[0] - 1)),\n            RangeConstraint::from_range(T::zero(), T::from(self.range_tuple_checker_sizes[1] - 1)),\n        )\n    }\n\n    pub fn handle_bus_interaction<T: FieldElement>(\n        &self,\n        payload: &[RangeConstraint<T>],\n    ) -> Vec<RangeConstraint<T>> {\n        // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/range_tuple/bus.rs\n        // Expects (x, y), where `x` is in the range [0, MAX_0] and `y` is in the range [0, MAX_1]\n        let [_x, _y] = payload else {\n            panic!(\"Expected arguments (x, y)\");\n        };\n\n        let (x_rc, y_rc) = self.tuple_range_checker_ranges();\n        vec![x_rc, y_rc]\n    }\n\n    pub fn pure_range_constraints<T: FieldElement, V: Ord + Clone + Eq>(\n        &self,\n        payload: &[GroupedExpression<T, V>],\n    ) -> Option<RangeConstraints<T, V>> {\n        // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/range_tuple/bus.rs\n        // Expects (x, y), where `x` is in the range [0, MAX_0] and `y` is in the range [0, MAX_1]\n        let [x, y] = payload else {\n            panic!(\"Expected arguments (x, y)\");\n        };\n        let (x_rc, y_rc) = self.tuple_range_checker_ranges();\n 
       Some([(x.clone(), x_rc), (y.clone(), y_rc)].into())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        bus_map::DEFAULT_TUPLE_RANGE_CHECKER, test_utils::value, OpenVmBusInteractionHandler,\n    };\n\n    use super::*;\n    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};\n    use powdr_number::BabyBearField;\n\n    fn run(\n        x: RangeConstraint<BabyBearField>,\n        y: RangeConstraint<BabyBearField>,\n    ) -> Vec<RangeConstraint<BabyBearField>> {\n        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();\n\n        let bus_interaction = BusInteraction {\n            bus_id: RangeConstraint::from_value(DEFAULT_TUPLE_RANGE_CHECKER.into()),\n            multiplicity: value(1),\n            payload: vec![x, y],\n        };\n        let result = handler.handle_bus_interaction(bus_interaction);\n        result.payload\n    }\n\n    #[test]\n    fn test_unknown() {\n        let x = Default::default();\n        let y = Default::default();\n        let result = run(x, y);\n        assert_eq!(result.len(), 2);\n        let (x_rc, y_rc) = (\n            RangeConstraint::from_range(BabyBearField::from(0), BabyBearField::from(255)),\n            RangeConstraint::from_range(\n                BabyBearField::from(0),\n                BabyBearField::from(8 * (1 << 8) - 1),\n            ),\n        );\n        assert_eq!(result[0], x_rc);\n        assert_eq!(result[1], y_rc);\n    }\n}\n"
  },
  {
    "path": "openvm-bus-interaction-handler/src/variable_range_checker.rs",
    "content": "use powdr_autoprecompiles::range_constraint_optimizer::RangeConstraints;\nuse powdr_constraint_solver::{\n    grouped_expression::GroupedExpression, range_constraint::RangeConstraint,\n};\nuse powdr_number::{FieldElement, LargeInt};\n\n/// The maximum number of bits that can be checked by the variable range checker.\n// TODO: This should be configurable\nconst MAX_BITS: u64 = 25;\n\n/// Implements [BusInteractionHandler::handle_bus_interaction] for the variable range checker bus,\n/// tightening the currently known range constraints.\npub fn handle_variable_range_checker<T: FieldElement>(\n    payload: &[RangeConstraint<T>],\n) -> Vec<RangeConstraint<T>> {\n    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/var_range/bus.rs\n    // Expects (x, bits), where `x` is in the range [0, 2^bits - 1]\n    let [_x, bits] = payload else {\n        panic!(\"Expected arguments (x, bits)\");\n    };\n    match bits.try_to_single_value() {\n        Some(bits_value) if bits_value.to_degree() <= MAX_BITS => {\n            let bits_value = bits_value.to_integer().try_into_u64().unwrap();\n            let mask = (1u64 << bits_value) - 1;\n            vec![RangeConstraint::from_mask(mask), *bits]\n        }\n        _ => {\n            vec![\n                RangeConstraint::from_mask((1u64 << MAX_BITS) - 1),\n                RangeConstraint::from_range(T::from(0), T::from(MAX_BITS)),\n            ]\n        }\n    }\n}\n\npub fn variable_range_checker_pure_range_constraints<T: FieldElement, V: Ord + Clone + Eq>(\n    payload: &[GroupedExpression<T, V>],\n) -> Option<RangeConstraints<T, V>> {\n    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/var_range/bus.rs\n    // Expects (x, bits), where `x` is in the range [0, 2^bits - 1]\n    let [x, bits] = payload else {\n        panic!(\"Expected arguments (x, bits)\");\n    };\n    bits.try_to_number().map(|bits| {\n        [(\n            
x.clone(),\n            RangeConstraint::from_mask((1u64 << bits.to_degree()) - 1),\n        )]\n        .into()\n    })\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        bus_map::DEFAULT_VARIABLE_RANGE_CHECKER,\n        test_utils::{mask, range, value},\n        OpenVmBusInteractionHandler,\n    };\n\n    use super::*;\n    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};\n    use powdr_number::BabyBearField;\n\n    fn run(\n        x: RangeConstraint<BabyBearField>,\n        bits: RangeConstraint<BabyBearField>,\n    ) -> Vec<RangeConstraint<BabyBearField>> {\n        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();\n\n        let bus_interaction = BusInteraction {\n            bus_id: RangeConstraint::from_value(DEFAULT_VARIABLE_RANGE_CHECKER.into()),\n            multiplicity: value(1),\n            payload: vec![x, bits],\n        };\n        let result = handler.handle_bus_interaction(bus_interaction);\n        result.payload\n    }\n\n    #[test]\n    fn test_unknown_bits() {\n        let x = Default::default();\n        let bits = Default::default();\n        let result = run(x, bits);\n        assert_eq!(result.len(), 2);\n        assert_eq!(\n            result[0],\n            RangeConstraint::from_mask((1u64 << MAX_BITS) - 1)\n        );\n        assert_eq!(result[1], range(0, MAX_BITS));\n    }\n\n    #[test]\n    fn test_known_bits() {\n        let x = Default::default();\n        let bits = value(12);\n        let result = run(x, bits);\n        assert_eq!(result.len(), 2);\n        assert_eq!(result[0], mask(0xfff));\n        assert_eq!(result[1], value(12));\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/.gitignore",
    "content": "# Generated by Cargo\n# will have compiled files and executables\ndebug/\ntarget/\n\n# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries\n# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html\n#Cargo.lock\n\n# These are backup files generated by rustfmt\n**/*.rs.bk\n\n# MSVC Windows builds of rustc generate these, which store debugging information\n*.pdb\n\n# RustRover\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n#.idea/\n\nguest/openvm\nguest-keccak/target"
  },
  {
    "path": "openvm-riscv/Cargo.toml",
    "content": "[package]\nname = \"powdr-openvm-riscv\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[features]\ndefault = []\naot = [\"powdr-openvm/aot\", \"powdr-openvm-riscv-hints-circuit/aot\"]\ntco = [\"powdr-openvm/tco\", \"powdr-openvm-riscv-hints-circuit/tco\"]\nmetrics = [\"powdr-openvm/metrics\"]\ncuda = [\"powdr-openvm/cuda\"]\n\n[dependencies]\nopenvm.workspace = true\nopenvm-build.workspace = true\nopenvm-rv32im-circuit.workspace = true\nopenvm-rv32im-transpiler.workspace = true\nopenvm-rv32im-guest.workspace = true\nopenvm-transpiler.workspace = true\nopenvm-circuit.workspace = true\nopenvm-circuit-derive.workspace = true\nopenvm-circuit-primitives.workspace = true\nopenvm-circuit-primitives-derive.workspace = true\nopenvm-instructions.workspace = true\nopenvm-instructions-derive.workspace = true\nopenvm-sdk.workspace = true\nopenvm-ecc-circuit.workspace = true\nopenvm-ecc-transpiler.workspace = true\nopenvm-keccak256-circuit.workspace = true\nopenvm-keccak256-transpiler.workspace = true\nopenvm-sha256-circuit.workspace = true\nopenvm-sha256-transpiler.workspace = true\nopenvm-algebra-circuit.workspace = true\nopenvm-algebra-transpiler.workspace = true\nopenvm-bigint-circuit.workspace = true\nopenvm-bigint-transpiler.workspace = true\nopenvm-pairing-circuit.workspace = true\nopenvm-pairing-transpiler.workspace = true\nopenvm-native-circuit.workspace = true\nopenvm-native-recursion.workspace = true\n\nopenvm-stark-sdk.workspace = true\nopenvm-stark-backend.workspace = true\n\npowdr-expression.workspace = true\npowdr-number.workspace = true\npowdr-riscv-elf.workspace = true\npowdr-autoprecompiles.workspace = true\npowdr-constraint-solver.workspace = true\npowdr-openvm-bus-interaction-handler.workspace = true\npowdr-openvm.workspace = true\n\npowdr-openvm-riscv-hints-transpiler.workspace = true\npowdr-openvm-riscv-hints-circuit.workspace = true\n\neyre.workspace 
= true\nserde.workspace = true\nderive_more.workspace = true\nitertools.workspace = true\n\ntracing.workspace = true\ntracing-subscriber = { version = \"0.3.17\", features = [\"std\", \"env-filter\"] }\n\nclap = { version = \"^4.3\", features = [\"derive\"] }\n\nlog.workspace = true\nstruct-reflection = { git = \"https://github.com/gzanitti/struct-reflection-rs.git\" }\n\nmetrics.workspace = true\ntoml = \"0.8.14\"\n\nrustc-demangle = \"0.1.25\"\n\ncfg-if = \"1.0.0\"\n\n[dev-dependencies]\npowdr-openvm = { workspace = true, features = [\"test-utils\"] }\ntest-log.workspace = true\ntempfile = \"3.20.0\"\npretty_assertions.workspace = true\nopenvm-ecc-circuit.workspace = true\nopenvm-algebra-circuit.workspace = true\nopenvm-bigint-circuit.workspace = true\nopenvm-pairing-circuit.workspace = true\nopenvm-pairing-transpiler.workspace = true\nexpect-test = \"1.5.1\"\ntracing-log = \"0.2.0\"\n\n[build-dependencies]\nopenvm-cuda-builder = { workspace = true, optional = true }\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-circuit/Cargo.toml",
    "content": "[package]\nname = \"powdr-openvm-riscv-hints-circuit\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[features]\ndefault = []\naot = [\"openvm-circuit/aot\", \"openvm-rv32im-circuit/aot\"]\ntco = [\"openvm-circuit/tco\", \"openvm-rv32im-circuit/tco\"]\n\n[dependencies]\nopenvm-circuit = { workspace = true }\nopenvm-instructions = { workspace = true }\nopenvm-rv32im-circuit = { workspace = true }\nopenvm-stark-backend = { workspace = true }\nopenvm-stark-sdk = { workspace = true }\npowdr-openvm-riscv-hints-transpiler = { workspace = true }\neyre.workspace = true\ncrypto-bigint = \"0.6.1\"\nelliptic-curve = \"0.13.8\"\nrand = { version = \"0.8.5\", default-features = false }\nserde.workspace = true\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-circuit/src/executors.rs",
    "content": "use openvm_circuit::arch::{PhantomSubExecutor, Streams};\nuse openvm_circuit::system::memory::online::GuestMemory;\nuse openvm_instructions::riscv::RV32_MEMORY_AS;\nuse openvm_instructions::PhantomDiscriminant;\nuse openvm_rv32im_circuit::adapters::read_rv32_register;\nuse openvm_stark_backend::p3_field::PrimeField32;\nuse rand::rngs::StdRng;\n\nuse crate::field10x26_k256;\n\n/// Example hint implementation.\n/// Takes a single u32 as input and sets the hint to be the bytes of the u32 in reverse order.\npub struct ReverseBytesSubEx;\n\nimpl<F: PrimeField32> PhantomSubExecutor<F> for ReverseBytesSubEx {\n    fn phantom_execute(\n        &self,\n        memory: &GuestMemory,\n        streams: &mut Streams<F>,\n        _: &mut StdRng,\n        _: PhantomDiscriminant,\n        a: u32,\n        _: u32,\n        c_upper: u16,\n    ) -> eyre::Result<()> {\n        assert_eq!(c_upper, 0);\n        // read register\n        let rs1 = read_rv32_register(memory, a);\n        // read memory\n        let bytes = unsafe { memory.read::<u8, 4>(RV32_MEMORY_AS, rs1) };\n        // write hint as bytes in reverse\n        let hint_bytes = bytes\n            .into_iter()\n            .rev()\n            .map(|b| F::from_canonical_u8(b))\n            .collect();\n        streams.hint_stream = hint_bytes;\n        Ok(())\n    }\n}\n\n/// Takes as input a pointer to 32 bytes, the SEC1 encoding (i.e., big-endian) of a k256 coordinate field element.\n/// Sets the hint to be the inverse of the field element in the same encoding (if not zero).\n/// Sets the hint to zero when the input is zero.\npub struct K256InverseFieldSubEx;\n\nuse crypto_bigint::const_monty_form;\nuse crypto_bigint::impl_modulus;\nuse crypto_bigint::modular::ConstMontyParams;\nuse crypto_bigint::Encoding;\nuse crypto_bigint::Zero;\nuse crypto_bigint::U256;\nimpl_modulus!(\n    K256Mod,\n    U256,\n    \"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F\"\n);\n\nimpl<F: PrimeField32> 
PhantomSubExecutor<F> for K256InverseFieldSubEx {\n    fn phantom_execute(\n        &self,\n        memory: &GuestMemory,\n        streams: &mut Streams<F>,\n        _: &mut StdRng,\n        _: PhantomDiscriminant,\n        a: u32,\n        _: u32,\n        c_upper: u16,\n    ) -> eyre::Result<()> {\n        assert_eq!(c_upper, 0);\n        // read register\n        let rs1 = read_rv32_register(memory, a);\n        // read the field element\n        let bytes: [u8; 32] = unsafe { memory.read::<u8, 32>(RV32_MEMORY_AS, rs1) };\n\n        let n = U256::from_be_bytes(bytes);\n\n        // perform the inverse.\n        let n_mod = const_monty_form!(n, K256Mod);\n        let n_inv = if !(bool::from(n_mod.is_zero())) {\n            n_mod.inv().unwrap().retrieve()\n        } else {\n            U256::ZERO\n        };\n        let inv_bytes = n_inv\n            .to_be_bytes()\n            .into_iter()\n            .map(|b| F::from_canonical_u8(b))\n            .collect();\n        streams.hint_stream = inv_bytes;\n\n        Ok(())\n    }\n}\n\n/// Size in bytes of the k256 field element in 10x26 representation.\nconst FIELD10X26_BYTES: usize = 40; // [u32;10]\n\n/// Takes as input a pointer to the inner representation of a k256 coordinate field element (in 32-bit architectures).\n/// Sets the hint to be the inverse of the input (if not zero), in the same representation.\n/// If the input is zero (normalized or not), the hint is also set, but undefined.\npub struct K256InverseField10x26SubEx;\n\nimpl<F: PrimeField32> PhantomSubExecutor<F> for K256InverseField10x26SubEx {\n    fn phantom_execute(\n        &self,\n        memory: &GuestMemory,\n        streams: &mut Streams<F>,\n        _: &mut StdRng,\n        _: PhantomDiscriminant,\n        a: u32,\n        _: u32,\n        c_upper: u16,\n    ) -> eyre::Result<()> {\n        assert_eq!(c_upper, 0);\n        // read register\n        let rs1 = read_rv32_register(memory, a);\n        // read the k256 field_10x26 as raw 
bytes\n        let bytes: [u8; FIELD10X26_BYTES] =\n            unsafe { memory.read::<u8, { FIELD10X26_BYTES }>(RV32_MEMORY_AS, rs1) };\n\n        // we just reinterpret the bytes as a k256 field element. We don't use mem::transmute to avoid alignment issues\n        let mut elem = [0u32; 10];\n        unsafe {\n            std::ptr::copy_nonoverlapping(\n                bytes.as_ptr(),\n                elem.as_mut_ptr() as *mut u8,\n                FIELD10X26_BYTES,\n            );\n        }\n        let elem = field10x26_k256::FieldElement10x26(elem);\n        let inv = elem.invert().normalize();\n        // okay to transmute in the opposite direction\n        let inv_bytes: [u8; FIELD10X26_BYTES] = unsafe { std::mem::transmute(inv.0) };\n        streams.hint_stream = inv_bytes\n            .into_iter()\n            .map(|b| F::from_canonical_u8(b))\n            .collect();\n\n        Ok(())\n    }\n}\n\n/// Pre-defined non-quadratic residue for k256.\n/// The same value should be used by the guest to check the non-square case.\nconst K256_NON_QUADRATIC_RESIDUE: field10x26_k256::FieldElement10x26 =\n    field10x26_k256::FieldElement10x26([3, 0, 0, 0, 0, 0, 0, 0, 0, 0]);\n\n/// Takes as input a pointer to the inner representation of a k256 coordinate field element (in 32-bit architectures).\n/// If the number is square, sets the hint an u32 of value one, followed by a square root in the same inner representation.\n/// If the number is not square, sets the hint to an u32 of value zero.\npub struct K256SqrtField10x26SubEx;\n\nimpl<F: PrimeField32> PhantomSubExecutor<F> for K256SqrtField10x26SubEx {\n    fn phantom_execute(\n        &self,\n        memory: &GuestMemory,\n        streams: &mut Streams<F>,\n        _: &mut StdRng,\n        _: PhantomDiscriminant,\n        a: u32,\n        _: u32,\n        c_upper: u16,\n    ) -> eyre::Result<()> {\n        assert_eq!(c_upper, 0);\n        // read register\n        let rs1 = read_rv32_register(memory, a);\n        // 
read the k256 field_10x26 as raw bytes\n        let bytes: [u8; FIELD10X26_BYTES] =\n            unsafe { memory.read::<u8, { FIELD10X26_BYTES }>(RV32_MEMORY_AS, rs1) };\n\n        // we just reinterpret the bytes as a k256 field element. Can't use mem::transmute due to alignment requirements\n        let mut elem = [0u32; 10];\n        unsafe {\n            std::ptr::copy_nonoverlapping(\n                bytes.as_ptr(),\n                elem.as_mut_ptr() as *mut u8,\n                FIELD10X26_BYTES,\n            );\n        }\n        let elem = field10x26_k256::FieldElement10x26(elem);\n        let res = elem.sqrt();\n        if res.is_some().into() {\n            // return 1 followed by the result\n            let bytes: [u8; FIELD10X26_BYTES] = unsafe {\n                // safe to transmute into u8 array\n                std::mem::transmute(res.unwrap().0)\n            };\n            streams.hint_stream = 1u32\n                .to_le_bytes() // indicates that a square root exists\n                .into_iter()\n                .chain(bytes)\n                .map(|b| F::from_canonical_u8(b))\n                .collect();\n        } else {\n            // Number is not square.\n            // Find the square root of the number times the predefined non-quadratic residue\n            let res = (elem.mul(&K256_NON_QUADRATIC_RESIDUE)).sqrt().unwrap();\n            let bytes: [u8; FIELD10X26_BYTES] = unsafe {\n                // safe to transmute into u8 array\n                std::mem::transmute(res.0)\n            };\n            streams.hint_stream = 0u32\n                .to_le_bytes() // indicate number is not square\n                .into_iter()\n                .chain(bytes)\n                .map(|b| F::from_canonical_u8(b))\n                .collect();\n        }\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-circuit/src/field10x26_k256.rs",
    "content": "//! The code here has been mostly copied from the `k256` crate.\n//! Its the 32-bit implementation of the field element.\n\nuse elliptic_curve::consts::U32;\nuse elliptic_curve::{\n    subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption},\n    zeroize::Zeroize,\n    FieldBytesEncoding,\n};\n// use crypto_bigint::U256;\nuse elliptic_curve::bigint::ArrayEncoding;\nuse elliptic_curve::bigint::U256;\n\npub type FieldBytes = elliptic_curve::FieldBytes<Secp256k1>;\n\n/// Order of the secp256k1 elliptic curve in hexadecimal.\nconst ORDER_HEX: &str = \"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141\";\n\n/// Order of the secp256k1 elliptic curve.\nconst ORDER: U256 = U256::from_be_hex(ORDER_HEX);\n\n#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, PartialOrd, Ord)]\npub struct Secp256k1;\n\nimpl elliptic_curve::Curve for Secp256k1 {\n    /// 32-byte serialized field elements.\n    type FieldBytesSize = U32;\n\n    /// 256-bit field modulus.\n    type Uint = U256;\n\n    /// Curve order.\n    const ORDER: U256 = ORDER;\n}\n\nimpl FieldBytesEncoding<Secp256k1> for U256 {\n    fn decode_field_bytes(field_bytes: &FieldBytes) -> Self {\n        U256::from_be_byte_array(*field_bytes)\n    }\n\n    fn encode_field_bytes(&self) -> FieldBytes {\n        self.to_be_byte_array()\n    }\n}\n\nimpl elliptic_curve::PrimeCurve for Secp256k1 {}\n\n// -----------------------------------------------------------------------------------------------------\n\n/// Scalars modulo SECP256k1 modulus (2^256 - 2^32 - 2^9 - 2^8 - 2^7 - 2^6 - 2^4 - 1).\n/// Uses 10 32-bit limbs (little-endian), where in the normalized form\n/// first 9 contain 26 bits of the value each, and the last one contains 22 bits.\n/// CurveArithmetic operations can be done without modulo reduction for some time,\n/// using the remaining overflow bits.\n#[derive(Clone, Copy, Debug)]\npub struct FieldElement10x26(pub(crate) [u32; 10]);\n\n// TODO: maybe instead clean this file 
up and only keep code that is used?\n#[allow(unused)]\nimpl FieldElement10x26 {\n    /// Zero element.\n    pub const ZERO: Self = Self([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);\n\n    /// Multiplicative identity.\n    pub const ONE: Self = Self([1, 0, 0, 0, 0, 0, 0, 0, 0, 0]);\n\n    /// Attempts to parse the given byte array as an SEC1-encoded field element.\n    /// Does not check the result for being in the correct range.\n    pub(crate) const fn from_bytes_unchecked(bytes: &[u8; 32]) -> Self {\n        let w0 = (bytes[31] as u32)\n            | ((bytes[30] as u32) << 8)\n            | ((bytes[29] as u32) << 16)\n            | (((bytes[28] & 0x3) as u32) << 24);\n        let w1 = (((bytes[28] >> 2) as u32) & 0x3f)\n            | ((bytes[27] as u32) << 6)\n            | ((bytes[26] as u32) << 14)\n            | (((bytes[25] & 0xf) as u32) << 22);\n        let w2 = (((bytes[25] >> 4) as u32) & 0xf)\n            | ((bytes[24] as u32) << 4)\n            | ((bytes[23] as u32) << 12)\n            | (((bytes[22] & 0x3f) as u32) << 20);\n        let w3 = (((bytes[22] >> 6) as u32) & 0x3)\n            | ((bytes[21] as u32) << 2)\n            | ((bytes[20] as u32) << 10)\n            | ((bytes[19] as u32) << 18);\n        let w4 = (bytes[18] as u32)\n            | ((bytes[17] as u32) << 8)\n            | ((bytes[16] as u32) << 16)\n            | (((bytes[15] & 0x3) as u32) << 24);\n        let w5 = (((bytes[15] >> 2) as u32) & 0x3f)\n            | ((bytes[14] as u32) << 6)\n            | ((bytes[13] as u32) << 14)\n            | (((bytes[12] & 0xf) as u32) << 22);\n        let w6 = (((bytes[12] >> 4) as u32) & 0xf)\n            | ((bytes[11] as u32) << 4)\n            | ((bytes[10] as u32) << 12)\n            | (((bytes[9] & 0x3f) as u32) << 20);\n        let w7 = (((bytes[9] >> 6) as u32) & 0x3)\n            | ((bytes[8] as u32) << 2)\n            | ((bytes[7] as u32) << 10)\n            | ((bytes[6] as u32) << 18);\n        let w8 = (bytes[5] as u32)\n            | ((bytes[4] 
as u32) << 8)\n            | ((bytes[3] as u32) << 16)\n            | (((bytes[2] & 0x3) as u32) << 24);\n        let w9 = (((bytes[2] >> 2) as u32) & 0x3f)\n            | ((bytes[1] as u32) << 6)\n            | ((bytes[0] as u32) << 14);\n\n        Self([w0, w1, w2, w3, w4, w5, w6, w7, w8, w9])\n    }\n\n    /// Attempts to parse the given byte array as an SEC1-encoded field element.\n    ///\n    /// Returns None if the byte array does not contain a big-endian integer in the range\n    /// [0, p).\n    pub fn from_bytes(bytes: &FieldBytes) -> CtOption<Self> {\n        let res = Self::from_bytes_unchecked(bytes.as_ref());\n        let overflow = res.get_overflow();\n\n        CtOption::new(res, !overflow)\n    }\n\n    pub const fn from_u64(val: u64) -> Self {\n        let w0 = (val as u32) & 0x3FFFFFF;\n        let val = val >> 26;\n        let w1 = (val as u32) & 0x3FFFFFF;\n        let w2 = (val >> 26) as u32;\n        Self([w0, w1, w2, 0, 0, 0, 0, 0, 0, 0])\n    }\n\n    /// Returns the SEC1 encoding of this field element.\n    pub fn to_bytes(self) -> FieldBytes {\n        let mut r = FieldBytes::default();\n        r[0] = (self.0[9] >> 14) as u8;\n        r[1] = (self.0[9] >> 6) as u8;\n        r[2] = ((self.0[9] as u8 & 0x3Fu8) << 2) | ((self.0[8] >> 24) as u8 & 0x3);\n        r[3] = (self.0[8] >> 16) as u8;\n        r[4] = (self.0[8] >> 8) as u8;\n        r[5] = self.0[8] as u8;\n        r[6] = (self.0[7] >> 18) as u8;\n        r[7] = (self.0[7] >> 10) as u8;\n        r[8] = (self.0[7] >> 2) as u8;\n        r[9] = ((self.0[7] as u8 & 0x3u8) << 6) | ((self.0[6] >> 20) as u8 & 0x3fu8);\n        r[10] = (self.0[6] >> 12) as u8;\n        r[11] = (self.0[6] >> 4) as u8;\n        r[12] = ((self.0[6] as u8 & 0xfu8) << 4) | ((self.0[5] >> 22) as u8 & 0xfu8);\n        r[13] = (self.0[5] >> 14) as u8;\n        r[14] = (self.0[5] >> 6) as u8;\n        r[15] = ((self.0[5] as u8 & 0x3fu8) << 2) | ((self.0[4] >> 24) as u8 & 0x3u8);\n        r[16] = (self.0[4] >> 16) as 
u8;\n        r[17] = (self.0[4] >> 8) as u8;\n        r[18] = self.0[4] as u8;\n        r[19] = (self.0[3] >> 18) as u8;\n        r[20] = (self.0[3] >> 10) as u8;\n        r[21] = (self.0[3] >> 2) as u8;\n        r[22] = ((self.0[3] as u8 & 0x3u8) << 6) | ((self.0[2] >> 20) as u8 & 0x3fu8);\n        r[23] = (self.0[2] >> 12) as u8;\n        r[24] = (self.0[2] >> 4) as u8;\n        r[25] = ((self.0[2] as u8 & 0xfu8) << 4) | ((self.0[1] >> 22) as u8 & 0xfu8);\n        r[26] = (self.0[1] >> 14) as u8;\n        r[27] = (self.0[1] >> 6) as u8;\n        r[28] = ((self.0[1] as u8 & 0x3fu8) << 2) | ((self.0[0] >> 24) as u8 & 0x3u8);\n        r[29] = (self.0[0] >> 16) as u8;\n        r[30] = (self.0[0] >> 8) as u8;\n        r[31] = self.0[0] as u8;\n        r\n    }\n\n    /// Adds `x * (2^256 - modulus)`.\n    fn add_modulus_correction(&self, x: u32) -> Self {\n        // add (2^256 - modulus) * x to the first limb\n        let t0 = self.0[0] + x * 0x3D1u32;\n\n        // Propagate excess bits up the limbs\n        let t1 = self.0[1] + (x << 6); // add `x` times the high bit of correction (2^32)\n        let t1 = t1 + (t0 >> 26);\n        let t0 = t0 & 0x3FFFFFFu32;\n\n        let t2 = self.0[2] + (t1 >> 26);\n        let t1 = t1 & 0x3FFFFFFu32;\n\n        let t3 = self.0[3] + (t2 >> 26);\n        let t2 = t2 & 0x3FFFFFFu32;\n\n        let t4 = self.0[4] + (t3 >> 26);\n        let t3 = t3 & 0x3FFFFFFu32;\n\n        let t5 = self.0[5] + (t4 >> 26);\n        let t4 = t4 & 0x3FFFFFFu32;\n\n        let t6 = self.0[6] + (t5 >> 26);\n        let t5 = t5 & 0x3FFFFFFu32;\n\n        let t7 = self.0[7] + (t6 >> 26);\n        let t6 = t6 & 0x3FFFFFFu32;\n\n        let t8 = self.0[8] + (t7 >> 26);\n        let t7 = t7 & 0x3FFFFFFu32;\n\n        let t9 = self.0[9] + (t8 >> 26);\n        let t8 = t8 & 0x3FFFFFFu32;\n\n        Self([t0, t1, t2, t3, t4, t5, t6, t7, t8, t9])\n    }\n\n    /// Subtracts the overflow in the last limb and return it with the new field element.\n    /// 
Equivalent to subtracting a multiple of 2^256.\n    fn subtract_modulus_approximation(&self) -> (Self, u32) {\n        let x = self.0[9] >> 22;\n        let t9 = self.0[9] & 0x03FFFFFu32; // equivalent to self -= 2^256 * x\n        (\n            Self([\n                self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], self.0[6],\n                self.0[7], self.0[8], t9,\n            ]),\n            x,\n        )\n    }\n\n    /// Checks if the field element is greater or equal to the modulus.\n    fn get_overflow(&self) -> Choice {\n        let m = self.0[2] & self.0[3] & self.0[4] & self.0[5] & self.0[6] & self.0[7] & self.0[8];\n        let x = (self.0[9] >> 22 != 0)\n            | ((self.0[9] == 0x3FFFFFu32)\n                & (m == 0x3FFFFFFu32)\n                & ((self.0[1] + 0x40u32 + ((self.0[0] + 0x3D1u32) >> 26)) > 0x3FFFFFFu32));\n        Choice::from(x as u8)\n    }\n\n    /// Brings the field element's magnitude to 1, but does not necessarily normalize it.\n    pub fn normalize_weak(&self) -> Self {\n        // Reduce t9 at the start so there will be at most a single carry from the first pass\n        let (t, x) = self.subtract_modulus_approximation();\n\n        // The first pass ensures the magnitude is 1, ...\n        let res = t.add_modulus_correction(x);\n\n        // ... except for a possible carry at bit 22 of t9 (i.e. 
bit 256 of the field element)\n        debug_assert!(res.0[9] >> 23 == 0);\n\n        res\n    }\n\n    /// Fully normalizes the field element.\n    /// That is, first nine limbs are at most 26 bit large, the last limb is at most 22 bit large,\n    /// and the value is less than the modulus.\n    pub fn normalize(&self) -> Self {\n        let res = self.normalize_weak();\n\n        // At most a single final reduction is needed;\n        // check if the value is >= the field characteristic\n        let overflow = res.get_overflow();\n\n        // Apply the final reduction (for constant-time behaviour, we do it always)\n        let res_corrected = res.add_modulus_correction(1u32);\n        // Mask off the possible multiple of 2^256 from the final reduction\n        let (res_corrected, x) = res_corrected.subtract_modulus_approximation();\n\n        // If the last limb didn't carry to bit 23 already,\n        // then it should have after any final reduction\n        debug_assert!(x == (overflow.unwrap_u8() as u32));\n\n        Self::conditional_select(&res, &res_corrected, overflow)\n    }\n\n    /// Checks if the field element becomes zero if normalized.\n    pub fn normalizes_to_zero(&self) -> Choice {\n        let res = self.normalize_weak();\n\n        let t0 = res.0[0];\n        let t1 = res.0[1];\n        let t2 = res.0[2];\n        let t3 = res.0[3];\n        let t4 = res.0[4];\n        let t5 = res.0[5];\n        let t6 = res.0[6];\n        let t7 = res.0[7];\n        let t8 = res.0[8];\n        let t9 = res.0[9];\n\n        // z0 tracks a possible raw value of 0, z1 tracks a possible raw value of the modulus\n        let z0 = t0 | t1 | t2 | t3 | t4 | t5 | t6 | t7 | t8 | t9;\n        let z1 = (t0 ^ 0x3D0u32)\n            & (t1 ^ 0x40u32)\n            & t2\n            & t3\n            & t4\n            & t5\n            & t6\n            & t7\n            & t8\n            & (t9 ^ 0x3C00000u32);\n\n        Choice::from(((z0 == 0) | (z1 == 0x3FFFFFFu32)) as 
u8)\n    }\n\n    /// Determine if this `FieldElement10x26` is zero.\n    ///\n    /// # Returns\n    ///\n    /// If zero, return `Choice(1)`.  Otherwise, return `Choice(0)`.\n    pub fn is_zero(&self) -> Choice {\n        Choice::from(\n            ((self.0[0]\n                | self.0[1]\n                | self.0[2]\n                | self.0[3]\n                | self.0[4]\n                | self.0[5]\n                | self.0[6]\n                | self.0[7]\n                | self.0[8]\n                | self.0[9])\n                == 0) as u8,\n        )\n    }\n\n    /// Determine if this `FieldElement10x26` is odd in the SEC1 sense: `self mod 2 == 1`.\n    ///\n    /// # Returns\n    ///\n    /// If odd, return `Choice(1)`.  Otherwise, return `Choice(0)`.\n    pub fn is_odd(&self) -> Choice {\n        (self.0[0] as u8 & 1).into()\n    }\n\n    // The maximum number `m` for which `0x3FFFFFF * 2 * (m + 1) < 2^32`\n    pub const fn max_magnitude() -> u32 {\n        31u32\n    }\n\n    /// Returns -self, treating it as a value of given magnitude.\n    /// The provided magnitude must be equal or greater than the actual magnitude of `self`.\n    pub const fn negate(&self, magnitude: u32) -> Self {\n        let m: u32 = magnitude + 1;\n        let r0 = 0x3FFFC2Fu32 * 2 * m - self.0[0];\n        let r1 = 0x3FFFFBFu32 * 2 * m - self.0[1];\n        let r2 = 0x3FFFFFFu32 * 2 * m - self.0[2];\n        let r3 = 0x3FFFFFFu32 * 2 * m - self.0[3];\n        let r4 = 0x3FFFFFFu32 * 2 * m - self.0[4];\n        let r5 = 0x3FFFFFFu32 * 2 * m - self.0[5];\n        let r6 = 0x3FFFFFFu32 * 2 * m - self.0[6];\n        let r7 = 0x3FFFFFFu32 * 2 * m - self.0[7];\n        let r8 = 0x3FFFFFFu32 * 2 * m - self.0[8];\n        let r9 = 0x03FFFFFu32 * 2 * m - self.0[9];\n        Self([r0, r1, r2, r3, r4, r5, r6, r7, r8, r9])\n    }\n\n    /// Returns self + rhs mod p.\n    /// Sums the magnitudes.\n    pub const fn add(&self, rhs: &Self) -> Self {\n        Self([\n            self.0[0] + 
rhs.0[0],\n            self.0[1] + rhs.0[1],\n            self.0[2] + rhs.0[2],\n            self.0[3] + rhs.0[3],\n            self.0[4] + rhs.0[4],\n            self.0[5] + rhs.0[5],\n            self.0[6] + rhs.0[6],\n            self.0[7] + rhs.0[7],\n            self.0[8] + rhs.0[8],\n            self.0[9] + rhs.0[9],\n        ])\n    }\n\n    /// Multiplies by a single-limb integer.\n    /// Multiplies the magnitude by the same value.\n    pub const fn mul_single(&self, rhs: u32) -> Self {\n        Self([\n            self.0[0] * rhs,\n            self.0[1] * rhs,\n            self.0[2] * rhs,\n            self.0[3] * rhs,\n            self.0[4] * rhs,\n            self.0[5] * rhs,\n            self.0[6] * rhs,\n            self.0[7] * rhs,\n            self.0[8] * rhs,\n            self.0[9] * rhs,\n        ])\n    }\n\n    #[inline(always)]\n    fn mul_inner(&self, rhs: &Self) -> Self {\n        /*\n        `square()` is just `mul()` with equal arguments. Rust compiler is smart enough\n        to do all the necessary optimizations for this case, but it needs to have this information\n        inside a function. If a function is just *called* with the same arguments,\n        this information cannot be used, so the function must be inlined while using the same arguments.\n\n        Now `mul()` is quite long and therefore expensive to inline. 
So we have an inner (inlined)\n        function, that is used inside `mul()` and `square()`, and when it is used with the same\n        arguments in `square()`, compiler is able to use that fact after inlining.\n        */\n\n        let m = 0x3FFFFFFu64;\n        let rr0 = 0x3D10u64;\n        let rr1 = 0x400u64;\n\n        let a0 = self.0[0] as u64;\n        let a1 = self.0[1] as u64;\n        let a2 = self.0[2] as u64;\n        let a3 = self.0[3] as u64;\n        let a4 = self.0[4] as u64;\n        let a5 = self.0[5] as u64;\n        let a6 = self.0[6] as u64;\n        let a7 = self.0[7] as u64;\n        let a8 = self.0[8] as u64;\n        let a9 = self.0[9] as u64;\n\n        let b0 = rhs.0[0] as u64;\n        let b1 = rhs.0[1] as u64;\n        let b2 = rhs.0[2] as u64;\n        let b3 = rhs.0[3] as u64;\n        let b4 = rhs.0[4] as u64;\n        let b5 = rhs.0[5] as u64;\n        let b6 = rhs.0[6] as u64;\n        let b7 = rhs.0[7] as u64;\n        let b8 = rhs.0[8] as u64;\n        let b9 = rhs.0[9] as u64;\n\n        // [... a b c] is a shorthand for ... 
+ a<<52 + b<<26 + c<<0 mod n.\n        // for 0 <= x <= 9, px is a shorthand for sum(a[i]*b[x-i], i=0..x).\n        // for 9 <= x <= 18, px is a shorthand for sum(a[i]*b[x-i], i=(x-9)..9)\n        // Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*rr1 x*rr0].\n\n        let mut c: u64;\n        let mut d: u64;\n\n        d = a0 * b9\n            + a1 * b8\n            + a2 * b7\n            + a3 * b6\n            + a4 * b5\n            + a5 * b4\n            + a6 * b3\n            + a7 * b2\n            + a8 * b1\n            + a9 * b0;\n        // [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0]\n        let t9 = (d & m) as u32;\n        d >>= 26;\n        debug_assert!(t9 >> 26 == 0);\n        debug_assert!(d >> 38 == 0);\n        // [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0]\n\n        c = a0 * b0;\n        debug_assert!(c >> 60 == 0);\n        // [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0]\n        d +=\n            a1 * b9 + a2 * b8 + a3 * b7 + a4 * b6 + a5 * b5 + a6 * b4 + a7 * b3 + a8 * b2 + a9 * b1;\n        debug_assert!(d >> 63 == 0);\n        // [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0]\n        let u0 = (d & m) as u32;\n        d >>= 26;\n        c += u0 as u64 * rr0;\n        debug_assert!(u0 >> 26 == 0);\n        debug_assert!(d >> 37 == 0);\n        debug_assert!(c >> 61 == 0);\n        // [d u0 t9 0 0 0 0 0 0 0 0 c-u0*rr0] = [p10 p9 0 0 0 0 0 0 0 0 p0]\n        let t0 = (c & m) as u32;\n        c >>= 26;\n        c += u0 as u64 * rr1;\n        debug_assert!(t0 >> 26 == 0);\n        debug_assert!(c >> 37 == 0);\n        // [d u0 t9 0 0 0 0 0 0 0 c-u0*rr1 t0-u0*rr0] = [p10 p9 0 0 0 0 0 0 0 0 p0]\n        // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0]\n\n        c += a0 * b1 + a1 * b0;\n        debug_assert!(c >> 62 == 0);\n        // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0]\n        d += a2 * b9 + a3 * b8 + a4 * b7 + a5 * b6 + a6 * b5 + a7 * b4 + a8 * b3 + a9 * b2;\n        debug_assert!(d >> 63 == 0);\n 
       // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0]\n        let u1 = (d & m) as u32;\n        d >>= 26;\n        c += u1 as u64 * rr0;\n        debug_assert!(u1 >> 26 == 0);\n        debug_assert!(d >> 37 == 0);\n        debug_assert!(c >> 63 == 0);\n        // [d u1 0 t9 0 0 0 0 0 0 0 c-u1*rr0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0]\n        let t1 = (c & m) as u32;\n        c >>= 26;\n        c += u1 as u64 * rr1;\n        debug_assert!(t1 >> 26 == 0);\n        debug_assert!(c >> 38 == 0);\n        // [d u1 0 t9 0 0 0 0 0 0 c-u1*rr1 t1-u1*rr0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0]\n        // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0]\n\n        c += a0 * b2 + a1 * b1 + a2 * b0;\n        debug_assert!(c >> 62 == 0);\n        // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0]\n        d += a3 * b9 + a4 * b8 + a5 * b7 + a6 * b6 + a7 * b5 + a8 * b4 + a9 * b3;\n        debug_assert!(d >> 63 == 0);\n        // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0]\n        let u2 = (d & m) as u32;\n        d >>= 26;\n        c += u2 as u64 * rr0;\n        debug_assert!(u2 >> 26 == 0);\n        debug_assert!(d >> 37 == 0);\n        debug_assert!(c >> 63 == 0);\n        // [d u2 0 0 t9 0 0 0 0 0 0 c-u2*rr0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0]\n        let t2 = (c & m) as u32;\n        c >>= 26;\n        c += u2 as u64 * rr1;\n        debug_assert!(t2 >> 26 == 0);\n        debug_assert!(c >> 38 == 0);\n        // [d u2 0 0 t9 0 0 0 0 0 c-u2*rr1 t2-u2*rr0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0]\n        // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0]\n\n        c += a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0;\n        debug_assert!(c >> 63 == 0);\n        // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0]\n        d += a4 * b9 + a5 * b8 + a6 * b7 + a7 * b6 + a8 * b5 + a9 * b4;\n        debug_assert!(d >> 63 == 0);\n        // 
[d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0]\n        let u3 = (d & m) as u32;\n        d >>= 26;\n        c += u3 as u64 * rr0;\n        debug_assert!(u3 >> 26 == 0);\n        debug_assert!(d >> 37 == 0);\n        // [d u3 0 0 0 t9 0 0 0 0 0 c-u3*rr0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0]\n        let t3 = (c & m) as u32;\n        c >>= 26;\n        c += u3 as u64 * rr1;\n        debug_assert!(t3 >> 26 == 0);\n        debug_assert!(c >> 39 == 0);\n        // [d u3 0 0 0 t9 0 0 0 0 c-u3*rr1 t3-u3*rr0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0]\n        // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0]\n\n        c += a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0;\n        debug_assert!(c >> 63 == 0);\n        // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0]\n        d += a5 * b9 + a6 * b8 + a7 * b7 + a8 * b6 + a9 * b5;\n        debug_assert!(d >> 62 == 0);\n        // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0]\n        let u4 = (d & m) as u32;\n        d >>= 26;\n        c += u4 as u64 * rr0;\n        debug_assert!(u4 >> 26 == 0);\n        debug_assert!(d >> 36 == 0);\n        // [d u4 0 0 0 0 t9 0 0 0 0 c-u4*rr0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0]\n        let t4 = (c & m) as u32;\n        c >>= 26;\n        c += u4 as u64 * rr1;\n        debug_assert!(t4 >> 26 == 0);\n        debug_assert!(c >> 39 == 0);\n        // [d u4 0 0 0 0 t9 0 0 0 c-u4*rr1 t4-u4*rr0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0]\n        // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0]\n\n        c += a0 * b5 + a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1 + a5 * b0;\n        debug_assert!(c >> 63 == 0);\n        // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0]\n        d += a6 * b9 + a7 * 
b8 + a8 * b7 + a9 * b6;\n        debug_assert!(d >> 62 == 0);\n        // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0]\n        let u5 = (d & m) as u32;\n        d >>= 26;\n        c += u5 as u64 * rr0;\n        debug_assert!(u5 >> 26 == 0);\n        debug_assert!(d >> 36 == 0);\n        // [d u5 0 0 0 0 0 t9 0 0 0 c-u5*rr0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0]\n        let t5 = (c & m) as u32;\n        c >>= 26;\n        c += u5 as u64 * rr1;\n        debug_assert!(t5 >> 26 == 0);\n        debug_assert!(c >> 39 == 0);\n        // [d u5 0 0 0 0 0 t9 0 0 c-u5*rr1 t5-u5*rr0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0]\n        // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0]\n\n        c += a0 * b6 + a1 * b5 + a2 * b4 + a3 * b3 + a4 * b2 + a5 * b1 + a6 * b0;\n        debug_assert!(c >> 63 == 0);\n        // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0]\n        d += a7 * b9 + a8 * b8 + a9 * b7;\n        debug_assert!(d >> 61 == 0);\n        // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0]\n        let u6 = (d & m) as u32;\n        d >>= 26;\n        c += u6 as u64 * rr0;\n        debug_assert!(u6 >> 26 == 0);\n        debug_assert!(d >> 35 == 0);\n        // [d u6 0 0 0 0 0 0 t9 0 0 c-u6*rr0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0]\n        let t6 = (c & m) as u32;\n        c >>= 26;\n        c += u6 as u64 * rr1;\n        debug_assert!(t6 >> 26 == 0);\n        debug_assert!(c >> 39 == 0);\n        // [d u6 0 0 0 0 0 0 t9 0 c-u6*rr1 t6-u6*rr0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0]\n        // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0]\n\n        c += a0 * b7 + a1 
* b6 + a2 * b5 + a3 * b4 + a4 * b3 + a5 * b2 + a6 * b1 + a7 * b0;\n        debug_assert!(c <= 0x8000007C00000007u64);\n        // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0]\n        d += a8 * b9 + a9 * b8;\n        debug_assert!(d >> 58 == 0);\n        // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0]\n        let u7 = (d & m) as u32;\n        d >>= 26;\n        c += u7 as u64 * rr0;\n        debug_assert!(u7 >> 26 == 0);\n        debug_assert!(d >> 32 == 0);\n        let d32 = d as u32;\n        debug_assert!(c <= 0x800001703FFFC2F7u64);\n        // [d u7 0 0 0 0 0 0 0 t9 0 c-u7*rr0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0]\n        let t7 = (c & m) as u32;\n        c >>= 26;\n        c += u7 as u64 * rr1;\n        debug_assert!(t7 >> 26 == 0);\n        debug_assert!(c >> 38 == 0);\n        // [d u7 0 0 0 0 0 0 0 t9 c-u7*rr1 t7-u7*rr0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0]\n        // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0]\n\n        c +=\n            a0 * b8 + a1 * b7 + a2 * b6 + a3 * b5 + a4 * b4 + a5 * b3 + a6 * b2 + a7 * b1 + a8 * b0;\n        debug_assert!(c <= 0x9000007B80000008u64);\n        // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        d = d32 as u64 + a9 * b9;\n        debug_assert!(d >> 57 == 0);\n        // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let u8 = (d & m) as u32;\n        d >>= 26;\n        c += u8 as u64 * rr0;\n        debug_assert!(u8 >> 26 == 0);\n        debug_assert!(d >> 31 == 0);\n        let d32 = d as u32;\n        debug_assert!(c <= 0x9000016FBFFFC2F8u64);\n        // [d u8 0 0 
0 0 0 0 0 0 t9 c-u8*rr0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n\n        let r3 = t3;\n        debug_assert!(r3 >> 26 == 0);\n        // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*rr0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r4 = t4;\n        debug_assert!(r4 >> 26 == 0);\n        // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*rr0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r5 = t5;\n        debug_assert!(r5 >> 26 == 0);\n        // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*rr0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r6 = t6;\n        debug_assert!(r6 >> 26 == 0);\n        // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*rr0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r7 = t7;\n        debug_assert!(r7 >> 26 == 0);\n        // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*rr0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n\n        let r8 = (c & m) as u32;\n        c >>= 26;\n        c += u8 as u64 * rr1;\n        debug_assert!(r8 >> 26 == 0);\n        debug_assert!(c >> 39 == 0);\n        // [d u8 0 0 0 0 0 0 0 0 t9+c-u8*rr1 r8-u8*rr0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        // [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        c += d32 as u64 * rr0 + t9 as u64;\n        debug_assert!(c >> 45 == 0);\n        // [d 0 0 0 0 0 0 0 0 0 c-d*rr0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r9 = (c & (m >> 4)) as u32;\n        c >>= 22;\n        c += d * (rr1 << 4);\n        debug_assert!(r9 >> 22 == 0);\n        debug_assert!(c >> 46 == 0);\n        // [d 0 0 0 0 0 0 0 0 
r9+((c-d*rr1<<4)<<22)-d*rr0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        // [d 0 0 0 0 0 0 0 -d*rr1 r9+(c<<22)-d*rr0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n\n        d = c * (rr0 >> 4) + t0 as u64;\n        debug_assert!(d >> 56 == 0);\n        // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*rr0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r0 = (d & m) as u32;\n        d >>= 26;\n        debug_assert!(r0 >> 26 == 0);\n        debug_assert!(d >> 30 == 0);\n        let d32 = d as u32;\n        // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*rr0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        d = d32 as u64 + c * (rr1 >> 4) + t1 as u64;\n        debug_assert!(d >> 53 == 0);\n        debug_assert!(d <= 0x10000003FFFFBFu64);\n        // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*rr1>>4 r0-c*rr0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        // [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r1 = (d & m) as u32;\n        d >>= 26;\n        debug_assert!(r1 >> 26 == 0);\n        debug_assert!(d >> 27 == 0);\n        let d32 = d as u32;\n        debug_assert!(d <= 0x4000000u64);\n        // [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        d = d32 as u64 + t2 as u64;\n        debug_assert!(d >> 27 == 0);\n        // [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n        let r2 = d as u32;\n        debug_assert!(r2 >> 27 == 0);\n        // [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0]\n\n        
Self([r0, r1, r2, r3, r4, r5, r6, r7, r8, r9])\n    }\n\n    /// Returns self * rhs mod p\n    /// Brings the magnitude to 1 (but doesn't normalize the result).\n    /// The magnitudes of arguments should be <= 8.\n    pub fn mul(&self, rhs: &Self) -> Self {\n        self.mul_inner(rhs)\n    }\n\n    /// Returns self * self\n    /// Brings the magnitude to 1 (but doesn't normalize the result).\n    /// The magnitudes of arguments should be <= 8.\n    pub fn square(&self) -> Self {\n        self.mul_inner(self)\n    }\n\n    pub fn pow2k(&self, k: u32) -> Self {\n        let mut x = *self;\n        for _j in 0..k {\n            x = x.square();\n        }\n        x\n    }\n\n    /// Returns the multiplicative inverse of self, if self is non-zero.\n    /// The result has magnitude 1, but is not normalized.\n    pub fn invert(&self) -> Self {\n        let x2 = self.pow2k(1).mul(self);\n        let x3 = x2.pow2k(1).mul(self);\n        let x6 = x3.pow2k(3).mul(&x3);\n        let x9 = x6.pow2k(3).mul(&x3);\n        let x11 = x9.pow2k(2).mul(&x2);\n        let x22 = x11.pow2k(11).mul(&x11);\n        let x44 = x22.pow2k(22).mul(&x22);\n        let x88 = x44.pow2k(44).mul(&x44);\n        let x176 = x88.pow2k(88).mul(&x88);\n        let x220 = x176.pow2k(44).mul(&x44);\n        let x223 = x220.pow2k(3).mul(&x3);\n\n        // The final result is then assembled using a sliding window over the blocks.\n        x223.pow2k(23)\n            .mul(&x22)\n            .pow2k(5)\n            .mul(self)\n            .pow2k(3)\n            .mul(&x2)\n            .pow2k(2)\n            .mul(self)\n    }\n\n    /// Returns the square root of self mod p, or `None` if no square root exists.\n    /// The result has magnitude 1, but is not normalized.\n    pub fn sqrt(&self) -> CtOption<Self> {\n        let x2 = self.pow2k(1).mul(self);\n        let x3 = x2.pow2k(1).mul(self);\n        let x6 = x3.pow2k(3).mul(&x3);\n        let x9 = x6.pow2k(3).mul(&x3);\n        let x11 = 
x9.pow2k(2).mul(&x2);\n        let x22 = x11.pow2k(11).mul(&x11);\n        let x44 = x22.pow2k(22).mul(&x22);\n        let x88 = x44.pow2k(44).mul(&x44);\n        let x176 = x88.pow2k(88).mul(&x88);\n        let x220 = x176.pow2k(44).mul(&x44);\n        let x223 = x220.pow2k(3).mul(&x3);\n\n        // The final result is then assembled using a sliding window over the blocks.\n        let res = x223.pow2k(23).mul(&x22).pow2k(6).mul(&x2).pow2k(2);\n\n        let is_root = (res.mul(&res).negate(1).add(self)).normalizes_to_zero();\n\n        // Only return Some if it's the square root.\n        CtOption::new(res, is_root)\n    }\n}\n\nimpl Default for FieldElement10x26 {\n    fn default() -> Self {\n        Self::ZERO\n    }\n}\n\nimpl ConditionallySelectable for FieldElement10x26 {\n    #[inline(always)]\n    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {\n        Self([\n            u32::conditional_select(&a.0[0], &b.0[0], choice),\n            u32::conditional_select(&a.0[1], &b.0[1], choice),\n            u32::conditional_select(&a.0[2], &b.0[2], choice),\n            u32::conditional_select(&a.0[3], &b.0[3], choice),\n            u32::conditional_select(&a.0[4], &b.0[4], choice),\n            u32::conditional_select(&a.0[5], &b.0[5], choice),\n            u32::conditional_select(&a.0[6], &b.0[6], choice),\n            u32::conditional_select(&a.0[7], &b.0[7], choice),\n            u32::conditional_select(&a.0[8], &b.0[8], choice),\n            u32::conditional_select(&a.0[9], &b.0[9], choice),\n        ])\n    }\n}\n\nimpl ConstantTimeEq for FieldElement10x26 {\n    fn ct_eq(&self, other: &Self) -> Choice {\n        self.0[0].ct_eq(&other.0[0])\n            & self.0[1].ct_eq(&other.0[1])\n            & self.0[2].ct_eq(&other.0[2])\n            & self.0[3].ct_eq(&other.0[3])\n            & self.0[4].ct_eq(&other.0[4])\n            & self.0[5].ct_eq(&other.0[5])\n            & self.0[6].ct_eq(&other.0[6])\n            & 
self.0[7].ct_eq(&other.0[7])\n            & self.0[8].ct_eq(&other.0[8])\n            & self.0[9].ct_eq(&other.0[9])\n    }\n}\n\nimpl Zeroize for FieldElement10x26 {\n    fn zeroize(&mut self) {\n        self.0.zeroize();\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-circuit/src/lib.rs",
    "content": "#![cfg_attr(feature = \"tco\", allow(internal_features))]\n#![cfg_attr(feature = \"tco\", allow(incomplete_features))]\n#![cfg_attr(feature = \"tco\", feature(explicit_tail_calls))]\n#![cfg_attr(feature = \"tco\", feature(core_intrinsics))]\n\nuse openvm_circuit::arch::{\n    AirInventory, AirInventoryError, ChipInventory, ChipInventoryError, ExecutorInventoryBuilder,\n    ExecutorInventoryError, VmCircuitExtension, VmExecutionExtension, VmProverExtension,\n};\nuse openvm_circuit::derive::{\n    AnyEnum, AotExecutor, AotMeteredExecutor, Executor, MeteredExecutor, PreflightExecutor,\n};\nuse openvm_circuit::system::phantom::PhantomExecutor;\nuse openvm_instructions::PhantomDiscriminant;\nuse openvm_stark_backend::config::{StarkGenericConfig, Val};\nuse openvm_stark_backend::p3_field::{Field, PrimeField32};\nuse openvm_stark_sdk::engine::StarkEngine;\nuse powdr_openvm_riscv_hints_transpiler::HintsPhantom;\nuse serde::{Deserialize, Serialize};\n\n// this module is mostly copy/pasted code from k256 for the field element representation in 32-bit architectures\nmod executors;\nmod field10x26_k256;\n\n/// OpenVM extension with miscellaneous hint implementations.\n#[derive(Clone, Serialize, Deserialize, Debug)]\npub struct HintsExtension;\n\n#[derive(\n    AnyEnum, PreflightExecutor, Executor, MeteredExecutor, AotExecutor, AotMeteredExecutor, Clone,\n)]\npub enum HintsExtensionExecutor<F: Field> {\n    Phantom(PhantomExecutor<F>),\n}\n\nimpl<F: PrimeField32> VmExecutionExtension<F> for HintsExtension {\n    type Executor = HintsExtensionExecutor<F>;\n\n    fn extend_execution(\n        &self,\n        inventory: &mut ExecutorInventoryBuilder<F, Self::Executor>,\n    ) -> Result<(), ExecutorInventoryError> {\n        inventory.add_phantom_sub_executor(\n            executors::ReverseBytesSubEx,\n            PhantomDiscriminant(HintsPhantom::HintReverseBytes as u16),\n        )?;\n        inventory.add_phantom_sub_executor(\n            
executors::K256InverseFieldSubEx,\n            PhantomDiscriminant(HintsPhantom::HintK256InverseField as u16),\n        )?;\n        inventory.add_phantom_sub_executor(\n            executors::K256InverseField10x26SubEx,\n            PhantomDiscriminant(HintsPhantom::HintK256InverseField10x26 as u16),\n        )?;\n        inventory.add_phantom_sub_executor(\n            executors::K256SqrtField10x26SubEx,\n            PhantomDiscriminant(HintsPhantom::HintK256SqrtField10x26 as u16),\n        )?;\n        Ok(())\n    }\n}\n\nimpl<SC: StarkGenericConfig> VmCircuitExtension<SC> for HintsExtension {\n    fn extend_circuit(&self, _: &mut AirInventory<SC>) -> Result<(), AirInventoryError> {\n        Ok(())\n    }\n}\n\npub struct HintsProverExt;\n\nimpl<E, RA> VmProverExtension<E, RA, HintsExtension> for HintsProverExt\nwhere\n    E: StarkEngine,\n    Val<E::SC>: PrimeField32,\n{\n    fn extend_prover(\n        &self,\n        _: &HintsExtension,\n        _: &mut ChipInventory<E::SC, RA, E::PB>,\n    ) -> Result<(), ChipInventoryError> {\n        // No chips to add for hints\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-guest/Cargo.toml",
    "content": "[package]\nname = \"powdr-openvm-riscv-hints-guest\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[target.'cfg(target_os = \"zkvm\")'.dependencies]\nopenvm-platform = { workspace = true, features = [\"rust-runtime\"] }\nopenvm-rv32im-guest.workspace = true\nopenvm-custom-insn.workspace = true\n\n[dependencies]\nstrum_macros = \"0.27\"\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-guest/src/lib.rs",
    "content": "#![no_std]\n#[cfg(target_os = \"zkvm\")]\nuse openvm_custom_insn; // needed for the hint_store_u32 macro\nuse strum_macros::FromRepr;\n\n/// This is custom-2 defined in RISC-V spec document\npub const OPCODE: u8 = 0x5b;\npub const HINTS_FUNCT3: u8 = 0b000;\n\n#[derive(Debug, Copy, Clone, PartialEq, Eq, FromRepr)]\n#[repr(u8)]\npub enum HintsFunct7 {\n    ReverseBytes = 0,\n    K256InverseField,\n    K256InverseField10x26,\n    K256SqrtField10x26,\n}\n\n#[cfg(target_os = \"zkvm\")]\n#[inline(always)]\nfn insn_reverse_bytes(bytes: *const u8) {\n    openvm_platform::custom_insn_r!(\n        opcode = OPCODE,\n        funct3 = HINTS_FUNCT3,\n        funct7 = HintsFunct7::ReverseBytes as u8,\n        rd = Const \"x0\",\n        rs1 = In bytes,\n        rs2 = Const \"x0\"\n    );\n}\n\n#[cfg(target_os = \"zkvm\")]\n#[inline(always)]\nfn insn_k256_inverse_field(bytes: *const u8) {\n    openvm_platform::custom_insn_r!(\n        opcode = OPCODE,\n        funct3 = HINTS_FUNCT3,\n        funct7 = HintsFunct7::K256InverseField as u8,\n        rd = Const \"x0\",\n        rs1 = In bytes,\n        rs2 = Const \"x0\"\n    );\n}\n\n#[cfg(target_os = \"zkvm\")]\n#[inline(always)]\nfn insn_k256_inverse_field_10x26(bytes: *const u8) {\n    openvm_platform::custom_insn_r!(\n        opcode = OPCODE,\n        funct3 = HINTS_FUNCT3,\n        funct7 = HintsFunct7::K256InverseField10x26 as u8,\n        rd = Const \"x0\",\n        rs1 = In bytes,\n        rs2 = Const \"x0\",\n    );\n}\n\n#[cfg(target_os = \"zkvm\")]\n#[inline(always)]\nfn insn_k256_sqrt_field_10x26(bytes: *const u8) {\n    openvm_platform::custom_insn_r!(\n        opcode = OPCODE,\n        funct3 = HINTS_FUNCT3,\n        funct7 = HintsFunct7::K256SqrtField10x26 as u8,\n        rd = Const \"x0\",\n        rs1 = In bytes,\n        rs2 = Const \"x0\",\n    );\n}\n\n/// Just an example hint that reverses the bytes of a u32 value.\npub fn hint_reverse_bytes(val: u32) -> u32 {\n    #[cfg(target_os = \"zkvm\")]\n    
{\n        let result = core::mem::MaybeUninit::<u32>::uninit();\n        insn_reverse_bytes(&val as *const u32 as *const u8);\n        unsafe {\n            openvm_rv32im_guest::hint_store_u32!(result.as_ptr() as *const u32);\n            result.assume_init()\n        }\n    }\n    #[cfg(not(target_os = \"zkvm\"))]\n    {\n        ((val & 0x000000FF) << 24)\n            | ((val & 0x0000FF00) << 8)\n            | ((val & 0x00FF0000) >> 8)\n            | ((val & 0xFF000000) >> 24)\n    }\n}\n\n/// Inverse of field element in SECP256k1 modulus (if not zero).\n/// The caller is responsible for handling the zero input case, and the returned value is zero in that case.\n#[cfg(target_os = \"zkvm\")]\npub fn hint_k256_inverse_field(sec1_bytes: &[u8]) -> [u8; 32] {\n    insn_k256_inverse_field(sec1_bytes.as_ptr() as *const u8);\n    let inverse = core::mem::MaybeUninit::<[u8; 32]>::uninit();\n    unsafe {\n        openvm_rv32im_guest::hint_buffer_u32!(inverse.as_ptr() as *const u8, 8);\n        inverse.assume_init()\n    }\n}\n\n/// Ensures that the 10 limbs are weakly normalized (i.e., the most significant limb is 22 bits and the others are 26 bits).\n/// For an honest prover, this is a no-op.\n#[cfg(target_os = \"zkvm\")]\nfn ensure_weakly_normalized_10x26(limbs: [u32; 10]) -> [u32; 10] {\n    [\n        limbs[0] & 0x3ffffff,\n        limbs[1] & 0x3ffffff,\n        limbs[2] & 0x3ffffff,\n        limbs[3] & 0x3ffffff,\n        limbs[4] & 0x3ffffff,\n        limbs[5] & 0x3ffffff,\n        limbs[6] & 0x3ffffff,\n        limbs[7] & 0x3ffffff,\n        limbs[8] & 0x3ffffff,\n        limbs[9] & 0x3fffff,\n    ]\n}\n\n/// Inverse of field element in SECP256k1 modulus (if not zero).\n/// Takes in the raw 32-bit architecture representation of the field element from k256 (`FieldElement10x26`).\n/// It is guaranteed to be weakly normalized, i.e., the most significant limb is 22 bits and the other\n/// limbs are 26 bits long.\n/// The caller is responsible for handling the zero 
input case, and the returned value is undefined in that case.\n#[cfg(target_os = \"zkvm\")]\npub fn hint_k256_inverse_field_10x26(elem: [u32; 10]) -> [u32; 10] {\n    insn_k256_inverse_field_10x26(elem.as_ptr() as *const u8);\n    let inverse = core::mem::MaybeUninit::<[u32; 10]>::uninit();\n    let inverse = unsafe {\n        openvm_rv32im_guest::hint_buffer_u32!(inverse.as_ptr() as *const u8, 10);\n        inverse.assume_init()\n    };\n    ensure_weakly_normalized_10x26(inverse)\n}\n\n/// Pre-defined non-quadratic residue for k256.\n/// The guest should use this value to prove the non-square case.\npub const K256_NON_QUADRATIC_RESIDUE: [u32; 10] = [3, 0, 0, 0, 0, 0, 0, 0, 0, 0];\n\n/// If the input is square, returns true and the square root in the same representation.\n/// It is guaranteed to be weakly normalized, i.e., the most significant limb is 22 bits and the other\n/// limbs are 26 bits long.\n/// If the input is non-square, returns false and the square root of the element times a pre-defined non-quadratic residue.\n#[cfg(target_os = \"zkvm\")]\npub fn hint_k256_sqrt_field_10x26(elem: [u32; 10]) -> (bool, [u32; 10]) {\n    insn_k256_sqrt_field_10x26(elem.as_ptr() as *const u8);\n    // read the \"boolean\" result\n    let has_sqrt = unsafe {\n        let has_sqrt = core::mem::MaybeUninit::<u32>::uninit();\n        openvm_rv32im_guest::hint_store_u32!(has_sqrt.as_ptr() as *const u32);\n        has_sqrt.assume_init() != 0\n    };\n    // read the square root value\n    let sqrt = unsafe {\n        let sqrt = core::mem::MaybeUninit::<[u32; 10]>::uninit();\n        openvm_rv32im_guest::hint_buffer_u32!(sqrt.as_ptr() as *const u8, 10);\n        sqrt.assume_init()\n    };\n    let sqrt = ensure_weakly_normalized_10x26(sqrt);\n    (has_sqrt, sqrt)\n}\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-transpiler/Cargo.toml",
    "content": "[package]\nname = \"powdr-openvm-riscv-hints-transpiler\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[dependencies]\nopenvm-stark-backend = { workspace = true }\nopenvm-instructions = { workspace = true }\nopenvm-transpiler = { workspace = true }\nopenvm-instructions-derive = { workspace = true }\nrrs-lib = \"0.1.0\"\nstrum = { version = \"0.27\", features = [\"derive\"] }\n\npowdr-openvm-riscv-hints-guest = { workspace = true }\n"
  },
  {
    "path": "openvm-riscv/extensions/hints-transpiler/src/lib.rs",
    "content": "use openvm_instructions::{\n    instruction::Instruction, riscv::RV32_REGISTER_NUM_LIMBS, LocalOpcode, PhantomDiscriminant,\n};\nuse openvm_instructions_derive::LocalOpcode;\nuse openvm_stark_backend::p3_field::PrimeField32;\nuse openvm_transpiler::{TranspilerExtension, TranspilerOutput};\nuse powdr_openvm_riscv_hints_guest::{HintsFunct7, HINTS_FUNCT3, OPCODE};\nuse rrs_lib::instruction_formats::RType;\nuse strum::{EnumCount, EnumIter, FromRepr};\n\n#[derive(\n    Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, EnumCount, EnumIter, FromRepr, LocalOpcode,\n)]\n#[opcode_offset = 0x800]\n#[repr(usize)]\npub enum HintsOpcode {\n    HINTS,\n}\n\n#[derive(Copy, Clone, Debug, PartialEq, Eq, FromRepr)]\n#[repr(u16)]\npub enum HintsPhantom {\n    // idk if there is a \"proper\" way for avoiding conflicts in this number,\n    // just looked at ovm code and picked the next range that didn't seem to be\n    // used\n    HintReverseBytes = 0x60,\n    HintK256InverseField = 0x61,\n    HintK256InverseField10x26 = 0x62,\n    HintK256SqrtField10x26 = 0x63,\n}\n\n#[derive(Default)]\npub struct HintsTranspilerExtension;\n\nimpl<F: PrimeField32> TranspilerExtension<F> for HintsTranspilerExtension {\n    fn process_custom(&self, instruction_stream: &[u32]) -> Option<TranspilerOutput<F>> {\n        if instruction_stream.is_empty() {\n            return None;\n        }\n        let instruction_u32 = instruction_stream[0];\n        let opcode = (instruction_u32 & 0x7f) as u8;\n        if opcode != OPCODE {\n            return None;\n        }\n\n        let insn = RType::new(instruction_u32);\n        if insn.funct3 as u8 != HINTS_FUNCT3 {\n            return None;\n        }\n\n        let funct7 = HintsFunct7::from_repr(insn.funct7 as u8)?;\n        let disc = match funct7 {\n            HintsFunct7::ReverseBytes => HintsPhantom::HintReverseBytes,\n            HintsFunct7::K256InverseField => HintsPhantom::HintK256InverseField,\n            
HintsFunct7::K256InverseField10x26 => HintsPhantom::HintK256InverseField10x26,\n            HintsFunct7::K256SqrtField10x26 => HintsPhantom::HintK256SqrtField10x26,\n        };\n\n        let instruction = Instruction::phantom(\n            PhantomDiscriminant(disc as u16),\n            F::from_canonical_usize(RV32_REGISTER_NUM_LIMBS * insn.rs1),\n            F::ZERO,\n            0,\n        );\n\n        Some(TranspilerOutput::one_to_one(instruction))\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"powdr-openvm-guest-stdin-test\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\n\n[profile.release-with-debug]\ninherits = \"release\"\ndebug = true\n"
  },
  {
    "path": "openvm-riscv/guest/src/main.rs",
    "content": "#![cfg_attr(target_os = \"zkvm\", no_main)]\n#![cfg_attr(target_os = \"zkvm\", no_std)]\n\nopenvm::entry!(main);\n\nuse openvm::io::{read, reveal_u32};\n\npub fn main() {\n    let n: u32 = read();\n    let mut a: u32 = 0;\n    let mut b: u32 = 1;\n    for _ in 1..n {\n        let sum = a + b;\n        a = b;\n        b = sum;\n    }\n    if a == 0 {\n        panic!();\n    }\n\n    reveal_u32(a, 0);\n}\n"
  },
  {
    "path": "openvm-riscv/guest-ecc-manual/Cargo.toml",
    "content": "\n[workspace]\n[package]\nname = \"openvm-ecc-test-programs\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\nopenvm-ecc-guest = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false }\nopenvm-algebra-guest = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false }\nopenvm-k256 = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", package = \"k256\", features = [\n  \"ecdsa\",\n] }\n\nhex-literal = { version = \"0.4.1\", default-features = false }\n\n"
  },
  {
    "path": "openvm-riscv/guest-ecc-manual/openvm.toml",
    "content": "[app_vm_config.rv32i]\n[app_vm_config.rv32m]\n[app_vm_config.io]\n\n[app_vm_config.modular]\nsupported_moduli = [\n    \"115792089237316195423570985008687907853269984665640564039457584007908834671663\",\n    \"115792089237316195423570985008687907852837564279074904382605163141518161494337\",\n]\n\n[[app_vm_config.ecc.supported_curves]]\nstruct_name = \"Secp256k1Point\"\nmodulus = \"115792089237316195423570985008687907853269984665640564039457584007908834671663\"\nscalar = \"115792089237316195423570985008687907852837564279074904382605163141518161494337\"\na = \"0\"\nb = \"7\""
  },
  {
    "path": "openvm-riscv/guest-ecc-manual/src/main.rs",
    "content": "use hex_literal::hex;\nuse openvm::io::read;\nuse openvm_algebra_guest::IntMod;\nuse openvm_ecc_guest::{weierstrass::IntrinsicCurve, weierstrass::WeierstrassPoint};\nuse openvm_k256::{Secp256k1, Secp256k1Coord, Secp256k1Point, Secp256k1Scalar};\n\nopenvm::init!();\n\nopenvm::entry!(main);\n\npub fn main() {\n    let x1 = Secp256k1Coord::from_be_bytes(&[\n        177, 205, 72, 85, 29, 179, 168, 198, 125, 68, 123, 98, 49, 165, 115, 23, 117, 100, 184, 12,\n        125, 99, 103, 18, 245, 130, 15, 91, 76, 105, 85, 20,\n    ])\n    .expect(\"\");\n    let y1 = Secp256k1Coord::from_be_bytes(&[\n        219, 130, 184, 163, 86, 144, 60, 160, 181, 38, 124, 67, 141, 79, 174, 63, 60, 188, 208,\n        206, 139, 94, 72, 251, 222, 58, 13, 159, 189, 75, 97, 12,\n    ])\n    .expect(\"\");\n    let x2 = Secp256k1Coord::from_be_bytes(&[\n        146, 161, 155, 83, 76, 248, 129, 31, 87, 66, 55, 228, 112, 251, 3, 121, 113, 60, 97, 168,\n        52, 94, 83, 10, 224, 229, 14, 231, 182, 207, 33, 28,\n    ])\n    .expect(\"\");\n    let y2 = Secp256k1Coord::from_be_bytes(&[\n        163, 84, 112, 69, 78, 54, 106, 228, 95, 24, 73, 7, 216, 178, 14, 141, 200, 150, 92, 72, 29,\n        246, 91, 179, 165, 11, 29, 36, 68, 96, 135, 19,\n    ])\n    .expect(\"\");\n\n    let p1 = Secp256k1Point::from_xy(x1, y1).unwrap();\n    let p2 = Secp256k1Point::from_xy(x2, y2).unwrap();\n\n    let scalar_1 = Secp256k1Scalar::from_be_bytes(&hex!(\n        \"BFD5D7FA526B6954945C980C6C804E0E19840F2DA009C8B0C9A511189FB466BF\"\n    ))\n    .expect(\"\");\n    let scalar_2 = Secp256k1Scalar::from_be_bytes(&hex!(\n        \"369E07A2FC32462DD74AB67CE7D7595EC91FC11CC90A3C15A94B57A21E878614\"\n    ))\n    .expect(\"\");\n\n    let result_x = Secp256k1Coord::from_be_bytes(&[\n        112, 170, 75, 207, 229, 212, 237, 2, 131, 65, 143, 232, 168, 46, 48, 240, 56, 164, 245,\n        167, 23, 29, 43, 132, 130, 181, 145, 207, 3, 49, 25, 48,\n    ])\n    .expect(\"\");\n    let result_y = 
Secp256k1Coord::from_be_bytes(&[\n        225, 222, 233, 182, 14, 157, 47, 22, 177, 249, 107, 145, 57, 77, 133, 68, 6, 102, 101, 78,\n        5, 249, 10, 81, 202, 112, 204, 76, 117, 7, 231, 160,\n    ])\n    .expect(\"\");\n    let mut result = <Secp256k1 as IntrinsicCurve>::msm(&[scalar_1, scalar_2], &[p1, p2]);\n\n    assert_eq!(result.x(), &result_x);\n    assert_eq!(result.y(), &result_y);\n\n    // Benchmark\n    let n: u32 = read();\n    for _ in 0..n {\n        result = <Secp256k1 as IntrinsicCurve>::msm(&[scalar_1, scalar_2], &[result, result]);\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest-ecc-powdr-affine-hint/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"openvm-ecc-powdr-affine-hint\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\nk256 = { git = \"https://github.com/powdr-labs/elliptic-curves-k256\", rev = \"a48ad5c\", default-features = false, features = [\n  \"expose-field\",\n  \"arithmetic\",\n] }\nhex-literal = \"1.0.0\"\n"
  },
  {
    "path": "openvm-riscv/guest-ecc-powdr-affine-hint/src/main.rs",
    "content": "use hex_literal::hex;\nuse k256::elliptic_curve::sec1::FromEncodedPoint;\nuse k256::elliptic_curve::PrimeField;\nuse k256::PowdrAffinePoint;\nuse k256::{AffinePoint, EncodedPoint, FieldBytes, FieldElement, Scalar};\nuse openvm::io::read;\n\nopenvm::entry!(main);\n\npub fn main() {\n    let x1 = &FieldBytes::from_slice(&[\n        177, 205, 72, 85, 29, 179, 168, 198, 125, 68, 123, 98, 49, 165, 115, 23, 117, 100, 184, 12,\n        125, 99, 103, 18, 245, 130, 15, 91, 76, 105, 85, 20,\n    ]);\n    let y1 = &FieldBytes::from_slice(&[\n        219, 130, 184, 163, 86, 144, 60, 160, 181, 38, 124, 67, 141, 79, 174, 63, 60, 188, 208,\n        206, 139, 94, 72, 251, 222, 58, 13, 159, 189, 75, 97, 12,\n    ]);\n\n    let x2 = &FieldBytes::from_slice(&[\n        146, 161, 155, 83, 76, 248, 129, 31, 87, 66, 55, 228, 112, 251, 3, 121, 113, 60, 97, 168,\n        52, 94, 83, 10, 224, 229, 14, 231, 182, 207, 33, 28,\n    ]);\n    let y2 = &FieldBytes::from_slice(&[\n        163, 84, 112, 69, 78, 54, 106, 228, 95, 24, 73, 7, 216, 178, 14, 141, 200, 150, 92, 72, 29,\n        246, 91, 179, 165, 11, 29, 36, 68, 96, 135, 19,\n    ]);\n\n    let point1 = PowdrAffinePoint(\n        AffinePoint::from_encoded_point(&EncodedPoint::from_affine_coordinates(x1, y1, false))\n            .expect(\"AffinePoint should be valid\"),\n    );\n    let point2 = PowdrAffinePoint(\n        AffinePoint::from_encoded_point(&EncodedPoint::from_affine_coordinates(x2, y2, false))\n            .expect(\"AffinePoint should be valid\"),\n    );\n\n    let result_x: FieldElement = FieldElement::from_bytes(FieldBytes::from_slice(&[\n        112, 170, 75, 207, 229, 212, 237, 2, 131, 65, 143, 232, 168, 46, 48, 240, 56, 164, 245,\n        167, 23, 29, 43, 132, 130, 181, 145, 207, 3, 49, 25, 48,\n    ]))\n    .unwrap()\n    .normalize();\n\n    let result_y: FieldElement = FieldElement::from_bytes(FieldBytes::from_slice(&[\n        225, 222, 233, 182, 14, 157, 47, 22, 177, 249, 107, 145, 57, 77, 133, 
68, 6, 102, 101, 78,\n        5, 249, 10, 81, 202, 112, 204, 76, 117, 7, 231, 160,\n    ]))\n    .unwrap()\n    .normalize();\n\n    let scalar_1 = Scalar::from_repr(*FieldBytes::from_slice(&hex!(\n        \"BFD5D7FA526B6954945C980C6C804E0E19840F2DA009C8B0C9A511189FB466BF\"\n    )))\n    .unwrap();\n\n    let scalar_2 = Scalar::from_repr(*FieldBytes::from_slice(&hex!(\n        \"369E07A2FC32462DD74AB67CE7D7595EC91FC11CC90A3C15A94B57A21E878614\"\n    )))\n    .unwrap();\n\n    // Multi scalar multiplication\n    let mut result = PowdrAffinePoint::lincomb(&[(point1, scalar_1), (point2, scalar_2)]);\n    assert_eq!(result.x().normalize(), result_x);\n    assert_eq!(result.y().normalize(), result_y);\n\n    // Benchmark\n    let n: u32 = read();\n    for _ in 0..n {\n        result =\n            PowdrAffinePoint::lincomb(&[(result.clone(), scalar_1), (result.clone(), scalar_2)]);\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest-ecc-projective/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"openvm-ecc-test-programs\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\nk256 = { version = \"0.13\", default-features = false, features = [\"arithmetic\"] }\nhex-literal = \"1.0.0\"\n\n"
  },
  {
    "path": "openvm-riscv/guest-ecc-projective/src/main.rs",
    "content": "use hex_literal::hex;\nuse k256::elliptic_curve::ops::LinearCombination;\nuse k256::elliptic_curve::sec1::FromEncodedPoint;\nuse k256::elliptic_curve::PrimeField;\nuse k256::{AffinePoint, EncodedPoint, FieldBytes, ProjectivePoint, Scalar};\nuse openvm::io::read;\n\nopenvm::entry!(main);\n\npub fn main() {\n    let x1 = FieldBytes::from_slice(&[\n        177, 205, 72, 85, 29, 179, 168, 198, 125, 68, 123, 98, 49, 165, 115, 23, 117, 100, 184, 12,\n        125, 99, 103, 18, 245, 130, 15, 91, 76, 105, 85, 20,\n    ]);\n\n    let y1 = FieldBytes::from_slice(&[\n        219, 130, 184, 163, 86, 144, 60, 160, 181, 38, 124, 67, 141, 79, 174, 63, 60, 188, 208,\n        206, 139, 94, 72, 251, 222, 58, 13, 159, 189, 75, 97, 12,\n    ]);\n\n    let x2 = FieldBytes::from_slice(&[\n        146, 161, 155, 83, 76, 248, 129, 31, 87, 66, 55, 228, 112, 251, 3, 121, 113, 60, 97, 168,\n        52, 94, 83, 10, 224, 229, 14, 231, 182, 207, 33, 28,\n    ]);\n\n    let y2 = FieldBytes::from_slice(&[\n        163, 84, 112, 69, 78, 54, 106, 228, 95, 24, 73, 7, 216, 178, 14, 141, 200, 150, 92, 72, 29,\n        246, 91, 179, 165, 11, 29, 36, 68, 96, 135, 19,\n    ]);\n\n    let point1 =\n        AffinePoint::from_encoded_point(&EncodedPoint::from_affine_coordinates(x1, y1, false))\n            .expect(\"AffinePoint should be valid\");\n    let point2 =\n        AffinePoint::from_encoded_point(&EncodedPoint::from_affine_coordinates(x2, y2, false))\n            .expect(\"AffinePoint should be valid\");\n\n    let a = ProjectivePoint::from(point1);\n    let b = ProjectivePoint::from(point2);\n\n    let scalar_1 = Scalar::from_repr(*FieldBytes::from_slice(\n        hex!(\"BFD5D7FA526B6954945C980C6C804E0E19840F2DA009C8B0C9A511189FB466BF\").as_ref(),\n    ))\n    .unwrap();\n\n    let scalar_2 = Scalar::from_repr(*FieldBytes::from_slice(\n        hex!(\"369E07A2FC32462DD74AB67CE7D7595EC91FC11CC90A3C15A94B57A21E878614\").as_ref(),\n    ))\n    .unwrap();\n\n    let result_x = 
FieldBytes::from_slice(&[\n        112, 170, 75, 207, 229, 212, 237, 2, 131, 65, 143, 232, 168, 46, 48, 240, 56, 164, 245,\n        167, 23, 29, 43, 132, 130, 181, 145, 207, 3, 49, 25, 48,\n    ]);\n    let result_y = FieldBytes::from_slice(&[\n        225, 222, 233, 182, 14, 157, 47, 22, 177, 249, 107, 145, 57, 77, 133, 68, 6, 102, 101, 78,\n        5, 249, 10, 81, 202, 112, 204, 76, 117, 7, 231, 160,\n    ]);\n\n    let result_point = AffinePoint::from_encoded_point(&EncodedPoint::from_affine_coordinates(\n        result_x, result_y, false,\n    ))\n    .expect(\"AffinePoint should be valid\");\n\n    let mut result = ProjectivePoint::lincomb(&a, &scalar_1, &b, &scalar_2);\n    assert_eq!(result.to_affine(), result_point);\n\n    // Benchmark\n    let n: u32 = read();\n    for _ in 0..n {\n        result = ProjectivePoint::lincomb(&result, &scalar_1, &result, &scalar_2);\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest-ecrecover/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"openvm-k256-ecrecover-programs\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\nk256 = { git = \"https://github.com/powdr-labs/elliptic-curves-k256\", rev = \"a48ad5c\", default-features = false, features = [\n  \"expose-field\",\n  \"arithmetic\",\n  \"ecdsa\",\n] }\nhex-literal = { version = \"0.4.1\", default-features = false }\n"
  },
  {
    "path": "openvm-riscv/guest-ecrecover/src/main.rs",
    "content": "openvm::entry!(main);\n\nuse hex_literal::hex;\nuse k256::ecdsa::{PowdrVerifyKey, RecoveryId, Signature, VerifyingKey};\nuse k256::EncodedPoint;\nuse openvm::io::read;\n\n// Signature recovery test vectors\nstruct RecoveryTestVector {\n    pk: [u8; 33],\n    sig: [u8; 64],\n    recid: RecoveryId,\n}\n\nconst RECOVERY_TEST_VECTORS: &[RecoveryTestVector] = &[\n    // Recovery ID 0\n    RecoveryTestVector {\n        pk: hex!(\"021a7a569e91dbf60581509c7fc946d1003b60c7dee85299538db6353538d59574\"),\n        sig: hex!(\n            \"ce53abb3721bafc561408ce8ff99c909f7f0b18a2f788649d6470162ab1aa032\n                 3971edc523a6d6453f3fb6128d318d9db1a5ff3386feb1047d9816e780039d52\"\n        ),\n        recid: RecoveryId::new(false, false),\n    },\n    // Recovery ID 1\n    RecoveryTestVector {\n        pk: hex!(\"036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2\"),\n        sig: hex!(\n            \"46c05b6368a44b8810d79859441d819b8e7cdc8bfd371e35c53196f4bcacdb51\n                 35c7facce2a97b95eacba8a586d87b7958aaf8368ab29cee481f76e871dbd9cb\"\n        ),\n        recid: RecoveryId::new(true, false),\n    },\n];\n\n//Test public key recovery\npub fn main() {\n    let n: u32 = read();\n    for _ in 0..n {\n        for vector in RECOVERY_TEST_VECTORS {\n            let digest = [\n                173, 132, 205, 11, 16, 252, 2, 135, 56, 151, 27, 7, 129, 36, 174, 194, 160, 231,\n                198, 217, 134, 163, 129, 190, 11, 56, 111, 50, 190, 232, 135, 175,\n            ];\n            let sig = Signature::try_from(vector.sig.as_slice()).unwrap();\n            let recid = vector.recid;\n            let pk = <VerifyingKey as PowdrVerifyKey>::powdr_recover_from_prehash(\n                digest.as_slice(),\n                &sig,\n                recid,\n            )\n            .unwrap();\n            assert_eq!(&vector.pk[..], EncodedPoint::from(&pk).as_bytes());\n        }\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest-ecrecover-manual/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"openvm-k256-test-programs\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\nopenvm-algebra-guest = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-algebra-moduli-macros = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-ecc-guest = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-ecc-sw-macros = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-k256 = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", package = \"k256\" }\n\nelliptic-curve = { version = \"0.13.8\" }\necdsa = { version = \"0.16.9\" }\nhex-literal = { version = \"0.4.1\", default-features = false }\n"
  },
  {
    "path": "openvm-riscv/guest-ecrecover-manual/openvm.toml",
    "content": "[app_vm_config.rv32i]\n[app_vm_config.rv32m]\n[app_vm_config.io]\n[app_vm_config.sha256]\n\n[app_vm_config.modular]\nsupported_moduli = [\n    \"115792089237316195423570985008687907853269984665640564039457584007908834671663\",\n    \"115792089237316195423570985008687907852837564279074904382605163141518161494337\",\n]\n\n[[app_vm_config.ecc.supported_curves]]\nstruct_name = \"Secp256k1Point\"\nmodulus = \"115792089237316195423570985008687907853269984665640564039457584007908834671663\"\nscalar = \"115792089237316195423570985008687907852837564279074904382605163141518161494337\"\na = \"0\"\nb = \"7\""
  },
  {
    "path": "openvm-riscv/guest-ecrecover-manual/src/main.rs",
    "content": "extern crate alloc;\n\nuse ecdsa::RecoveryId;\nuse hex_literal::hex;\nuse openvm_k256::ecdsa::{Signature, VerifyingKey};\n// clippy thinks this is unused, but it's used in the init! macro\nuse openvm::io::read;\n#[allow(unused)]\nuse openvm_k256::Secp256k1Point;\n\nopenvm::init!();\n\nopenvm::entry!(main);\n\n/// Signature recovery test vectors\nstruct RecoveryTestVector {\n    pk: [u8; 33],\n    sig: [u8; 64],\n    recid: RecoveryId,\n}\n\nconst RECOVERY_TEST_VECTORS: &[RecoveryTestVector] = &[\n    // Recovery ID 0\n    RecoveryTestVector {\n        pk: hex!(\"021a7a569e91dbf60581509c7fc946d1003b60c7dee85299538db6353538d59574\"),\n        sig: hex!(\n            \"ce53abb3721bafc561408ce8ff99c909f7f0b18a2f788649d6470162ab1aa032\n                 3971edc523a6d6453f3fb6128d318d9db1a5ff3386feb1047d9816e780039d52\"\n        ),\n        recid: RecoveryId::new(false, false),\n    },\n    // Recovery ID 1\n    RecoveryTestVector {\n        pk: hex!(\"036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2\"),\n        sig: hex!(\n            \"46c05b6368a44b8810d79859441d819b8e7cdc8bfd371e35c53196f4bcacdb51\n                 35c7facce2a97b95eacba8a586d87b7958aaf8368ab29cee481f76e871dbd9cb\"\n        ),\n        recid: RecoveryId::new(true, false),\n    },\n];\n\n// Test public key recovery\nfn main() {\n    let n: u32 = read();\n    for _ in 0..n {\n        for vector in RECOVERY_TEST_VECTORS {\n            let digest = [\n                173, 132, 205, 11, 16, 252, 2, 135, 56, 151, 27, 7, 129, 36, 174, 194, 160, 231,\n                198, 217, 134, 163, 129, 190, 11, 56, 111, 50, 190, 232, 135, 175,\n            ];\n            let sig = Signature::try_from(vector.sig.as_slice()).unwrap();\n            let recid = vector.recid;\n            let pk = VerifyingKey::recover_from_prehash(digest.as_slice(), &sig, recid).unwrap();\n            assert_eq!(&vector.pk[..], &pk.to_sec1_bytes(true));\n        }\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest-hints-test/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"powdr-openvm-guest-hints-test\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\n# The `tag` here must point to the same version used in the workspace.\n# Otherwise, there is a conflict with the `powdr-openvm-riscv-hints-guest` dependency (which is part of the workspace).\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\npowdr-openvm-riscv-hints-guest = { path = \"../extensions/hints-guest/\" }\n\n[profile.release-with-debug]\ninherits = \"release\"\ndebug = true\n"
  },
  {
    "path": "openvm-riscv/guest-hints-test/src/main.rs",
    "content": "#![cfg_attr(target_os = \"zkvm\", no_main)]\n#![cfg_attr(target_os = \"zkvm\", no_std)]\n\nopenvm::entry!(main);\nuse powdr_openvm_riscv_hints_guest::hint_reverse_bytes;\n\npub fn main() {\n    let res = hint_reverse_bytes(0x11223344);\n    assert_eq!(res, 0x44332211);\n}\n"
  },
  {
    "path": "openvm-riscv/guest-keccak/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"guest-keccak-stdin\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\ntiny-keccak = { version = \"2.0.2\", features = [\"keccak\"] }\n\n[profile.release-with-debug]\ninherits = \"release\"\ndebug = true\n"
  },
  {
    "path": "openvm-riscv/guest-keccak/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nopenvm::entry!(main);\n\nuse core::hint::black_box;\n\nuse openvm::io::{read, reveal_u32};\nuse tiny_keccak::{Hasher, Keccak};\n\npub fn main() {\n    let n: u32 = read();\n    let mut output = black_box([0u8; 32]);\n    for _ in 0..n {\n        let mut hasher = Keccak::v256();\n        hasher.update(&output);\n        hasher.finalize(&mut output);\n    }\n\n    reveal_u32(output[0] as u32, 0);\n}\n"
  },
  {
    "path": "openvm-riscv/guest-keccak-manual-precompile/Cargo.toml",
    "content": "[package]\nname = \"keccak-example\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[workspace]\nmembers = []\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-platform = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-keccak256 = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\n"
  },
  {
    "path": "openvm-riscv/guest-keccak-manual-precompile/openvm.toml",
    "content": "[app_vm_config.rv32i]\n[app_vm_config.rv32m]\n[app_vm_config.io]\n[app_vm_config.keccak]"
  },
  {
    "path": "openvm-riscv/guest-keccak-manual-precompile/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse core::hint::black_box;\n\nuse openvm::io::{read, reveal_u32};\nuse openvm_keccak256::keccak256;\n\nopenvm::entry!(main);\n\npub fn main() {\n    let n: u32 = read();\n    let mut output = [0u8; 32];\n    for _ in 0..n {\n        output = keccak256(&black_box(output));\n    }\n\n    reveal_u32(output[0] as u32, 0);\n}\n"
  },
  {
    "path": "openvm-riscv/guest-matmul/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"powdr-openvm-matmul-test\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\n\n[profile.release-with-debug]\ninherits = \"release\"\ndebug = true\n"
  },
  {
    "path": "openvm-riscv/guest-matmul/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nopenvm::entry!(main);\n\nuse openvm::io::reveal_u32;\n\npub fn main() {\n    loop_test_matrix();\n}\n\nconst SIZE: usize = 6;\n\ntype Mat = [[i32; SIZE]; SIZE];\n\n#[inline(never)]\nfn matrix_multiply_unrolled(a: &Mat, b: &Mat, c: &mut Mat) {\n    c[0][0] = a[0][0] * b[0][0]\n        + a[0][1] * b[1][0]\n        + a[0][2] * b[2][0]\n        + a[0][3] * b[3][0]\n        + a[0][4] * b[4][0]\n        + a[0][5] * b[5][0];\n    c[0][1] = a[0][0] * b[0][1]\n        + a[0][1] * b[1][1]\n        + a[0][2] * b[2][1]\n        + a[0][3] * b[3][1]\n        + a[0][4] * b[4][1]\n        + a[0][5] * b[5][1];\n    c[0][2] = a[0][0] * b[0][2]\n        + a[0][1] * b[1][2]\n        + a[0][2] * b[2][2]\n        + a[0][3] * b[3][2]\n        + a[0][4] * b[4][2]\n        + a[0][5] * b[5][2];\n    c[0][3] = a[0][0] * b[0][3]\n        + a[0][1] * b[1][3]\n        + a[0][2] * b[2][3]\n        + a[0][3] * b[3][3]\n        + a[0][4] * b[4][3]\n        + a[0][5] * b[5][3];\n    c[0][4] = a[0][0] * b[0][4]\n        + a[0][1] * b[1][4]\n        + a[0][2] * b[2][4]\n        + a[0][3] * b[3][4]\n        + a[0][4] * b[4][4]\n        + a[0][5] * b[5][4];\n    c[0][5] = a[0][0] * b[0][5]\n        + a[0][1] * b[1][5]\n        + a[0][2] * b[2][5]\n        + a[0][3] * b[3][5]\n        + a[0][4] * b[4][5]\n        + a[0][5] * b[5][5];\n\n    c[1][0] = a[1][0] * b[0][0]\n        + a[1][1] * b[1][0]\n        + a[1][2] * b[2][0]\n        + a[1][3] * b[3][0]\n        + a[1][4] * b[4][0]\n        + a[1][5] * b[5][0];\n    c[1][1] = a[1][0] * b[0][1]\n        + a[1][1] * b[1][1]\n        + a[1][2] * b[2][1]\n        + a[1][3] * b[3][1]\n        + a[1][4] * b[4][1]\n        + a[1][5] * b[5][1];\n    c[1][2] = a[1][0] * b[0][2]\n        + a[1][1] * b[1][2]\n        + a[1][2] * b[2][2]\n        + a[1][3] * b[3][2]\n        + a[1][4] * b[4][2]\n        + a[1][5] * b[5][2];\n    c[1][3] = a[1][0] * b[0][3]\n        + a[1][1] * b[1][3]\n        + a[1][2] * b[2][3]\n        
+ a[1][3] * b[3][3]\n        + a[1][4] * b[4][3]\n        + a[1][5] * b[5][3];\n    c[1][4] = a[1][0] * b[0][4]\n        + a[1][1] * b[1][4]\n        + a[1][2] * b[2][4]\n        + a[1][3] * b[3][4]\n        + a[1][4] * b[4][4]\n        + a[1][5] * b[5][4];\n    c[1][5] = a[1][0] * b[0][5]\n        + a[1][1] * b[1][5]\n        + a[1][2] * b[2][5]\n        + a[1][3] * b[3][5]\n        + a[1][4] * b[4][5]\n        + a[1][5] * b[5][5];\n\n    c[2][0] = a[2][0] * b[0][0]\n        + a[2][1] * b[1][0]\n        + a[2][2] * b[2][0]\n        + a[2][3] * b[3][0]\n        + a[2][4] * b[4][0]\n        + a[2][5] * b[5][0];\n    c[2][1] = a[2][0] * b[0][1]\n        + a[2][1] * b[1][1]\n        + a[2][2] * b[2][1]\n        + a[2][3] * b[3][1]\n        + a[2][4] * b[4][1]\n        + a[2][5] * b[5][1];\n    c[2][2] = a[2][0] * b[0][2]\n        + a[2][1] * b[1][2]\n        + a[2][2] * b[2][2]\n        + a[2][3] * b[3][2]\n        + a[2][4] * b[4][2]\n        + a[2][5] * b[5][2];\n    c[2][3] = a[2][0] * b[0][3]\n        + a[2][1] * b[1][3]\n        + a[2][2] * b[2][3]\n        + a[2][3] * b[3][3]\n        + a[2][4] * b[4][3]\n        + a[2][5] * b[5][3];\n    c[2][4] = a[2][0] * b[0][4]\n        + a[2][1] * b[1][4]\n        + a[2][2] * b[2][4]\n        + a[2][3] * b[3][4]\n        + a[2][4] * b[4][4]\n        + a[2][5] * b[5][4];\n    c[2][5] = a[2][0] * b[0][5]\n        + a[2][1] * b[1][5]\n        + a[2][2] * b[2][5]\n        + a[2][3] * b[3][5]\n        + a[2][4] * b[4][5]\n        + a[2][5] * b[5][5];\n\n    c[3][0] = a[3][0] * b[0][0]\n        + a[3][1] * b[1][0]\n        + a[3][2] * b[2][0]\n        + a[3][3] * b[3][0]\n        + a[3][4] * b[4][0]\n        + a[3][5] * b[5][0];\n    c[3][1] = a[3][0] * b[0][1]\n        + a[3][1] * b[1][1]\n        + a[3][2] * b[2][1]\n        + a[3][3] * b[3][1]\n        + a[3][4] * b[4][1]\n        + a[3][5] * b[5][1];\n    c[3][2] = a[3][0] * b[0][2]\n        + a[3][1] * b[1][2]\n        + a[3][2] * b[2][2]\n        + a[3][3] * b[3][2]\n      
  + a[3][4] * b[4][2]\n        + a[3][5] * b[5][2];\n    c[3][3] = a[3][0] * b[0][3]\n        + a[3][1] * b[1][3]\n        + a[3][2] * b[2][3]\n        + a[3][3] * b[3][3]\n        + a[3][4] * b[4][3]\n        + a[3][5] * b[5][3];\n    c[3][4] = a[3][0] * b[0][4]\n        + a[3][1] * b[1][4]\n        + a[3][2] * b[2][4]\n        + a[3][3] * b[3][4]\n        + a[3][4] * b[4][4]\n        + a[3][5] * b[5][4];\n    c[3][5] = a[3][0] * b[0][5]\n        + a[3][1] * b[1][5]\n        + a[3][2] * b[2][5]\n        + a[3][3] * b[3][5]\n        + a[3][4] * b[4][5]\n        + a[3][5] * b[5][5];\n\n    c[4][0] = a[4][0] * b[0][0]\n        + a[4][1] * b[1][0]\n        + a[4][2] * b[2][0]\n        + a[4][3] * b[3][0]\n        + a[4][4] * b[4][0]\n        + a[4][5] * b[5][0];\n    c[4][1] = a[4][0] * b[0][1]\n        + a[4][1] * b[1][1]\n        + a[4][2] * b[2][1]\n        + a[4][3] * b[3][1]\n        + a[4][4] * b[4][1]\n        + a[4][5] * b[5][1];\n    c[4][2] = a[4][0] * b[0][2]\n        + a[4][1] * b[1][2]\n        + a[4][2] * b[2][2]\n        + a[4][3] * b[3][2]\n        + a[4][4] * b[4][2]\n        + a[4][5] * b[5][2];\n    c[4][3] = a[4][0] * b[0][3]\n        + a[4][1] * b[1][3]\n        + a[4][2] * b[2][3]\n        + a[4][3] * b[3][3]\n        + a[4][4] * b[4][3]\n        + a[4][5] * b[5][3];\n    c[4][4] = a[4][0] * b[0][4]\n        + a[4][1] * b[1][4]\n        + a[4][2] * b[2][4]\n        + a[4][3] * b[3][4]\n        + a[4][4] * b[4][4]\n        + a[4][5] * b[5][4];\n    c[4][5] = a[4][0] * b[0][5]\n        + a[4][1] * b[1][5]\n        + a[4][2] * b[2][5]\n        + a[4][3] * b[3][5]\n        + a[4][4] * b[4][5]\n        + a[4][5] * b[5][5];\n\n    c[5][0] = a[5][0] * b[0][0]\n        + a[5][1] * b[1][0]\n        + a[5][2] * b[2][0]\n        + a[5][3] * b[3][0]\n        + a[5][4] * b[4][0]\n        + a[5][5] * b[5][0];\n    c[5][1] = a[5][0] * b[0][1]\n        + a[5][1] * b[1][1]\n        + a[5][2] * b[2][1]\n        + a[5][3] * b[3][1]\n        + a[5][4] * b[4][1]\n    
    + a[5][5] * b[5][1];\n    c[5][2] = a[5][0] * b[0][2]\n        + a[5][1] * b[1][2]\n        + a[5][2] * b[2][2]\n        + a[5][3] * b[3][2]\n        + a[5][4] * b[4][2]\n        + a[5][5] * b[5][2];\n    c[5][3] = a[5][0] * b[0][3]\n        + a[5][1] * b[1][3]\n        + a[5][2] * b[2][3]\n        + a[5][3] * b[3][3]\n        + a[5][4] * b[4][3]\n        + a[5][5] * b[5][3];\n    c[5][4] = a[5][0] * b[0][4]\n        + a[5][1] * b[1][4]\n        + a[5][2] * b[2][4]\n        + a[5][3] * b[3][4]\n        + a[5][4] * b[4][4]\n        + a[5][5] * b[5][4];\n    c[5][5] = a[5][0] * b[0][5]\n        + a[5][1] * b[1][5]\n        + a[5][2] * b[2][5]\n        + a[5][3] * b[3][5]\n        + a[5][4] * b[4][5]\n        + a[5][5] * b[5][5];\n}\n\n#[inline(never)]\nfn test_matrix() {\n    let a: Mat = [\n        [1, 2, 3, 4, 5, 6],\n        [7, 8, 9, 10, 11, 12],\n        [13, 14, 15, 16, 17, 18],\n        [19, 20, 21, 22, 23, 24],\n        [25, 26, 27, 28, 29, 30],\n        [31, 32, 33, 34, 35, 36],\n    ];\n\n    let b: Mat = [\n        [37, 38, 39, 40, 41, 42],\n        [43, 44, 45, 46, 47, 48],\n        [49, 50, 51, 52, 53, 54],\n        [55, 56, 57, 58, 59, 60],\n        [61, 62, 63, 64, 65, 66],\n        [67, 68, 69, 70, 71, 72],\n    ];\n\n    let mut c: Mat = [[0; SIZE]; SIZE];\n\n    matrix_multiply_unrolled(&a, &b, &mut c);\n\n    assert_eq!(c[0][0], 1197);\n    reveal_u32(c[0][0] as u32, 0);\n    reveal_u32(c[5][5] as u32, 1);\n}\n\n#[inline(never)]\nfn loop_test_matrix() {\n    for _ in 0..8000 {\n        test_matrix();\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest-pairing/Cargo.toml",
    "content": "[package]\nname = \"guest-pairing\"\nversion = \"0.1.0\"\nedition = \"2024\"\n\n[workspace]\nmembers = []\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\n\nark-bn254 = \"0.5\"\nark-ec = \"0.5\"\nark-ff = \"0.5\"\nhex = \"0.4\"\n"
  },
  {
    "path": "openvm-riscv/guest-pairing/src/main.rs",
    "content": "use ark_bn254::{Bn254, Fq, Fq2, G1Affine, G2Affine};\nuse ark_ec::pairing::Pairing;\nuse ark_ff::fields::PrimeField;\nuse ark_ff::One;\n\nopenvm::entry!(main);\n\nconst PAIR_ELEMENT_LEN: usize = 32 * (2 + 4); // G1 (2 Fq), G2 (4 Fq)\n\nfn main() {\n    let input = hex::decode(\n        \"\\\n            1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f59\\\n            3034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41\\\n            209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf7\\\n            04bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a41678\\\n            2bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d\\\n            120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550\\\n            111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c\\\n            2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411\\\n            198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2\\\n            1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed\\\n            090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b\\\n            12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa\",\n    )\n    .unwrap();\n\n    let elements = input.len() / PAIR_ELEMENT_LEN;\n\n    let mut g1_vec = Vec::with_capacity(elements);\n    let mut g2_vec = Vec::with_capacity(elements);\n\n    for idx in 0..elements {\n        let read_fq_at = |n: usize| {\n            debug_assert!(n < PAIR_ELEMENT_LEN / 32);\n            let start = idx * PAIR_ELEMENT_LEN + n * 32;\n            let slice = unsafe { input.get_unchecked(start..start + 32) };\n            Fq::from_be_bytes_mod_order(&slice[..32])\n        };\n\n        let g1_x = read_fq_at(0);\n        let g1_y = read_fq_at(1);\n        let g2_x_c1 = read_fq_at(2);\n        let g2_x_c0 = read_fq_at(3);\n        let g2_y_c1 = read_fq_at(4);\n        let g2_y_c0 = 
read_fq_at(5);\n\n        let g1 = G1Affine::new_unchecked(g1_x, g1_y);\n        let g2_x = Fq2::new(g2_x_c0, g2_x_c1);\n        let g2_y = Fq2::new(g2_y_c0, g2_y_c1);\n        let g2 = G2Affine::new_unchecked(g2_x, g2_y);\n\n        g1_vec.push(g1);\n        g2_vec.push(g2);\n    }\n\n    let result = Bn254::multi_pairing(g1_vec, g2_vec);\n    assert_eq!(result.0, <Bn254 as Pairing>::TargetField::one());\n}\n\n"
  },
  {
    "path": "openvm-riscv/guest-pairing-manual-precompile/Cargo.toml",
    "content": "[package]\nname = \"openvm-pairing-example\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[workspace]\nmembers = []\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\n\nopenvm-algebra-guest = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false }\nopenvm-ecc-guest = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false }\nopenvm-pairing = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", default-features = false, features = [\n  \"bn254\",\n] }\n\nhex = { version = \"0.4.3\", default-features = false, features = [\"alloc\"] }\n"
  },
  {
    "path": "openvm-riscv/guest-pairing-manual-precompile/openvm.toml",
    "content": "[app_vm_config.rv32i]\n[app_vm_config.rv32m]\n[app_vm_config.io]\n[app_vm_config.modular]\nsupported_moduli = [\n    \"21888242871839275222246405745257275088696311157297823662689037894645226208583\", # coordinate field\n    \"21888242871839275222246405745257275088548364400416034343698204186575808495617\", # scalar field\n]\n\n[app_vm_config.fp2]\nsupported_moduli = [\n    [\"Bn254Fp2\", \"21888242871839275222246405745257275088696311157297823662689037894645226208583\"],\n]\n\n[app_vm_config.pairing]\nsupported_curves = [\"Bn254\"]\n\n[[app_vm_config.ecc.supported_curves]]\nstruct_name = \"Bn254G1Affine\"\nmodulus = \"21888242871839275222246405745257275088696311157297823662689037894645226208583\"\nscalar = \"21888242871839275222246405745257275088548364400416034343698204186575808495617\"\na = \"0\"\nb = \"3\"\n"
  },
  {
    "path": "openvm-riscv/guest-pairing-manual-precompile/src/main.rs",
    "content": "use openvm_algebra_guest::IntMod;\nuse openvm_ecc_guest::AffinePoint;\nuse {\n    openvm_pairing::bn254::{Bn254, Fp, Fp2},\n    openvm_pairing::PairingCheck,\n};\n\nopenvm::init!();\n\nconst PAIR_ELEMENT_LEN: usize = 32 * (2 + 4); // 1 G1Affine (2 Fp), 1 G2Affine (4 Fp)\n\n// code mostly taken from openvm repo guest benchmarks\npub fn main() {\n    let input = hex::decode(\n        \"\\\n            1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f59\\\n            3034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41\\\n            209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf7\\\n            04bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a41678\\\n            2bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d\\\n            120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550\\\n            111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c\\\n            2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411\\\n            198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2\\\n            1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed\\\n            090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b\\\n            12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa\",\n    )\n    .unwrap();\n\n    let elements = input.len() / PAIR_ELEMENT_LEN;\n\n    let mut p = Vec::with_capacity(elements);\n    let mut q = Vec::with_capacity(elements);\n\n    for idx in 0..elements {\n        let read_fq_at = |n: usize| {\n            debug_assert!(n < PAIR_ELEMENT_LEN / 32);\n            let start = idx * PAIR_ELEMENT_LEN + n * 32;\n            let slice = unsafe { input.get_unchecked(start..start + 32) };\n            Fp::from_be_bytes(&slice[..32])\n        };\n        let g1_x = read_fq_at(0).unwrap();\n        let g1_y = read_fq_at(1).unwrap();\n        let g2_x_c1 = read_fq_at(2).unwrap();\n       
 let g2_x_c0 = read_fq_at(3).unwrap();\n        let g2_y_c1 = read_fq_at(4).unwrap();\n        let g2_y_c0 = read_fq_at(5).unwrap();\n\n        let g1 = AffinePoint::new(g1_x, g1_y);\n        let g2_x = Fp2::new(g2_x_c0, g2_x_c1);\n        let g2_y = Fp2::new(g2_y_c0, g2_y_c1);\n        let g2 = AffinePoint::new(g2_x, g2_y);\n\n        p.push(g1);\n        q.push(g2);\n    }\n    let success = Bn254::pairing_check(&p, &q).is_ok();\n    assert!(success);\n}\n"
  },
  {
    "path": "openvm-riscv/guest-sha256/Cargo.toml",
    "content": "[workspace]\n[package]\nname = \"guest-sha256-stdin\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nsha2 = { version = \"0.10\", default-features = false }\ndigest = { version = \"0.10\", default-features = false }\n\n[profile.release-with-debug]\ninherits = \"release\"\ndebug = true\n"
  },
  {
    "path": "openvm-riscv/guest-sha256/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nopenvm::entry!(main);\n\nuse core::hint::black_box;\n\nuse openvm::io::{read, reveal_u32};\nuse sha2::{Digest, Sha256};\n\npub fn main() {\n    let n: u32 = read();\n    let mut output = black_box([0u8; 32]);\n    for _ in 0..n {\n        output = Sha256::digest(output).into();\n    }\n\n    reveal_u32(output[0] as u32, 0);\n}\n"
  },
  {
    "path": "openvm-riscv/guest-sha256-manual-precompile/Cargo.toml",
    "content": "[package]\nname = \"sha256-example\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[workspace]\nmembers = []\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-platform = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\nopenvm-sha2 = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\" }\n"
  },
  {
    "path": "openvm-riscv/guest-sha256-manual-precompile/openvm.toml",
    "content": "[app_vm_config.rv32i]\n[app_vm_config.rv32m]\n[app_vm_config.io]\n[app_vm_config.sha256]"
  },
  {
    "path": "openvm-riscv/guest-sha256-manual-precompile/src/main.rs",
    "content": "#![no_std]\n#![no_main]\n\nextern crate alloc;\n\nuse core::hint::black_box;\n\nuse openvm::io::{read, reveal_u32};\nuse openvm_sha2::sha256;\n\nopenvm::entry!(main);\n\npub fn main() {\n    let n = read();\n    let mut output = black_box([0u8; 32]);\n    for _ in 0..n {\n        output = sha256(&output);\n    }\n\n    reveal_u32(output[0] as u32, 0);\n}\n"
  },
  {
    "path": "openvm-riscv/guest-u256/Cargo.toml",
    "content": "[package]\nname = \"u256-example\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[workspace]\nmembers = []\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\nruint = \"1.16\"\n"
  },
  {
    "path": "openvm-riscv/guest-u256/openvm.toml",
    "content": "[app_vm_config.rv32i]\n[app_vm_config.rv32m]\n[app_vm_config.io]\n[app_vm_config.bigint]"
  },
  {
    "path": "openvm-riscv/guest-u256/src/main.rs",
    "content": "#![allow(clippy::needless_range_loop)]\nuse core::array;\n\nuse ruint::aliases::U256;\n\nopenvm::entry!(main);\n\nconst N: usize = 70;\ntype Matrix = [[U256; N]; N];\n\npub fn get_matrix(val: u32) -> Matrix {\n    array::from_fn(|_| array::from_fn(|_| U256::from(val)))\n}\n\npub fn mult(a: &Matrix, b: &Matrix) -> Matrix {\n    let mut c = get_matrix(0);\n    for i in 0..N {\n        for j in 0..N {\n            for k in 0..N {\n                c[i][j] += a[i][k] * b[k][j];\n            }\n        }\n    }\n    c\n}\n\npub fn get_identity_matrix() -> Matrix {\n    let mut res = get_matrix(0);\n    for i in 0..N {\n        res[i][i] = U256::from(1u32);\n    }\n    res\n}\n\npub fn main() {\n    let a: Matrix = get_identity_matrix();\n    let b: Matrix = get_matrix(28);\n    let c: Matrix = mult(&a, &b);\n    if c != b {\n        panic!(\"Matrix multiplication failed\");\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/guest-u256-manual-precompile/Cargo.toml",
    "content": "[package]\nname = \"u256-example\"\nversion = \"0.0.0\"\nedition = \"2021\"\n\n[workspace]\nmembers = []\n\n[dependencies]\nopenvm = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", features = [\n  \"std\",\n] }\nopenvm-ruint = { git = \"https://github.com/powdr-labs/openvm.git\", tag = \"v1.4.2-powdr-rc.4\", package = \"ruint\" }\n"
  },
  {
    "path": "openvm-riscv/guest-u256-manual-precompile/openvm.toml",
    "content": "[app_vm_config.rv32i]\n[app_vm_config.rv32m]\n[app_vm_config.io]\n[app_vm_config.bigint]"
  },
  {
    "path": "openvm-riscv/guest-u256-manual-precompile/src/main.rs",
    "content": "#![allow(clippy::needless_range_loop)]\nuse core::array;\n\nuse openvm_ruint::aliases::U256;\n\nopenvm::entry!(main);\n\nconst N: usize = 70;\ntype Matrix = [[U256; N]; N];\n\npub fn get_matrix(val: u32) -> Matrix {\n    array::from_fn(|_| array::from_fn(|_| U256::from(val)))\n}\n\npub fn mult(a: &Matrix, b: &Matrix) -> Matrix {\n    let mut c = get_matrix(0);\n    for i in 0..N {\n        for j in 0..N {\n            for k in 0..N {\n                c[i][j] += a[i][k] * b[k][j];\n            }\n        }\n    }\n    c\n}\n\npub fn get_identity_matrix() -> Matrix {\n    let mut res = get_matrix(0);\n    for i in 0..N {\n        res[i][i] = U256::from(1u32);\n    }\n    res\n}\n\npub fn main() {\n    let a: Matrix = get_identity_matrix();\n    let b: Matrix = get_matrix(28);\n    let c: Matrix = mult(&a, &b);\n    if c != b {\n        panic!(\"Matrix multiplication failed\");\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/scripts/basic_metrics.py",
    "content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nfrom collections import OrderedDict\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom metrics_utils import load_metrics_dataframes, is_normal_instruction_air\n\n\ndef get_label(filepath):\n    \"\"\"Extract a label from a metrics file path.\n\n    Use parent directory name if file is metrics.json, otherwise use filename without extension.\n    \"\"\"\n    basename = os.path.basename(filepath)\n    if basename == \"metrics.json\":\n        return os.path.basename(os.path.dirname(filepath))\n    else:\n        return os.path.splitext(basename)[0]\n\ndef extract_metrics(filename):\n    app, leaf, internal = load_metrics_dataframes(filename)\n    metrics = OrderedDict()\n\n    powdr_air = app[app[\"air_name\"].fillna('').str.startswith(\"PowdrAir\")]\n    non_powdr_air = app[~app[\"air_name\"].fillna('').str.startswith(\"PowdrAir\")]\n    \n    # Split non_powdr_air into normal instructions and openvm precompiles\n    is_normal_instruction = non_powdr_air[\"air_name\"].fillna('').apply(is_normal_instruction_air)\n    normal_instruction_air = non_powdr_air[is_normal_instruction]\n    openvm_precompile_air = non_powdr_air[~is_normal_instruction]\n\n    def get_metric(df, metric_name):\n        return pd.to_numeric(df[df[\"metric\"] == metric_name][\"value\"]).sum()\n\n    # Compute total proof times\n    app_proof_time_ms = get_metric(app, \"total_proof_time_ms\")\n    leaf_proof_time_ms = get_metric(leaf, \"total_proof_time_ms\")\n    internal_proof_time_ms = get_metric(internal, \"total_proof_time_ms\")\n    total_proof_time_ms = app_proof_time_ms + leaf_proof_time_ms + internal_proof_time_ms\n\n    app_proof_time_excluding_trace_ms = get_metric(app, \"stark_prove_excluding_trace_time_ms\")\n    leaf_proof_time_excluding_trace_ms = get_metric(leaf, \"stark_prove_excluding_trace_time_ms\")\n    
internal_proof_time_excluding_trace_ms = get_metric(internal, \"stark_prove_excluding_trace_time_ms\")\n    total_proof_time_excluding_trace_ms = app_proof_time_excluding_trace_ms + leaf_proof_time_excluding_trace_ms + internal_proof_time_excluding_trace_ms\n\n    # Compute total column counts\n    # Note that this sums the columns over *all* segments.\n    # This metric should roughly correlate with leaf proof time.\n    main_cols = get_metric(app, \"main_cols\")\n    prep_cols = get_metric(app, \"prep_cols\")\n    perm_cols = get_metric(app, \"perm_cols\")\n    app_proof_cols = main_cols + prep_cols + perm_cols\n\n    num_segments = int(pd.to_numeric(app[\"segment\"]).max()) + 1\n\n    metrics[\"filename\"] = filename\n    metrics[\"num_segments\"] = num_segments\n    metrics[\"app_proof_cells\"] = get_metric(app, \"total_cells\")\n    metrics[\"app_proof_cols\"] = app_proof_cols\n    metrics[\"total_proof_time_ms\"] = total_proof_time_ms\n    metrics[\"total_proof_time_excluding_trace_ms\"] = total_proof_time_excluding_trace_ms\n    metrics[\"app_proof_time_ms\"] = app_proof_time_ms\n    metrics[\"app_proof_time_excluding_trace_ms\"] = app_proof_time_excluding_trace_ms\n    metrics[\"app_execute_preflight_time_ms\"] = get_metric(app, \"execute_preflight_time_ms\")\n    metrics[\"app_execute_metered_time_ms\"] = get_metric(app, \"execute_metered_time_ms\")\n    metrics[\"app_trace_gen_time_ms\"] = get_metric(app, \"trace_gen_time_ms\")\n    metrics[\"leaf_proof_time_ms\"] = leaf_proof_time_ms\n    metrics[\"leaf_proof_time_excluding_trace_ms\"] = leaf_proof_time_excluding_trace_ms\n    metrics[\"inner_recursion_proof_time_ms\"] = internal_proof_time_ms\n    metrics[\"inner_recursion_proof_time_excluding_trace_ms\"] = internal_proof_time_excluding_trace_ms\n\n    normal_instruction_cells = get_metric(normal_instruction_air, \"cells\")\n    openvm_precompile_cells = get_metric(openvm_precompile_air, \"cells\")\n    powdr_cells = get_metric(powdr_air, \"cells\")\n   
 assert(metrics[\"app_proof_cells\"] == powdr_cells + normal_instruction_cells + openvm_precompile_cells)\n\n    metrics[\"normal_instruction_ratio\"] = normal_instruction_cells / metrics[\"app_proof_cells\"]\n    metrics[\"openvm_precompile_ratio\"] = openvm_precompile_cells / metrics[\"app_proof_cells\"]\n    metrics[\"powdr_ratio\"] = powdr_cells / metrics[\"app_proof_cells\"]\n    metrics[\"powdr_rows\"] = get_metric(powdr_air, \"rows\")\n\n    return metrics\n\ndef summary_table(metrics_files, csv):\n    file_metrics = [ extract_metrics(filename) for filename in metrics_files ]\n\n    df = pd.DataFrame(file_metrics)\n    if csv:\n        print(df.to_csv(index=False))\n    else:\n        print(df.to_string(index=False))\n\ndef plot(metrics_files, output):\n    file_metrics = [ extract_metrics(filename) for filename in metrics_files ]\n    df = pd.DataFrame(file_metrics)\n\n    # Compute app \"other\" time\n    df[\"app_other_ms\"] = (\n        df[\"app_proof_time_ms\"]\n        - df[\"app_proof_time_excluding_trace_ms\"]\n        - df[\"app_execute_preflight_time_ms\"]\n        - df[\"app_execute_metered_time_ms\"]\n        - df[\"app_trace_gen_time_ms\"]\n    )\n\n    # Stack components (bottom to top) with colors\n    # App components use shades of blue, others use distinct colors\n    components = [\n        (\"inner_recursion_proof_time_ms\", \"Inner recursion\", \"#9b3e00\"),        \n        (\"leaf_proof_time_ms\", \"Leaf recursion\", \"#d69600\"),                              \n        (\"app_proof_time_excluding_trace_ms\", \"App STARK (excl. 
trace)\", \"#1f77b4\"),  \n        (\"app_trace_gen_time_ms\", \"App trace gen\", \"#6baed6\"),                        \n        (\"app_execute_preflight_time_ms\", \"App preflight\", \"#9ecae1\"),        \n        (\"app_execute_metered_time_ms\", \"App metered\", \"#c6dbef\"),            \n        (\"app_other_ms\", \"App other\", \"#08519c\"),                                     \n    ]\n\n    x_labels = [get_label(f) for f in df[\"filename\"]]\n\n    import numpy as np\n    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7))\n    plt.subplots_adjust(bottom=0.18)  # Make room for legend at bottom\n\n    # === Left plot: Stacked bars ===\n    bottom = [0.0] * len(df)\n    bars_data = []  # Store bar info for labeling\n    for col, label, color in components:\n        values = [v / 1000 for v in df[col].tolist()]  # Convert ms to seconds\n        bars = ax1.bar(x_labels, values, bottom=bottom, label=label, color=color)\n        bars_data.append((bars, values, bottom.copy(), color))\n        bottom = [b + v for b, v in zip(bottom, values)]\n\n    # Get the total height for threshold calculation\n    max_height = max(bottom)\n    min_label_height = max_height * 0.02\n\n    # Add value labels to each segment\n    for bars, values, bottoms, color in bars_data:\n        for bar, value, bot, top in zip(bars, values, bottoms, bottom):\n            if value < min_label_height:\n                continue\n            center_y = bot + value / 2\n            center_x = bar.get_x() + bar.get_width() / 2\n            text_color = 'black'\n            percentage = value / top * 100 if top > 0 else 0\n            ax1.text(center_x, center_y, f'{value:.1f} ({percentage:.1f}%)', ha='center', va='center',\n                     fontsize=8, color=text_color, fontweight='bold')\n\n    # Add total labels on top of each stack\n    last_bars = bars_data[-1][0]\n    for bar, total in zip(last_bars, bottom):\n        center_x = bar.get_x() + bar.get_width() / 2\n        ax1.text(center_x, 
total + max_height * 0.01, f'Total: {total:.1f}', ha='center', va='bottom',\n                 fontsize=9, color='black', fontweight='bold')\n\n    ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.2f}'))\n    ax1.yaxis.set_minor_locator(AutoMinorLocator(2))\n    ax1.grid(axis='y', which='major', linestyle='-', alpha=0.4)\n    ax1.grid(axis='y', which='minor', linestyle='--', alpha=0.2)\n    ax1.set_axisbelow(True)\n    ax1.set_ylabel(\"Time (s)\")\n    ax1.set_title(\"Stacked\")\n\n    # === Right plot: Grouped bars ===\n    n_configs = len(x_labels)\n    n_components = len(components)\n    bar_width = 0.8 / n_components\n    x_pos = np.arange(n_configs)\n\n    for i, (col, label, color) in enumerate(components):\n        values = [v / 1000 for v in df[col].tolist()]\n        offset = (i - n_components / 2 + 0.5) * bar_width\n        ax2.bar(x_pos + offset, values, bar_width, label=label, color=color)\n\n    ax2.set_xticks(x_pos)\n    ax2.set_xticklabels(x_labels)\n    ax2.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.2f}'))\n    ax2.yaxis.set_minor_locator(AutoMinorLocator(2))\n    ax2.grid(axis='y', which='major', linestyle='-', alpha=0.4)\n    ax2.grid(axis='y', which='minor', linestyle='--', alpha=0.2)\n    ax2.set_axisbelow(True)\n    ax2.set_ylabel(\"Time (s)\")\n    ax2.set_title(\"By Component\")\n\n    # Shared legend below both plots\n    handles, legend_labels = ax1.get_legend_handles_labels()\n    fig.legend(handles, legend_labels, loc=\"upper center\", bbox_to_anchor=(0.5, 0.02),\n               ncol=len(components), frameon=False, fontsize=9)\n\n    plt.tight_layout()\n    if output:\n        plt.savefig(output, bbox_inches='tight')\n        print(f\"Plot saved to {output}\")\n    else:\n        plt.show()\n\ndef combine(metrics_files):\n    combined = OrderedDict()\n    for filepath in metrics_files:\n        label = get_label(filepath)\n        with open(filepath) as f:\n            combined[label] = json.load(f)\n  
  print(json.dumps(combined))\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Print basic metrics from a set of metrics JSON files.\")\n    subparsers = parser.add_subparsers(dest=\"command\", required=True)\n\n    summary_parser = subparsers.add_parser(\"summary-table\", help=\"Print a summary table of metrics\")\n    summary_parser.add_argument('metrics_files', nargs='+', help='Paths to the metrics JSON files')\n    summary_parser.add_argument('--csv', action='store_true', help='Output in CSV format')\n\n    plot_parser = subparsers.add_parser(\"plot\", help=\"Plot a stacked bar chart of proof time breakdown\")\n    plot_parser.add_argument('metrics_files', nargs='+', help='Paths to the metrics JSON files')\n    plot_parser.add_argument('--output', '-o', help='Output file path (if not specified, displays interactively)')\n\n    combine_parser = subparsers.add_parser(\"combine\", help=\"Combine metrics JSON files into a single JSON\")\n    combine_parser.add_argument('metrics_files', nargs='+', help='Paths to the metrics JSON files')\n\n    args = parser.parse_args()\n    if args.command == \"summary-table\":\n        summary_table(args.metrics_files, args.csv)\n    elif args.command == \"plot\":\n        plot(args.metrics_files, args.output)\n    elif args.command == \"combine\":\n        combine(args.metrics_files)\n"
  },
  {
    "path": "openvm-riscv/scripts/generate_bench_results_readme.py",
    "content": "from __future__ import annotations\n\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nfrom urllib.parse import quote\n\nBENCH_RESULTS_BLOB_BASE = \"https://github.com/powdr-labs/bench-results/blob/gh-pages\"\nBENCH_RESULTS_TREE_BASE = \"https://github.com/powdr-labs/bench-results/tree/gh-pages\"\nAPC_ANALYZER_BASE = \"https://powdr-labs.github.io/powdr/autoprecompile-analyzer/\"\nMETRICS_VIEWER_BASE = \"https://powdr-labs.github.io/powdr/openvm/metrics-viewer/\"\n\n\ndef github_blob_url(relative_path: Path, run_id: str) -> str:\n    path = Path(\"results\") / run_id / relative_path\n    return f\"{BENCH_RESULTS_BLOB_BASE}/{path.as_posix()}\"\n\n\ndef github_tree_url(run_id: str, subdir: str | None = None) -> str:\n    path = Path(\"results\") / run_id\n    if subdir:\n        path = path / subdir\n    return f\"{BENCH_RESULTS_TREE_BASE}/{path.as_posix()}\"\n\n\ndef viewer_url(viewer_base: str, data_url: str) -> str:\n    return f\"{viewer_base}?data={quote(data_url, safe='')}\"\n\n\ndef find_apc_candidates(experiment_dir: Path) -> Path | None:\n    # All apc_candidates.json files within an experiment should be identical\n    # (with pgo=cell, all APCs are computed regardless of how many are selected),\n    # so we just pick any one deterministically.\n    candidates = sorted(experiment_dir.glob(\"**/apc_candidates.json\"))\n    if not candidates:\n        return None\n\n    return min(\n        candidates,\n        key=lambda path: (len(path.relative_to(experiment_dir).parts), path.as_posix()),\n    )\n\n\ndef generate_readme(results_dir: Path, run_id: str) -> str:\n    experiments: list[dict[str, str]] = []\n\n    for experiment_dir in sorted(path for path in results_dir.iterdir() if path.is_dir()):\n        name = experiment_dir.name\n        metrics_path = experiment_dir / \"combined_metrics.json\"\n        apc_path = find_apc_candidates(experiment_dir)\n\n        entry: dict[str, str] = {\"name\": name}\n\n        if 
metrics_path.exists():\n            metrics_data_url = github_blob_url(metrics_path.relative_to(results_dir), run_id)\n            entry[\"metrics_url\"] = viewer_url(METRICS_VIEWER_BASE, metrics_data_url)\n\n        if apc_path is not None:\n            apc_data_url = github_blob_url(apc_path.relative_to(results_dir), run_id)\n            entry[\"apc_url\"] = viewer_url(APC_ANALYZER_BASE, apc_data_url)\n\n        entry[\"tree_url\"] = github_tree_url(run_id, name)\n\n        experiments.append(entry)\n\n    # Put reth first if present, then the rest alphabetically.\n    experiments.sort(key=lambda e: (0 if e[\"name\"] == \"reth\" else 1, e[\"name\"]))\n\n    lines = [\n        f\"# Bench results — {run_id}\",\n        \"\",\n    ]\n\n    for exp in experiments:\n        name = exp[\"name\"]\n        links = [f\"📂 [Raw data]({exp['tree_url']})\"]\n        if \"metrics_url\" in exp:\n            links.append(f\"📊 [Metrics Viewer]({exp['metrics_url']})\")\n        if \"apc_url\" in exp:\n            links.append(f\"🔍 [APC Analyzer]({exp['apc_url']})\")\n        lines.append(f\"**{name}**: \" + \" &nbsp;|&nbsp; \".join(links))\n        lines.append(\"\")\n\n    return \"\\n\".join(lines)\n\n\ndef main() -> None:\n    parser = ArgumentParser(description=\"Generate a README for a published bench-results run.\")\n    parser.add_argument(\"results_dir\", type=Path)\n    parser.add_argument(\"run_id\")\n    parser.add_argument(\"--output\", type=Path, default=None)\n    args = parser.parse_args()\n\n    readme = generate_readme(args.results_dir, args.run_id)\n\n    if args.output is None:\n        print(readme, end=\"\")\n    else:\n        args.output.write_text(readme)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "openvm-riscv/scripts/metrics_utils.py",
    "content": "#!/usr/bin/env python3\n\nimport sys\nimport json\nimport re\nimport pandas as pd\n\ndef load_metrics_dataframes(filename):\n    \"\"\"Load metrics JSON file and return app, leaf, and internal dataframes.\n    \n    Each dataframe has a \"metric\" and \"value\" column, along with optional columns\n    like \"air_name\", or \"segment\".\n    \"\"\"\n    with open(filename) as f:\n        metrics_json = json.load(f)\n\n    entries = [\n        dict(c[\"labels\"]) | { \"metric\": c[\"metric\"], \"value\": c[\"value\"] }\n        for c in metrics_json[\"counter\"] + metrics_json[\"gauge\"]\n    ]\n\n    df = pd.DataFrame(entries)\n\n    # \"group\" has different values if coming from reth benchmark or the powdr cli\n    app = df[df[\"group\"].fillna('').str.startswith(\"app_proof\")]\n    if len(app) == 0:\n        app = df[df[\"group\"].fillna('').str.startswith(\"reth\")]\n    if len(app) == 0:\n        print(\"Invalid metrics.json\", file=sys.stderr)\n        exit(1)\n\n    leaf = df[df[\"group\"].fillna('').str.startswith(\"leaf\")]\n    internal = df[df[\"group\"].fillna('').str.startswith(\"internal\")]\n    \n    return app, leaf, internal\n\ndef is_normal_instruction_air(air_name):\n    \"\"\"Check if an AIR name represents a normal RISC-V instruction.\n    \n    Rules:\n    - Must be a VmAirWrapper<Adapter, Core>\n    - If the core chip is FieldExpressionCoreAir, return False\n    - If the core chip has numeric parameters and first one (number of limbs) is not 4, return False\n    - Otherwise return True\n    \"\"\"\n    # Match VmAirWrapper<Adapter, Core> pattern\n    match = re.match(r'^VmAirWrapper<[^,]+,\\s*([^>]+?)(?:<(\\d+)(?:,\\s*\\d+)*>)?\\s*>$', air_name)\n    \n    if not match:\n        return False\n    \n    core_name = match.group(1)\n    num_limbs = match.group(2)\n    \n    if \"FieldExpressionCoreAir\" == core_name:\n        return False\n    if num_limbs and int(num_limbs) != 4:\n        return False\n    \n    return 
True\n\ndef test_is_normal_instruction_air():\n    # Test cases from the reth benchmark\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32LoadStoreAdapterAir, LoadStoreCoreAir<4>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32BaseAluAdapterAir, BaseAluCoreAir<4, 8>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32BaseAluAdapterAir, ShiftCoreAir<4, 8>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32BranchAdapterAir, BranchEqualCoreAir<4>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32JalrAdapterAir, Rv32JalrCoreAir>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32BaseAluAdapterAir, LessThanCoreAir<4, 8>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32CondRdWriteAdapterAir, Rv32JalLuiCoreAir>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32BranchAdapterAir, BranchLessThanCoreAir<4, 8>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32RdWriteAdapterAir, Rv32AuipcCoreAir>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32MultAdapterAir, MultiplicationCoreAir<4, 8>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32MultAdapterAir, MulHCoreAir<4, 8>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32LoadStoreAdapterAir, LoadSignExtendCoreAir<4, 8>>\")\n    assert is_normal_instruction_air(\"VmAirWrapper<Rv32MultAdapterAir, DivRemCoreAir<4, 8>>\")\n    \n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32VecHeapAdapterAir<1, 2, 2, 32, 32>, FieldExpressionCoreAir>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32VecHeapAdapterAir<2, 2, 2, 32, 32>, FieldExpressionCoreAir>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32VecHeapAdapterAir<2, 6, 6, 16, 16>, FieldExpressionCoreAir>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32VecHeapAdapterAir<2, 1, 1, 32, 32>, FieldExpressionCoreAir>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32VecHeapAdapterAir<2, 3, 3, 16, 16>, 
FieldExpressionCoreAir>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32VecHeapAdapterAir<1, 6, 6, 16, 16>, FieldExpressionCoreAir>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32IsEqualModAdapterAir<2, 1, 32, 32>, ModularIsEqualCoreAir<32, 4, 8>>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32HeapAdapterAir<2, 32, 32>, BaseAluCoreAir<32, 8>>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32HeapBranchAdapterAir<2, 32>, BranchEqualCoreAir<32>>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32HeapAdapterAir<2, 32, 32>, ShiftCoreAir<32, 8>>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32HeapAdapterAir<2, 32, 32>, MultiplicationCoreAir<32, 8>>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32HeapAdapterAir<2, 32, 32>, LessThanCoreAir<32, 8>>\")\n    assert not is_normal_instruction_air(\"VmAirWrapper<Rv32IsEqualModAdapterAir<2, 3, 16, 48>, ModularIsEqualCoreAir<48, 4, 8>>\")\n    assert not is_normal_instruction_air(\"KeccakVmAir\")\n    assert not is_normal_instruction_air(\"PowdrAir<BabyBearField>\")\n    assert not is_normal_instruction_air(\"Poseidon2PeripheryAir<BabyBearParameters>, 1>\")\n    assert not is_normal_instruction_air(\"MemoryMerkleAir<8>\")\n    assert not is_normal_instruction_air(\"AccessAdapterAir<8>\")\n    assert not is_normal_instruction_air(\"PersistentBoundaryAir<8>\")\n    assert not is_normal_instruction_air(\"Rv32HintStoreAir\")\n    assert not is_normal_instruction_air(\"AccessAdapterAir<16>\")\n    assert not is_normal_instruction_air(\"RangeTupleCheckerAir<2>\")\n    assert not is_normal_instruction_air(\"ProgramAir\")\n    assert not is_normal_instruction_air(\"AccessAdapterAir<32>\")\n    assert not is_normal_instruction_air(\"AccessAdapterAir<2>\")\n    assert not is_normal_instruction_air(\"AccessAdapterAir<4>\")\n    assert not is_normal_instruction_air(\"VariableRangeCheckerAir\")\n    assert not 
is_normal_instruction_air(\"BitwiseOperationLookupAir<8>\")\n    assert not is_normal_instruction_air(\"PhantomAir\")\n    assert not is_normal_instruction_air(\"VmConnectorAir\")\n"
  },
  {
    "path": "openvm-riscv/scripts/plot_trace_cells.py",
    "content": "#!/usr/bin/env python3\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport argparse\nfrom metrics_utils import load_metrics_dataframes\n\ndef autopct_with_billions(pct, total):\n    val = pct * total / 100\n    return f'{pct:.1f}%\\n{val/1e9:.2f}B'\n\ndef compute_cells_by_air(metrics_path):\n    # Load only the app dataframe\n    app, _, _ = load_metrics_dataframes(metrics_path)\n    \n    # Get total cells from app dataframe\n    total_cells_df = app[app[\"metric\"] == \"total_cells\"]\n    total_cells = pd.to_numeric(total_cells_df[\"value\"]).sum()\n    print(f\"Total cells: {total_cells/1e9:.2f}B\")\n    \n    # Get cell entries from app dataframe\n    cells_df = app[app[\"metric\"] == \"cells\"].copy()\n    cells_df[\"segment\"] = pd.to_numeric(cells_df[\"segment\"].fillna(0))\n    cells_df[\"cells\"] = pd.to_numeric(cells_df[\"value\"])\n    \n    # Create dataframe with required columns\n    df = cells_df[[\"segment\", \"air_name\", \"cells\"]]\n\n    # Group and threshold\n    cells_by_air = df.groupby('air_name')['cells'].sum().sort_values(ascending=False)\n\n    # Sanity check: #cells should match total_cells\n    assert total_cells == cells_by_air.sum()\n\n    return cells_by_air\n\ndef main(metrics_path, output_path=None, subtitle=None):\n    cells_by_air = compute_cells_by_air(metrics_path)\n    print(\"Cells by AIR:\")\n    print(cells_by_air)\n\n    threshold_ratio = 0.015\n    threshold = threshold_ratio * cells_by_air.sum()\n    large = cells_by_air[cells_by_air >= threshold]\n    small = cells_by_air[cells_by_air < threshold]\n\n    if not small.empty:\n        large['Other'] = small.sum()\n\n    _, ax = plt.subplots(figsize=(7.5, 7.5))\n    plot_title = \"Trace cells by AIR\" if subtitle is None else f\"Trace cells by AIR ({subtitle})\"\n    ax.set_title(plot_title)\n    total = large.sum()\n    colors = plt.get_cmap(\"tab20\")(range(len(large)))\n    def autopct_filtered(pct):\n        return 
autopct_with_billions(pct, total) if pct > 5 else ''\n\n    wedges, _, _ = ax.pie(\n        large,\n        autopct=autopct_filtered,\n        startangle=90,\n        colors=colors\n    )\n    percentages = 100 * large / total\n    legend_labels = [f\"{percent:.1f}% - {label}\" for label, percent in zip(large.index, percentages)]\n    ax.legend(\n        wedges,\n        legend_labels,\n        title=\"AIRs\",\n        loc=\"upper center\",\n        bbox_to_anchor=(0.5, 0),\n        ncol=1,\n        fontsize='small',\n        title_fontsize='medium',\n        frameon=False\n    )\n    plt.ylabel('')\n    plt.tight_layout(pad=5.0)\n    if output_path:\n        print(f\"Saving plot to {output_path}\")\n        plt.savefig(output_path, bbox_inches=\"tight\")\n    else:\n        plt.show()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Visualize AIR cell metrics from a JSON file.\")\n    parser.add_argument(\"metrics_path\", help=\"Path to the metrics.json file\")\n    parser.add_argument(\"-o\", \"--output\", help=\"Optional path to save the output image\")\n    parser.add_argument(\"-s\", \"--subtitle\", help=\"Optional subtitle for the plot\")\n    args = parser.parse_args()\n\n    main(args.metrics_path, args.output, args.subtitle)"
  },
  {
    "path": "openvm-riscv/scripts/readme.md",
    "content": "### Scripts\n\nSet up (from the project root):\n\n```bash\npython3 -m venv .venv\nsource .venv/bin/activate\npip install -r openvm-riscv/scripts/requirements.txt\n```"
  },
  {
    "path": "openvm-riscv/scripts/requirements.txt",
    "content": "pandas\nmatplotlib\npsrecord\npytest\n"
  },
  {
    "path": "openvm-riscv/scripts/run_guest_benches.sh",
    "content": "#!/bin/bash\n\n# Script to collect some numbers from our OpenVM guest examples.\n# Mostly for CI usage, but can be easily modified for manual tests.\n\n# NOTE: The script expects the python environment to be set up with the required\n# dependencies. Should be run from the project root; it will create a `results`\n# directory.\n\nset -e\n\nSCRIPT_PATH=$(realpath \"${BASH_SOURCE[0]}\")\nSCRIPTS_DIR=$(dirname \"$SCRIPT_PATH\")\n\nrun_bench() {\n    guest=\"$1\"\n    input=\"$2\"\n    apcs=\"$3\"\n    run_name=\"$4\"\n\n    echo \"\"\n    echo \"==== ${run_name} ====\"\n    echo \"\"\n\n    mkdir -p \"${run_name}\"\n\n    psrecord --include-children --interval 1 \\\n        --log \"${run_name}\"/psrecord.csv \\\n        --log-format csv \\\n        --plot \"${run_name}\"/psrecord.png \\\n        \"cargo run --bin powdr_openvm_riscv -r --features metrics prove \\\"$guest\\\" --input \\\"$input\\\" --autoprecompiles \\\"$apcs\\\" --metrics \\\"${run_name}/metrics.json\\\" --recursion --apc-candidates-dir \\\"${run_name}\\\"\"\n\n    python3 \"$SCRIPTS_DIR\"/plot_trace_cells.py -o \"${run_name}\"/trace_cells.png \"${run_name}\"/metrics.json > \"${run_name}\"/trace_cells.txt\n\n    # apc_candidates.json is only available when apcs > 0\n    if [ \"${apcs:-0}\" -ne 0 ]; then\n        python3 \"$SCRIPTS_DIR\"/../../autoprecompiles/scripts/plot_effectiveness.py \"${run_name}\"/apc_candidates.json --output \"${run_name}\"/effectiveness.png\n    fi\n\n    # Clean up some files that we don't want to push.\n    rm -f \"${run_name}\"/apc_candidate_*\n}\n\n# TODO: Some benchmarks are currently disabled to keep the nightly run below 6h.\n\n### Keccak\ndir=\"results/keccak\"\ninput=\"10000\"\n\nmkdir -p \"$dir\"\npushd \"$dir\"\n\nrun_bench guest-keccak-manual-precompile \"$input\" 0 manual\nrun_bench guest-keccak \"$input\" 0 apc000\nrun_bench guest-keccak \"$input\" 3 apc003\n# run_bench guest-keccak \"$input\" 10 apc010  # Save ~3mins\nrun_bench guest-keccak 
\"$input\" 30 apc030\n\npython3 $SCRIPTS_DIR/basic_metrics.py summary-table --csv **/metrics.json > basic_metrics.csv\npython3 $SCRIPTS_DIR/basic_metrics.py plot **/metrics.json -o proof_time_breakdown.png\npython3 $SCRIPTS_DIR/basic_metrics.py combine **/metrics.json > combined_metrics.json\npopd\n\n### SHA256\ndir=\"results/sha256\"\ninput=\"30000\"\n\nmkdir -p \"$dir\"\npushd \"$dir\"\n\nrun_bench guest-sha256-manual-precompile \"$input\" 0 manual\nrun_bench guest-sha256 \"$input\" 0 apc000\nrun_bench guest-sha256 \"$input\" 3 apc003\n# run_bench guest-sha256 \"$input\" 10 apc010  # Save ~5mins\nrun_bench guest-sha256 \"$input\" 30 apc030\n\npython3 $SCRIPTS_DIR/basic_metrics.py summary-table --csv **/metrics.json > basic_metrics.csv\npython3 $SCRIPTS_DIR/basic_metrics.py plot **/metrics.json -o proof_time_breakdown.png\npython3 $SCRIPTS_DIR/basic_metrics.py combine **/metrics.json > combined_metrics.json\npopd\n\n### Pairing\ndir=\"results/pairing\"\ninput=\"0\" # No input\n\nmkdir -p \"$dir\"\npushd \"$dir\"\n\nrun_bench guest-pairing-manual-precompile \"$input\" 0 manual\nrun_bench guest-pairing \"$input\" 0 apc000\nrun_bench guest-pairing \"$input\" 3 apc003\n# run_bench guest-pairing \"$input\" 10 apc010  # Save ~4mins\nrun_bench guest-pairing \"$input\" 30 apc030\n# run_bench guest-pairing \"$input\" 100 apc100  # Save ~7mins \n\npython3 $SCRIPTS_DIR/basic_metrics.py summary-table --csv **/metrics.json > basic_metrics.csv\npython3 $SCRIPTS_DIR/basic_metrics.py plot **/metrics.json -o proof_time_breakdown.png\npython3 $SCRIPTS_DIR/basic_metrics.py combine **/metrics.json > combined_metrics.json\npopd\n\n### U256\ndir=\"results/u256\"\ninput=\"0\" # No input\n\nmkdir -p \"$dir\"\npushd \"$dir\"\n\nrun_bench guest-u256-manual-precompile \"$input\" 0 manual\nrun_bench guest-u256 \"$input\" 0 apc000\nrun_bench guest-u256 \"$input\" 3 apc003\n# run_bench guest-u256 \"$input\" 10 apc010  # Save ~4mins\nrun_bench guest-u256 \"$input\" 30 apc030\n\npython3 
$SCRIPTS_DIR/basic_metrics.py summary-table --csv **/metrics.json > basic_metrics.csv\npython3 $SCRIPTS_DIR/basic_metrics.py plot **/metrics.json -o proof_time_breakdown.png\npython3 $SCRIPTS_DIR/basic_metrics.py combine **/metrics.json > combined_metrics.json\npopd\n\n### Matmul\ndir=\"results/matmul\"\n\nmkdir -p \"$dir\"\npushd \"$dir\"\n\nrun_bench guest-matmul 0 0 apc000\nrun_bench guest-matmul 0 3 apc003\nrun_bench guest-matmul 0 10 apc010\nrun_bench guest-matmul 0 30 apc030\n\npython3 \"$SCRIPTS_DIR\"/basic_metrics.py summary-table --csv **/metrics.json > basic_metrics.csv\npython3 \"$SCRIPTS_DIR\"/basic_metrics.py plot **/metrics.json -o proof_time_breakdown.png\npython3 \"$SCRIPTS_DIR\"/basic_metrics.py combine **/metrics.json > combined_metrics.json\npopd\n\n### ECC\ndir=\"results/ecc\"\ninput=\"50\"\n\nmkdir -p \"$dir\"\npushd \"$dir\"\n\nrun_bench guest-ecc-manual $input 0 manual\nrun_bench guest-ecc-projective $input 0 projective-apc000\nrun_bench guest-ecc-projective $input 3 projective-apc003\n# run_bench guest-ecc-projective $input 10 projective-apc010  # Save ~12mins\nrun_bench guest-ecc-projective $input 30 projective-apc030\n# run_bench guest-ecc-projective $input 100 projective-apc100  # Save ~12mins\nrun_bench guest-ecc-powdr-affine-hint $input 0 affine-hint-apc000\nrun_bench guest-ecc-powdr-affine-hint $input 3 affine-hint-apc003\n# run_bench guest-ecc-powdr-affine-hint $input 10 affine-hint-apc010  # Save ~7mins\nrun_bench guest-ecc-powdr-affine-hint $input 30 affine-hint-apc030\n# run_bench guest-ecc-powdr-affine-hint $input 100 affine-hint-apc100  # Save ~7mins\n\npython3 $SCRIPTS_DIR/basic_metrics.py summary-table --csv **/metrics.json > basic_metrics.csv\npython3 $SCRIPTS_DIR/basic_metrics.py plot **/metrics.json -o proof_time_breakdown.png\npython3 $SCRIPTS_DIR/basic_metrics.py combine **/metrics.json > combined_metrics.json\npopd\n\n### ECRECOVER\ndir=\"results/ecrecover\"\ninput=\"20\"\n\nmkdir -p \"$dir\"\npushd \"$dir\"\n\nrun_bench 
guest-ecrecover-manual $input 0 manual\nrun_bench guest-ecrecover $input 0 apc000\nrun_bench guest-ecrecover $input 3 apc003\n# run_bench guest-ecrecover $input 10 apc010  # Save ~6mins\nrun_bench guest-ecrecover $input 30 apc030\n# run_bench guest-ecrecover $input 100 apc100  # Save ~6mins\n\npython3 $SCRIPTS_DIR/basic_metrics.py summary-table --csv **/metrics.json > basic_metrics.csv\npython3 $SCRIPTS_DIR/basic_metrics.py plot **/metrics.json -o proof_time_breakdown.png\npython3 $SCRIPTS_DIR/basic_metrics.py combine **/metrics.json > combined_metrics.json\npopd"
  },
  {
    "path": "openvm-riscv/src/isa/instruction_formatter.rs",
    "content": "use super::opcode::*;\nuse openvm_instructions::{instruction::Instruction, VmOpcode};\nuse openvm_stark_backend::p3_field::PrimeField32;\nuse powdr_openvm::format_fe;\n\npub fn openvm_instruction_formatter<F: PrimeField32>(instruction: &Instruction<F>) -> String {\n    let Instruction {\n        opcode,\n        a,\n        b,\n        c,\n        d,\n        e,\n        f,\n        g,\n    } = instruction;\n    let opcode_number = opcode.as_usize();\n    let opcode_name = openvm_opcode_formatter(opcode);\n\n    match opcode_number {\n        // Alu instructions, see:\n        // https://github.com/openvm-org/openvm/blob/v1.0.0/extensions/rv32im/circuit/src/adapters/alu.rs#L197-L201\n        512..=521 => {\n            assert_eq!(d, &F::ONE);\n            assert_eq!(f, &F::ZERO);\n            assert_eq!(g, &F::ZERO);\n\n            format!(\"{opcode_name} rd_ptr = {a}, rs1_ptr = {b}, rs2 = {c}, rs2_as = {e}\")\n        }\n\n        // Load/Store instructions, see:\n        // https://github.com/openvm-org/openvm/blob/v1.0.0/extensions/rv32im/circuit/src/adapters/loadstore.rs#L340-L346\n        528..=535 => {\n            assert_eq!(d, &F::ONE);\n\n            format!(\"{opcode_name} rd_rs2_ptr = {a}, rs1_ptr = {b}, imm = {c}, mem_as = {e}, needs_write = {f}, imm_sign = {g}\")\n        }\n        OPCODE_BLT | OPCODE_BLTU | OPCODE_BGE | OPCODE_BGEU | OPCODE_BEQ | OPCODE_BNE => {\n            let c = format_fe(*c);\n            format!(\"{opcode_name} {a} {b} {c} {d} {e}\")\n        }\n\n        // All other opcodes in the list\n        x if ALL_OPCODES.contains(&x) => format!(\"{opcode_name} {a} {b} {c} {d} {e}\"),\n\n        // Opcodes not in the list\n        _ => format!(\"{opcode_name} {a} {b} {c} {d} {e} {f} {g}\"),\n    }\n}\n\npub fn openvm_opcode_formatter(opcode: &VmOpcode) -> String {\n    // Opcodes taken from:\n    // https://github.com/openvm-org/openvm/blob/v1.0.0/extensions/rv32im/transpiler/src/instructions.rs\n    match 
opcode.as_usize() {\n        // Rv32BaseAluChip opcodes\n        OPCODE_ADD => \"ADD\".to_string(),\n        OPCODE_SUB => \"SUB\".to_string(),\n        OPCODE_XOR => \"XOR\".to_string(),\n        OPCODE_OR => \"OR\".to_string(),\n        OPCODE_AND => \"AND\".to_string(),\n        // Rv32ShiftChip opcodes\n        OPCODE_SLL => \"SLL\".to_string(),\n        OPCODE_SRL => \"SRL\".to_string(),\n        OPCODE_SRA => \"SRA\".to_string(),\n        // Rv32LessThanChip opcodes\n        OPCODE_SLT => \"SLT\".to_string(),\n        OPCODE_SLTU => \"SLTU\".to_string(),\n        // Load/Store opcodes\n        OPCODE_LOADW => \"LOADW\".to_string(),\n        OPCODE_LOADBU => \"LOADBU\".to_string(),\n        OPCODE_LOADHU => \"LOADHU\".to_string(),\n        OPCODE_STOREW => \"STOREW\".to_string(),\n        OPCODE_STOREH => \"STOREH\".to_string(),\n        OPCODE_STOREB => \"STOREB\".to_string(),\n        OPCODE_LOADB => \"LOADB\".to_string(),\n        OPCODE_LOADH => \"LOADH\".to_string(),\n        // Other opcodes\n        OPCODE_BEQ => \"BEQ\".to_string(),\n        OPCODE_BNE => \"BNE\".to_string(),\n        OPCODE_BLT => \"BLT\".to_string(),\n        OPCODE_BLTU => \"BLTU\".to_string(),\n        OPCODE_BGE => \"BGE\".to_string(),\n        OPCODE_BGEU => \"BGEU\".to_string(),\n        OPCODE_JAL => \"JAL\".to_string(),\n        OPCODE_LUI => \"LUI\".to_string(),\n        OPCODE_JALR => \"JALR\".to_string(),\n        OPCODE_AUIPC => \"AUIPC\".to_string(),\n        OPCODE_MUL => \"MUL\".to_string(),\n        OPCODE_MULH => \"MULH\".to_string(),\n        OPCODE_MULHSU => \"MULHSU\".to_string(),\n        OPCODE_MULHU => \"MULHU\".to_string(),\n        OPCODE_DIV => \"DIV\".to_string(),\n        OPCODE_DIVU => \"DIVU\".to_string(),\n        OPCODE_REM => \"REM\".to_string(),\n        OPCODE_REMU => \"REMU\".to_string(),\n        OPCODE_HINT_STOREW => \"HINT_STOREW\".to_string(),\n        OPCODE_HINT_BUFFER => \"HINT_BUFFER\".to_string(),\n        // Bigint opcodes\n        
BIGINT_OPCODE_BEQ => \"BIGINT_BEQ\".to_string(),\n        BIGINT_OPCODE_BNE => \"BIGINT_BNE\".to_string(),\n        BIGINT_OPCODE_BLT => \"BIGINT_BLT\".to_string(),\n        BIGINT_OPCODE_BLTU => \"BIGINT_BLTU\".to_string(),\n        BIGINT_OPCODE_BGE => \"BIGINT_BGE\".to_string(),\n        BIGINT_OPCODE_BGEU => \"BIGINT_BGEU\".to_string(),\n        other => format!(\"<opcode {other}>\"),\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/src/isa/mod.rs",
    "content": "use std::collections::{BTreeSet, HashSet};\n\nuse openvm_circuit::arch::{AirInventory, ChipInventoryError, VmBuilder};\nuse openvm_instructions::{instruction::Instruction, program::DEFAULT_PC_STEP, VmOpcode};\nuse openvm_stark_backend::p3_field::PrimeField32;\nuse openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Engine;\n#[cfg(feature = \"cuda\")]\nuse powdr_openvm::{\n    isa::OriginalGpuChipComplex, powdr_extension::trace_generator::SharedPeripheryChipsGpu,\n};\nuse powdr_openvm::{\n    isa::{OpenVmISA, OriginalCpuChipComplex},\n    powdr_extension::trace_generator::cpu::SharedPeripheryChipsCpu,\n    program::OriginalCompiledProgram,\n    BabyBearSC, SpecializedExecutor,\n};\nuse powdr_riscv_elf::{debug_info::SymbolTable, ElfProgram};\nuse serde::{Deserialize, Serialize};\n\n#[cfg(feature = \"cuda\")]\nuse crate::ExtendedVmConfigGpuBuilder;\nuse crate::{\n    isa::{\n        opcode::{branch_opcodes_bigint_set, branch_opcodes_set, instruction_allowlist},\n        trace_generator::{create_dummy_airs, create_dummy_chip_complex_cpu},\n    },\n    ExtendedVmConfig, ExtendedVmConfigCpuBuilder, ExtendedVmConfigExecutor,\n};\n\npub mod instruction_formatter;\npub mod opcode;\npub mod symbolic_instruction_builder;\n/// The trace generator for the powdr instructions\npub mod trace_generator;\n\n// Clone should not be required\n#[derive(Clone, Default)]\npub struct RiscvISA;\n\n/// A type to represent register addresses during execution\n#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub struct OpenVmRegisterAddress(u8);\n\n// This seems trivial but it's tricky to put into powdr-openvm  because of some From implementation issues.\nimpl<F: PrimeField32> From<ExtendedVmConfigExecutor<F>> for SpecializedExecutor<F, RiscvISA> {\n    fn from(value: ExtendedVmConfigExecutor<F>) -> Self {\n        Self::OriginalExecutor(value)\n    }\n}\n\nimpl OpenVmISA for RiscvISA {\n    type Executor<F: PrimeField32> = 
ExtendedVmConfigExecutor<F>;\n    type Config = ExtendedVmConfig;\n    type CpuBuilder = ExtendedVmConfigCpuBuilder;\n    #[cfg(feature = \"cuda\")]\n    type GpuBuilder = ExtendedVmConfigGpuBuilder;\n\n    fn branching_opcodes() -> HashSet<VmOpcode> {\n        branch_opcodes_set()\n    }\n\n    fn format<F: PrimeField32>(instruction: &Instruction<F>) -> String {\n        instruction_formatter::openvm_instruction_formatter(instruction)\n    }\n\n    fn allowed_opcodes() -> HashSet<VmOpcode> {\n        instruction_allowlist()\n    }\n\n    fn create_original_chip_complex(\n        config: &Self::Config,\n        airs: AirInventory<BabyBearSC>,\n    ) -> Result<OriginalCpuChipComplex, ChipInventoryError> {\n        <ExtendedVmConfigCpuBuilder as VmBuilder<BabyBearPoseidon2Engine>>::create_chip_complex(\n            &ExtendedVmConfigCpuBuilder,\n            config,\n            airs,\n        )\n    }\n\n    type LinkedProgram<'a> = ElfProgram;\n\n    fn get_symbol_table<'a>(program: &Self::LinkedProgram<'a>) -> SymbolTable {\n        let debug_info = program.debug_info();\n        let labels = SymbolTable::from_table(\n            debug_info\n                .symbols\n                .table()\n                .iter()\n                .map(|(addr, names)| {\n                    (\n                        *addr,\n                        names\n                            .iter()\n                            .map(|name| rustc_demangle::demangle(name).to_string())\n                            .collect(),\n                    )\n                })\n                .collect(),\n        );\n\n        labels\n    }\n\n    fn get_jump_destinations(program: &OriginalCompiledProgram<Self>) -> BTreeSet<u64> {\n        let labels = program.linked_program.text_labels();\n\n        let jump_dest = add_extra_targets(program, labels.clone(), DEFAULT_PC_STEP);\n\n        jump_dest.into_iter().map(Into::into).collect()\n    }\n\n    fn create_dummy_airs<E: 
openvm_circuit::arch::VmCircuitExtension<powdr_openvm::BabyBearSC>>(\n        config: &Self::Config,\n        shared_chips: E,\n    ) -> Result<AirInventory<powdr_openvm::BabyBearSC>, openvm_circuit::arch::AirInventoryError>\n    {\n        create_dummy_airs(config, shared_chips)\n    }\n\n    fn create_dummy_chip_complex_cpu(\n        config: &Self::Config,\n        circuit: AirInventory<powdr_openvm::BabyBearSC>,\n        shared_chips: SharedPeripheryChipsCpu<Self>,\n    ) -> Result<OriginalCpuChipComplex, ChipInventoryError> {\n        create_dummy_chip_complex_cpu(config, circuit, shared_chips)\n    }\n\n    #[cfg(feature = \"cuda\")]\n    fn create_dummy_chip_complex_gpu(\n        config: &Self::Config,\n        circuit: AirInventory<powdr_openvm::BabyBearSC>,\n        shared_chips: SharedPeripheryChipsGpu<Self>,\n    ) -> Result<OriginalGpuChipComplex, ChipInventoryError> {\n        use crate::isa::trace_generator::create_dummy_chip_complex_gpu;\n\n        create_dummy_chip_complex_gpu(config, circuit, shared_chips)\n    }\n}\n\n/// Besides the base RISC-V branching instructions, the bigint extension adds two more branching\n/// instruction classes over BranchEqual and BranchLessThan.\n/// Those instructions have the form <INSTR rs0 rs1 target_offset ...>, where target_offset is the\n/// relative jump we're interested in.\n/// This means that for a given program address A containing the instruction above,\n/// we add A + target_offset as a target as well.\nfn add_extra_targets(\n    compiled_program: &OriginalCompiledProgram<RiscvISA>,\n    mut labels: BTreeSet<u32>,\n    pc_step: u32,\n) -> BTreeSet<u32> {\n    let branch_opcodes_bigint = branch_opcodes_bigint_set();\n    let program = &compiled_program.exe.program;\n    let new_labels = program\n        .instructions_and_debug_infos\n        .iter()\n        .enumerate()\n        .filter_map(|(i, instr)| {\n            let instr = instr.as_ref().unwrap().0.clone();\n            let adjusted_pc = 
program.pc_base + (i as u32) * pc_step;\n            let op = instr.opcode;\n            branch_opcodes_bigint\n                .contains(&op)\n                .then_some(adjusted_pc + instr.c.as_canonical_u32())\n        });\n    labels.extend(new_labels);\n\n    labels\n}\n"
  },
  {
    "path": "openvm-riscv/src/isa/opcode.rs",
    "content": "use std::collections::HashSet;\n\nuse openvm_bigint_transpiler::{Rv32BranchEqual256Opcode, Rv32BranchLessThan256Opcode};\nuse openvm_instructions::{LocalOpcode, VmOpcode};\nuse openvm_rv32im_transpiler::*;\n\n/// Defines each opcode as a `pub const usize` and also generates\n/// a `pub const ALL_OPCODES: &[usize]` containing all of them.\nmacro_rules! define_opcodes {\n    (\n        // Non-bigint opcodes\n        // e.g. OPCODE_BEQ = BranchEqualOpcode::BEQ as usize + BranchEqualOpcode::CLASS_OFFSET\n        $( $non_big_int_name:ident = $ty:ident :: $variant:ident, )*\n        ; // Intentional pattern split delimiter\n        // Bigint opcodes\n        // e.g. BIGINT_OPCODE_BEQ = BranchEqualOpcode::BEQ as usize + Rv32BranchEqual256Opcode::CLASS_OFFSET\n        $( $bigint_name:ident = $big_ty:ident ; $small_ty:ident :: $small_variant:ident, )*\n    ) => {\n        $(\n            pub const $non_big_int_name: usize = (\n                $ty::$variant as usize\n                + < $ty as LocalOpcode >::CLASS_OFFSET\n            ) as usize;\n        )*\n\n        $(\n            pub const $bigint_name: usize = (\n                $small_ty::$small_variant as usize\n                + < $big_ty as LocalOpcode >::CLASS_OFFSET\n            ) as usize;\n        )*\n\n        /// All opcodes in one slice.\n        pub const ALL_OPCODES: &[usize] = &[\n            $( $non_big_int_name, )*\n            $( $bigint_name, )*\n        ];\n\n        /// All opcodes except bigint in one slice.\n        pub const ALL_OPCODES_EXCEPT_BIGINT: &[usize] = &[\n            $( $non_big_int_name, )*\n        ];\n\n    }\n}\n\ndefine_opcodes!(\n    // Rv32BaseAluChip\n    OPCODE_ADD = BaseAluOpcode::ADD,\n    OPCODE_SUB = BaseAluOpcode::SUB,\n    OPCODE_XOR = BaseAluOpcode::XOR,\n    OPCODE_OR = BaseAluOpcode::OR,\n    OPCODE_AND = BaseAluOpcode::AND,\n    // Rv32ShiftChip opcodes\n    OPCODE_SLL = ShiftOpcode::SLL,\n    OPCODE_SRL = ShiftOpcode::SRL,\n    OPCODE_SRA = 
ShiftOpcode::SRA,\n    // Rv32LessThanChip opcodes\n    OPCODE_SLT = LessThanOpcode::SLT,\n    OPCODE_SLTU = LessThanOpcode::SLTU,\n    // Load/Store opcodes\n    OPCODE_LOADW = Rv32LoadStoreOpcode::LOADW,\n    OPCODE_LOADBU = Rv32LoadStoreOpcode::LOADBU,\n    OPCODE_LOADHU = Rv32LoadStoreOpcode::LOADHU,\n    OPCODE_STOREW = Rv32LoadStoreOpcode::STOREW,\n    OPCODE_STOREH = Rv32LoadStoreOpcode::STOREH,\n    OPCODE_STOREB = Rv32LoadStoreOpcode::STOREB,\n    OPCODE_LOADB = Rv32LoadStoreOpcode::LOADB,\n    OPCODE_LOADH = Rv32LoadStoreOpcode::LOADH,\n    // Other opcodes\n    OPCODE_BEQ = BranchEqualOpcode::BEQ,\n    OPCODE_BNE = BranchEqualOpcode::BNE,\n    OPCODE_BLT = BranchLessThanOpcode::BLT,\n    OPCODE_BLTU = BranchLessThanOpcode::BLTU,\n    OPCODE_BGE = BranchLessThanOpcode::BGE,\n    OPCODE_BGEU = BranchLessThanOpcode::BGEU,\n    OPCODE_JAL = Rv32JalLuiOpcode::JAL,\n    OPCODE_LUI = Rv32JalLuiOpcode::LUI,\n    OPCODE_JALR = Rv32JalrOpcode::JALR,\n    OPCODE_AUIPC = Rv32AuipcOpcode::AUIPC,\n    OPCODE_MUL = MulOpcode::MUL,\n    OPCODE_MULH = MulHOpcode::MULH,\n    OPCODE_MULHSU = MulHOpcode::MULHSU,\n    OPCODE_MULHU = MulHOpcode::MULHU,\n    OPCODE_DIV = DivRemOpcode::DIV,\n    OPCODE_DIVU = DivRemOpcode::DIVU,\n    OPCODE_REM = DivRemOpcode::REM,\n    OPCODE_REMU = DivRemOpcode::REMU,\n    OPCODE_HINT_STOREW = Rv32HintStoreOpcode::HINT_STOREW,\n    OPCODE_HINT_BUFFER = Rv32HintStoreOpcode::HINT_BUFFER,\n    ; // Intentional pattern split delimiter\n    // Bigint opcodes\n    BIGINT_OPCODE_BEQ = Rv32BranchEqual256Opcode; BranchEqualOpcode::BEQ,\n    BIGINT_OPCODE_BNE = Rv32BranchEqual256Opcode; BranchEqualOpcode::BNE,\n    BIGINT_OPCODE_BLT = Rv32BranchLessThan256Opcode; BranchLessThanOpcode::BLT,\n    BIGINT_OPCODE_BLTU = Rv32BranchLessThan256Opcode; BranchLessThanOpcode::BLTU,\n    BIGINT_OPCODE_BGE = Rv32BranchLessThan256Opcode; BranchLessThanOpcode::BGE,\n    BIGINT_OPCODE_BGEU = Rv32BranchLessThan256Opcode; BranchLessThanOpcode::BGEU,\n);\n\npub const 
BRANCH_OPCODES_BIGINT: &[usize] = &[\n    BIGINT_OPCODE_BEQ,\n    BIGINT_OPCODE_BNE,\n    BIGINT_OPCODE_BLT,\n    BIGINT_OPCODE_BLTU,\n    BIGINT_OPCODE_BGE,\n    BIGINT_OPCODE_BGEU,\n];\n\npub const BRANCH_OPCODES: &[usize] = &[\n    OPCODE_BEQ,\n    OPCODE_BNE,\n    OPCODE_BLT,\n    OPCODE_BLTU,\n    OPCODE_BGE,\n    OPCODE_BGEU,\n    OPCODE_JAL,\n    OPCODE_JALR,\n];\n\n// Allowed opcodes = ALL_OPCODES_EXCEPT_BIGINT - HINT_STOREW - HINT_BUFFER\npub fn instruction_allowlist() -> HashSet<VmOpcode> {\n    // Filter out HINT_STOREW and HINT_BUFFER, which contain next references that don't work with apc\n    ALL_OPCODES_EXCEPT_BIGINT\n        .iter()\n        .copied()\n        .filter(|&op| op != OPCODE_HINT_BUFFER && op != OPCODE_HINT_STOREW)\n        .map(VmOpcode::from_usize)\n        .collect()\n}\n\npub fn branch_opcodes_bigint_set() -> HashSet<VmOpcode> {\n    let mut set = HashSet::new();\n    set.extend(\n        BRANCH_OPCODES_BIGINT\n            .iter()\n            .cloned()\n            .map(VmOpcode::from_usize),\n    );\n    set\n}\n\npub fn branch_opcodes_set() -> HashSet<VmOpcode> {\n    let mut set = branch_opcodes_bigint_set();\n    set.extend(BRANCH_OPCODES.iter().cloned().map(VmOpcode::from_usize));\n    set\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_all_opcodes() {\n        let expected = &[\n            512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 528, 529, 530, 531, 532, 533, 534,\n            535, 544, 545, 549, 550, 551, 552, 560, 561, 565, 576, 592, 593, 594, 595, 596, 597,\n            598, 599, 608, 609, 1056, 1057, 1061, 1062, 1063, 1064,\n        ];\n        assert_eq!(ALL_OPCODES.len(), 44); // 38 non-bigint + 6 bigint\n        assert_eq!(ALL_OPCODES, expected);\n    }\n\n    #[test]\n    fn test_all_opcodes_except_bigint() {\n        let expected = &[\n            512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 528, 529, 530, 531, 532, 533, 534,\n            535, 544, 545, 549, 550, 551, 552, 
560, 561, 565, 576, 592, 593, 594, 595, 596, 597,\n            598, 599, 608, 609,\n        ];\n        assert_eq!(ALL_OPCODES_EXCEPT_BIGINT.len(), 38); // 38 non-bigint\n        assert_eq!(ALL_OPCODES_EXCEPT_BIGINT, expected);\n    }\n\n    #[test]\n    fn test_instruction_allowlist() {\n        let allowlist = instruction_allowlist();\n        let expected = [\n            512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 528, 529, 530, 531, 532, 533, 534,\n            535, 544, 545, 549, 550, 551, 552, 560, 561, 565, 576, 592, 593, 594, 595, 596, 597,\n            598, 599,\n        ]\n        .into_iter()\n        .map(VmOpcode::from_usize)\n        .collect();\n        assert_eq!(allowlist.len(), ALL_OPCODES_EXCEPT_BIGINT.len() - 2); // Excluding HINT_STOREW and HINT_BUFFER\n        assert_eq!(allowlist, expected);\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/src/isa/symbolic_instruction_builder.rs",
    "content": "//! Builds Instruction to create input program for testing powdr_autoprecompile::build\nuse super::opcode::*;\nuse openvm_instructions::{instruction::Instruction, VmOpcode};\nuse openvm_stark_backend::p3_field::PrimeField32;\n\n// Generic instructions (5 args, fixed f=0, g=0)\nmacro_rules! build_instr5 {\n    (\n        $(\n            $(#[$doc:meta])*\n            ($name:ident, $code:expr)\n        ),+ $(,)?\n    ) => {\n        $(\n            $(#[$doc])*\n            pub fn $name<T: PrimeField32>(\n                a: u32,\n                b: u32,\n                c: u32,\n                d: u32,\n                e: u32,\n            ) -> Instruction<T> {\n                Instruction {\n                    opcode: VmOpcode::from_usize($code as usize),\n                    a: T::from_canonical_u32(a),\n                    b: T::from_canonical_u32(b),\n                    c: T::from_canonical_u32(c),\n                    d: T::from_canonical_u32(d),\n                    e: T::from_canonical_u32(e),\n                    f: T::ZERO,\n                    g: T::ZERO,\n                }\n            }\n        )+\n    };\n}\n\n// ALU instructions (4 args, fixed d=1, f=0, g=0)\nmacro_rules! 
alu_ops {\n    (\n        $(\n            $(#[$doc:meta])*\n            ($name:ident, $code:expr)\n        ),+ $(,)?\n    ) => {\n        $(\n            $(#[$doc])*\n            pub fn $name<T: PrimeField32>(\n                rd_ptr: u32,\n                rs1_ptr: u32,\n                rs2: u32,\n                rs2_as: u32,\n            ) -> Instruction<T> {\n                Instruction {\n                    opcode: VmOpcode::from_usize($code as usize),\n                    a: T::from_canonical_u32(rd_ptr),\n                    b: T::from_canonical_u32(rs1_ptr),\n                    c: T::from_canonical_u32(rs2),\n                    d: T::ONE,\n                    e: T::from_canonical_u32(rs2_as),\n                    f: T::ZERO,\n                    g: T::ZERO,\n                }\n            }\n        )+\n    };\n}\n\n// Load/Store and Load/Store Sign Extend instructions (6 args, fixed d=1)\nmacro_rules! ls_ops {\n    (\n        $(\n            $(#[$doc:meta])*\n            ($name:ident, $code:expr)\n        ),+ $(,)?\n    ) => {\n        $(\n            $(#[$doc])*\n            pub fn $name<T: PrimeField32>(\n                rd_rs2_ptr: u32,\n                rs1_ptr: u32,\n                imm: u32,\n                mem_as: u32,\n                needs_write: u32,\n                imm_sign: u32,\n            ) -> Instruction<T> {\n                Instruction {\n                    opcode: VmOpcode::from_usize($code as usize),\n                    a: T::from_canonical_u32(rd_rs2_ptr),\n                    b: T::from_canonical_u32(rs1_ptr),\n                    c: T::from_canonical_u32(imm),\n                    d: T::ONE,\n                    e: T::from_canonical_u32(mem_as),\n                    f: T::from_canonical_u32(needs_write),\n                    g: T::from_canonical_u32(imm_sign),\n                }\n            }\n        )+\n    };\n}\n\n// Branch Lt and Branch Eq instructions (3 args, fixed d=1, e=1, f=0, g=0)\nmacro_rules! 
branch_ops {\n    (\n        $(\n            $(#[$doc:meta])*\n            ($name:ident, $code:expr)\n        ),+ $(,)?\n    ) => {\n        $(\n            $(#[$doc])*\n            pub fn $name<T: PrimeField32>(\n                rs1_ptr: u32,\n                rs2_ptr: u32,\n                imm: i32,\n            ) -> Instruction<T> {\n                let imm = if imm >= 0 {\n                    T::from_canonical_u32(imm as u32)\n                } else {\n                    -T::from_canonical_u32((-imm) as u32)\n                };\n                Instruction {\n                    opcode: VmOpcode::from_usize($code as usize),\n                    a: T::from_canonical_u32(rs1_ptr),\n                    b: T::from_canonical_u32(rs2_ptr),\n                    c: imm,\n                    d: T::ONE,\n                    e: T::ONE,\n                    f: T::ZERO,\n                    g: T::ZERO,\n                }\n            }\n        )+\n    };\n}\n\n// Generic instructions\nbuild_instr5!(\n    /// Jump and link (Rdwrite adapter and JAL_LUI core):\n    /// - to_pc = pc + imm\n    /// - store(REG, rd_ptr, pc + 4)\n    (jal, OPCODE_JAL),\n    /// Load upper immediate (Rdwrite adapter and JAL_LUI core):\n    /// - store(REG, rd_ptr, imm * 2^8)\n    (lui, OPCODE_LUI),\n\n    /// Jump and link register (JALR adapter and JALR core):\n    /// - to_pc = load(REG, rs1_ptr) + imm\n    /// - store(REG, rd_ptr, pc + 4)\n    (jalr, OPCODE_JALR),\n\n    /// Add upper immediate to PC (but does not change PC) (Rdwrite adapter and AUIPC core):\n    /// - store(REG, rd_ptr, pc + imm * 2^8)\n    (auipc, OPCODE_AUIPC),\n\n    /// Multiplication (Mul adapter and Multiplication core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) * load(REG, rs2_ptr) % 2^32)\n    (mul, OPCODE_MUL),\n\n    /// Signed * signed multiplication high (Mul adapter and MULH core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) * load(REG, rs2_ptr) / 2^32), where `/` is integer division\n    (mulh, 
OPCODE_MULH),\n    /// Signed * unsigned multiplication high (Mul adapter and MULH core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) * load(REG, rs2_ptr) / 2^32), where `/` is integer division\n    (mulhsu, OPCODE_MULHSU),\n    /// Unsigned * unsigned multiplication high (Mul adapter and MULH core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) * load(REG, rs2_ptr) / 2^32), where `/` is integer division\n    (mulhu, OPCODE_MULHU),\n\n    /// Signed division (Mul adapter and Divrem core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) / load(REG, rs2_ptr)), where `/` is integer division\n    /// - Exception: store(REG, rd_ptr, -1) if `load(REG, rs2_ptr) == 0`\n    (div, OPCODE_DIV),\n    /// Unsigned division (Mul adapter and Divrem core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) / load(REG, rs2_ptr)), where `/` is integer division\n    /// - Exception: store(REG, rd_ptr, 2^32 - 1) if `load(REG, rs2_ptr) == 0`\n    (divu, OPCODE_DIVU),\n    /// Signed remainder (Mul adapter and Divrem core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) % load(REG, rs2_ptr))\n    (rem, OPCODE_REM),\n    /// Unsigned remainder (Mul adapter and Divrem core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) % load(REG, rs2_ptr))\n    (remu, OPCODE_REMU),\n\n    (hint_storew, OPCODE_HINT_STOREW),\n    (hint_buffer, OPCODE_HINT_BUFFER)\n);\n\n// ALU instructions\nalu_ops!(\n    /// Addition (ALU adapter and ALU core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) + load(rs2_as, rs2))\n    (add, OPCODE_ADD),\n    /// Subtraction (ALU adapter and ALU core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) - load(rs2_as, rs2))\n    (sub, OPCODE_SUB),\n    /// XOR (ALU adapter and ALU core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) XOR load(rs2_as, rs2))\n    (xor, OPCODE_XOR),\n    /// OR (ALU adapter and ALU core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) OR load(rs2_as, rs2))\n    (or, OPCODE_OR),\n    /// AND (ALU adapter and ALU core):\n    /// - 
store(REG, rd_ptr, load(REG, rs1_ptr) AND load(rs2_as, rs2))\n    (and, OPCODE_AND),\n\n    /// Shift left (ALU adapter and Shift core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) << (load(rs2_as, rs2) % 32))\n    (sll, OPCODE_SLL),\n    /// Shift right (ALU adapter and Shift core):\n    /// - store(REG, rd_ptr, load(REG, rs1_ptr) >> (load(rs2_as, rs2) % 32))\n    (srl, OPCODE_SRL),\n    /// Shift right arithmetic (signed) (ALU adapter and Shift core):\n    /// - store(REG, rd_ptr, sign_extend(load(REG, rs1_ptr) >> (load(rs2_as, rs2) % 32)))\n    (sra, OPCODE_SRA),\n\n    /// Less than signed (ALU adapter and Less than core):\n    /// - store(REG, rd_ptr, 1 if load(REG, rs1_ptr) < load(rs2_as, rs2) else 0)\n    (slt, OPCODE_SLT),\n    /// Less than unsigned (ALU adapter and Less than core):\n    /// - store(REG, rd_ptr, 1 if load(REG, rs1_ptr) < load(rs2_as, rs2) else 0)\n    (sltu, OPCODE_SLTU)\n);\n\n// Load/Store and Load/Store Sign Extend instructions\nls_ops!(\n    /// Load word (Load/store adapter and Loadstore core):\n    /// - store(REG, rd_ptr, load(mem_as, val(rs1) + imm)), where val(rs1) = load(REG, rs1_ptr)\n    (loadw, OPCODE_LOADW),\n    /// Load byte unsigned (Load/store adapter and Loadstore core):\n    /// - store(REG, rd_ptr, load_byte_unsigned(mem_as, val(rs1) + imm)), where val(rs1) = load(REG, rs1_ptr)\n    (loadbu, OPCODE_LOADBU),\n    /// Load half-word unsigned (Load/store adapter and Loadstore core):\n    /// - store(REG, rd_ptr, load_half_word_unsigned(mem_as, val(rs1) + imm)), where val(rs1) = load(REG, rs1_ptr)\n    (loadhu, OPCODE_LOADHU),\n\n    /// Store word (Load/store adapter and Loadstore core):\n    /// - store(mem_as, val(rs1) + imm, load(REG, rd_ptr)), where val(rs1) = load(REG, rs1_ptr)\n    (storew, OPCODE_STOREW),\n    /// Store half-word (Load/store adapter and Loadstore core):\n    /// - store_half_word(mem_as, val(rs1) + imm, load(REG, rd_ptr)), where val(rs1) = load(REG, rs1_ptr)\n    (storeh, 
OPCODE_STOREH),\n    /// Store byte (Load/store adapter and Loadstore core):\n    /// - store_byte(mem_as, val(rs1) + imm, load(REG, rd_ptr)), where val(rs1) = load(REG, rs1_ptr)\n    (storeb, OPCODE_STOREB),\n\n    /// Load byte signed (Load/store adapter and Load sign extend core):\n    /// - store(REG, rd_ptr, load_byte_signed(mem_as, val(rs1) + imm)), where val(rs1) = load(REG, rs1_ptr)\n    (loadb, OPCODE_LOADB),\n    /// Load half-word signed (Load/store adapter and Load sign extend core):\n    /// - store(REG, rd_ptr, load_half_word_signed(mem_as, val(rs1) + imm)), where val(rs1) = load(REG, rs1_ptr)\n    (loadh, OPCODE_LOADH)\n);\n\n// Branch Eq and Branch Lt instructions\nbranch_ops!(\n    /// Branch equal (Branch adapter and Branch Eq core):\n    /// - to_pc = pc + imm if load(REG, rs1_ptr) == load(REG, rs2_ptr) else pc + 4\n    (beq, OPCODE_BEQ),\n    /// Branch not equal (Branch adapter and Branch Eq core):\n    /// - to_pc = pc + imm if load(REG, rs1_ptr) != load(REG, rs2_ptr) else pc + 4\n    (bne, OPCODE_BNE),\n\n    /// Branch less than signed (Branch adapter and Branch Lt core):\n    /// - to_pc = pc + imm if load(REG, rs1_ptr) < load(REG, rs2_ptr) else pc + 4\n    (blt, OPCODE_BLT),\n    /// Branch less than unsigned (Branch adapter and Branch Lt core):\n    /// - to_pc = pc + imm if load(REG, rs1_ptr) < load(REG, rs2_ptr) else pc + 4\n    (bltu, OPCODE_BLTU),\n    /// Branch greater than or equal signed (Branch adapter and Branch Lt core):\n    /// - to_pc = pc + imm if load(REG, rs1_ptr) >= load(REG, rs2_ptr) else pc + 4\n    (bge, OPCODE_BGE),\n    /// Branch greater than or equal unsigned (Branch adapter and Branch Lt core):\n    /// - to_pc = pc + imm if load(REG, rs1_ptr) >= load(REG, rs2_ptr) else pc + 4\n    (bgeu, OPCODE_BGEU),\n);\n"
  },
  {
    "path": "openvm-riscv/src/isa/trace_generator/common.rs",
    "content": "use openvm_circuit::arch::{AirInventory, AirInventoryError, VmCircuitConfig, VmCircuitExtension};\nuse powdr_openvm::BabyBearSC;\n\nuse crate::ExtendedVmConfig;\n\npub fn create_dummy_airs<E: VmCircuitExtension<BabyBearSC>>(\n    config: &ExtendedVmConfig,\n    shared_chips: E,\n) -> Result<AirInventory<BabyBearSC>, AirInventoryError> {\n    let config = config.sdk.to_inner();\n    let mut inventory = config.system.create_airs()?;\n\n    // CHANGE: add dummy periphery\n    inventory.start_new_extension();\n    VmCircuitExtension::extend_circuit(&shared_chips, &mut inventory)?;\n    // END CHANGE\n\n    if let Some(rv32i) = &config.rv32i {\n        VmCircuitExtension::extend_circuit(rv32i, &mut inventory)?;\n    }\n    if let Some(io) = &config.io {\n        VmCircuitExtension::extend_circuit(io, &mut inventory)?;\n    }\n    if let Some(keccak) = &config.keccak {\n        VmCircuitExtension::extend_circuit(keccak, &mut inventory)?;\n    }\n    if let Some(sha256) = &config.sha256 {\n        VmCircuitExtension::extend_circuit(sha256, &mut inventory)?;\n    }\n    if let Some(native) = &config.native {\n        VmCircuitExtension::extend_circuit(native, &mut inventory)?;\n    }\n    if let Some(castf) = &config.castf {\n        VmCircuitExtension::extend_circuit(castf, &mut inventory)?;\n    }\n    if let Some(rv32m) = &config.rv32m {\n        VmCircuitExtension::extend_circuit(rv32m, &mut inventory)?;\n    }\n    if let Some(bigint) = &config.bigint {\n        VmCircuitExtension::extend_circuit(bigint, &mut inventory)?;\n    }\n    if let Some(modular) = &config.modular {\n        VmCircuitExtension::extend_circuit(modular, &mut inventory)?;\n    }\n    if let Some(fp2) = &config.fp2 {\n        VmCircuitExtension::extend_circuit(fp2, &mut inventory)?;\n    }\n    if let Some(pairing) = &config.pairing {\n        VmCircuitExtension::extend_circuit(pairing, &mut inventory)?;\n    }\n    if let Some(ecc) = &config.ecc {\n        
VmCircuitExtension::extend_circuit(ecc, &mut inventory)?;\n    }\n    Ok(inventory)\n}\n"
  },
  {
    "path": "openvm-riscv/src/isa/trace_generator/cpu.rs",
    "content": "use openvm_algebra_circuit::AlgebraCpuProverExt;\nuse openvm_bigint_circuit::Int256CpuProverExt;\nuse openvm_circuit::arch::{AirInventory, ChipInventoryError, VmBuilder, VmProverExtension};\nuse openvm_circuit::system::SystemCpuBuilder;\nuse openvm_keccak256_circuit::Keccak256CpuProverExt;\nuse openvm_native_circuit::NativeCpuProverExt;\nuse openvm_pairing_circuit::PairingProverExt;\nuse openvm_rv32im_circuit::Rv32ImCpuProverExt;\nuse openvm_sha256_circuit::Sha2CpuProverExt;\nuse powdr_openvm::powdr_extension::trace_generator::cpu::SharedPeripheryChipsCpuProverExt;\nuse powdr_openvm::powdr_extension::trace_generator::{DummyChipComplex, SharedPeripheryChipsCpu};\nuse powdr_openvm::BabyBearSC;\n\nuse crate::{ExtendedVmConfig, RiscvISA};\n\nuse openvm_ecc_circuit::EccCpuProverExt;\nuse openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Engine;\n\npub fn create_dummy_chip_complex_cpu(\n    config: &ExtendedVmConfig,\n    circuit: AirInventory<BabyBearSC>,\n    shared_chips: SharedPeripheryChipsCpu<RiscvISA>,\n) -> Result<DummyChipComplex<BabyBearSC>, ChipInventoryError> {\n    let config = config.sdk.to_inner();\n    let mut chip_complex = VmBuilder::<BabyBearPoseidon2Engine>::create_chip_complex(\n        &SystemCpuBuilder,\n        &config.system,\n        circuit,\n    )?;\n    let inventory = &mut chip_complex.inventory;\n\n    // CHANGE: inject the periphery chips so that they are not created by the extensions. 
This is done for memory footprint: the dummy periphery chips are thrown away anyway, so we reuse a single one for all APCs.\n    VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n        &SharedPeripheryChipsCpuProverExt,\n        &shared_chips,\n        inventory,\n    )?;\n    // END CHANGE\n\n    if let Some(rv32i) = &config.rv32i {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &Rv32ImCpuProverExt,\n            rv32i,\n            inventory,\n        )?;\n    }\n    if let Some(io) = &config.io {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &Rv32ImCpuProverExt,\n            io,\n            inventory,\n        )?;\n    }\n    if let Some(keccak) = &config.keccak {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &Keccak256CpuProverExt,\n            keccak,\n            inventory,\n        )?;\n    }\n    if let Some(sha256) = &config.sha256 {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &Sha2CpuProverExt,\n            sha256,\n            inventory,\n        )?;\n    }\n    if let Some(native) = &config.native {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &NativeCpuProverExt,\n            native,\n            inventory,\n        )?;\n    }\n    if let Some(castf) = &config.castf {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &NativeCpuProverExt,\n            castf,\n            inventory,\n        )?;\n    }\n    if let Some(rv32m) = &config.rv32m {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &Rv32ImCpuProverExt,\n            rv32m,\n            inventory,\n        )?;\n    }\n    if let Some(bigint) = &config.bigint {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &Int256CpuProverExt,\n            
bigint,\n            inventory,\n        )?;\n    }\n    if let Some(modular) = &config.modular {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &AlgebraCpuProverExt,\n            modular,\n            inventory,\n        )?;\n    }\n    if let Some(fp2) = &config.fp2 {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &AlgebraCpuProverExt,\n            fp2,\n            inventory,\n        )?;\n    }\n    if let Some(pairing) = &config.pairing {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &PairingProverExt,\n            pairing,\n            inventory,\n        )?;\n    }\n    if let Some(ecc) = &config.ecc {\n        VmProverExtension::<BabyBearPoseidon2Engine, _, _>::extend_prover(\n            &EccCpuProverExt,\n            ecc,\n            inventory,\n        )?;\n    }\n\n    Ok(chip_complex)\n}\n"
  },
  {
    "path": "openvm-riscv/src/isa/trace_generator/cuda.rs",
    "content": "use openvm_circuit::{\n    arch::{AirInventory, ChipInventoryError, VmBuilder, VmProverExtension},\n    system::cuda::extensions::SystemGpuBuilder,\n};\nuse openvm_pairing_circuit::PairingProverExt;\nuse powdr_openvm::{\n    powdr_extension::trace_generator::cuda::{\n        GpuDummyChipComplex, SharedPeripheryChipsGpu, SharedPeripheryChipsGpuProverExt,\n    },\n    BabyBearSC, GpuBabyBearPoseidon2Engine,\n};\n\nuse crate::{ExtendedVmConfig, RiscvISA};\n\npub fn create_dummy_chip_complex_gpu(\n    config: &ExtendedVmConfig,\n    circuit: AirInventory<BabyBearSC>,\n    shared_chips: SharedPeripheryChipsGpu<RiscvISA>,\n) -> Result<GpuDummyChipComplex<BabyBearSC>, ChipInventoryError> {\n    use openvm_algebra_circuit::AlgebraProverExt;\n    use openvm_bigint_circuit::Int256GpuProverExt;\n    use openvm_ecc_circuit::EccProverExt;\n    use openvm_keccak256_circuit::Keccak256GpuProverExt;\n    use openvm_native_circuit::NativeGpuProverExt;\n    use openvm_rv32im_circuit::Rv32ImGpuProverExt;\n    use openvm_sha256_circuit::Sha256GpuProverExt;\n\n    type E = GpuBabyBearPoseidon2Engine;\n\n    let config = config.sdk.to_inner();\n    let mut chip_complex =\n        VmBuilder::<E>::create_chip_complex(&SystemGpuBuilder, &config.system, circuit)?;\n    let inventory = &mut chip_complex.inventory;\n\n    // CHANGE: inject the periphery chips so that they are not created by the extensions. 
This is done for memory footprint: the dummy periphery chips are thrown away anyway, so we reuse a single one for all APCs.\n    VmProverExtension::<E, _, _>::extend_prover(\n        &SharedPeripheryChipsGpuProverExt,\n        &shared_chips,\n        inventory,\n    )?;\n    // END CHANGE\n\n    if let Some(rv32i) = &config.rv32i {\n        VmProverExtension::<E, _, _>::extend_prover(&Rv32ImGpuProverExt, rv32i, inventory)?;\n    }\n    if let Some(io) = &config.io {\n        VmProverExtension::<E, _, _>::extend_prover(&Rv32ImGpuProverExt, io, inventory)?;\n    }\n    if let Some(keccak) = &config.keccak {\n        VmProverExtension::<E, _, _>::extend_prover(&Keccak256GpuProverExt, keccak, inventory)?;\n    }\n    if let Some(sha256) = &config.sha256 {\n        VmProverExtension::<E, _, _>::extend_prover(&Sha256GpuProverExt, sha256, inventory)?;\n    }\n    if let Some(native) = &config.native {\n        VmProverExtension::<E, _, _>::extend_prover(&NativeGpuProverExt, native, inventory)?;\n    }\n    if let Some(castf) = &config.castf {\n        VmProverExtension::<E, _, _>::extend_prover(&NativeGpuProverExt, castf, inventory)?;\n    }\n    if let Some(rv32m) = &config.rv32m {\n        VmProverExtension::<E, _, _>::extend_prover(&Rv32ImGpuProverExt, rv32m, inventory)?;\n    }\n    if let Some(bigint) = &config.bigint {\n        VmProverExtension::<E, _, _>::extend_prover(&Int256GpuProverExt, bigint, inventory)?;\n    }\n    if let Some(modular) = &config.modular {\n        VmProverExtension::<E, _, _>::extend_prover(&AlgebraProverExt, modular, inventory)?;\n    }\n    if let Some(fp2) = &config.fp2 {\n        VmProverExtension::<E, _, _>::extend_prover(&AlgebraProverExt, fp2, inventory)?;\n    }\n    if let Some(pairing) = &config.pairing {\n        VmProverExtension::<E, _, _>::extend_prover(&PairingProverExt, pairing, inventory)?;\n    }\n    if let Some(ecc) = &config.ecc {\n        VmProverExtension::<E, _, _>::extend_prover(&EccProverExt, ecc, inventory)?;\n    
}\n    Ok(chip_complex)\n}\n"
  },
  {
    "path": "openvm-riscv/src/isa/trace_generator/mod.rs",
    "content": "mod cpu;\n#[cfg(feature = \"cuda\")]\nmod cuda;\n\nmod common;\npub use common::create_dummy_airs;\npub use cpu::create_dummy_chip_complex_cpu;\n#[cfg(feature = \"cuda\")]\npub use cuda::create_dummy_chip_complex_gpu;\n"
  },
  {
    "path": "openvm-riscv/src/lib.rs",
    "content": "#![cfg_attr(feature = \"tco\", allow(internal_features))]\n#![cfg_attr(feature = \"tco\", allow(incomplete_features))]\n#![cfg_attr(feature = \"tco\", feature(explicit_tail_calls))]\n#![cfg_attr(feature = \"tco\", feature(core_intrinsics))]\n\nuse eyre::Result;\nuse openvm_build::{build_guest_package, find_unique_executable, get_package, TargetFilter};\nuse openvm_circuit::arch::execution_mode::metered::segment_ctx::SegmentationLimits;\n#[cfg(feature = \"cuda\")]\nuse openvm_circuit::arch::DenseRecordArena;\nuse openvm_circuit::arch::{\n    debug_proving_ctx, AirInventory, ChipInventoryError, InitFileGenerator, MatrixRecordArena,\n    SystemConfig, VmBuilder, VmChipComplex, VmProverExtension,\n};\n#[cfg(feature = \"cuda\")]\nuse openvm_circuit::system::cuda::SystemChipInventoryGPU;\nuse openvm_circuit::system::SystemChipInventory;\nuse openvm_sdk::config::SdkVmCpuBuilder;\n\nuse openvm_sdk::config::TranspilerConfig;\nuse openvm_sdk::prover::{verify_app_proof, AggStarkProver};\nuse openvm_sdk::{\n    config::{AppConfig, SdkVmConfig, SdkVmConfigExecutor, DEFAULT_APP_LOG_BLOWUP},\n    Sdk, StdIn,\n};\nuse openvm_stark_backend::config::Val;\nuse openvm_stark_backend::engine::StarkEngine;\nuse openvm_stark_backend::prover::cpu::{CpuBackend, CpuDevice};\nuse openvm_stark_sdk::config::FriParameters;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse openvm_transpiler::transpiler::Transpiler;\nuse powdr_autoprecompiles::empirical_constraints::EmpiricalConstraints;\nuse powdr_autoprecompiles::pgo::{CellPgo, InstructionPgo, NonePgo};\nuse powdr_autoprecompiles::PowdrConfig;\nuse powdr_openvm::customize_exe::OpenVmApcCandidate;\nuse powdr_openvm::extraction_utils::OriginalVmConfig;\nuse powdr_openvm::trace_generation::do_with_trace;\nuse powdr_openvm::BabyBearSC;\n#[cfg(not(feature = \"cuda\"))]\nuse powdr_openvm::PowdrSdkCpu;\n#[cfg(feature = \"cuda\")]\nuse powdr_openvm::{GpuBabyBearPoseidon2Engine, GpuBackend, PowdrSdkGpu};\nuse 
powdr_openvm_riscv_hints_circuit::{HintsExtension, HintsExtensionExecutor, HintsProverExt};\nuse powdr_openvm_riscv_hints_transpiler::HintsTranspilerExtension;\nuse serde::{Deserialize, Serialize};\nuse std::path::{Path, PathBuf};\n\npub use crate::isa::RiscvISA;\npub use crate::isa::{instruction_formatter, symbolic_instruction_builder};\npub use powdr_openvm::program::{CompiledProgram, OriginalCompiledProgram};\n\npub mod isa;\n\npub use powdr_autoprecompiles::DegreeBound;\npub use powdr_autoprecompiles::PgoConfig;\n\npub use powdr_openvm_bus_interaction_handler::bus_map;\n\npub use powdr_openvm::empirical_constraints::detect_empirical_constraints;\npub use powdr_openvm::{\n    default_powdr_openvm_config, DEFAULT_DEGREE_BOUND, DEFAULT_OPENVM_DEGREE_BOUND,\n};\n\npub use openvm_build::GuestOptions;\npub use powdr_autoprecompiles::bus_map::BusType;\npub use powdr_openvm::customize_exe::customize;\npub use powdr_openvm::customize_exe::Instr;\n\npub fn build_elf_path<P: AsRef<Path>>(\n    guest_opts: GuestOptions,\n    pkg_dir: P,\n    target_filter: &Option<TargetFilter>,\n) -> Result<PathBuf> {\n    let pkg = get_package(pkg_dir.as_ref());\n    let target_dir = match build_guest_package(&pkg, &guest_opts, None, target_filter) {\n        Ok(target_dir) => target_dir,\n        Err(Some(code)) => {\n            return Err(eyre::eyre!(\"Failed to build guest: code = {}\", code));\n        }\n        Err(None) => {\n            return Err(eyre::eyre!(\n                \"Failed to build guest (OPENVM_SKIP_BUILD is set)\"\n            ));\n        }\n    };\n\n    find_unique_executable(pkg_dir, target_dir, target_filter)\n}\n\n// compile the original openvm program without powdr extension\npub fn compile_openvm(\n    guest: &str,\n    guest_opts: GuestOptions,\n) -> Result<OriginalCompiledProgram<'static, RiscvISA>, Box<dyn std::error::Error>> {\n    // Build the ELF with guest options and a target filter.\n    // We need these extra Rust flags to get the labels.\n    
let guest_opts = guest_opts.with_rustc_flags(vec![\"-C\", \"link-arg=--emit-relocs\"]);\n\n    // Point to our local guest\n    use std::path::PathBuf;\n    let mut path = PathBuf::from(env!(\"CARGO_MANIFEST_DIR\")).to_path_buf();\n    path.push(guest);\n    let target_path = path.to_str().unwrap();\n\n    // try to load the sdk config from the openvm.toml file, otherwise use the default\n    let openvm_toml_path = path.join(\"openvm.toml\");\n    let app_config = if openvm_toml_path.exists() {\n        let toml = std::fs::read_to_string(&openvm_toml_path)?;\n        toml::from_str(&toml)?\n    } else {\n        AppConfig::riscv32()\n    };\n\n    let mut sdk = Sdk::new(app_config)?;\n\n    let transpiler = sdk.transpiler().unwrap();\n\n    // Add our custom transpiler extensions\n    sdk.set_transpiler(\n        transpiler\n            .clone()\n            .with_extension(HintsTranspilerExtension {}),\n    );\n\n    let elf = sdk.build(\n        guest_opts.clone(),\n        target_path,\n        &Default::default(),\n        Default::default(),\n    )?;\n\n    // Transpile the ELF into a VmExe.\n    let exe = sdk.convert_to_exe(elf)?;\n\n    let elf_binary_path = build_elf_path(guest_opts.clone(), target_path, &Default::default())?;\n    let elf = powdr_riscv_elf::load_elf(&elf_binary_path);\n\n    let vm_config = ExtendedVmConfig {\n        sdk: sdk.app_config().app_vm_config.clone(),\n        hints: HintsExtension,\n    };\n\n    Ok(OriginalCompiledProgram {\n        exe,\n        vm_config: OriginalVmConfig::new(vm_config),\n        linked_program: elf,\n    })\n}\n\npub fn compile_exe(\n    original_program: OriginalCompiledProgram<RiscvISA>,\n    config: PowdrConfig,\n    pgo_config: PgoConfig,\n    empirical_constraints: EmpiricalConstraints,\n) -> Result<CompiledProgram<RiscvISA>, Box<dyn std::error::Error>> {\n    let compiled = match pgo_config {\n        PgoConfig::Cell(pgo_data, max_total_columns) => {\n            let max_total_apc_columns: 
Option<usize> = max_total_columns.map(|max_total_columns| {\n                let original_config = original_program.vm_config.clone();\n\n                let total_non_apc_columns: usize = original_config\n                    .chip_inventory_air_metrics(config.degree_bound.identities)\n                    .values()\n                    .map(|m| m.total_width())\n                    .sum::<usize>();\n                max_total_columns - total_non_apc_columns\n            });\n\n            customize(\n                original_program,\n                config,\n                CellPgo::<_, OpenVmApcCandidate<RiscvISA>>::with_pgo_data_and_max_columns(\n                    pgo_data,\n                    max_total_apc_columns,\n                ),\n                empirical_constraints,\n            )\n        }\n        PgoConfig::Instruction(pgo_data) => customize(\n            original_program,\n            config,\n            InstructionPgo::with_pgo_data(pgo_data),\n            empirical_constraints,\n        ),\n        PgoConfig::None => customize(\n            original_program,\n            config,\n            NonePgo::default(),\n            empirical_constraints,\n        ),\n    };\n    Ok(compiled)\n}\n\nuse openvm_circuit_derive::VmConfig;\n\n#[derive(Clone, Debug, Serialize, Deserialize, VmConfig)]\n// SdkVmConfig plus custom openvm extensions, before autoprecompile transformations.\n// For now, only includes custom hints.\npub struct ExtendedVmConfig {\n    #[config]\n    pub sdk: SdkVmConfig,\n    #[extension(executor = \"HintsExtensionExecutor<F>\")]\n    pub hints: HintsExtension,\n}\n\nimpl TranspilerConfig<BabyBear> for ExtendedVmConfig {\n    fn transpiler(&self) -> Transpiler<BabyBear> {\n        self.sdk.transpiler()\n    }\n}\n\n#[derive(Default, Clone)]\npub struct ExtendedVmConfigCpuBuilder;\n\nimpl<E> VmBuilder<E> for ExtendedVmConfigCpuBuilder\nwhere\n    E: StarkEngine<SC = BabyBearSC, PB = CpuBackend<BabyBearSC>, PD = 
CpuDevice<BabyBearSC>>,\n{\n    type VmConfig = ExtendedVmConfig;\n    type SystemChipInventory = SystemChipInventory<BabyBearSC>;\n    type RecordArena = MatrixRecordArena<Val<BabyBearSC>>;\n\n    fn create_chip_complex(\n        &self,\n        config: &ExtendedVmConfig,\n        circuit: AirInventory<BabyBearSC>,\n    ) -> Result<\n        VmChipComplex<BabyBearSC, Self::RecordArena, E::PB, Self::SystemChipInventory>,\n        ChipInventoryError,\n    > {\n        let mut chip_complex =\n            VmBuilder::<E>::create_chip_complex(&SdkVmCpuBuilder, &config.sdk, circuit)?;\n        let inventory = &mut chip_complex.inventory;\n        VmProverExtension::<E, _, _>::extend_prover(&HintsProverExt, &config.hints, inventory)?;\n        Ok(chip_complex)\n    }\n}\n\n#[cfg(feature = \"cuda\")]\n#[derive(Default, Clone)]\npub struct ExtendedVmConfigGpuBuilder;\n\n#[cfg(feature = \"cuda\")]\nimpl VmBuilder<GpuBabyBearPoseidon2Engine> for ExtendedVmConfigGpuBuilder {\n    type VmConfig = ExtendedVmConfig;\n    type SystemChipInventory = SystemChipInventoryGPU;\n    type RecordArena = DenseRecordArena;\n\n    fn create_chip_complex(\n        &self,\n        config: &ExtendedVmConfig,\n        circuit: AirInventory<BabyBearSC>,\n    ) -> Result<\n        VmChipComplex<BabyBearSC, Self::RecordArena, GpuBackend, Self::SystemChipInventory>,\n        ChipInventoryError,\n    > {\n        let mut chip_complex = VmBuilder::<GpuBabyBearPoseidon2Engine>::create_chip_complex(\n            &openvm_sdk::config::SdkVmGpuBuilder,\n            &config.sdk,\n            circuit,\n        )?;\n        let inventory = &mut chip_complex.inventory;\n        VmProverExtension::<GpuBabyBearPoseidon2Engine, _, _>::extend_prover(\n            &HintsProverExt,\n            &config.hints,\n            inventory,\n        )?;\n        Ok(chip_complex)\n    }\n}\n\nimpl InitFileGenerator for ExtendedVmConfig {\n    fn generate_init_file_contents(&self) -> Option<String> {\n        
self.sdk.generate_init_file_contents()\n    }\n\n    fn write_to_init_file(\n        &self,\n        manifest_dir: &Path,\n        init_file_name: Option<&str>,\n    ) -> std::io::Result<()> {\n        self.sdk.write_to_init_file(manifest_dir, init_file_name)\n    }\n}\n\npub fn prove(\n    program: &CompiledProgram<RiscvISA>,\n    mock: bool,\n    recursion: bool,\n    inputs: StdIn,\n    segment_height: Option<usize>, // uses the default height if None\n) -> Result<(), Box<dyn std::error::Error>> {\n    if mock {\n        do_with_trace(program, inputs, |_segment_idx, vm, pk, ctx| {\n            debug_proving_ctx(vm, pk, &ctx);\n        })?;\n    } else {\n        let exe = &program.exe;\n        let mut vm_config = program.vm_config.clone();\n\n        // DefaultSegmentationStrategy { max_segment_len: 4194204, max_cells_per_chip_in_segment: 503304480 }\n        if let Some(segment_height) = segment_height {\n            vm_config\n                .original\n                .config_mut()\n                .sdk\n                .system\n                .config\n                .segmentation_limits =\n                SegmentationLimits::default().with_max_trace_height(segment_height as u32);\n            tracing::debug!(\"Setting max segment len to {}\", segment_height);\n        }\n\n        // Set app configuration\n        let app_fri_params =\n            FriParameters::standard_with_100_bits_conjectured_security(DEFAULT_APP_LOG_BLOWUP);\n        let app_config = AppConfig::new(app_fri_params, vm_config.clone());\n\n        // Create the SDK\n        #[cfg(feature = \"cuda\")]\n        let sdk = PowdrSdkGpu::new(app_config).unwrap();\n        #[cfg(not(feature = \"cuda\"))]\n        let sdk = PowdrSdkCpu::new(app_config).unwrap();\n        let mut app_prover = sdk.app_prover(exe.clone())?;\n\n        // Generate a proof\n        tracing::info!(\"Generating app proof...\");\n        let start = std::time::Instant::now();\n        let app_proof = 
app_prover.prove(inputs.clone())?;\n        tracing::info!(\"App proof took {:?}\", start.elapsed());\n\n        tracing::info!(\"Public values: {:?}\", app_proof.user_public_values);\n\n        // Verify\n        let app_vk = sdk.app_pk().get_app_vk();\n        verify_app_proof(&app_vk, &app_proof)?;\n        tracing::info!(\"App proof verification done.\");\n\n        if recursion {\n            let mut agg_prover: AggStarkProver<_, _> = sdk.prover(exe.clone())?.agg_prover;\n\n            // Note that this proof is not verified. We assume that any valid app proof\n            // (verified above) also leads to a valid aggregation proof.\n            // If this was not the case, it would be a completeness bug in OpenVM.\n            let start = std::time::Instant::now();\n            let _ = agg_prover.generate_root_verifier_input(app_proof)?;\n            tracing::info!(\"Agg proof (inner recursion) took {:?}\", start.elapsed());\n        }\n\n        tracing::info!(\"All done.\");\n    }\n\n    Ok(())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use expect_test::{expect, Expect};\n    use itertools::Itertools;\n    use powdr_openvm::{\n        execution_profile_from_guest,\n        extraction_utils::{AirWidths, AirWidthsDiff},\n        AirMetrics,\n    };\n    use pretty_assertions::assert_eq;\n    use test_log::test;\n\n    #[allow(clippy::too_many_arguments)]\n    fn compile_and_prove(\n        guest: &str,\n        config: PowdrConfig,\n        mock: bool,\n        recursion: bool,\n        stdin: StdIn,\n        pgo_config: PgoConfig,\n        segment_height: Option<usize>,\n    ) -> Result<(), Box<dyn std::error::Error>> {\n        let guest = compile_openvm(guest, GuestOptions::default()).unwrap();\n        let program =\n            compile_exe(guest, config, pgo_config, EmpiricalConstraints::default()).unwrap();\n        prove(&program, mock, recursion, stdin, segment_height)\n    }\n\n    fn prove_simple(\n        guest: &str,\n        config: 
PowdrConfig,\n        stdin: StdIn,\n        pgo_config: PgoConfig,\n        segment_height: Option<usize>,\n    ) {\n        compile_and_prove(\n            guest,\n            config,\n            false,\n            false,\n            stdin,\n            pgo_config,\n            segment_height,\n        )\n        .unwrap()\n    }\n\n    fn prove_mock(\n        guest: &str,\n        config: PowdrConfig,\n        stdin: StdIn,\n        pgo_config: PgoConfig,\n        segment_height: Option<usize>,\n    ) {\n        compile_and_prove(\n            guest,\n            config,\n            true,\n            false,\n            stdin,\n            pgo_config,\n            segment_height,\n        )\n        .unwrap()\n    }\n\n    fn prove_recursion(\n        guest: &str,\n        config: PowdrConfig,\n        stdin: StdIn,\n        pgo_config: PgoConfig,\n        segment_height: Option<usize>,\n    ) {\n        compile_and_prove(\n            guest,\n            config,\n            false,\n            true,\n            stdin,\n            pgo_config,\n            segment_height,\n        )\n        .unwrap()\n    }\n\n    const GUEST: &str = \"guest\";\n    const GUEST_ITER: u32 = 1 << 10;\n    const GUEST_APC: u64 = 1;\n    const GUEST_SKIP_NO_APC_EXECUTED: u64 = 56;\n    const GUEST_SKIP_PGO: u64 = 0;\n\n    const GUEST_KECCAK: &str = \"guest-keccak\";\n    const GUEST_KECCAK_ITER: u32 = 1_000;\n    const GUEST_KECCAK_ITER_SMALL: u32 = 10;\n    const GUEST_KECCAK_ITER_LARGE: u32 = 25_000;\n    const GUEST_KECCAK_APC: u64 = 1;\n    const GUEST_KECCAK_APC_PGO: u64 = 10;\n    const GUEST_KECCAK_APC_PGO_LARGE: u64 = 100;\n    const GUEST_KECCAK_SKIP: u64 = 0;\n\n    const GUEST_SHA256_ITER: u32 = 1_000;\n    const GUEST_SHA256_ITER_SMALL: u32 = 10;\n    const GUEST_SHA256_ITER_LARGE: u32 = 25_000;\n    const GUEST_SHA256: &str = \"guest-sha256\";\n    const GUEST_SHA256_APC_PGO: u64 = 10;\n    const GUEST_SHA256_APC_PGO_LARGE: u64 = 50;\n    const 
GUEST_SHA256_SKIP: u64 = 0;\n\n    const GUEST_U256: &str = \"guest-u256\";\n    const GUEST_U256_APC_PGO: u64 = 10;\n    const GUEST_U256_SKIP: u64 = 0;\n\n    const GUEST_PAIRING: &str = \"guest-pairing\";\n    const GUEST_PAIRING_APC_PGO: u64 = 10;\n    const GUEST_PAIRING_SKIP: u64 = 0;\n\n    const GUEST_HINTS_TEST: &str = \"guest-hints-test\";\n\n    const GUEST_ECC_HINTS: &str = \"guest-ecc-powdr-affine-hint\";\n    const GUEST_ECC_APC_PGO: u64 = 50;\n    const GUEST_ECC_SKIP: u64 = 0;\n    // Even with an iteration of 0, the test does one linear combination\n    // (and asserts that the result is correct)\n    const GUEST_ECC_ITER: u32 = 0;\n\n    const GUEST_ECC_PROJECTIVE: &str = \"guest-ecc-projective\";\n    const GUEST_ECC_PROJECTIVE_APC_PGO: u64 = 50;\n    const GUEST_ECC_PROJECTIVE_SKIP: u64 = 0;\n\n    const GUEST_ECRECOVER_HINTS: &str = \"guest-ecrecover\";\n    const GUEST_ECRECOVER_APC_PGO: u64 = 50;\n    const GUEST_ECRECOVER_SKIP: u64 = 0;\n    const GUEST_ECRECOVER_ITER: u32 = 1;\n\n    #[test]\n    fn guest_prove_simple_no_apc_executed() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ITER);\n\n        // Create execution profile but don't prove with it, just to assert that the APC we select isn't executed\n        let guest = compile_openvm(GUEST, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let config = default_powdr_openvm_config(GUEST_APC, GUEST_SKIP_NO_APC_EXECUTED);\n        let program = compile_exe(\n            guest,\n            config,\n            PgoConfig::None,\n            EmpiricalConstraints::default(),\n        )\n        .unwrap();\n\n        // Assert that all APCs aren't executed\n        program\n            .vm_config\n            .powdr\n            .precompiles\n            .iter()\n            .for_each(|precompile| {\n                assert!(!pgo_data\n                    .pc_count\n                    .keys()\n  
                  .contains(&precompile.apc.block.try_as_basic_block().unwrap().start_pc));\n            });\n\n        let result = prove(&program, false, false, stdin, None);\n        assert!(result.is_ok());\n    }\n\n    #[test]\n    fn guest_prove_simple() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ITER);\n        let config = default_powdr_openvm_config(GUEST_APC, GUEST_SKIP_PGO);\n        let guest = compile_openvm(GUEST, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n        prove_simple(GUEST, config, stdin, PgoConfig::Instruction(pgo_data), None);\n    }\n\n    #[test]\n    fn guest_prove_mock() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ITER);\n        let config = default_powdr_openvm_config(GUEST_APC, GUEST_SKIP_PGO);\n        let guest = compile_openvm(GUEST, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n        prove_mock(GUEST, config, stdin, PgoConfig::Instruction(pgo_data), None);\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn guest_prove_recursion() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ITER);\n        let config = default_powdr_openvm_config(GUEST_APC, GUEST_SKIP_PGO);\n        let guest = compile_openvm(GUEST, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n        prove_recursion(GUEST, config, stdin, PgoConfig::Instruction(pgo_data), None);\n    }\n\n    #[test]\n    #[ignore = \"Too long\"]\n    fn matmul_compile() {\n        let guest = compile_openvm(\"guest-matmul\", GuestOptions::default()).unwrap();\n        let config = default_powdr_openvm_config(1, 0);\n        assert!(compile_exe(\n            guest,\n            config,\n            PgoConfig::default(),\n            EmpiricalConstraints::default()\n        )\n        
.is_ok());\n    }\n\n    #[test]\n    fn keccak_small_prove_simple() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER_SMALL);\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC, GUEST_KECCAK_SKIP);\n        prove_simple(GUEST_KECCAK, config, stdin, PgoConfig::None, None);\n    }\n\n    #[test]\n    fn keccak_small_prove_simple_multi_segment() {\n        // Set the default segmentation height to a small value to test multi-segment proving\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER_SMALL);\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC, GUEST_KECCAK_SKIP);\n        // should create two segments\n        prove_simple(GUEST_KECCAK, config, stdin, PgoConfig::None, Some(4_000));\n    }\n\n    #[test]\n    #[ignore = \"Too long\"]\n    fn keccak_prove_simple() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER);\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC, GUEST_KECCAK_SKIP);\n        prove_simple(GUEST_KECCAK, config, stdin, PgoConfig::None, None);\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn keccak_prove_many_apcs() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER);\n        let guest = compile_openvm(GUEST_KECCAK, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC_PGO_LARGE, GUEST_KECCAK_SKIP);\n        prove_recursion(\n            GUEST_KECCAK,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Instruction(pgo_data.clone()),\n            None,\n        );\n\n        prove_recursion(\n            GUEST_KECCAK,\n            config.clone(),\n            stdin,\n            PgoConfig::Cell(pgo_data, None),\n            None,\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n  
  fn keccak_prove_large() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER_LARGE);\n        let guest = compile_openvm(GUEST_KECCAK, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC_PGO, GUEST_KECCAK_SKIP);\n        prove_recursion(\n            GUEST_KECCAK,\n            config,\n            stdin,\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n    }\n\n    #[test]\n    fn keccak_small_prove_mock() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER_SMALL);\n\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC, GUEST_KECCAK_SKIP);\n        prove_mock(GUEST_KECCAK, config, stdin, PgoConfig::None, None);\n    }\n\n    #[test]\n    #[ignore = \"Too long\"]\n    fn keccak_prove_mock() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER);\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC, GUEST_KECCAK_SKIP);\n        prove_mock(GUEST_KECCAK, config, stdin, PgoConfig::None, None);\n    }\n\n    // Create multiple APC for 10 Keccak iterations to test different PGO modes\n    #[test]\n    fn keccak_prove_multiple_pgo_modes() {\n        use std::time::Instant;\n        // Config\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER_SMALL);\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC_PGO, GUEST_KECCAK_SKIP);\n\n        // Pgo data\n        let guest = compile_openvm(GUEST_KECCAK, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        // Pgo Cell mode\n        let start = Instant::now();\n        prove_simple(\n            GUEST_KECCAK,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Cell(pgo_data.clone(), None),\n            
None,\n        );\n        let elapsed = start.elapsed();\n        tracing::debug!(\"Proving keccak with PgoConfig::Cell took {:?}\", elapsed);\n\n        // Pgo Instruction mode\n        let start = Instant::now();\n        prove_simple(\n            GUEST_KECCAK,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n        let elapsed = start.elapsed();\n        tracing::debug!(\n            \"Proving keccak with PgoConfig::Instruction took {:?}\",\n            elapsed\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too long\"]\n    fn sha256_prove_simple() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER);\n        let config = default_powdr_openvm_config(GUEST_SHA256_APC_PGO, GUEST_SHA256_SKIP);\n\n        let guest = compile_openvm(GUEST_SHA256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        prove_simple(\n            GUEST_SHA256,\n            config,\n            stdin,\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too long\"]\n    fn sha256_prove_mock() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER);\n        let config = default_powdr_openvm_config(GUEST_SHA256_APC_PGO, GUEST_SHA256_SKIP);\n\n        let guest = compile_openvm(GUEST_SHA256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        prove_mock(\n            GUEST_SHA256,\n            config,\n            stdin,\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn sha256_prove_many_apcs() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER);\n        let guest = compile_openvm(GUEST_SHA256, 
GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let config = default_powdr_openvm_config(GUEST_SHA256_APC_PGO_LARGE, GUEST_SHA256_SKIP);\n        prove_recursion(\n            GUEST_SHA256,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Instruction(pgo_data.clone()),\n            None,\n        );\n\n        prove_recursion(\n            GUEST_SHA256,\n            config.clone(),\n            stdin,\n            PgoConfig::Cell(pgo_data, None),\n            None,\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn sha256_prove_large() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER_LARGE);\n        let guest = compile_openvm(GUEST_SHA256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let config = default_powdr_openvm_config(GUEST_SHA256_APC_PGO, GUEST_SHA256_SKIP);\n        prove_recursion(\n            GUEST_SHA256,\n            config,\n            stdin,\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n    }\n\n    #[test]\n    fn sha256_small_prove_simple() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER_SMALL);\n        let config = default_powdr_openvm_config(GUEST_SHA256_APC_PGO, GUEST_SHA256_SKIP);\n\n        let guest = compile_openvm(GUEST_SHA256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        prove_simple(\n            GUEST_SHA256,\n            config,\n            stdin,\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n    }\n\n    #[test]\n    fn sha256_small_prove_mock() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER_SMALL);\n        let config = default_powdr_openvm_config(GUEST_SHA256_APC_PGO, 
GUEST_SHA256_SKIP);\n\n        let guest = compile_openvm(GUEST_SHA256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        prove_mock(\n            GUEST_SHA256,\n            config,\n            stdin,\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n    }\n\n    #[test]\n    fn sha256_prove_multiple_pgo_modes() {\n        use std::time::Instant;\n\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER_SMALL);\n        let config = default_powdr_openvm_config(GUEST_SHA256_APC_PGO, GUEST_SHA256_SKIP);\n\n        let guest = compile_openvm(GUEST_SHA256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let start = Instant::now();\n        prove_simple(\n            GUEST_SHA256,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Cell(pgo_data.clone(), None),\n            None,\n        );\n        let elapsed = start.elapsed();\n        tracing::debug!(\"Proving sha256 with PgoConfig::Cell took {:?}\", elapsed);\n\n        let start = Instant::now();\n        prove_simple(\n            GUEST_SHA256,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Instruction(pgo_data),\n            None,\n        );\n        let elapsed = start.elapsed();\n        tracing::debug!(\n            \"Proving sha256 with PgoConfig::Instruction took {:?}\",\n            elapsed\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn u256_prove_large() {\n        use std::time::Instant;\n\n        let stdin = StdIn::default();\n        let config = default_powdr_openvm_config(GUEST_U256_APC_PGO, GUEST_U256_SKIP);\n\n        let guest = compile_openvm(GUEST_U256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let start = 
Instant::now();\n        prove_simple(\n            GUEST_U256,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Cell(pgo_data.clone(), None),\n            None,\n        );\n        let elapsed = start.elapsed();\n        tracing::debug!(\"Proving U256 with PgoConfig::Cell took {:?}\", elapsed);\n    }\n\n    #[test]\n    #[ignore = \"Too slow\"]\n    fn pairing_prove() {\n        use std::time::Instant;\n\n        let stdin = StdIn::default();\n        let config = default_powdr_openvm_config(GUEST_PAIRING_APC_PGO, GUEST_PAIRING_SKIP);\n\n        let guest = compile_openvm(GUEST_PAIRING, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        let start = Instant::now();\n        prove_simple(\n            GUEST_PAIRING,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Cell(pgo_data.clone(), None),\n            None,\n        );\n        let elapsed = start.elapsed();\n        tracing::debug!(\n            \"Proving pairing guest with PgoConfig::Cell took {:?}\",\n            elapsed\n        );\n    }\n\n    #[test]\n    /// check that the hints test guest compiles and proves successfully\n    fn hints_test_prove() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_HINTS_TEST);\n        let config = default_powdr_openvm_config(0, 0);\n\n        prove_simple(GUEST_SHA256, config, stdin, PgoConfig::None, None);\n    }\n\n    #[test]\n    fn ecc_hint_prove() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ECC_ITER);\n        let guest = compile_openvm(GUEST_ECC_HINTS, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n        let config = default_powdr_openvm_config(GUEST_ECC_APC_PGO, GUEST_ECC_SKIP);\n        prove_simple(\n            GUEST_ECC_HINTS,\n            config.clone(),\n            stdin.clone(),\n            
PgoConfig::Cell(pgo_data.clone(), None),\n            None,\n        );\n    }\n\n    #[test]\n    fn ecrecover_prove() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ECRECOVER_ITER);\n        let guest = compile_openvm(GUEST_ECRECOVER_HINTS, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n        let config = default_powdr_openvm_config(GUEST_ECRECOVER_APC_PGO, GUEST_ECRECOVER_SKIP);\n        prove_simple(\n            GUEST_ECRECOVER_HINTS,\n            config.clone(),\n            stdin.clone(),\n            PgoConfig::Cell(pgo_data.clone(), None),\n            None,\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn ecc_hint_prove_recursion_large() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ECC_ITER);\n        let guest = compile_openvm(GUEST_ECC_HINTS, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n        let config = default_powdr_openvm_config(GUEST_ECC_APC_PGO, GUEST_ECC_SKIP);\n        prove_recursion(\n            GUEST_ECC_HINTS,\n            config,\n            stdin,\n            PgoConfig::Cell(pgo_data, None),\n            None,\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn ecrecover_prove_recursion_large() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ECRECOVER_ITER);\n        let guest = compile_openvm(GUEST_ECRECOVER_HINTS, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n        let config = default_powdr_openvm_config(GUEST_ECRECOVER_APC_PGO, GUEST_ECRECOVER_SKIP);\n        prove_recursion(\n            GUEST_ECRECOVER_HINTS,\n            config,\n            stdin,\n            PgoConfig::Cell(pgo_data, None),\n            None,\n        );\n    }\n\n    #[test]\n    fn ecc_projective_prove() {\n        let mut 
stdin = StdIn::default();\n        stdin.write(&GUEST_ECC_ITER);\n        let config =\n            default_powdr_openvm_config(GUEST_ECC_PROJECTIVE_APC_PGO, GUEST_ECC_PROJECTIVE_SKIP);\n\n        let guest = compile_openvm(GUEST_ECC_PROJECTIVE, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        prove_simple(\n            GUEST_ECC_PROJECTIVE,\n            config,\n            stdin,\n            PgoConfig::Cell(pgo_data, None),\n            None,\n        );\n    }\n\n    #[test]\n    #[ignore = \"Too much RAM\"]\n    fn keccak_prove_recursion() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER);\n        let config = default_powdr_openvm_config(GUEST_KECCAK_APC, GUEST_KECCAK_SKIP);\n        prove_recursion(GUEST_KECCAK, config, stdin, PgoConfig::None, None);\n    }\n\n    // The following are compilation tests only\n\n    struct GuestTestConfig {\n        pgo_config: PgoConfig,\n        name: &'static str,\n        apc: u64,\n        skip: u64,\n    }\n\n    struct MachineTestMetrics {\n        powdr_expected_sum: Expect,\n        powdr_expected_machine_count: Expect,\n        non_powdr_expected_sum: AirMetrics,\n        non_powdr_expected_machine_count: usize,\n    }\n\n    fn test_machine_compilation(\n        guest: GuestTestConfig,\n        expected_metrics: MachineTestMetrics,\n        expected_columns_saved: Option<Expect>,\n    ) {\n        let apc_candidates_dir = tempfile::tempdir().unwrap();\n        let apc_candidates_dir_path = apc_candidates_dir.path();\n        let config = default_powdr_openvm_config(guest.apc, guest.skip)\n            .with_apc_candidates_dir(apc_candidates_dir_path);\n        let is_cell_pgo = matches!(guest.pgo_config, PgoConfig::Cell(_, _));\n        let max_degree = config.degree_bound.identities;\n        let guest_program = compile_openvm(guest.name, GuestOptions::default()).unwrap();\n        let compiled_program = 
compile_exe(\n            guest_program,\n            config,\n            guest.pgo_config,\n            EmpiricalConstraints::default(),\n        )\n        .unwrap();\n\n        let (powdr_air_metrics, non_powdr_air_metrics) = compiled_program.air_metrics(max_degree);\n\n        expected_metrics.powdr_expected_sum.assert_debug_eq(\n            &powdr_air_metrics\n                .iter()\n                .map(|(metrics, _)| metrics.clone())\n                .sum::<AirMetrics>(),\n        );\n        expected_metrics\n            .powdr_expected_machine_count\n            .assert_debug_eq(&powdr_air_metrics.len());\n        assert_eq!(\n            non_powdr_air_metrics.len(),\n            expected_metrics.non_powdr_expected_machine_count\n        );\n        assert_eq!(\n            non_powdr_air_metrics.into_iter().sum::<AirMetrics>(),\n            expected_metrics.non_powdr_expected_sum\n        );\n        let columns_saved = is_cell_pgo.then(|| {\n            // Test cells saved in Pgo::Cell\n            powdr_air_metrics\n                .into_iter()\n                .map(|(_, columns_saved)| columns_saved.unwrap())\n                .sum::<AirWidthsDiff>()\n        });\n        assert_eq!(columns_saved.is_some(), expected_columns_saved.is_some());\n        if let Some(expected) = expected_columns_saved {\n            expected.assert_debug_eq(&columns_saved.unwrap());\n        }\n\n        let files = std::fs::read_dir(apc_candidates_dir_path)\n            .unwrap()\n            .filter_map(Result::ok)\n            .map(|entry| {\n                entry\n                    .path()\n                    .file_name()\n                    .unwrap()\n                    .to_string_lossy()\n                    .to_string()\n            })\n            .collect_vec();\n        // Check that the snapshot json files are there.\n        assert!(\n            files\n                .iter()\n                .any(|filename| filename.starts_with(\"apc_candidate_\")\n       
             && filename.ends_with(\".json\")),\n            \"APC candidates snapshot JSON file not found\"\n        );\n        if is_cell_pgo {\n            // In Cell PGO, check that the apc candidates were persisted to disk\n            assert!(\n                files.contains(&\"apc_candidates.json\".to_string()),\n                \"Candidates file not present.\"\n            );\n        } else {\n            assert!(\n                !files.contains(&\"apc_candidates.json\".to_string()),\n                \"Candidates file present, but not expected.\"\n            );\n        }\n    }\n\n    const NON_POWDR_EXPECTED_MACHINE_COUNT: usize = 19;\n    const NON_POWDR_EXPECTED_SUM: AirMetrics = AirMetrics {\n        widths: AirWidths {\n            preprocessed: 7,\n            main: 798,\n            log_up: 684,\n        },\n        constraints: 604,\n        bus_interactions: 253,\n    };\n\n    #[test]\n    fn guest_machine_pgo_modes() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ITER);\n        let guest = compile_openvm(GUEST, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin);\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Instruction(pgo_data.clone()),\n                name: GUEST,\n                apc: GUEST_APC,\n                skip: GUEST_SKIP_PGO,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 38,\n                            log_up: 56,\n                        },\n                        constraints: 12,\n                        bus_interactions: 26,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    1\n                
\"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            None,\n        );\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Cell(pgo_data, None),\n                name: GUEST,\n                apc: GUEST_APC,\n                skip: GUEST_SKIP_PGO,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 38,\n                            log_up: 56,\n                        },\n                        constraints: 12,\n                        bus_interactions: 26,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    1\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            Some(expect![[r#\"\n                AirWidthsDiff {\n                    before: AirWidths {\n                        preprocessed: 0,\n                        main: 170,\n                        log_up: 236,\n                    },\n                    after: AirWidths {\n                        preprocessed: 0,\n                        main: 38,\n                        log_up: 56,\n                    },\n                }\n            \"#]]),\n        );\n    }\n\n    #[test]\n    fn sha256_machine_pgo() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_SHA256_ITER_SMALL);\n        let guest = compile_openvm(GUEST_SHA256, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin);\n\n        test_machine_compilation(\n        
    GuestTestConfig {\n                pgo_config: PgoConfig::Instruction(pgo_data.clone()),\n                name: GUEST_SHA256,\n                apc: GUEST_SHA256_APC_PGO,\n                skip: GUEST_SHA256_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 14254,\n                            log_up: 22752,\n                        },\n                        constraints: 4279,\n                        bus_interactions: 11143,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    10\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            None,\n        );\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Cell(pgo_data, None),\n                name: GUEST_SHA256,\n                apc: GUEST_SHA256_APC_PGO,\n                skip: GUEST_SHA256_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 14226,\n                            log_up: 22720,\n                        },\n                        constraints: 4255,\n                        bus_interactions: 11133,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    10\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n   
         },\n            Some(expect![[r#\"\n                AirWidthsDiff {\n                    before: AirWidths {\n                        preprocessed: 0,\n                        main: 183410,\n                        log_up: 227144,\n                    },\n                    after: AirWidths {\n                        preprocessed: 0,\n                        main: 14226,\n                        log_up: 22720,\n                    },\n                }\n            \"#]]),\n        );\n    }\n\n    #[test]\n    fn ecc_hint_machine_pgo_cell() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ECC_ITER);\n        let guest = compile_openvm(GUEST_ECC_HINTS, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin);\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Cell(pgo_data, None),\n                name: GUEST_ECC_HINTS,\n                apc: GUEST_ECC_APC_PGO,\n                skip: GUEST_ECC_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 17184,\n                            log_up: 27796,\n                        },\n                        constraints: 8573,\n                        bus_interactions: 11892,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    50\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            Some(expect![[r#\"\n                AirWidthsDiff {\n                    before: AirWidths {\n                        preprocessed: 0,\n                        main: 127688,\n       
                 log_up: 169860,\n                    },\n                    after: AirWidths {\n                        preprocessed: 0,\n                        main: 17184,\n                        log_up: 27796,\n                    },\n                }\n            \"#]]),\n        );\n    }\n\n    #[test]\n    fn ecrecover_machine_pgo_cell() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_ECRECOVER_ITER);\n        let guest = compile_openvm(GUEST_ECRECOVER_HINTS, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin);\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Cell(pgo_data, None),\n                name: GUEST_ECRECOVER_HINTS,\n                apc: GUEST_ECRECOVER_APC_PGO,\n                skip: GUEST_ECRECOVER_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 19873,\n                            log_up: 30884,\n                        },\n                        constraints: 10968,\n                        bus_interactions: 13423,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    50\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            Some(expect![[r#\"\n                AirWidthsDiff {\n                    before: AirWidths {\n                        preprocessed: 0,\n                        main: 150546,\n                        log_up: 198172,\n                    },\n                    after: AirWidths {\n                        preprocessed: 0,\n                        main: 
19873,\n                        log_up: 30884,\n                    },\n                }\n            \"#]]),\n        );\n    }\n\n    #[test]\n    fn keccak_machine_pgo_modes() {\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER_SMALL);\n        let guest = compile_openvm(GUEST_KECCAK, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin);\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::None,\n                name: GUEST_KECCAK,\n                apc: GUEST_KECCAK_APC,\n                skip: GUEST_KECCAK_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 2022,\n                            log_up: 3472,\n                        },\n                        constraints: 187,\n                        bus_interactions: 1734,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    1\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            None,\n        );\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Instruction(pgo_data.clone()),\n                name: GUEST_KECCAK,\n                apc: GUEST_KECCAK_APC,\n                skip: GUEST_KECCAK_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 2022,\n                            
log_up: 3472,\n                        },\n                        constraints: 187,\n                        bus_interactions: 1734,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    1\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            None,\n        );\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Cell(pgo_data, None),\n                name: GUEST_KECCAK,\n                apc: GUEST_KECCAK_APC,\n                skip: GUEST_KECCAK_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 2022,\n                            log_up: 3472,\n                        },\n                        constraints: 187,\n                        bus_interactions: 1734,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    1\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            Some(expect![[r#\"\n                AirWidthsDiff {\n                    before: AirWidths {\n                        preprocessed: 0,\n                        main: 27521,\n                        log_up: 35156,\n                    },\n                    after: AirWidths {\n                        preprocessed: 0,\n                        main: 2022,\n                        log_up: 3472,\n                    },\n                }\n            \"#]]),\n        );\n    }\n\n    #[test]\n    fn 
keccak_machine_cell_pgo_max_columns() {\n        const MAX_TOTAL_COLUMNS: usize = 10_000;\n\n        let mut stdin = StdIn::default();\n        stdin.write(&GUEST_KECCAK_ITER_SMALL);\n\n        let guest = compile_openvm(GUEST_KECCAK, GuestOptions::default()).unwrap();\n        let pgo_data = execution_profile_from_guest(&guest, stdin.clone());\n\n        test_machine_compilation(\n            GuestTestConfig {\n                pgo_config: PgoConfig::Cell(pgo_data, Some(MAX_TOTAL_COLUMNS)),\n                name: GUEST_KECCAK,\n                apc: GUEST_KECCAK_APC_PGO_LARGE,\n                skip: GUEST_KECCAK_SKIP,\n            },\n            MachineTestMetrics {\n                powdr_expected_sum: expect![[r#\"\n                    AirMetrics {\n                        widths: AirWidths {\n                            preprocessed: 0,\n                            main: 3234,\n                            log_up: 5264,\n                        },\n                        constraints: 571,\n                        bus_interactions: 2562,\n                    }\n                \"#]],\n                powdr_expected_machine_count: expect![[r#\"\n                    22\n                \"#]],\n                non_powdr_expected_sum: NON_POWDR_EXPECTED_SUM,\n                non_powdr_expected_machine_count: NON_POWDR_EXPECTED_MACHINE_COUNT,\n            },\n            Some(expect![[r#\"\n                AirWidthsDiff {\n                    before: AirWidths {\n                        preprocessed: 0,\n                        main: 32376,\n                        log_up: 41660,\n                    },\n                    after: AirWidths {\n                        preprocessed: 0,\n                        main: 3234,\n                        log_up: 5264,\n                    },\n                }\n            \"#]]),\n        );\n\n        // TODO\n\n        // // Assert that total columns don't exceed the initial limit set\n        // let total_columns = 
(powdr_metrics_sum + NON_POWDR_EXPECTED_SUM).widths.total();\n        // assert!(\n        //     total_columns <= MAX_TOTAL_COLUMNS,\n        //     \"Total columns exceeded the limit: {total_columns} > {MAX_TOTAL_COLUMNS}\"\n        // );\n    }\n\n    mod extraction {\n        use crate::{ExtendedVmConfig, RiscvISA, DEFAULT_OPENVM_DEGREE_BOUND};\n\n        use openvm_algebra_circuit::{Fp2Extension, ModularExtension};\n        use openvm_bigint_circuit::Int256;\n        use openvm_circuit::arch::SystemConfig;\n        use openvm_ecc_circuit::{WeierstrassExtension, SECP256K1_CONFIG};\n        use openvm_pairing_circuit::{PairingCurve, PairingExtension};\n        use openvm_rv32im_circuit::Rv32M;\n        use openvm_sdk::config::SdkVmConfig;\n        use powdr_openvm::extraction_utils::OriginalVmConfig;\n        use powdr_openvm_riscv_hints_circuit::HintsExtension;\n\n        #[test]\n        fn test_get_bus_map() {\n            let use_kzg_intrinsics = true;\n\n            let system_config = SystemConfig::default()\n                .with_continuations()\n                .with_max_constraint_degree(DEFAULT_OPENVM_DEGREE_BOUND)\n                .with_public_values(32);\n            let int256 = Int256::default();\n            let bn_config = PairingCurve::Bn254.curve_config();\n            let bls_config = PairingCurve::Bls12_381.curve_config();\n            let rv32m = Rv32M {\n                range_tuple_checker_sizes: int256.range_tuple_checker_sizes,\n            };\n            let mut supported_moduli = vec![\n                bn_config.modulus.clone(),\n                bn_config.scalar.clone(),\n                SECP256K1_CONFIG.modulus.clone(),\n                SECP256K1_CONFIG.scalar.clone(),\n            ];\n            let mut supported_complex_moduli =\n                vec![(\"Bn254Fp2\".to_string(), bn_config.modulus.clone())];\n            let mut supported_curves = vec![bn_config.clone(), SECP256K1_CONFIG.clone()];\n            let mut 
supported_pairing_curves = vec![PairingCurve::Bn254];\n            if use_kzg_intrinsics {\n                supported_moduli.push(bls_config.modulus.clone());\n                supported_moduli.push(bls_config.scalar.clone());\n                supported_complex_moduli\n                    .push((\"Bls12_381Fp2\".to_string(), bls_config.modulus.clone()));\n                supported_curves.push(bls_config.clone());\n                supported_pairing_curves.push(PairingCurve::Bls12_381);\n            }\n            let sdk_vm_config = SdkVmConfig::builder()\n                .system(system_config.into())\n                .rv32i(Default::default())\n                .rv32m(rv32m)\n                .io(Default::default())\n                .keccak(Default::default())\n                .sha256(Default::default())\n                .bigint(int256)\n                .modular(ModularExtension::new(supported_moduli))\n                .fp2(Fp2Extension::new(supported_complex_moduli))\n                .ecc(WeierstrassExtension::new(supported_curves))\n                .pairing(PairingExtension::new(supported_pairing_curves))\n                .build();\n\n            let _ = OriginalVmConfig::<RiscvISA>::new(ExtendedVmConfig {\n                sdk: sdk_vm_config,\n                hints: HintsExtension,\n            })\n            .bus_map();\n        }\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/tests/apc_builder_complex.rs",
    "content": "mod common;\nuse openvm_instructions::instruction::Instruction;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::blocks::BasicBlock;\nuse powdr_openvm_riscv::symbolic_instruction_builder::*;\nuse test_log::test;\n\nfn assert_machine_output(program: Vec<Instruction<BabyBear>>, test_name: &str) {\n    let bb = BasicBlock {\n        start_pc: 0,\n        instructions: program,\n    };\n    common::apc_builder_utils::assert_machine_output(bb.into(), \"complex\", test_name);\n}\n\n#[test]\nfn guest_top_block() {\n    // Top block from `guest` with `--pgo cell`, with 4 instructions:\n    // Instruction { opcode: 512, args: [8, 8, 16777200, 1, 0, 0, 0] }\n    // Instruction { opcode: 531, args: [4, 8, 12, 1, 2, 1, 0] }\n    // Instruction { opcode: 576, args: [4, 0, 0, 1, 0, 0, 0] }\n    // Instruction { opcode: 565, args: [4, 4, 1780, 1, 0, 1, 0] }\n\n    let program = [\n        add(8, 8, 16777200, 0),\n        storew(4, 8, 12, 2, 1, 0),\n        auipc(4, 0, 0, 1, 0),\n        jalr(4, 4, 1780, 1, 0),\n    ];\n\n    assert_machine_output(program.to_vec(), \"guest_top_block\");\n}\n\n#[test]\nfn memcpy_block() {\n    // AND rd_ptr = 52, rs1_ptr = 44, rs2 = 3, rs2_as = 0\n    // SLTU rd_ptr = 52, rs1_ptr = 52, rs2 = 1, rs2_as = 0\n    // SLTU rd_ptr = 56, rs1_ptr = 56, rs2 = 1, rs2_as = 0\n    // OR rd_ptr = 52, rs1_ptr = 52, rs2 = 56, rs2_as = 1\n    // BNE 52 0 248 1 1\n\n    let program = [\n        and(52, 44, 3, 0),\n        sltu(52, 52, 1, 0),\n        sltu(56, 56, 1, 0),\n        or(52, 52, 56, 1),\n        bne(52, 0, 248),\n    ];\n\n    assert_machine_output(program.to_vec(), \"memcpy_block\");\n}\n\n#[test]\nfn stack_accesses() {\n    // The memory optimizer should realize that [x2 + 24] is accessed twice,\n    // with the same value of x2. 
Therefore, we can reduce it to just one access.\n    let program = [\n        // Load [x2 + 20] into x8\n        loadw(8, 2, 20, 2, 1, 0),\n        // Load [x2 + 24] into x9\n        loadw(9, 2, 24, 2, 1, 0),\n        // Store [x8] into [x2 + 24]\n        storew(8, 2, 24, 2, 1, 0),\n    ];\n\n    assert_machine_output(program.to_vec(), \"stack_accesses\");\n}\n\n// Reth blocks, taken from:\n// https://georgwiese.github.io/autoprecompile-analyzer/?data=https%3A%2F%2Fgithub.com%2Fpowdr-labs%2Fbench-results%2Fblob%2Fgh-pages%2Fresults%2F2025-09-25-0815%2Freth%2Fapc_candidates.json\n\n#[test]\nfn aligned_memcpy() {\n    // Block 0x200a1c of the Reth benchmark.\n    // => 1.1B trace cells, executed 986.1K times, effectiveness 3.48x.\n    // C code:\n    // https://github.com/kraj/musl/blob/d1c1058ee7a61cf86dc0292590e3f7eb09212d70/src/string/memcpy.c#L27-L30\n    // RISC-V assembly:\n    // https://github.com/openvm-org/openvm/blob/13362dc64fc2ec6f585018b408061bf56e7b7429/crates/toolchain/openvm/src/memcpy.s#L291-L302\n    let program = [\n        loadw(60, 56, 0, 2, 1, 0),\n        loadw(64, 56, 4, 2, 1, 0),\n        loadw(68, 56, 8, 2, 1, 0),\n        loadw(20, 56, 12, 2, 1, 0),\n        storew(60, 52, 0, 2, 1, 0),\n        storew(64, 52, 4, 2, 1, 0),\n        storew(68, 52, 8, 2, 1, 0),\n        storew(20, 52, 12, 2, 1, 0),\n        add(56, 56, 16, 0),\n        add(48, 48, 16777200, 0),\n        add(52, 52, 16, 0),\n        bltu(44, 48, -44),\n    ];\n    assert_machine_output(program.to_vec(), \"aligned_memcpy\");\n}\n\n#[test]\nfn unaligned_memcpy() {\n    // Block 0x200914 of the Reth benchmark.\n    // => 484.1M trace cells, executed 442.9K times, effectiveness 4.61x.\n    // C code:\n    // https://github.com/kraj/musl/blob/d1c1058ee7a61cf86dc0292590e3f7eb09212d70/src/string/memcpy.c#L23\n    // RISC-V assembly:\n    // https://github.com/openvm-org/openvm/blob/13362dc64fc2ec6f585018b408061bf56e7b7429/crates/toolchain/openvm/src/memcpy.s#L220-L232\n    // Circuit 
visualization:\n    // https://docs.google.com/drawings/d/1JfLRuoWCyAsN3pht27W6UXUgtE_AiNx6r36lf-cAIfs/edit?usp=sharing\n    let program = [\n        loadb(68, 44, 0, 2, 1, 0),\n        add(56, 44, 1, 0),\n        add(52, 64, 1, 0),\n        storeb(68, 64, 0, 2, 1, 0),\n        add(48, 48, 16777215, 0),\n        and(44, 60, 3, 0),\n        sltu(44, 0, 44, 1),\n        sltu(64, 0, 48, 1),\n        and(68, 44, 64, 1),\n        add(60, 60, 1, 0),\n        add(44, 56, 0, 0),\n        add(64, 52, 0, 0),\n        bne(68, 0, -48),\n    ];\n    assert_machine_output(program.to_vec(), \"unaligned_memcpy\");\n}\n\n#[test]\nfn load_two_bytes_compare() {\n    // Block 0x3bc8fc of the Reth benchmark.\n    // => 70.3M trace cells, executed 293k times, especially ineffective (1.85x reduction).\n    let program = [\n        loadb(52, 40, 0, 2, 1, 0),\n        loadb(56, 44, 0, 2, 1, 0),\n        bne(52, 56, 28),\n    ];\n    assert_machine_output(program.to_vec(), \"load_two_bytes_compare\");\n}\n\n#[test]\nfn load_two_bytes_compare_unsigned() {\n    // Similar to `load_two_bytes_compare`, but using `loadbu` instead of `loadb`.\n    // Note that the two tests are largely equivalent; the sign extension of `loadb` does not\n    // change the comparison result (though the contents of r52 and r56 will differ between the two).\n    let program = [\n        loadbu(52, 40, 0, 2, 1, 0),\n        loadbu(56, 44, 0, 2, 1, 0),\n        bne(52, 56, 28),\n    ];\n    assert_machine_output(program.to_vec(), \"load_two_bytes_compare_unsigned\");\n}\n\n#[test]\nfn store_to_same_address() {\n    // Store two different values to the same memory address.\n    // The memory optimizer should realize the two memory addresses are the same,\n    // and eliminate creating two separate memory columns.\n    let program = [storeb(4, 8, 8, 2, 1, 0), storeb(32, 8, 8, 2, 1, 0)];\n    assert_machine_output(program.to_vec(), \"store_to_same_memory_address\");\n}\n\n#[test]\nfn 
many_stores_relative_to_same_register() {\n    // Many stores to different offsets relative to the same base register.\n    // For a real-world example of something similar, see:\n    // https://georgwiese.github.io/autoprecompile-analyzer/?data=https%3A%2F%2Fgist.githubusercontent.com%2Fgeorgwiese%2Faa85dcc145f26d37f8f03f9a04665971%2Fraw%2F6ce661ec86302d2fef0282908117c0427d9888db%2Freth_with_labels.json&block=0x260648\n\n    // Reproduces issue: Compute memory pointers in the field for intermediate pointers\n    // https://github.com/powdr-labs/powdr/issues/3365\n\n    let program = [\n        storew(5, 2, 12, 2, 1, 0),\n        storew(6, 2, 16, 2, 1, 0),\n        storew(7, 2, 20, 2, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"many_stores_relative_to_same_register\");\n}\n\n#[test]\nfn copy_byte() {\n    // Copies a byte from one memory location to another, using loadb and storeb.\n    // See this real-world example with a similar pattern:\n    // https://georgwiese.github.io/autoprecompile-analyzer/?data=https%3A%2F%2Fgist.githubusercontent.com%2Fgeorgwiese%2Faa85dcc145f26d37f8f03f9a04665971%2Fraw%2F6ce661ec86302d2fef0282908117c0427d9888db%2Freth_with_labels.json&block=0x200914\n\n    let program = [\n        loadb(8, 2, 0, 2, 1, 0),\n        storeb(8, 3, 0, 2, 1, 0),\n        // Overwrite r8 with value 3.\n        // Something similar happens in the block above: The sign extension of `loadb` is not actually needed.\n        add(8, 0, 3, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"copy_byte\");\n}\n\n#[test]\nfn rotate() {\n    // Rotation, implemented as `(x >> imm) | (x << (32-imm))`\n    // for imm = 1\n    let program = [srl(1, 3, 1, 0), sll(2, 3, 31, 0), or(3, 1, 2, 1)];\n    assert_machine_output(program.to_vec(), \"rotate\");\n}\n"
  },
  {
    "path": "openvm-riscv/tests/apc_builder_pseudo_instructions.rs",
    "content": "mod common;\nuse openvm_instructions::instruction::Instruction;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::blocks::BasicBlock;\nuse powdr_openvm_riscv::symbolic_instruction_builder::*;\nuse test_log::test;\n\nfn assert_machine_output(program: Vec<Instruction<BabyBear>>, test_name: &str) {\n    let bb = BasicBlock {\n        start_pc: 0,\n        instructions: program,\n    };\n    common::apc_builder_utils::assert_machine_output(bb.into(), \"pseudo_instructions\", test_name);\n}\n\n// Arithmetic pseudo instructions\n#[test]\nfn mv() {\n    // mv rd, rs1 expands to: addi rd, rs1, 0\n    let program = [\n        // [x8] = [x5]\n        add(8, 5, 0, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"mv\");\n}\n\n#[test]\nfn not() {\n    // not rd, rs1 expands to: xori rd, rs1, -1\n    // -1 in 24-bit 2's complement is 0xFFFFFF\n    let minus_one: u32 = 0xFFFFFF;\n    let program = [\n        // [x8] = ~[x5]\n        xor(8, 5, minus_one, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"not\");\n}\n\n#[test]\nfn neg() {\n    // neg rd, rs1 expands to: sub rd, x0, rs1\n    let program = [\n        // [x8] = -[x5]\n        sub(8, 0, 5, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"neg\");\n}\n\n// Set pseudo instructions\n#[test]\nfn seqz() {\n    // seqz rd, rs1 expands to: sltiu rd, rs1, 1\n    // which in our case is: sltu rd, rs1, 1 (with rs2_as = 0 for immediate)\n    // This sets rd = 1 if rs1 == 0, else rd = 0\n    let program = [\n        // [x8] = 1 if [x5] == 0, else 0\n        sltu(8, 5, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"seqz\");\n}\n\n#[test]\nfn snez() {\n    // snez rd, rs1 expands to: sltu rd, x0, rs1\n    let program = [\n        // [x8] = 1 if [x5] != 0, else 0\n        sltu(8, 0, 5, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"snez\");\n}\n\n#[test]\nfn sltz() {\n    // sltz rd, rs1 expands to: slt rd, rs1, x0\n    let program = [\n    
    // [x8] = 1 if [x5] < 0 (signed), else 0\n        slt(8, 5, 0, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"sltz\");\n}\n\n#[test]\nfn sgtz() {\n    // sgtz rd, rs1 expands to: slt rd, x0, rs1\n    let program = [\n        // [x8] = 1 if [x5] > 0 (signed), else 0\n        slt(8, 0, 5, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"sgtz\");\n}\n\n// Branch pseudo instructions\n#[test]\nfn beqz() {\n    // beqz rs1, offset expands to: beq rs1, x0, offset\n    let program = [\n        // pc = pc + 8 if [x5] == 0\n        beq(5, 0, 8),\n    ];\n    assert_machine_output(program.to_vec(), \"beqz\");\n}\n\n#[test]\nfn bnez() {\n    // bnez rs1, offset expands to: bne rs1, x0, offset\n    let program = [\n        // pc = pc + 8 if [x5] != 0\n        bne(5, 0, 8),\n    ];\n    assert_machine_output(program.to_vec(), \"bnez\");\n}\n\n#[test]\nfn blez() {\n    // blez rs1, offset expands to: bge x0, rs1, offset\n    let program = [\n        // pc = pc + 8 if [x5] <= 0 (signed)\n        bge(0, 5, 8),\n    ];\n    assert_machine_output(program.to_vec(), \"blez\");\n}\n\n#[test]\nfn bgez() {\n    // bgez rs1, offset expands to: bge rs1, x0, offset\n    let program = [\n        // pc = pc + 8 if [x5] >= 0 (signed)\n        bge(5, 0, 8),\n    ];\n    assert_machine_output(program.to_vec(), \"bgez\");\n}\n\n#[test]\nfn bltz() {\n    // bltz rs1, offset expands to: blt rs1, x0, offset\n    let program = [\n        // pc = pc + 8 if [x5] < 0 (signed)\n        blt(5, 0, 8),\n    ];\n    assert_machine_output(program.to_vec(), \"bltz\");\n}\n\n#[test]\nfn bgtz() {\n    // bgtz rs1, offset expands to: blt x0, rs1, offset\n    let program = [\n        // pc = pc + 8 if [x5] > 0 (signed)\n        blt(0, 5, 8),\n    ];\n    assert_machine_output(program.to_vec(), \"bgtz\");\n}\n\n// Jump pseudo instructions\n#[test]\nfn j() {\n    // j offset expands to: jal x0, offset\n    let program = [\n        // pc = pc + 8\n        jal(0, 0, 8, 1, 0),\n    ];\n    
assert_machine_output(program.to_vec(), \"j\");\n}\n\n#[test]\nfn jr() {\n    // jr offset expands to: jal x1, offset\n    let program = [\n        // pc = pc + 8, [x1] = pc + 4\n        jal(1, 0, 8, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"jr\");\n}\n\n#[test]\nfn ret() {\n    // ret expands to: jalr x0, x1, 0\n    let program = [\n        // pc = [x1] + 0\n        jalr(0, 1, 0, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"ret\");\n}\n\n#[test]\nfn load_immediate() {\n    // [x48] = [x0] + 216 = 216\n    let program = [add(48, 0, 216, 0)];\n    assert_machine_output(program.to_vec(), \"load_immediate\");\n}\n"
  },
  {
    "path": "openvm-riscv/tests/apc_builder_single_instructions.rs",
    "content": "mod common;\nuse openvm_instructions::instruction::Instruction;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::blocks::BasicBlock;\nuse powdr_openvm_riscv::symbolic_instruction_builder::*;\nuse test_log::test;\n\nfn assert_machine_output(program: Vec<Instruction<BabyBear>>, test_name: &str) {\n    let bb = BasicBlock {\n        start_pc: 0,\n        instructions: program,\n    };\n    common::apc_builder_utils::assert_machine_output(bb.into(), \"single_instructions\", test_name);\n}\n\n// ALU Chip instructions\n#[test]\nfn single_add_1() {\n    let program = [\n        // [x8] = [x8] + 1\n        add(8, 8, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_add_1\");\n}\n\n#[test]\nfn single_sub() {\n    let program = [\n        // [x8] = [x7] - [x5]\n        sub(8, 7, 5, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"single_sub\");\n}\n\n#[test]\nfn single_and_0() {\n    let program = [\n        // [x8] = [x0] & 5\n        and(8, 0, 5, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_and_0\");\n}\n\n#[test]\nfn single_xor() {\n    let program = [\n        // [x8] = [x7] ^ [x5]\n        xor(8, 7, 5, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"single_xor\");\n}\n\n#[test]\nfn single_mul() {\n    let program = [\n        // [x8] = [x7] * [x5]\n        mul(8, 7, 5, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_mul\");\n}\n\n// Load/Store Chip instructions\n// `needs_write` can be 0 iff `rd=0` for load, but must be 1 if store.\n#[test]\nfn single_loadw() {\n    let program = [\n        // Load [x2 + 20]_2 into x8\n        loadw(8, 2, 20, 2, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_loadw\");\n}\n\n#[test]\nfn single_loadbu() {\n    let program = [\n        // Load [x2 + 21]_2 into x8\n        loadbu(8, 2, 21, 2, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_loadbu\");\n}\n\n#[test]\nfn 
single_loadhu() {\n    let program = [\n        // Load [x2 + 22]_2 but `needs_write=0`\n        loadhu(0, 2, 22, 2, 0, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_loadhu\");\n}\n\n#[test]\nfn single_storew() {\n    let program = [\n        // Store [x8] into [x2 - 4]_2\n        storew(8, 2, 4, 2, 1, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"single_storew\");\n}\n\n#[test]\nfn single_storeh() {\n    let program = [\n        // Store [x8] into [x2 - 6]_2\n        storeh(8, 2, 6, 2, 1, 1),\n    ];\n    assert_machine_output(program.to_vec(), \"single_storeh\");\n}\n\n#[test]\nfn single_storeb() {\n    let program = [\n        // Store [x8] into [x2 + 3]_2\n        storeb(8, 2, 3, 2, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_storeb\");\n}\n\n// Load/Store Sign Extend Chip instructions\n#[test]\nfn single_loadh() {\n    let program = [\n        // Load [x2 + 6]_2 into x8\n        loadh(8, 2, 6, 2, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_loadh\");\n}\n\n#[test]\nfn single_loadb() {\n    let program = [\n        // Load [x2 + 3]_2 into x8\n        loadb(8, 2, 3, 2, 1, 0),\n    ];\n    assert_machine_output(program.to_vec(), \"single_loadb\");\n}\n\n#[test]\nfn single_loadb_imm0() {\n    let program = [\n        // Load [x2]_2 into x8\n        loadb(8, 2, 0, 2, 1, 0),\n    ];\n    // The x2 + imm addition should be optimized away.\n    assert_machine_output(program.to_vec(), \"single_loadb_imm0\");\n}\n\n#[test]\nfn single_loadb_x0() {\n    let program = [\n        // Load [x2 + 3]_2 into x0, i.e. 
`needs_write=0`\n        loadb(0, 2, 3, 2, 0, 0),\n    ];\n    // The instruction is a no-op, ideally, the APC would be empty.\n    assert_machine_output(program.to_vec(), \"single_loadb_x0\");\n}\n\n// Branch Eq Chip instructions\n#[test]\nfn single_beq() {\n    let program = [\n        // pc = pc + 2 if x8 == x5\n        beq(8, 5, 2),\n    ];\n    assert_machine_output(program.to_vec(), \"single_beq\");\n}\n\n#[test]\nfn single_bne() {\n    let program = [\n        // pc = pc + 2 if x8 != x5\n        bne(8, 5, 2),\n    ];\n    assert_machine_output(program.to_vec(), \"single_bne\");\n}\n\n// Branch Lt Chip instructions\n#[test]\nfn single_blt() {\n    let program = [\n        // pc = pc + 2 if x8 < x5 (signed)\n        blt(8, 5, 2),\n    ];\n    assert_machine_output(program.to_vec(), \"single_blt\");\n}\n\n#[test]\nfn single_bltu() {\n    let program = [\n        // pc = pc + 2 if x8 < x5\n        bltu(8, 5, 2),\n    ];\n    assert_machine_output(program.to_vec(), \"single_bltu\");\n}\n\n#[test]\nfn single_bge() {\n    let program = [\n        // pc = pc + 2 if x8 >= x5 (signed)\n        bge(8, 5, 2),\n    ];\n    assert_machine_output(program.to_vec(), \"single_bge\");\n}\n\n#[test]\nfn single_bgeu() {\n    let program = [\n        // pc = pc + 2 if x8 >= x5\n        bgeu(8, 5, 2),\n    ];\n    assert_machine_output(program.to_vec(), \"single_bgeu\");\n}\n\n// Shift Chip instructions\n#[test]\nfn single_srl() {\n    // Instruction 416 from the largest basic block of the Keccak guest program.\n    let program = [srl(68, 40, 25, 0)];\n    assert_machine_output(program.to_vec(), \"single_srl\");\n}\n\n#[test]\nfn single_sll() {\n    // r68 = r40 << 3\n    let program = [sll(68, 40, 3, 0)];\n    assert_machine_output(program.to_vec(), \"single_sll\");\n}\n\n#[test]\nfn single_sll_by_8() {\n    // r68 = r40 << 8\n    let program = [sll(68, 40, 8, 0)];\n    assert_machine_output(program.to_vec(), \"single_sll_by_8\");\n}\n\n#[test]\nfn single_sra() {\n    // r68 = 
sign_extend(r40 >> val(R3))\n    let program = [sra(68, 40, 3, 1)];\n    assert_machine_output(program.to_vec(), \"single_sra\");\n}\n\n// DivRem Chip instructions\n#[test]\nfn single_div() {\n    // [x8] = [x7] / [x5] (signed)\n    let program = [div(8, 7, 5, 1, 0)];\n    assert_machine_output(program.to_vec(), \"single_div\");\n}\n\n#[test]\nfn single_divu() {\n    // [x8] = [x7] / [x5] (unsigned)\n    let program = [divu(8, 7, 5, 1, 0)];\n    assert_machine_output(program.to_vec(), \"single_divu\");\n}\n\n#[test]\nfn single_rem() {\n    // [x8] = [x7] % [x5] (signed)\n    let program = [rem(8, 7, 5, 1, 0)];\n    assert_machine_output(program.to_vec(), \"single_rem\");\n}\n\n#[test]\nfn single_remu() {\n    // [x8] = [x7] % [x5] (unsigned)\n    let program = [remu(8, 7, 5, 1, 0)];\n    assert_machine_output(program.to_vec(), \"single_remu\");\n}\n"
  },
  {
    "path": "openvm-riscv/tests/apc_builder_superblocks.rs",
    "content": "mod common;\nuse openvm_instructions::instruction::Instruction;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::blocks::BasicBlock;\nuse powdr_openvm_riscv::symbolic_instruction_builder::*;\nuse test_log::test;\n\nfn assert_machine_output(program: Vec<BasicBlock<Instruction<BabyBear>>>, test_name: &str) {\n    common::apc_builder_utils::assert_machine_output(program.into(), \"superblocks\", test_name);\n}\n\nfn bb(\n    start_pc: u64,\n    instructions: Vec<Instruction<BabyBear>>,\n) -> BasicBlock<Instruction<BabyBear>> {\n    BasicBlock {\n        start_pc,\n        instructions,\n    }\n}\n\n#[test]\nfn beq0_fallthrough() {\n    // Superblock where the BEQ instruction falls through to the next instruction.\n    // This should enforce that x8 != 0.\n    let program = [\n        bb(0, vec![beq(8, 0, 40)]),\n        // PC=4, fallthrough\n        bb(4, vec![add(9, 9, 1, 0)]),\n    ];\n\n    assert_machine_output(program.to_vec(), \"beq0_fallthrough\");\n}\n\n#[test]\nfn beq0_jump() {\n    // Superblock where the BEQ instruction jumps to the given address.\n    // This should enforce that x8 == 0.\n    let program = [\n        bb(0, vec![beq(8, 0, 40)]),\n        // PC=40, jump taken\n        bb(40, vec![add(9, 9, 1, 0)]),\n    ];\n\n    assert_machine_output(program.to_vec(), \"beq0_jump\");\n}\n\n#[test]\nfn beq_fallthrough() {\n    // Superblock where the BEQ instruction falls through to the next instruction.\n    // This should enforce that x8 != x10 (x10 holds 33).\n    let program = [\n        bb(0, vec![add(10, 0, 33, 0), beq(8, 10, 40)]),\n        // PC=8, fallthrough (BEQ at PC=4)\n        bb(8, vec![add(9, 9, 1, 0)]),\n    ];\n\n    assert_machine_output(program.to_vec(), \"beq_fallthrough\");\n}\n\n#[test]\nfn beq_jump() {\n    // Superblock where the BEQ instruction jumps to the given address.\n    // This should enforce that x8 == x10 (x10 holds 33).\n    let program = [\n        bb(0, vec![add(10, 0, 33, 0), 
beq(8, 10, 40)]),\n        // PC=44, jump taken (BEQ at PC=4 with imm=40 jumps to PC=44)\n        bb(44, vec![add(9, 9, 1, 0)]),\n    ];\n\n    assert_machine_output(program.to_vec(), \"beq_jump\");\n}\n\n#[test]\nfn many_blocks() {\n    // Superblock with 3 basic blocks.\n    // Constraints should propagate across the jump instructions:\n    // x10 = 10\n    // x11 = x10\n    // x12 = x11 + 5 = 15\n    // x8 = x12 = 15\n    let program = [\n        bb(\n            0,\n            vec![\n                add(10, 0, 10, 0), // x10 = 10\n                bne(10, 11, 100),\n            ],\n        ),\n        // PC=8, BNE fallthrough (x10 = x11)\n        bb(\n            8,\n            vec![\n                add(12, 11, 5, 0), // x12 = x11 + 5 = 15 (known after propagation)\n                beq(8, 12, 60),    // PC=12, BEQ jump to PC+60=72\n            ],\n        ),\n        // PC=72, BEQ jump (x8 = x12 = 15)\n        bb(72, vec![add(9, 9, 1, 0)]),\n    ];\n\n    assert_machine_output(program.to_vec(), \"many_blocks\");\n}\n"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/aligned_memcpy.txt",
    "content": "Instructions:\n   0: LOADW rd_rs2_ptr = 60, rs1_ptr = 56, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n   4: LOADW rd_rs2_ptr = 64, rs1_ptr = 56, imm = 4, mem_as = 2, needs_write = 1, imm_sign = 0\n   8: LOADW rd_rs2_ptr = 68, rs1_ptr = 56, imm = 8, mem_as = 2, needs_write = 1, imm_sign = 0\n  12: LOADW rd_rs2_ptr = 20, rs1_ptr = 56, imm = 12, mem_as = 2, needs_write = 1, imm_sign = 0\n  16: STOREW rd_rs2_ptr = 60, rs1_ptr = 52, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  20: STOREW rd_rs2_ptr = 64, rs1_ptr = 52, imm = 4, mem_as = 2, needs_write = 1, imm_sign = 0\n  24: STOREW rd_rs2_ptr = 68, rs1_ptr = 52, imm = 8, mem_as = 2, needs_write = 1, imm_sign = 0\n  28: STOREW rd_rs2_ptr = 20, rs1_ptr = 52, imm = 12, mem_as = 2, needs_write = 1, imm_sign = 0\n  32: ADD rd_ptr = 56, rs1_ptr = 56, rs2 = 16, rs2_as = 0\n  36: ADD rd_ptr = 48, rs1_ptr = 48, rs2 = 16777200, rs2_as = 0\n  40: ADD rd_ptr = 52, rs1_ptr = 52, rs2 = 16, rs2_as = 0\n  44: BLTU 44 48 -44 1 1\n\nAPC advantage:\n  - Main columns: 468 -> 132 (3.55x reduction)\n  - Bus interactions: 209 -> 89 (2.35x reduction)\n  - Constraints: 291 -> 45 (6.47x reduction)\n\nSymbolic machine using 132 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  read_data_aux__base__prev_timestamp_1\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  mem_ptr_limbs__0_1\n  mem_ptr_limbs__1_1\n  write_base_aux__prev_timestamp_1\n  
write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  read_data__0_1\n  read_data__1_1\n  read_data__2_1\n  read_data__3_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  read_data_aux__base__prev_timestamp_2\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_2\n  mem_ptr_limbs__0_2\n  mem_ptr_limbs__1_2\n  write_base_aux__prev_timestamp_2\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_2\n  read_data__0_2\n  read_data__1_2\n  read_data__2_2\n  read_data__3_2\n  prev_data__0_2\n  prev_data__1_2\n  prev_data__2_2\n  prev_data__3_2\n  read_data_aux__base__prev_timestamp_3\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_3\n  mem_ptr_limbs__0_3\n  mem_ptr_limbs__1_3\n  write_base_aux__prev_timestamp_3\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_3\n  read_data__0_3\n  read_data__1_3\n  read_data__2_3\n  read_data__3_3\n  prev_data__0_3\n  prev_data__1_3\n  prev_data__2_3\n  prev_data__3_3\n  rs1_data__0_4\n  rs1_data__1_4\n  rs1_data__2_4\n  rs1_data__3_4\n  rs1_aux_cols__base__prev_timestamp_4\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_4\n  mem_ptr_limbs__0_4\n  mem_ptr_limbs__1_4\n  write_base_aux__prev_timestamp_4\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_4\n  prev_data__0_4\n  prev_data__1_4\n  prev_data__2_4\n  prev_data__3_4\n  mem_ptr_limbs__0_5\n  mem_ptr_limbs__1_5\n  write_base_aux__prev_timestamp_5\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_5\n  prev_data__0_5\n  prev_data__1_5\n  prev_data__2_5\n  prev_data__3_5\n  mem_ptr_limbs__0_6\n  mem_ptr_limbs__1_6\n  write_base_aux__prev_timestamp_6\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_6\n  prev_data__0_6\n  prev_data__1_6\n  prev_data__2_6\n  prev_data__3_6\n  mem_ptr_limbs__0_7\n  mem_ptr_limbs__1_7\n  write_base_aux__prev_timestamp_7\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_7\n  prev_data__0_7\n  prev_data__1_7\n  prev_data__2_7\n  prev_data__3_7\n  a__0_8\n  a__1_8\n  a__2_8\n  a__3_8\n  
reads_aux__0__base__prev_timestamp_9\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_9\n  writes_aux__prev_data__0_9\n  writes_aux__prev_data__1_9\n  writes_aux__prev_data__2_9\n  writes_aux__prev_data__3_9\n  a__0_9\n  a__1_9\n  a__2_9\n  a__3_9\n  a__0_10\n  a__1_10\n  a__2_10\n  a__3_10\n  reads_aux__0__base__prev_timestamp_11\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_11\n  a__0_11\n  a__1_11\n  a__2_11\n  a__3_11\n  cmp_result_11\n  diff_marker__0_11\n  diff_marker__1_11\n  diff_marker__2_11\n  diff_marker__3_11\n  diff_val_11\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[48 - 48 * cmp_result_11, from_state__timestamp_0 + 35]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 56, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 60, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, read_data_aux__base__prev_timestamp_1]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 4]\nmult=is_valid * -1, args=[1, 64, prev_data__0_1, prev_data__1_1, prev_data__2_1, prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_2 + 65536 * mem_ptr_limbs__1_2, read_data__0_2, read_data__1_2, read_data__2_2, 
read_data__3_2, read_data_aux__base__prev_timestamp_2]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_2 + 65536 * mem_ptr_limbs__1_2, read_data__0_2, read_data__1_2, read_data__2_2, read_data__3_2, from_state__timestamp_0 + 7]\nmult=is_valid * -1, args=[1, 68, prev_data__0_2, prev_data__1_2, prev_data__2_2, prev_data__3_2, write_base_aux__prev_timestamp_2]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_3 + 65536 * mem_ptr_limbs__1_3, read_data__0_3, read_data__1_3, read_data__2_3, read_data__3_3, read_data_aux__base__prev_timestamp_3]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_3 + 65536 * mem_ptr_limbs__1_3, read_data__0_3, read_data__1_3, read_data__2_3, read_data__3_3, from_state__timestamp_0 + 10]\nmult=is_valid * -1, args=[1, 20, prev_data__0_3, prev_data__1_3, prev_data__2_3, prev_data__3_3, write_base_aux__prev_timestamp_3]\nmult=is_valid * -1, args=[1, 52, rs1_data__0_4, rs1_data__1_4, rs1_data__2_4, rs1_data__3_4, rs1_aux_cols__base__prev_timestamp_4]\nmult=is_valid * 1, args=[1, 60, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 13]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_4 + 65536 * mem_ptr_limbs__1_4, prev_data__0_4, prev_data__1_4, prev_data__2_4, prev_data__3_4, write_base_aux__prev_timestamp_4]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_4 + 65536 * mem_ptr_limbs__1_4, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 14]\nmult=is_valid * 1, args=[1, 64, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 16]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_5 + 65536 * mem_ptr_limbs__1_5, prev_data__0_5, prev_data__1_5, prev_data__2_5, prev_data__3_5, write_base_aux__prev_timestamp_5]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_5 + 65536 * mem_ptr_limbs__1_5, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 17]\nmult=is_valid * 1, args=[1, 68, read_data__0_2, read_data__1_2, 
read_data__2_2, read_data__3_2, from_state__timestamp_0 + 19]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_6 + 65536 * mem_ptr_limbs__1_6, prev_data__0_6, prev_data__1_6, prev_data__2_6, prev_data__3_6, write_base_aux__prev_timestamp_6]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_6 + 65536 * mem_ptr_limbs__1_6, read_data__0_2, read_data__1_2, read_data__2_2, read_data__3_2, from_state__timestamp_0 + 20]\nmult=is_valid * 1, args=[1, 20, read_data__0_3, read_data__1_3, read_data__2_3, read_data__3_3, from_state__timestamp_0 + 22]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_7 + 65536 * mem_ptr_limbs__1_7, prev_data__0_7, prev_data__1_7, prev_data__2_7, prev_data__3_7, write_base_aux__prev_timestamp_7]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_7 + 65536 * mem_ptr_limbs__1_7, read_data__0_3, read_data__1_3, read_data__2_3, read_data__3_3, from_state__timestamp_0 + 23]\nmult=is_valid * 1, args=[1, 56, a__0_8, a__1_8, a__2_8, a__3_8, from_state__timestamp_0 + 26]\nmult=is_valid * -1, args=[1, 48, writes_aux__prev_data__0_9, writes_aux__prev_data__1_9, writes_aux__prev_data__2_9, writes_aux__prev_data__3_9, reads_aux__0__base__prev_timestamp_9]\nmult=is_valid * 1, args=[1, 52, a__0_10, a__1_10, a__2_10, a__3_10, from_state__timestamp_0 + 32]\nmult=is_valid * -1, args=[1, 44, a__0_11, a__1_11, a__2_11, a__3_11, reads_aux__0__base__prev_timestamp_11]\nmult=is_valid * 1, args=[1, 44, a__0_11, a__1_11, a__2_11, a__3_11, from_state__timestamp_0 + 33]\nmult=is_valid * 1, args=[1, 48, a__0_9, a__1_9, a__2_9, a__3_9, from_state__timestamp_0 + 34]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 
13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_1), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_1, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_1 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_2), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_2, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_2 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 92160), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_2 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 107520), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_3), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_3, 13]\nmult=is_valid * 1, 
args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_3, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_3 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_3 - (15360 * from_state__timestamp_0 + 138240), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_3, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_3 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_3 - (15360 * from_state__timestamp_0 + 153600), 12]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_4, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_4 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_4 - (15360 * from_state__timestamp_0 + 168960), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_4), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_4, 13]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_4, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_4 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_4 - (15360 * from_state__timestamp_0 + 199680), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_5), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_5, 13]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_5, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_5 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_5 - (15360 * from_state__timestamp_0 + 245760), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_6), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_6, 13]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_6, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_6 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_6 - (15360 * from_state__timestamp_0 + 291840), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_7), 
14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_7, 13]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_7, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_7 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_7 - (15360 * from_state__timestamp_0 + 337920), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_9, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_9 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_9 - (15360 * from_state__timestamp_0 + 399360), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_11, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_11 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_11 - (15360 * from_state__timestamp_0 + 491520), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_11 + diff_marker__1_11 + diff_marker__2_11 + diff_marker__3_11, args=[diff_val_11 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[a__0_8, a__1_8, 0, 0]\nmult=is_valid * 1, args=[a__2_8, a__3_8, 0, 0]\nmult=is_valid * 1, args=[a__0_9, a__1_9, 0, 0]\nmult=is_valid * 1, args=[a__2_9, a__3_9, 0, 0]\nmult=is_valid * 1, args=[a__0_10, a__1_10, 0, 0]\nmult=is_valid * 1, args=[a__2_10, a__3_10, 0, 0]\n\n// Algebraic constraints:\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 1)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 1)) = 0\n(30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 122880 * is_valid)) * (30720 * mem_ptr_limbs__0_1 - (30720 * 
rs1_data__0_0 + 7864320 * rs1_data__1_0 + 122881)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_1 + 251658242 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_1 + 251658243)) = 0\n(30720 * mem_ptr_limbs__0_2 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 245760 * is_valid)) * (30720 * mem_ptr_limbs__0_2 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 245761)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_2 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_2 + 503316484 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_2 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_2 + 503316485)) = 0\n(30720 * mem_ptr_limbs__0_3 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 368640 * is_valid)) * (30720 * mem_ptr_limbs__0_3 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 368641)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_3 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_3 + 754974726 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_3 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_3 + 754974727)) = 0\n(30720 * mem_ptr_limbs__0_4 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4)) * (30720 * mem_ptr_limbs__0_4 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 1)) = 0\n(943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_4 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_4)) * (943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_4 - (120 * rs1_data__1_4 + 30720 * 
rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_4 + 1)) = 0\n(30720 * mem_ptr_limbs__0_5 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 122880 * is_valid)) * (30720 * mem_ptr_limbs__0_5 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 122881)) = 0\n(943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_5 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_5 + 251658242 * is_valid)) * (943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_5 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_5 + 251658243)) = 0\n(30720 * mem_ptr_limbs__0_6 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 245760 * is_valid)) * (30720 * mem_ptr_limbs__0_6 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 245761)) = 0\n(943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_6 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_6 + 503316484 * is_valid)) * (943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_6 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_6 + 503316485)) = 0\n(30720 * mem_ptr_limbs__0_7 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 368640 * is_valid)) * (30720 * mem_ptr_limbs__0_7 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 368641)) = 0\n(943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_7 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_7 + 754974726 * is_valid)) * (943718400 * rs1_data__0_4 + 30720 * mem_ptr_limbs__1_7 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * mem_ptr_limbs__0_7 + 754974727)) = 0\n(7864320 * a__0_8 - (7864320 * rs1_data__0_0 + 125829120 * is_valid)) * (7864320 * a__0_8 - (7864320 * rs1_data__0_0 + 125829121)) = 0\n(30720 * a__0_8 + 7864320 * a__1_8 - (30720 * rs1_data__0_0 + 7864320 * 
rs1_data__1_0 + 491520 * is_valid)) * (30720 * a__0_8 + 7864320 * a__1_8 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 491521)) = 0\n(120 * a__0_8 + 30720 * a__1_8 + 7864320 * a__2_8 - (120 * rs1_data__0_0 + 30720 * rs1_data__1_0 + 7864320 * rs1_data__2_0 + 1920 * is_valid)) * (120 * a__0_8 + 30720 * a__1_8 + 7864320 * a__2_8 - (120 * rs1_data__0_0 + 30720 * rs1_data__1_0 + 7864320 * rs1_data__2_0 + 1921)) = 0\n(943718400 * rs1_data__0_0 + 120 * a__1_8 + 30720 * a__2_8 + 7864320 * a__3_8 + 1006632953 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * a__0_8)) * (943718400 * rs1_data__0_0 + 120 * a__1_8 + 30720 * a__2_8 + 7864320 * a__3_8 + 1006632952 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * a__0_8)) = 0\n(7864320 * a__0_9 + 125829121 * is_valid - 7864320 * writes_aux__prev_data__0_9) * (7864320 * a__0_9 + 125829120 - 7864320 * writes_aux__prev_data__0_9) = 0\n(30720 * a__0_9 + 7864320 * a__1_9 + 491521 * is_valid - (30720 * writes_aux__prev_data__0_9 + 7864320 * writes_aux__prev_data__1_9)) * (30720 * a__0_9 + 7864320 * a__1_9 + 491520 - (30720 * writes_aux__prev_data__0_9 + 7864320 * writes_aux__prev_data__1_9)) = 0\n(120 * a__0_9 + 30720 * a__1_9 + 7864320 * a__2_9 + 1921 * is_valid - (120 * writes_aux__prev_data__0_9 + 30720 * writes_aux__prev_data__1_9 + 7864320 * writes_aux__prev_data__2_9)) * (120 * a__0_9 + 30720 * a__1_9 + 7864320 * a__2_9 + 1920 - (120 * writes_aux__prev_data__0_9 + 30720 * writes_aux__prev_data__1_9 + 7864320 * writes_aux__prev_data__2_9)) = 0\n(943718400 * writes_aux__prev_data__0_9 + 120 * a__1_9 + 30720 * a__2_9 + 7864320 * a__3_9 - (120 * writes_aux__prev_data__1_9 + 30720 * writes_aux__prev_data__2_9 + 7864320 * writes_aux__prev_data__3_9 + 943718400 * a__0_9 + 1006632952 * is_valid)) * (943718400 * writes_aux__prev_data__0_9 + 120 * a__1_9 + 30720 * a__2_9 + 7864320 * a__3_9 - (120 * writes_aux__prev_data__1_9 + 30720 * 
writes_aux__prev_data__2_9 + 7864320 * writes_aux__prev_data__3_9 + 943718400 * a__0_9 + 1006632953)) = 0\n(7864320 * a__0_10 - (7864320 * rs1_data__0_4 + 125829120 * is_valid)) * (7864320 * a__0_10 - (7864320 * rs1_data__0_4 + 125829121)) = 0\n(30720 * a__0_10 + 7864320 * a__1_10 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 491520 * is_valid)) * (30720 * a__0_10 + 7864320 * a__1_10 - (30720 * rs1_data__0_4 + 7864320 * rs1_data__1_4 + 491521)) = 0\n(120 * a__0_10 + 30720 * a__1_10 + 7864320 * a__2_10 - (120 * rs1_data__0_4 + 30720 * rs1_data__1_4 + 7864320 * rs1_data__2_4 + 1920 * is_valid)) * (120 * a__0_10 + 30720 * a__1_10 + 7864320 * a__2_10 - (120 * rs1_data__0_4 + 30720 * rs1_data__1_4 + 7864320 * rs1_data__2_4 + 1921)) = 0\n(943718400 * rs1_data__0_4 + 120 * a__1_10 + 30720 * a__2_10 + 7864320 * a__3_10 + 1006632953 * is_valid - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * a__0_10)) * (943718400 * rs1_data__0_4 + 120 * a__1_10 + 30720 * a__2_10 + 7864320 * a__3_10 + 1006632952 - (120 * rs1_data__1_4 + 30720 * rs1_data__2_4 + 7864320 * rs1_data__3_4 + 943718400 * a__0_10)) = 0\ncmp_result_11 * (cmp_result_11 - 1) = 0\ndiff_marker__3_11 * (diff_marker__3_11 - 1) = 0\n(1 - diff_marker__3_11) * ((a__3_9 - a__3_11) * (2 * cmp_result_11 - 1)) = 0\ndiff_marker__3_11 * ((a__3_11 - a__3_9) * (2 * cmp_result_11 - 1) + diff_val_11) = 0\ndiff_marker__2_11 * (diff_marker__2_11 - 1) = 0\n(1 - (diff_marker__2_11 + diff_marker__3_11)) * ((a__2_9 - a__2_11) * (2 * cmp_result_11 - 1)) = 0\ndiff_marker__2_11 * ((a__2_11 - a__2_9) * (2 * cmp_result_11 - 1) + diff_val_11) = 0\ndiff_marker__1_11 * (diff_marker__1_11 - 1) = 0\n(1 - (diff_marker__1_11 + diff_marker__2_11 + diff_marker__3_11)) * ((a__1_9 - a__1_11) * (2 * cmp_result_11 - 1)) = 0\ndiff_marker__1_11 * ((a__1_11 - a__1_9) * (2 * cmp_result_11 - 1) + diff_val_11) = 0\ndiff_marker__0_11 * (diff_marker__0_11 - 1) = 0\n(1 - (diff_marker__0_11 + diff_marker__1_11 + 
diff_marker__2_11 + diff_marker__3_11)) * ((a__0_9 - a__0_11) * (2 * cmp_result_11 - 1)) = 0\ndiff_marker__0_11 * ((a__0_11 - a__0_9) * (2 * cmp_result_11 - 1) + diff_val_11) = 0\n(diff_marker__0_11 + diff_marker__1_11 + diff_marker__2_11 + diff_marker__3_11) * (diff_marker__0_11 + diff_marker__1_11 + diff_marker__2_11 + diff_marker__3_11 - 1) = 0\n(1 - (diff_marker__0_11 + diff_marker__1_11 + diff_marker__2_11 + diff_marker__3_11)) * cmp_result_11 = 0\n(1 - is_valid) * (diff_marker__0_11 + diff_marker__1_11 + diff_marker__2_11 + diff_marker__3_11) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/copy_byte.txt",
    "content": "Instructions:\n  0: LOADB rd_rs2_ptr = 8, rs1_ptr = 2, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  4: STOREB rd_rs2_ptr = 8, rs1_ptr = 3, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  8: ADD rd_ptr = 8, rs1_ptr = 0, rs2 = 3, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 113 -> 50 (2.26x reduction)\n  - Bus interactions: 55 -> 31 (1.77x reduction)\n  - Constraints: 65 -> 21 (3.10x reduction)\n\nSymbolic machine using 50 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  opcode_loadb_flag0_0\n  shift_most_sig_bit_0\n  data_most_sig_bit_0\n  shifted_read_data__0_0\n  shifted_read_data__1_0\n  shifted_read_data__2_0\n  shifted_read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  rs1_data__0_1\n  rs1_data__1_1\n  rs1_data__2_1\n  rs1_data__3_1\n  rs1_aux_cols__base__prev_timestamp_1\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1\n  mem_ptr_limbs__0_1\n  mem_ptr_limbs__1_1\n  write_base_aux__prev_timestamp_1\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  flags__0_1\n  flags__1_1\n  flags__2_1\n  flags__3_1\n  read_data__0_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  write_data__0_1\n  write_data__1_1\n  write_data__2_1\n  write_data__3_1\n  reads_aux__0__base__prev_timestamp_2\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[12, from_state__timestamp_0 + 9]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, 
rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 3, rs1_data__0_1, rs1_data__1_1, rs1_data__2_1, rs1_data__3_1, rs1_aux_cols__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 3, rs1_data__0_1, rs1_data__1_1, rs1_data__2_1, rs1_data__3_1, from_state__timestamp_0 + 3]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1 - (flags__1_1 * flags__2_1 + 2 * flags__0_1 * flags__2_1 + 2 * flags__1_1 * flags__3_1 + 3 * flags__2_1 * flags__3_1), prev_data__0_1, prev_data__1_1, prev_data__2_1, 
prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1 - (flags__1_1 * flags__2_1 + 2 * flags__0_1 * flags__2_1 + 2 * flags__1_1 * flags__3_1 + 3 * flags__2_1 * flags__3_1), write_data__0_1, write_data__1_1, write_data__2_1, write_data__3_1, from_state__timestamp_0 + 5]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_2]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 6]\nmult=is_valid * 1, args=[1, 8, 3, 0, 0, 0, from_state__timestamp_0 + 8]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[shifted_read_data__0_0 * opcode_loadb_flag0_0 + shifted_read_data__1_0 * (1 - opcode_loadb_flag0_0) - 128 * data_most_sig_bit_0, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_0 + 503316480 - (503316480 * mem_ptr_limbs__0_0 + 503316480 * opcode_loadb_flag0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_1 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * 
from_state__timestamp_0 + 30720), 12]\nmult=is_valid * 1, args=[503316480 * flags__2_1 * (flags__2_1 - 1) + 503316481 * flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 503316480 * flags__1_1 * flags__2_1 + 1006632960 * flags__0_1 * flags__2_1 + 1006632960 * flags__1_1 * flags__3_1 - (503316480 * flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 1006632960 * flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 503316481 * flags__2_1 * flags__3_1 + 503316480 * mem_ptr_limbs__0_1), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_1, 13]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_2 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 76800), 12]\n\n// Algebraic constraints:\nopcode_loadb_flag0_0 * (opcode_loadb_flag0_0 - 1) = 0\ndata_most_sig_bit_0 * (data_most_sig_bit_0 - 1) = 0\nshift_most_sig_bit_0 * (shift_most_sig_bit_0 - 1) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 1)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 1)) = 0\nflags__0_1 * ((flags__0_1 - 1) * (flags__0_1 - 2)) = 0\nflags__1_1 * ((flags__1_1 - 1) * (flags__1_1 - 2)) = 0\nflags__2_1 * ((flags__2_1 - 1) * (flags__2_1 - 
2)) = 0\nflags__3_1 * ((flags__3_1 - 1) * (flags__3_1 - 2)) = 0\n(flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 1 * is_valid) * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1) + 1006632960 * flags__2_1 * (flags__2_1 - 1) + 1006632960 * flags__3_1 * (flags__3_1 - 1) + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) = 0\n(1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1) + 1006632960 * flags__3_1 * (flags__3_1 - 1)) * read_data__0_1 + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * (255 * data_most_sig_bit_0) + (1006632960 * flags__2_1 * (flags__2_1 - 1) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2)) * (255 * data_most_sig_bit_0) + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * (255 * data_most_sig_bit_0) + (flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) - (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1)) * read_data__0_1 + write_data__0_1 - (flags__0_1 * flags__2_1 + flags__1_1 * flags__2_1 + flags__1_1 * flags__3_1 + flags__2_1 * flags__3_1) * prev_data__0_1 = 0\n(1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1)) * (255 * data_most_sig_bit_0) + 1006632960 * flags__2_1 * (flags__2_1 - 1) * (255 * data_most_sig_bit_0) + (flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) - flags__0_1 * flags__1_1) * (255 * data_most_sig_bit_0) + write_data__1_1 - (flags__1_1 * flags__2_1 * read_data__0_1 + (flags__0_1 * flags__2_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__3_1 + flags__2_1 * flags__3_1) * prev_data__1_1) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) * (255 * data_most_sig_bit_0) + flags__3_1 * (flags__0_1 + 
flags__1_1 + flags__2_1 + flags__3_1 - 2) * (255 * data_most_sig_bit_0) + write_data__2_1 - ((flags__0_1 * flags__2_1 + flags__1_1 * flags__3_1) * read_data__0_1 + (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__2_1 + flags__2_1 * flags__3_1) * prev_data__2_1) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) * (255 * data_most_sig_bit_0) + flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * (255 * data_most_sig_bit_0) + write_data__3_1 - (flags__2_1 * flags__3_1 * read_data__0_1 + flags__0_1 * flags__2_1 * (255 * data_most_sig_bit_0) + (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__2_1 + flags__1_1 * flags__3_1) * prev_data__3_1) = 0\n(30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_1 + 7864320 * rs1_data__1_1)) * (30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_1 + 7864320 * rs1_data__1_1 + 1)) = 0\n(943718400 * rs1_data__0_1 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_1 + 30720 * rs1_data__2_1 + 7864320 * rs1_data__3_1 + 943718400 * mem_ptr_limbs__0_1)) * (943718400 * rs1_data__0_1 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_1 + 30720 * rs1_data__2_1 + 7864320 * rs1_data__3_1 + 943718400 * mem_ptr_limbs__0_1 + 1)) = 0\nflags__1_1 * (flags__1_1 - 1) + flags__2_1 * (flags__2_1 - 1) + 4 * flags__0_1 * flags__1_1 + 4 * flags__0_1 * flags__2_1 + 5 * flags__0_1 * flags__3_1 + 5 * flags__1_1 * flags__2_1 + 5 * flags__1_1 * flags__3_1 + 5 * flags__2_1 * flags__3_1 - (1006632960 * flags__3_1 * (flags__3_1 - 1) + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 3 * flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 5 * is_valid) = 0\nflags__2_1 * (flags__2_1 - 1) - (flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 2 * flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 
3 * flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2)) = 0\nopcode_loadb_flag0_0 * shifted_read_data__0_0 + (1 - opcode_loadb_flag0_0) * shifted_read_data__1_0 - read_data__0_1 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/guest_top_block.txt",
    "content": "Instructions:\n   0: ADD rd_ptr = 8, rs1_ptr = 8, rs2 = 16777200, rs2_as = 0\n   4: STOREW rd_rs2_ptr = 4, rs1_ptr = 8, imm = 12, mem_as = 2, needs_write = 1, imm_sign = 0\n   8: AUIPC 4 0 0 1 0\n  12: JALR 4 4 1780 1 0\n\nAPC advantage:\n  - Main columns: 125 -> 26 (4.81x reduction)\n  - Bus interactions: 65 -> 18 (3.61x reduction)\n  - Constraints: 61 -> 7 (8.71x reduction)\n\nSymbolic machine using 26 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  read_data_aux__base__prev_timestamp_1\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  mem_ptr_limbs__0_1\n  mem_ptr_limbs__1_1\n  write_base_aux__prev_timestamp_1\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  read_data__0_1\n  read_data__1_1\n  read_data__2_1\n  read_data__3_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[1788, from_state__timestamp_0 + 9]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 3]\nmult=is_valid * -1, args=[1, 4, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, read_data_aux__base__prev_timestamp_1]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, prev_data__0_1, prev_data__1_1, prev_data__2_1, prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, read_data__0_1, read_data__1_1, 
read_data__2_1, read_data__3_1, from_state__timestamp_0 + 5]\nmult=is_valid * 1, args=[1, 4, 8, 0, 0, 0, from_state__timestamp_0 + 7]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_1), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_1, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_1 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_0, a__1_0, 0, 0]\nmult=is_valid * 1, args=[a__2_0, a__3_0, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_0 + 125829121 * is_valid - 7864320 * writes_aux__prev_data__0_0) * (7864320 * a__0_0 + 125829120 - 7864320 * writes_aux__prev_data__0_0) = 0\n(30720 * a__0_0 + 7864320 * a__1_0 + 491521 * is_valid - (30720 * writes_aux__prev_data__0_0 + 7864320 * writes_aux__prev_data__1_0)) * (30720 * a__0_0 + 7864320 * a__1_0 + 491520 - (30720 * writes_aux__prev_data__0_0 + 7864320 * writes_aux__prev_data__1_0)) = 0\n(120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 + 1921 * is_valid - (120 * writes_aux__prev_data__0_0 + 30720 * writes_aux__prev_data__1_0 + 7864320 * writes_aux__prev_data__2_0)) * (120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 + 1920 - (120 * writes_aux__prev_data__0_0 + 30720 * writes_aux__prev_data__1_0 + 7864320 * 
writes_aux__prev_data__2_0)) = 0\n(943718400 * writes_aux__prev_data__0_0 + 120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 - (120 * writes_aux__prev_data__1_0 + 30720 * writes_aux__prev_data__2_0 + 7864320 * writes_aux__prev_data__3_0 + 943718400 * a__0_0 + 1006632952 * is_valid)) * (943718400 * writes_aux__prev_data__0_0 + 120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 - (120 * writes_aux__prev_data__1_0 + 30720 * writes_aux__prev_data__2_0 + 7864320 * writes_aux__prev_data__3_0 + 943718400 * a__0_0 + 1006632953)) = 0\n(30720 * mem_ptr_limbs__0_1 - (30720 * a__0_0 + 7864320 * a__1_0 + 368640 * is_valid)) * (30720 * mem_ptr_limbs__0_1 - (30720 * a__0_0 + 7864320 * a__1_0 + 368641)) = 0\n(943718400 * a__0_0 + 30720 * mem_ptr_limbs__1_1 - (120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 943718400 * mem_ptr_limbs__0_1 + 754974726 * is_valid)) * (943718400 * a__0_0 + 30720 * mem_ptr_limbs__1_1 - (120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 943718400 * mem_ptr_limbs__0_1 + 754974727)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/load_two_bytes_compare.txt",
    "content": "Instructions:\n  0: LOADB rd_rs2_ptr = 52, rs1_ptr = 40, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  4: LOADB rd_rs2_ptr = 56, rs1_ptr = 44, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  8: BNE 52 56 28 1 1\n\nAPC advantage:\n  - Main columns: 98 -> 51 (1.92x reduction)\n  - Bus interactions: 47 -> 32 (1.47x reduction)\n  - Constraints: 47 -> 15 (3.13x reduction)\n\nSymbolic machine using 51 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  opcode_loadb_flag0_0\n  shift_most_sig_bit_0\n  data_most_sig_bit_0\n  shifted_read_data__0_0\n  shifted_read_data__1_0\n  shifted_read_data__2_0\n  shifted_read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  rs1_data__0_1\n  rs1_data__1_1\n  rs1_data__2_1\n  rs1_data__3_1\n  rs1_aux_cols__base__prev_timestamp_1\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1\n  read_data_aux__base__prev_timestamp_1\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  mem_ptr_limbs__0_1\n  mem_ptr_limbs__1_1\n  write_base_aux__prev_timestamp_1\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  opcode_loadb_flag0_1\n  shift_most_sig_bit_1\n  data_most_sig_bit_1\n  shifted_read_data__0_1\n  shifted_read_data__1_1\n  shifted_read_data__2_1\n  shifted_read_data__3_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  cmp_result_2\n  diff_inv_marker__0_2\n  free_var_101\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[24 * cmp_result_2 + 12, 
from_state__timestamp_0 + 8]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 40, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 40, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 52, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 44, rs1_data__0_1, rs1_data__1_1, rs1_data__2_1, rs1_data__3_1, rs1_aux_cols__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 44, rs1_data__0_1, rs1_data__1_1, rs1_data__2_1, rs1_data__3_1, from_state__timestamp_0 + 3]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1 + opcode_loadb_flag0_1 - (2 * shift_most_sig_bit_1 + 1), shift_most_sig_bit_1 * 
shifted_read_data__2_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__0_1, shift_most_sig_bit_1 * shifted_read_data__3_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__1_1, shift_most_sig_bit_1 * shifted_read_data__0_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__2_1, shift_most_sig_bit_1 * shifted_read_data__1_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__3_1, read_data_aux__base__prev_timestamp_1]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1 + opcode_loadb_flag0_1 - (2 * shift_most_sig_bit_1 + 1), shift_most_sig_bit_1 * shifted_read_data__2_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__0_1, shift_most_sig_bit_1 * shifted_read_data__3_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__1_1, shift_most_sig_bit_1 * shifted_read_data__0_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__2_1, shift_most_sig_bit_1 * shifted_read_data__1_1 + (1 - shift_most_sig_bit_1) * shifted_read_data__3_1, from_state__timestamp_0 + 4]\nmult=is_valid * -1, args=[1, 56, prev_data__0_1, prev_data__1_1, prev_data__2_1, prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 52, opcode_loadb_flag0_0 * shifted_read_data__0_0 + (1 - opcode_loadb_flag0_0) * shifted_read_data__1_0, 255 * data_most_sig_bit_0, 255 * data_most_sig_bit_0, 255 * data_most_sig_bit_0, from_state__timestamp_0 + 6]\nmult=is_valid * 1, args=[1, 56, opcode_loadb_flag0_1 * shifted_read_data__0_1 + (1 - opcode_loadb_flag0_1) * shifted_read_data__1_1, 255 * data_most_sig_bit_1, 255 * data_most_sig_bit_1, 255 * data_most_sig_bit_1, from_state__timestamp_0 + 7]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[shifted_read_data__0_0 * opcode_loadb_flag0_0 + shifted_read_data__1_0 * (1 - opcode_loadb_flag0_0) - 128 * data_most_sig_bit_0, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * 
rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_0 + 503316480 - (503316480 * mem_ptr_limbs__0_0 + 503316480 * opcode_loadb_flag0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[shifted_read_data__0_1 * opcode_loadb_flag0_1 + shifted_read_data__1_1 * (1 - opcode_loadb_flag0_1) - 128 * data_most_sig_bit_1, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_1 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 30720), 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_1 + 503316480 - (503316480 * mem_ptr_limbs__0_1 + 503316480 * opcode_loadb_flag0_1), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_1, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_1 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// 
Algebraic constraints:\nopcode_loadb_flag0_0 * (opcode_loadb_flag0_0 - 1) = 0\ndata_most_sig_bit_0 * (data_most_sig_bit_0 - 1) = 0\nshift_most_sig_bit_0 * (shift_most_sig_bit_0 - 1) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 1)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 1)) = 0\nopcode_loadb_flag0_1 * (opcode_loadb_flag0_1 - 1) = 0\ndata_most_sig_bit_1 * (data_most_sig_bit_1 - 1) = 0\nshift_most_sig_bit_1 * (shift_most_sig_bit_1 - 1) = 0\n(30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_1 + 7864320 * rs1_data__1_1)) * (30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_1 + 7864320 * rs1_data__1_1 + 1)) = 0\n(943718400 * rs1_data__0_1 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_1 + 30720 * rs1_data__2_1 + 7864320 * rs1_data__3_1 + 943718400 * mem_ptr_limbs__0_1)) * (943718400 * rs1_data__0_1 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_1 + 30720 * rs1_data__2_1 + 7864320 * rs1_data__3_1 + 943718400 * mem_ptr_limbs__0_1 + 1)) = 0\ncmp_result_2 * (cmp_result_2 - 1) = 0\n(1 - cmp_result_2) * ((opcode_loadb_flag0_1 - 1) * shifted_read_data__1_1 + opcode_loadb_flag0_0 * shifted_read_data__0_0 + (1 - opcode_loadb_flag0_0) * shifted_read_data__1_0 - opcode_loadb_flag0_1 * shifted_read_data__0_1) = 0\n(1 - cmp_result_2) * (255 * data_most_sig_bit_0 - 255 * data_most_sig_bit_1) = 0\n((opcode_loadb_flag0_1 - 1) * shifted_read_data__1_1 + opcode_loadb_flag0_0 * shifted_read_data__0_0 + (1 - opcode_loadb_flag0_0) * shifted_read_data__1_0 - opcode_loadb_flag0_1 * shifted_read_data__0_1) * diff_inv_marker__0_2 + free_var_101 * ((255 * data_most_sig_bit_0 - 
255 * data_most_sig_bit_1) * (255 * data_most_sig_bit_0 - 255 * data_most_sig_bit_1) + (255 * data_most_sig_bit_0 - 255 * data_most_sig_bit_1) * (255 * data_most_sig_bit_0 - 255 * data_most_sig_bit_1) + (255 * data_most_sig_bit_0 - 255 * data_most_sig_bit_1) * (255 * data_most_sig_bit_0 - 255 * data_most_sig_bit_1)) - cmp_result_2 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/load_two_bytes_compare_unsigned.txt",
    "content": "Instructions:\n  0: LOADBU rd_rs2_ptr = 52, rs1_ptr = 40, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  4: LOADBU rd_rs2_ptr = 56, rs1_ptr = 44, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  8: BNE 52 56 28 1 1\n\nAPC advantage:\n  - Main columns: 108 -> 54 (2.00x reduction)\n  - Bus interactions: 45 -> 30 (1.50x reduction)\n  - Constraints: 61 -> 32 (1.91x reduction)\n\nSymbolic machine using 54 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  flags__0_0\n  flags__1_0\n  flags__2_0\n  flags__3_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  write_data__0_0\n  rs1_data__0_1\n  rs1_data__1_1\n  rs1_data__2_1\n  rs1_data__3_1\n  rs1_aux_cols__base__prev_timestamp_1\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1\n  read_data_aux__base__prev_timestamp_1\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  mem_ptr_limbs__0_1\n  mem_ptr_limbs__1_1\n  write_base_aux__prev_timestamp_1\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  flags__0_1\n  flags__1_1\n  flags__2_1\n  flags__3_1\n  read_data__0_1\n  read_data__1_1\n  read_data__2_1\n  read_data__3_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  write_data__0_1\n  cmp_result_2\n  diff_inv_marker__0_2\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[24 * cmp_result_2 + 12, from_state__timestamp_0 + 8]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 40, 
rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 40, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 2 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - flags__2_0 * (flags__2_0 - 1), read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 2 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - flags__2_0 * (flags__2_0 - 1), read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 52, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 44, rs1_data__0_1, rs1_data__1_1, rs1_data__2_1, rs1_data__3_1, rs1_aux_cols__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 44, rs1_data__0_1, rs1_data__1_1, rs1_data__2_1, rs1_data__3_1, from_state__timestamp_0 + 3]\nmult=is_valid * -1, args=[2, flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 2 * flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 3 * flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1 - flags__2_1 * (flags__2_1 - 1), read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, read_data_aux__base__prev_timestamp_1]\nmult=is_valid * 1, args=[2, flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 2 * 
flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 3 * flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1 - flags__2_1 * (flags__2_1 - 1), read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 4]\nmult=is_valid * -1, args=[1, 56, prev_data__0_1, prev_data__1_1, prev_data__2_1, prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 52, write_data__0_0, 0, 0, 0, from_state__timestamp_0 + 6]\nmult=is_valid * 1, args=[1, 56, write_data__0_1, 0, 0, 0, from_state__timestamp_0 + 7]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[503316480 * flags__2_0 * (flags__2_0 - 1) + 503316481 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316480 * flags__1_0 * flags__2_0 + 1006632960 * flags__0_0 * flags__2_0 + 1006632960 * flags__1_0 * flags__3_0 - (503316480 * flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1006632960 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316481 * flags__2_0 * flags__3_0 + 503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * 
from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_1 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 30720), 12]\nmult=is_valid * 1, args=[503316480 * flags__2_1 * (flags__2_1 - 1) + 503316481 * flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 503316480 * flags__1_1 * flags__2_1 + 1006632960 * flags__0_1 * flags__2_1 + 1006632960 * flags__1_1 * flags__3_1 - (503316480 * flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 1006632960 * flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 503316481 * flags__2_1 * flags__3_1 + 503316480 * mem_ptr_limbs__0_1), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_1, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_1 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// Algebraic constraints:\nflags__0_0 * ((flags__0_0 - 1) * (flags__0_0 - 2)) = 0\nflags__1_0 * ((flags__1_0 - 1) * (flags__1_0 - 2)) = 0\nflags__2_0 * ((flags__2_0 - 1) * (flags__2_0 - 2)) = 0\nflags__3_0 * ((flags__3_0 - 1) * (flags__3_0 - 2)) = 0\n(flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 1 * is_valid) * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__2_0 * (flags__2_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 
+ flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1 * is_valid = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1)) * read_data__0_0 + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__1_0 + (1006632960 * flags__2_0 * (flags__2_0 - 1) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2)) * read_data__2_0 + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) - (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0)) * read_data__0_0 + write_data__0_0 - (flags__0_0 * flags__2_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__0_0 = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1)) * read_data__1_0 + 1006632960 * flags__2_0 * (flags__2_0 - 1) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) - flags__0_0 * flags__1_0) * read_data__1_0 - (flags__1_0 * flags__2_0 * read_data__0_0 + (flags__0_0 * flags__2_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__1_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__2_0 + flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__2_0 - ((flags__0_0 * flags__2_0 + flags__1_0 * flags__3_0) * read_data__0_0 + (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__2_0 * flags__3_0) * prev_data__2_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__3_0 + flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 - (flags__2_0 * flags__3_0 * read_data__0_0 + flags__0_0 * flags__2_0 * 
read_data__1_0 + (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0) * prev_data__3_0) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 1)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 1)) = 0\nflags__1_0 * (flags__1_0 - 1) + flags__2_0 * (flags__2_0 - 1) + 4 * flags__0_0 * flags__1_0 + 4 * flags__0_0 * flags__2_0 + 5 * flags__0_0 * flags__3_0 + 5 * flags__1_0 * flags__2_0 + 5 * flags__1_0 * flags__3_0 + 5 * flags__2_0 * flags__3_0 - (1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1 * is_valid) = 0\nflags__0_1 * ((flags__0_1 - 1) * (flags__0_1 - 2)) = 0\nflags__1_1 * ((flags__1_1 - 1) * (flags__1_1 - 2)) = 0\nflags__2_1 * ((flags__2_1 - 1) * (flags__2_1 - 2)) = 0\nflags__3_1 * ((flags__3_1 - 1) * (flags__3_1 - 2)) = 0\n(flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 1 * is_valid) * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1) + 1006632960 * flags__2_1 * (flags__2_1 - 1) + 1006632960 * flags__3_1 * (flags__3_1 - 1) + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 
1 * is_valid = 0\n(1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1) + 1006632960 * flags__3_1 * (flags__3_1 - 1)) * read_data__0_1 + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__1_1 + (1006632960 * flags__2_1 * (flags__2_1 - 1) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2)) * read_data__2_1 + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__3_1 + (flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) - (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1)) * read_data__0_1 + write_data__0_1 - (flags__0_1 * flags__2_1 + flags__1_1 * flags__2_1 + flags__1_1 * flags__3_1 + flags__2_1 * flags__3_1) * prev_data__0_1 = 0\n(1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1)) * read_data__1_1 + 1006632960 * flags__2_1 * (flags__2_1 - 1) * read_data__3_1 + (flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) - flags__0_1 * flags__1_1) * read_data__1_1 - (flags__1_1 * flags__2_1 * read_data__0_1 + (flags__0_1 * flags__2_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__3_1 + flags__2_1 * flags__3_1) * prev_data__1_1) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) * read_data__2_1 + flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__2_1 - ((flags__0_1 * flags__2_1 + flags__1_1 * flags__3_1) * read_data__0_1 + (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__2_1 + flags__2_1 * flags__3_1) * prev_data__2_1) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) * read_data__3_1 + flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__3_1 - (flags__2_1 * flags__3_1 * read_data__0_1 + flags__0_1 * flags__2_1 * read_data__1_1 + (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__2_1 + flags__1_1 * flags__3_1) * prev_data__3_1) = 0\n(30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_1 + 
7864320 * rs1_data__1_1)) * (30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_1 + 7864320 * rs1_data__1_1 + 1)) = 0\n(943718400 * rs1_data__0_1 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_1 + 30720 * rs1_data__2_1 + 7864320 * rs1_data__3_1 + 943718400 * mem_ptr_limbs__0_1)) * (943718400 * rs1_data__0_1 + 30720 * mem_ptr_limbs__1_1 - (120 * rs1_data__1_1 + 30720 * rs1_data__2_1 + 7864320 * rs1_data__3_1 + 943718400 * mem_ptr_limbs__0_1 + 1)) = 0\nflags__1_1 * (flags__1_1 - 1) + flags__2_1 * (flags__2_1 - 1) + 4 * flags__0_1 * flags__1_1 + 4 * flags__0_1 * flags__2_1 + 5 * flags__0_1 * flags__3_1 + 5 * flags__1_1 * flags__2_1 + 5 * flags__1_1 * flags__3_1 + 5 * flags__2_1 * flags__3_1 - (1006632960 * flags__3_1 * (flags__3_1 - 1) + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 3 * flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 1 * is_valid) = 0\ncmp_result_2 * (cmp_result_2 - 1) = 0\n(1 - cmp_result_2) * (write_data__0_0 - write_data__0_1) = 0\n(write_data__0_0 - write_data__0_1) * diff_inv_marker__0_2 - cmp_result_2 = 0\nflags__1_0 * flags__2_0 + 2 * flags__0_0 * flags__2_0 + 2 * flags__1_0 * flags__3_0 + 3 * flags__2_0 * flags__3_0 = 0\nflags__1_1 * flags__2_1 + 2 * flags__0_1 * flags__2_1 + 2 * flags__1_1 * flags__3_1 + 3 * flags__2_1 * flags__3_1 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/many_stores_relative_to_same_register.txt",
    "content": "Instructions:\n  0: STOREW rd_rs2_ptr = 5, rs1_ptr = 2, imm = 12, mem_as = 2, needs_write = 1, imm_sign = 0\n  4: STOREW rd_rs2_ptr = 6, rs1_ptr = 2, imm = 16, mem_as = 2, needs_write = 1, imm_sign = 0\n  8: STOREW rd_rs2_ptr = 7, rs1_ptr = 2, imm = 20, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 123 -> 50 (2.46x reduction)\n  - Bus interactions: 51 -> 36 (1.42x reduction)\n  - Constraints: 75 -> 7 (10.71x reduction)\n\nSymbolic machine using 50 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  read_data_aux__base__prev_timestamp_1\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  mem_ptr_limbs__0_1\n  mem_ptr_limbs__1_1\n  write_base_aux__prev_timestamp_1\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  read_data__0_1\n  read_data__1_1\n  read_data__2_1\n  read_data__3_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  read_data_aux__base__prev_timestamp_2\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_2\n  mem_ptr_limbs__0_2\n  mem_ptr_limbs__1_2\n  write_base_aux__prev_timestamp_2\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_2\n  read_data__0_2\n  read_data__1_2\n  read_data__2_2\n  read_data__3_2\n  prev_data__0_2\n  prev_data__1_2\n  prev_data__2_2\n  prev_data__3_2\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[12, 
from_state__timestamp_0 + 9]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 5, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 2]\nmult=is_valid * -1, args=[1, 6, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, read_data_aux__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 6, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 4]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, prev_data__0_1, prev_data__1_1, prev_data__2_1, prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 5]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0 + 6]\nmult=is_valid * -1, args=[1, 7, read_data__0_2, read_data__1_2, read_data__2_2, read_data__3_2, read_data_aux__base__prev_timestamp_2]\nmult=is_valid * 1, args=[1, 7, read_data__0_2, read_data__1_2, read_data__2_2, read_data__3_2, from_state__timestamp_0 + 7]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_2 + 65536 * mem_ptr_limbs__1_2, prev_data__0_2, prev_data__1_2, prev_data__2_2, prev_data__3_2, write_base_aux__prev_timestamp_2]\nmult=is_valid * 1, args=[2, 
mem_ptr_limbs__0_2 + 65536 * mem_ptr_limbs__1_2, read_data__0_2, read_data__1_2, read_data__2_2, read_data__3_2, from_state__timestamp_0 + 8]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_1), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_1, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_1 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_2), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_2, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * 
read_data_aux__base__prev_timestamp_2 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 92160), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_2 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 107520), 12]\n\n// Algebraic constraints:\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 368640 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 368641)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 754974726 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 754974727)) = 0\n(30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 491520 * is_valid)) * (30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 491521)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_1 + 1006632953 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_1)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_1 + 1006632952 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_1)) = 0\n(30720 * mem_ptr_limbs__0_2 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 614400 * is_valid)) * (30720 * mem_ptr_limbs__0_2 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 614401)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_2 + 754974711 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_2)) * (943718400 * rs1_data__0_0 + 
30720 * mem_ptr_limbs__1_2 + 754974710 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_2)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/memcpy_block.txt",
    "content": "Instructions:\n   0: AND rd_ptr = 52, rs1_ptr = 44, rs2 = 3, rs2_as = 0\n   4: SLTU rd_ptr = 52, rs1_ptr = 52, rs2 = 1, rs2_as = 0\n   8: SLTU rd_ptr = 56, rs1_ptr = 56, rs2 = 1, rs2_as = 0\n  12: OR rd_ptr = 52, rs1_ptr = 52, rs2 = 56, rs2_as = 1\n  16: BNE 52 0 248 1 1\n\nAPC advantage:\n  - Main columns: 172 -> 29 (5.93x reduction)\n  - Bus interactions: 87 -> 19 (4.58x reduction)\n  - Constraints: 111 -> 10 (11.10x reduction)\n\nSymbolic machine using 29 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_1\n  reads_aux__0__base__prev_timestamp_2\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2\n  writes_aux__prev_data__0_2\n  writes_aux__prev_data__1_2\n  writes_aux__prev_data__2_2\n  writes_aux__prev_data__3_2\n  cmp_result_2\n  reads_aux__1__base__prev_timestamp_4\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_4\n  cmp_result_4\n  inv_of_sum_173\n  inv_of_sum_174\n  free_var_176\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[244 * cmp_result_4 + 20, from_state__timestamp_0 + 14]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 44, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 44, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 52, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 56, writes_aux__prev_data__0_2, writes_aux__prev_data__1_2, 
writes_aux__prev_data__2_2, writes_aux__prev_data__3_2, reads_aux__0__base__prev_timestamp_2]\nmult=is_valid * 1, args=[1, 56, cmp_result_2, 0, 0, 0, from_state__timestamp_0 + 10]\nmult=is_valid * 1, args=[1, 52, cmp_result_1 + cmp_result_2 - cmp_result_1 * cmp_result_2, 0, 0, 0, from_state__timestamp_0 + 12]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_4]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 13]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_2 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 76800), 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_4, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_4 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_4 - (15360 * from_state__timestamp_0 + 184320), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[b__0_0, 3, b__0_0 + 3 - 2 * a__0_0, 1]\n\n// Algebraic constraints:\ncmp_result_1 * (cmp_result_1 - 1) = 0\ncmp_result_2 * (cmp_result_2 - 1) = 0\ncmp_result_4 * (cmp_result_4 - 1) = 0\ncmp_result_1 * a__0_0 = 0\ninv_of_sum_173 * a__0_0 + cmp_result_1 - 1 * is_valid = 0\ncmp_result_2 * (writes_aux__prev_data__0_2 + writes_aux__prev_data__1_2 + 
writes_aux__prev_data__2_2 + writes_aux__prev_data__3_2) = 0\ninv_of_sum_174 * (writes_aux__prev_data__0_2 + writes_aux__prev_data__1_2 + writes_aux__prev_data__2_2 + writes_aux__prev_data__3_2) + cmp_result_2 - 1 * is_valid = 0\n(1 - cmp_result_4) * (cmp_result_1 + cmp_result_2 - cmp_result_1 * cmp_result_2) = 0\nfree_var_176 * (cmp_result_1 + cmp_result_2 - cmp_result_1 * cmp_result_2) - cmp_result_4 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/rotate.txt",
    "content": "Instructions:\n  0: SRL rd_ptr = 1, rs1_ptr = 3, rs2 = 1, rs2_as = 0\n  4: SLL rd_ptr = 2, rs1_ptr = 3, rs2 = 31, rs2_as = 0\n  8: OR rd_ptr = 3, rs1_ptr = 1, rs2 = 2, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 142 -> 26 (5.46x reduction)\n  - Bus interactions: 68 -> 18 (3.78x reduction)\n  - Constraints: 174 -> 5 (34.80x reduction)\n\nSymbolic machine using 26 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  writes_aux__base__prev_timestamp_1\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  writes_aux__prev_data__0_1\n  writes_aux__prev_data__1_1\n  writes_aux__prev_data__2_1\n  writes_aux__prev_data__3_1\n  a__3_1\n  a__3_2\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[12, from_state__timestamp_0 + 9]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 3, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 1, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 2, writes_aux__prev_data__0_1, writes_aux__prev_data__1_1, writes_aux__prev_data__2_1, writes_aux__prev_data__3_1, writes_aux__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 1, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 6]\nmult=is_valid * 1, args=[1, 2, 0, 0, 0, a__3_1, from_state__timestamp_0 + 7]\nmult=is_valid * 1, args=[1, 3, a__0_0, a__1_0, a__2_0, a__3_2, from_state__timestamp_0 + 8]\n\n// Bus 3 
(VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[7864320 * a__3_1 - 1006632960 * b__0_0, 7]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_1 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__3_0, a__3_1, 2 * a__3_2 - (a__3_0 + a__3_1), 1]\nmult=is_valid * 1, args=[a__0_0, a__1_0, 0, 0]\nmult=is_valid * 1, args=[a__2_0, 0, 0, 0]\n\n// Algebraic constraints:\n(b__0_0 + 256 * b__1_0 + 65536 * b__2_0 + 16777216 * b__3_0 - (2 * a__0_0 + 512 * a__1_0 + 131072 * a__2_0 + 33554432 * a__3_0)) * (b__0_0 + 256 * b__1_0 + 65536 * b__2_0 + 16777216 * b__3_0 - (2 * a__0_0 + 512 * a__1_0 + 131072 * a__2_0 + 33554432 * a__3_0 + 1)) = 0\n(b__1_0 + 256 * b__2_0 + 65536 * b__3_0 - (2 * a__1_0 + 512 * a__2_0 + 131072 * a__3_0)) * (b__1_0 + 256 * b__2_0 + 65536 * b__3_0 - (2 * a__1_0 + 512 * a__2_0 + 131072 * a__3_0 + 1)) = 0\n(b__2_0 + 256 * b__3_0 - (2 * a__2_0 + 512 * a__3_0)) * (b__2_0 + 256 * b__3_0 - (2 * a__2_0 + 512 * a__3_0 + 1)) = 0\n(b__3_0 - 2 * a__3_0) * (b__3_0 - (2 * a__3_0 + 1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/stack_accesses.txt",
    "content": "Instructions:\n  0: LOADW rd_rs2_ptr = 8, rs1_ptr = 2, imm = 20, mem_as = 2, needs_write = 1, imm_sign = 0\n  4: LOADW rd_rs2_ptr = 9, rs1_ptr = 2, imm = 24, mem_as = 2, needs_write = 1, imm_sign = 0\n  8: STOREW rd_rs2_ptr = 8, rs1_ptr = 2, imm = 24, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 123 -> 36 (3.42x reduction)\n  - Bus interactions: 51 -> 26 (1.96x reduction)\n  - Constraints: 75 -> 5 (15.00x reduction)\n\nSymbolic machine using 36 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  read_data_aux__base__prev_timestamp_1\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  mem_ptr_limbs__0_1\n  mem_ptr_limbs__1_1\n  write_base_aux__prev_timestamp_1\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  read_data__0_1\n  read_data__1_1\n  read_data__2_1\n  read_data__3_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[12, from_state__timestamp_0 + 9]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, 
mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, read_data_aux__base__prev_timestamp_1]\nmult=is_valid * -1, args=[1, 9, prev_data__0_1, prev_data__1_1, prev_data__2_1, prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 9, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 5]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0 + 6]\nmult=is_valid * 1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 7]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_1 + 65536 * mem_ptr_limbs__1_1, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 8]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * 
write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_1), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_1, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_1 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// Algebraic constraints:\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 614400 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 614401)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 754974711 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 754974710 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) = 0\n(30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 737280 * is_valid)) * (30720 * mem_ptr_limbs__0_1 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 737281)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_1 + 503316469 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_1)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_1 + 503316468 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/store_to_same_memory_address.txt",
    "content": "Instructions:\n  0: STOREB rd_rs2_ptr = 4, rs1_ptr = 8, imm = 8, mem_as = 2, needs_write = 1, imm_sign = 0\n  4: STOREB rd_rs2_ptr = 32, rs1_ptr = 8, imm = 8, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 82 -> 50 (1.64x reduction)\n  - Bus interactions: 34 -> 25 (1.36x reduction)\n  - Constraints: 50 -> 27 (1.85x reduction)\n\nSymbolic machine using 50 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  flags__0_0\n  flags__1_0\n  flags__2_0\n  flags__3_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  write_data__0_0\n  write_data__1_0\n  write_data__2_0\n  write_data__3_0\n  read_data_aux__base__prev_timestamp_1\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  write_base_aux__prev_timestamp_1\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_1\n  flags__0_1\n  flags__1_1\n  flags__2_1\n  flags__3_1\n  read_data__0_1\n  read_data__1_1\n  read_data__2_1\n  read_data__3_1\n  prev_data__0_1\n  prev_data__1_1\n  prev_data__2_1\n  prev_data__3_1\n  write_data__0_1\n  write_data__1_1\n  write_data__2_1\n  write_data__3_1\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[8, from_state__timestamp_0 + 6]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 4, read_data__0_0, read_data__1_0, read_data__2_0, 
read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 4, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - (flags__1_0 * flags__2_0 + 2 * flags__0_0 * flags__2_0 + 2 * flags__1_0 * flags__3_0 + 3 * flags__2_0 * flags__3_0), prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - (flags__1_0 * flags__2_0 + 2 * flags__0_0 * flags__2_0 + 2 * flags__1_0 * flags__3_0 + 3 * flags__2_0 * flags__3_0), write_data__0_0, write_data__1_0, write_data__2_0, write_data__3_0, from_state__timestamp_0 + 2]\nmult=is_valid * 1, args=[1, 8, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0 + 3]\nmult=is_valid * -1, args=[1, 32, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, read_data_aux__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 32, read_data__0_1, read_data__1_1, read_data__2_1, read_data__3_1, from_state__timestamp_0 + 4]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - (flags__1_1 * flags__2_1 + 2 * flags__0_1 * flags__2_1 + 2 * flags__1_1 * flags__3_1 + 3 * flags__2_1 * flags__3_1), prev_data__0_1, prev_data__1_1, prev_data__2_1, prev_data__3_1, write_base_aux__prev_timestamp_1]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - (flags__1_1 * flags__2_1 + 2 * flags__0_1 * flags__2_1 + 2 * flags__1_1 * flags__3_1 + 3 * flags__2_1 * flags__3_1), write_data__0_1, write_data__1_1, write_data__2_1, write_data__3_1, from_state__timestamp_0 + 5]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * 
rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[503316480 * flags__2_0 * (flags__2_0 - 1) + 503316481 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316480 * flags__1_0 * flags__2_0 + 1006632960 * flags__0_0 * flags__2_0 + 1006632960 * flags__1_0 * flags__3_0 - (503316480 * flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1006632960 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316481 * flags__2_0 * flags__3_0 + 503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[503316480 * flags__2_1 * (flags__2_1 - 1) + 503316481 * flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 503316480 * flags__1_1 * flags__2_1 + 1006632960 * flags__0_1 * flags__2_1 + 1006632960 * flags__1_1 * flags__3_1 - (503316480 * flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 1006632960 * flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 503316481 * flags__2_1 * flags__3_1 + 503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_1 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, 
args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_1 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// Algebraic constraints:\nflags__0_0 * ((flags__0_0 - 1) * (flags__0_0 - 2)) = 0\nflags__1_0 * ((flags__1_0 - 1) * (flags__1_0 - 2)) = 0\nflags__2_0 * ((flags__2_0 - 1) * (flags__2_0 - 2)) = 0\nflags__3_0 * ((flags__3_0 - 1) * (flags__3_0 - 2)) = 0\n(flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 1 * is_valid) * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__2_0 * (flags__2_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1)) * read_data__0_0 + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__1_0 + (1006632960 * flags__2_0 * (flags__2_0 - 1) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2)) * read_data__2_0 + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) - (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0)) * read_data__0_0 + write_data__0_0 - (flags__0_0 * flags__2_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__0_0 = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1)) * read_data__1_0 + 1006632960 * flags__2_0 * (flags__2_0 - 1) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + 
flags__3_0 - 2) - flags__0_0 * flags__1_0) * read_data__1_0 + write_data__1_0 - (flags__1_0 * flags__2_0 * read_data__0_0 + (flags__0_0 * flags__2_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__1_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__2_0 + flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__2_0 + write_data__2_0 - ((flags__0_0 * flags__2_0 + flags__1_0 * flags__3_0) * read_data__0_0 + (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__2_0 * flags__3_0) * prev_data__2_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__3_0 + flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 + write_data__3_0 - (flags__2_0 * flags__3_0 * read_data__0_0 + flags__0_0 * flags__2_0 * read_data__1_0 + (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0) * prev_data__3_0) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 245760 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 245761)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 503316484 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 503316485)) = 0\nflags__1_0 * (flags__1_0 - 1) + flags__2_0 * (flags__2_0 - 1) + 4 * flags__0_0 * flags__1_0 + 4 * flags__0_0 * flags__2_0 + 5 * flags__0_0 * flags__3_0 + 5 * flags__1_0 * flags__2_0 + 5 * flags__1_0 * flags__3_0 + 5 * flags__2_0 * flags__3_0 - (1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 
flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 5 * is_valid) = 0\nflags__0_1 * ((flags__0_1 - 1) * (flags__0_1 - 2)) = 0\nflags__1_1 * ((flags__1_1 - 1) * (flags__1_1 - 2)) = 0\nflags__2_1 * ((flags__2_1 - 1) * (flags__2_1 - 2)) = 0\nflags__3_1 * ((flags__3_1 - 1) * (flags__3_1 - 2)) = 0\n(flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 1 * is_valid) * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1) + 1006632960 * flags__2_1 * (flags__2_1 - 1) + 1006632960 * flags__3_1 * (flags__3_1 - 1) + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) = 0\n(1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1) + 1006632960 * flags__3_1 * (flags__3_1 - 1)) * read_data__0_1 + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__1_1 + (1006632960 * flags__2_1 * (flags__2_1 - 1) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2)) * read_data__2_1 + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__3_1 + (flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) - (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1)) * read_data__0_1 + write_data__0_1 - (flags__0_1 * flags__2_1 + flags__1_1 * flags__2_1 + flags__1_1 * flags__3_1 + flags__2_1 * flags__3_1) * prev_data__0_1 = 0\n(1006632960 * flags__0_1 * (flags__0_1 - 1) + 1006632960 * flags__1_1 * (flags__1_1 - 1)) * read_data__1_1 + 1006632960 * flags__2_1 * (flags__2_1 - 1) * read_data__3_1 + (flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) - flags__0_1 * flags__1_1) * read_data__1_1 + write_data__1_1 - (flags__1_1 * flags__2_1 * 
read_data__0_1 + (flags__0_1 * flags__2_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__3_1 + flags__2_1 * flags__3_1) * prev_data__1_1) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) * read_data__2_1 + flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__2_1 + write_data__2_1 - ((flags__0_1 * flags__2_1 + flags__1_1 * flags__3_1) * read_data__0_1 + (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__2_1 + flags__2_1 * flags__3_1) * prev_data__2_1) = 0\n1006632960 * flags__0_1 * (flags__0_1 - 1) * read_data__3_1 + flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) * read_data__3_1 + write_data__3_1 - (flags__2_1 * flags__3_1 * read_data__0_1 + flags__0_1 * flags__2_1 * read_data__1_1 + (flags__0_1 * flags__1_1 + flags__0_1 * flags__3_1 + flags__1_1 * flags__2_1 + flags__1_1 * flags__3_1) * prev_data__3_1) = 0\nflags__1_1 * (flags__1_1 - 1) + flags__2_1 * (flags__2_1 - 1) + 4 * flags__0_1 * flags__1_1 + 4 * flags__0_1 * flags__2_1 + 5 * flags__0_1 * flags__3_1 + 5 * flags__1_1 * flags__2_1 + 5 * flags__1_1 * flags__3_1 + 5 * flags__2_1 * flags__3_1 - (1006632960 * flags__3_1 * (flags__3_1 - 1) + flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 3 * flags__3_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 5 * is_valid) = 0\nflags__2_0 * (flags__2_0 - 1) - (flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 2 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2)) = 0\nflags__2_1 * (flags__2_1 - 1) - (flags__0_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 2 * flags__1_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 2) + 3 * flags__2_1 * (flags__0_1 + flags__1_1 + flags__2_1 + flags__3_1 - 
2)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/complex/unaligned_memcpy.txt",
    "content": "Instructions:\n   0: LOADB rd_rs2_ptr = 68, rs1_ptr = 44, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n   4: ADD rd_ptr = 56, rs1_ptr = 44, rs2 = 1, rs2_as = 0\n   8: ADD rd_ptr = 52, rs1_ptr = 64, rs2 = 1, rs2_as = 0\n  12: STOREB rd_rs2_ptr = 68, rs1_ptr = 64, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n  16: ADD rd_ptr = 48, rs1_ptr = 48, rs2 = 16777215, rs2_as = 0\n  20: AND rd_ptr = 44, rs1_ptr = 60, rs2 = 3, rs2_as = 0\n  24: SLTU rd_ptr = 44, rs1_ptr = 0, rs2 = 44, rs2_as = 1\n  28: SLTU rd_ptr = 64, rs1_ptr = 0, rs2 = 48, rs2_as = 1\n  32: AND rd_ptr = 68, rs1_ptr = 44, rs2 = 64, rs2_as = 1\n  36: ADD rd_ptr = 60, rs1_ptr = 60, rs2 = 1, rs2_as = 0\n  40: ADD rd_ptr = 44, rs1_ptr = 56, rs2 = 0, rs2_as = 0\n  44: ADD rd_ptr = 64, rs1_ptr = 52, rs2 = 0, rs2_as = 0\n  48: BNE 68 0 -48 1 1\n\nAPC advantage:\n  - Main columns: 465 -> 105 (4.43x reduction)\n  - Bus interactions: 242 -> 58 (4.17x reduction)\n  - Constraints: 286 -> 67 (4.27x reduction)\n\nSymbolic machine using 105 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  opcode_loadb_flag0_0\n  shift_most_sig_bit_0\n  data_most_sig_bit_0\n  shifted_read_data__0_0\n  shifted_read_data__1_0\n  shifted_read_data__2_0\n  shifted_read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  writes_aux__base__prev_timestamp_1\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_1\n  writes_aux__prev_data__0_1\n  writes_aux__prev_data__1_1\n  writes_aux__prev_data__2_1\n  writes_aux__prev_data__3_1\n  a__0_1\n  a__1_1\n  a__2_1\n  a__3_1\n  
reads_aux__0__base__prev_timestamp_2\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2\n  writes_aux__base__prev_timestamp_2\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_2\n  writes_aux__prev_data__0_2\n  writes_aux__prev_data__1_2\n  writes_aux__prev_data__2_2\n  writes_aux__prev_data__3_2\n  a__0_2\n  a__1_2\n  a__2_2\n  a__3_2\n  b__0_2\n  b__1_2\n  b__2_2\n  b__3_2\n  mem_ptr_limbs__0_3\n  mem_ptr_limbs__1_3\n  write_base_aux__prev_timestamp_3\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_3\n  flags__0_3\n  flags__1_3\n  flags__2_3\n  flags__3_3\n  read_data__0_3\n  prev_data__0_3\n  prev_data__1_3\n  prev_data__2_3\n  prev_data__3_3\n  write_data__0_3\n  write_data__1_3\n  write_data__2_3\n  write_data__3_3\n  reads_aux__0__base__prev_timestamp_4\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_4\n  writes_aux__prev_data__0_4\n  writes_aux__prev_data__1_4\n  writes_aux__prev_data__2_4\n  writes_aux__prev_data__3_4\n  a__0_4\n  a__1_4\n  a__2_4\n  a__3_4\n  reads_aux__0__base__prev_timestamp_5\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_5\n  a__0_5\n  b__0_5\n  b__1_5\n  b__2_5\n  b__3_5\n  reads_aux__0__base__prev_timestamp_6\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_6\n  cmp_result_6\n  diff_marker__0_6\n  diff_marker__1_6\n  diff_marker__2_6\n  diff_marker__3_6\n  diff_val_6\n  cmp_result_7\n  diff_marker__0_7\n  diff_marker__1_7\n  diff_marker__2_7\n  diff_marker__3_7\n  diff_val_7\n  a__0_9\n  a__1_9\n  a__2_9\n  a__3_9\n  cmp_result_12\n  free_var_467\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[52 - 52 * cmp_result_12, from_state__timestamp_0 + 38]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 44, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * 
shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 68, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 56, writes_aux__prev_data__0_1, writes_aux__prev_data__1_1, writes_aux__prev_data__2_1, writes_aux__prev_data__3_1, writes_aux__base__prev_timestamp_1]\nmult=is_valid * -1, args=[1, 64, b__0_2, b__1_2, b__2_2, b__3_2, reads_aux__0__base__prev_timestamp_2]\nmult=is_valid * -1, args=[1, 52, writes_aux__prev_data__0_2, writes_aux__prev_data__1_2, writes_aux__prev_data__2_2, writes_aux__prev_data__3_2, writes_aux__base__prev_timestamp_2]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_3 + 65536 * mem_ptr_limbs__1_3 - (flags__1_3 * flags__2_3 + 2 * flags__0_3 * flags__2_3 + 2 * flags__1_3 * flags__3_3 + 3 * flags__2_3 * flags__3_3), prev_data__0_3, prev_data__1_3, prev_data__2_3, prev_data__3_3, write_base_aux__prev_timestamp_3]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_3 + 65536 * 
mem_ptr_limbs__1_3 - (flags__1_3 * flags__2_3 + 2 * flags__0_3 * flags__2_3 + 2 * flags__1_3 * flags__3_3 + 3 * flags__2_3 * flags__3_3), write_data__0_3, write_data__1_3, write_data__2_3, write_data__3_3, from_state__timestamp_0 + 11]\nmult=is_valid * -1, args=[1, 48, writes_aux__prev_data__0_4, writes_aux__prev_data__1_4, writes_aux__prev_data__2_4, writes_aux__prev_data__3_4, reads_aux__0__base__prev_timestamp_4]\nmult=is_valid * -1, args=[1, 60, b__0_5, b__1_5, b__2_5, b__3_5, reads_aux__0__base__prev_timestamp_5]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_6]\nmult=is_valid * 1, args=[1, 48, a__0_4, a__1_4, a__2_4, a__3_4, from_state__timestamp_0 + 22]\nmult=is_valid * 1, args=[1, 60, a__0_9, a__1_9, a__2_9, a__3_9, from_state__timestamp_0 + 29]\nmult=is_valid * 1, args=[1, 56, a__0_1, a__1_1, a__2_1, a__3_1, from_state__timestamp_0 + 30]\nmult=is_valid * 1, args=[1, 44, a__0_1, a__1_1, a__2_1, a__3_1, from_state__timestamp_0 + 32]\nmult=is_valid * 1, args=[1, 52, a__0_2, a__1_2, a__2_2, a__3_2, from_state__timestamp_0 + 33]\nmult=is_valid * 1, args=[1, 64, a__0_2, a__1_2, a__2_2, a__3_2, from_state__timestamp_0 + 35]\nmult=is_valid * 1, args=[1, 68, cmp_result_6 * cmp_result_7, 0, 0, 0, from_state__timestamp_0 + 36]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 37]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[shifted_read_data__0_0 * opcode_loadb_flag0_0 + shifted_read_data__1_0 * (1 - opcode_loadb_flag0_0) - 128 * data_most_sig_bit_0, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_0 + 503316480 - (503316480 * mem_ptr_limbs__0_0 + 503316480 * opcode_loadb_flag0_0), 14]\nmult=is_valid * 1, 
args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_1 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 61440), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_2 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 76800), 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_2 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 107520), 12]\nmult=is_valid * 1, args=[503316480 * flags__2_3 * (flags__2_3 - 1) + 503316481 * flags__2_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + 503316480 * flags__1_3 * flags__2_3 + 1006632960 * flags__0_3 * flags__2_3 + 1006632960 * flags__1_3 * flags__3_3 - (503316480 * flags__0_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + 1006632960 * flags__1_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + 503316481 * flags__2_3 * flags__3_3 + 503316480 * mem_ptr_limbs__0_3), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_3, 13]\nmult=is_valid * 1, 
args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_3, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_3 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_3 - (15360 * from_state__timestamp_0 + 153600), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_4, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_4 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_4 - (15360 * from_state__timestamp_0 + 168960), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_5, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_5 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_5 - (15360 * from_state__timestamp_0 + 215040), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_6, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_6 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_6 - (15360 * from_state__timestamp_0 + 261120), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[b__0_5, 3, b__0_5 + 3 - 2 * a__0_5, 1]\nmult=diff_marker__0_6 + diff_marker__1_6 + diff_marker__2_6 + diff_marker__3_6, args=[diff_val_6 - 1, 0, 0, 0]\nmult=diff_marker__0_7 + diff_marker__1_7 + diff_marker__2_7 + diff_marker__3_7, args=[diff_val_7 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[a__0_1, a__1_1, 0, 0]\nmult=is_valid * 1, args=[a__2_1, a__3_1, 0, 0]\nmult=is_valid * 1, args=[a__0_2, a__1_2, 0, 0]\nmult=is_valid * 1, args=[a__2_2, a__3_2, 0, 0]\nmult=is_valid * 1, args=[a__0_4, a__1_4, 0, 0]\nmult=is_valid * 1, args=[a__2_4, a__3_4, 0, 0]\nmult=is_valid * 1, args=[a__0_9, a__1_9, 0, 0]\nmult=is_valid * 1, args=[a__2_9, a__3_9, 0, 0]\n\n// Algebraic constraints:\nopcode_loadb_flag0_0 * (opcode_loadb_flag0_0 - 1) = 0\ndata_most_sig_bit_0 * (data_most_sig_bit_0 - 1) = 0\nshift_most_sig_bit_0 * (shift_most_sig_bit_0 - 1) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * 
rs1_data__0_0 + 7864320 * rs1_data__1_0)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 1)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 1)) = 0\n(7864320 * a__0_1 - (7864320 * rs1_data__0_0 + 7864320 * is_valid)) * (7864320 * a__0_1 - (7864320 * rs1_data__0_0 + 7864321)) = 0\n(30720 * a__0_1 + 7864320 * a__1_1 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 30720 * is_valid)) * (30720 * a__0_1 + 7864320 * a__1_1 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 30721)) = 0\n(120 * a__0_1 + 30720 * a__1_1 + 7864320 * a__2_1 - (120 * rs1_data__0_0 + 30720 * rs1_data__1_0 + 7864320 * rs1_data__2_0 + 120 * is_valid)) * (120 * a__0_1 + 30720 * a__1_1 + 7864320 * a__2_1 - (120 * rs1_data__0_0 + 30720 * rs1_data__1_0 + 7864320 * rs1_data__2_0 + 121)) = 0\n(943718400 * rs1_data__0_0 + 120 * a__1_1 + 30720 * a__2_1 + 7864320 * a__3_1 + 943718400 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * a__0_1)) * (943718400 * rs1_data__0_0 + 120 * a__1_1 + 30720 * a__2_1 + 7864320 * a__3_1 + 943718399 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * a__0_1)) = 0\n(7864320 * a__0_2 - (7864320 * b__0_2 + 7864320 * is_valid)) * (7864320 * a__0_2 - (7864320 * b__0_2 + 7864321)) = 0\n(30720 * a__0_2 + 7864320 * a__1_2 - (30720 * b__0_2 + 7864320 * b__1_2 + 30720 * is_valid)) * (30720 * a__0_2 + 7864320 * a__1_2 - (30720 * b__0_2 + 7864320 * b__1_2 + 30721)) = 0\n(120 * a__0_2 + 30720 * a__1_2 + 7864320 * a__2_2 - (120 * b__0_2 + 30720 * b__1_2 + 7864320 * b__2_2 + 120 * is_valid)) * (120 * a__0_2 + 30720 * a__1_2 + 7864320 * a__2_2 - (120 * b__0_2 + 30720 * b__1_2 + 
7864320 * b__2_2 + 121)) = 0\n(120 * a__1_2 + 30720 * a__2_2 + 7864320 * a__3_2 + 943718400 * b__0_2 + 943718400 * is_valid - (943718400 * a__0_2 + 120 * b__1_2 + 30720 * b__2_2 + 7864320 * b__3_2)) * (120 * a__1_2 + 30720 * a__2_2 + 7864320 * a__3_2 + 943718400 * b__0_2 + 943718399 - (943718400 * a__0_2 + 120 * b__1_2 + 30720 * b__2_2 + 7864320 * b__3_2)) = 0\nflags__0_3 * ((flags__0_3 - 1) * (flags__0_3 - 2)) = 0\nflags__1_3 * ((flags__1_3 - 1) * (flags__1_3 - 2)) = 0\nflags__2_3 * ((flags__2_3 - 1) * (flags__2_3 - 2)) = 0\nflags__3_3 * ((flags__3_3 - 1) * (flags__3_3 - 2)) = 0\n(flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 1 * is_valid) * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) = 0\n1006632960 * flags__0_3 * (flags__0_3 - 1) + 1006632960 * flags__1_3 * (flags__1_3 - 1) + 1006632960 * flags__2_3 * (flags__2_3 - 1) + 1006632960 * flags__3_3 * (flags__3_3 - 1) + flags__0_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + flags__1_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + flags__2_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) = 0\n(1006632960 * flags__0_3 * (flags__0_3 - 1) + 1006632960 * flags__1_3 * (flags__1_3 - 1) + 1006632960 * flags__3_3 * (flags__3_3 - 1)) * read_data__0_3 + flags__0_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) * (255 * data_most_sig_bit_0) + (1006632960 * flags__2_3 * (flags__2_3 - 1) + flags__1_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2)) * (255 * data_most_sig_bit_0) + flags__2_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) * (255 * data_most_sig_bit_0) + (flags__3_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) - (flags__0_3 * flags__1_3 + flags__0_3 * flags__3_3)) * read_data__0_3 + write_data__0_3 - (flags__0_3 * flags__2_3 + flags__1_3 * flags__2_3 + flags__1_3 * flags__3_3 + flags__2_3 * flags__3_3) * prev_data__0_3 = 0\n(1006632960 * flags__0_3 * (flags__0_3 - 1) + 1006632960 * flags__1_3 * (flags__1_3 - 1)) * 
(255 * data_most_sig_bit_0) + 1006632960 * flags__2_3 * (flags__2_3 - 1) * (255 * data_most_sig_bit_0) + (flags__3_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) - flags__0_3 * flags__1_3) * (255 * data_most_sig_bit_0) + write_data__1_3 - (flags__1_3 * flags__2_3 * read_data__0_3 + (flags__0_3 * flags__2_3 + flags__0_3 * flags__3_3 + flags__1_3 * flags__3_3 + flags__2_3 * flags__3_3) * prev_data__1_3) = 0\n1006632960 * flags__0_3 * (flags__0_3 - 1) * (255 * data_most_sig_bit_0) + flags__3_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) * (255 * data_most_sig_bit_0) + write_data__2_3 - ((flags__0_3 * flags__2_3 + flags__1_3 * flags__3_3) * read_data__0_3 + (flags__0_3 * flags__1_3 + flags__0_3 * flags__3_3 + flags__1_3 * flags__2_3 + flags__2_3 * flags__3_3) * prev_data__2_3) = 0\n1006632960 * flags__0_3 * (flags__0_3 - 1) * (255 * data_most_sig_bit_0) + flags__3_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) * (255 * data_most_sig_bit_0) + write_data__3_3 - (flags__2_3 * flags__3_3 * read_data__0_3 + flags__0_3 * flags__2_3 * (255 * data_most_sig_bit_0) + (flags__0_3 * flags__1_3 + flags__0_3 * flags__3_3 + flags__1_3 * flags__2_3 + flags__1_3 * flags__3_3) * prev_data__3_3) = 0\n(30720 * mem_ptr_limbs__0_3 - (30720 * b__0_2 + 7864320 * b__1_2)) * (30720 * mem_ptr_limbs__0_3 - (30720 * b__0_2 + 7864320 * b__1_2 + 1)) = 0\n(943718400 * b__0_2 + 30720 * mem_ptr_limbs__1_3 - (120 * b__1_2 + 30720 * b__2_2 + 7864320 * b__3_2 + 943718400 * mem_ptr_limbs__0_3)) * (943718400 * b__0_2 + 30720 * mem_ptr_limbs__1_3 - (120 * b__1_2 + 30720 * b__2_2 + 7864320 * b__3_2 + 943718400 * mem_ptr_limbs__0_3 + 1)) = 0\nflags__1_3 * (flags__1_3 - 1) + flags__2_3 * (flags__2_3 - 1) + 4 * flags__0_3 * flags__1_3 + 4 * flags__0_3 * flags__2_3 + 5 * flags__0_3 * flags__3_3 + 5 * flags__1_3 * flags__2_3 + 5 * flags__1_3 * flags__3_3 + 5 * flags__2_3 * flags__3_3 - (1006632960 * flags__3_3 * (flags__3_3 - 1) + flags__0_3 * (flags__0_3 + flags__1_3 + 
flags__2_3 + flags__3_3 - 2) + flags__1_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + flags__2_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + 3 * flags__3_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + 5 * is_valid) = 0\n(7864320 * a__0_4 + 7864321 * is_valid - 7864320 * writes_aux__prev_data__0_4) * (7864320 * a__0_4 + 7864320 - 7864320 * writes_aux__prev_data__0_4) = 0\n(30720 * a__0_4 + 7864320 * a__1_4 + 30721 * is_valid - (30720 * writes_aux__prev_data__0_4 + 7864320 * writes_aux__prev_data__1_4)) * (30720 * a__0_4 + 7864320 * a__1_4 + 30720 - (30720 * writes_aux__prev_data__0_4 + 7864320 * writes_aux__prev_data__1_4)) = 0\n(120 * a__0_4 + 30720 * a__1_4 + 7864320 * a__2_4 + 121 * is_valid - (120 * writes_aux__prev_data__0_4 + 30720 * writes_aux__prev_data__1_4 + 7864320 * writes_aux__prev_data__2_4)) * (120 * a__0_4 + 30720 * a__1_4 + 7864320 * a__2_4 + 120 - (120 * writes_aux__prev_data__0_4 + 30720 * writes_aux__prev_data__1_4 + 7864320 * writes_aux__prev_data__2_4)) = 0\n(943718400 * writes_aux__prev_data__0_4 + 120 * a__1_4 + 30720 * a__2_4 + 7864320 * a__3_4 - (120 * writes_aux__prev_data__1_4 + 30720 * writes_aux__prev_data__2_4 + 7864320 * writes_aux__prev_data__3_4 + 943718400 * a__0_4 + 943718399 * is_valid)) * (943718400 * writes_aux__prev_data__0_4 + 120 * a__1_4 + 30720 * a__2_4 + 7864320 * a__3_4 - (120 * writes_aux__prev_data__1_4 + 30720 * writes_aux__prev_data__2_4 + 7864320 * writes_aux__prev_data__3_4 + 943718400 * a__0_4 + 943718400)) = 0\ncmp_result_6 * (cmp_result_6 - 1) = 0\ndiff_marker__3_6 * (diff_marker__3_6 - 1) = 0\ndiff_marker__2_6 * (diff_marker__2_6 - 1) = 0\ndiff_marker__1_6 * (diff_marker__1_6 - 1) = 0\ndiff_marker__0_6 * (diff_marker__0_6 - 1) = 0\n(1 - (diff_marker__0_6 + diff_marker__1_6 + diff_marker__2_6 + diff_marker__3_6)) * (a__0_5 * (2 * cmp_result_6 - 1)) = 0\ndiff_marker__0_6 * (diff_val_6 - a__0_5 * (2 * cmp_result_6 - 1)) = 0\n(diff_marker__0_6 + diff_marker__1_6 + 
diff_marker__2_6 + diff_marker__3_6) * (diff_marker__0_6 + diff_marker__1_6 + diff_marker__2_6 + diff_marker__3_6 - 1) = 0\n(1 - (diff_marker__0_6 + diff_marker__1_6 + diff_marker__2_6 + diff_marker__3_6)) * cmp_result_6 = 0\ncmp_result_7 * (cmp_result_7 - 1) = 0\ndiff_marker__3_7 * (diff_marker__3_7 - 1) = 0\n(1 - diff_marker__3_7) * (a__3_4 * (2 * cmp_result_7 - 1)) = 0\ndiff_marker__3_7 * (diff_val_7 - a__3_4 * (2 * cmp_result_7 - 1)) = 0\ndiff_marker__2_7 * (diff_marker__2_7 - 1) = 0\n(1 - (diff_marker__2_7 + diff_marker__3_7)) * (a__2_4 * (2 * cmp_result_7 - 1)) = 0\ndiff_marker__2_7 * (diff_val_7 - a__2_4 * (2 * cmp_result_7 - 1)) = 0\ndiff_marker__1_7 * (diff_marker__1_7 - 1) = 0\n(1 - (diff_marker__1_7 + diff_marker__2_7 + diff_marker__3_7)) * (a__1_4 * (2 * cmp_result_7 - 1)) = 0\ndiff_marker__1_7 * (diff_val_7 - a__1_4 * (2 * cmp_result_7 - 1)) = 0\ndiff_marker__0_7 * (diff_marker__0_7 - 1) = 0\n(1 - (diff_marker__0_7 + diff_marker__1_7 + diff_marker__2_7 + diff_marker__3_7)) * (a__0_4 * (2 * cmp_result_7 - 1)) = 0\ndiff_marker__0_7 * (diff_val_7 - a__0_4 * (2 * cmp_result_7 - 1)) = 0\n(diff_marker__0_7 + diff_marker__1_7 + diff_marker__2_7 + diff_marker__3_7) * (diff_marker__0_7 + diff_marker__1_7 + diff_marker__2_7 + diff_marker__3_7 - 1) = 0\n(1 - (diff_marker__0_7 + diff_marker__1_7 + diff_marker__2_7 + diff_marker__3_7)) * cmp_result_7 = 0\n(7864320 * a__0_9 - (7864320 * b__0_5 + 7864320 * is_valid)) * (7864320 * a__0_9 - (7864320 * b__0_5 + 7864321)) = 0\n(30720 * a__0_9 + 7864320 * a__1_9 - (30720 * b__0_5 + 7864320 * b__1_5 + 30720 * is_valid)) * (30720 * a__0_9 + 7864320 * a__1_9 - (30720 * b__0_5 + 7864320 * b__1_5 + 30721)) = 0\n(120 * a__0_9 + 30720 * a__1_9 + 7864320 * a__2_9 - (120 * b__0_5 + 30720 * b__1_5 + 7864320 * b__2_5 + 120 * is_valid)) * (120 * a__0_9 + 30720 * a__1_9 + 7864320 * a__2_9 - (120 * b__0_5 + 30720 * b__1_5 + 7864320 * b__2_5 + 121)) = 0\n(943718400 * b__0_5 + 120 * a__1_9 + 30720 * a__2_9 + 7864320 * a__3_9 + 943718400 
* is_valid - (120 * b__1_5 + 30720 * b__2_5 + 7864320 * b__3_5 + 943718400 * a__0_9)) * (943718400 * b__0_5 + 120 * a__1_9 + 30720 * a__2_9 + 7864320 * a__3_9 + 943718399 - (120 * b__1_5 + 30720 * b__2_5 + 7864320 * b__3_5 + 943718400 * a__0_9)) = 0\ncmp_result_12 * (cmp_result_12 - 1) = 0\nflags__2_3 * (flags__2_3 - 1) - (flags__0_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + 2 * flags__1_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2) + 3 * flags__2_3 * (flags__0_3 + flags__1_3 + flags__2_3 + flags__3_3 - 2)) = 0\n(1 - cmp_result_12) * (cmp_result_6 * cmp_result_7) = 0\nfree_var_467 * (cmp_result_6 * cmp_result_7) - cmp_result_12 = 0\nopcode_loadb_flag0_0 * shifted_read_data__0_0 + (1 - opcode_loadb_flag0_0) * shifted_read_data__1_0 - read_data__0_3 = 0\ndiff_val_6 * (diff_marker__1_6 + diff_marker__2_6 + diff_marker__3_6) = 0\n(1 - is_valid) * (diff_marker__0_6 + diff_marker__1_6 + diff_marker__2_6 + diff_marker__3_6) = 0\n(1 - is_valid) * (diff_marker__0_7 + diff_marker__1_7 + diff_marker__2_7 + diff_marker__3_7) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/beqz.txt",
    "content": "Instructions:\n  0: BEQ 5 0 8 1 1\n\nAPC advantage:\n  - Main columns: 26 -> 12 (2.17x reduction)\n  - Bus interactions: 11 -> 10 (1.10x reduction)\n  - Constraints: 11 -> 4 (2.75x reduction)\n\nSymbolic machine using 12 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  cmp_result_0\n  free_var_28\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 * cmp_result_0 + 4, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\ncmp_result_0 * (a__0_0 + a__1_0 + a__2_0 + a__3_0) = 0\nfree_var_28 * (a__0_0 + a__1_0 + a__2_0 + a__3_0) + cmp_result_0 - 1 * is_valid = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/bgez.txt",
    "content": "Instructions:\n  0: BGE 5 0 8 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 17 (1.88x reduction)\n  - Bus interactions: 13 -> 12 (1.08x reduction)\n  - Constraints: 25 -> 18 (1.39x reduction)\n\nSymbolic machine using 17 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  cmp_result_0\n  a_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 * cmp_result_0 + 4, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[a_msb_f_0 + 128, 0, 0, 0]\n\n// Algebraic 
constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(a__3_0 - a_msb_f_0) * (a_msb_f_0 + 256 - a__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * (a_msb_f_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__3_0 * (a_msb_f_0 * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * (a__2_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__2_0 * (a__2_0 * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (a__1_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__1_0 * (a__1_0 * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (a__0_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__0_0 * (a__0_0 * (1 - 2 * cmp_result_0) + diff_val_0) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 * is_valid - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (1 - cmp_result_0) = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/bgtz.txt",
    "content": "Instructions:\n  0: BLT 0 5 8 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 17 (1.88x reduction)\n  - Bus interactions: 13 -> 12 (1.08x reduction)\n  - Constraints: 25 -> 18 (1.39x reduction)\n\nSymbolic machine using 17 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  b_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 * cmp_result_0 + 4, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[b_msb_f_0 + 128, 0, 0, 0]\n\n// Algebraic 
constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(b__3_0 - b_msb_f_0) * (b_msb_f_0 + 256 - b__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * (b_msb_f_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__3_0 * (diff_val_0 - b_msb_f_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * (b__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (diff_val_0 - b__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (b__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (diff_val_0 - b__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (b__0_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (diff_val_0 - b__0_0 * (2 * cmp_result_0 - 1)) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/blez.txt",
    "content": "Instructions:\n  0: BGE 0 5 8 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 17 (1.88x reduction)\n  - Bus interactions: 13 -> 12 (1.08x reduction)\n  - Constraints: 25 -> 18 (1.39x reduction)\n\nSymbolic machine using 17 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  b_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 * cmp_result_0 + 4, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[b_msb_f_0 + 128, 0, 0, 0]\n\n// Algebraic 
constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(b__3_0 - b_msb_f_0) * (b_msb_f_0 + 256 - b__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * (b_msb_f_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__3_0 * (diff_val_0 - b_msb_f_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * (b__2_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__2_0 * (diff_val_0 - b__2_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (b__1_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__1_0 * (diff_val_0 - b__1_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (b__0_0 * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__0_0 * (diff_val_0 - b__0_0 * (1 - 2 * cmp_result_0)) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 * is_valid - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (1 - cmp_result_0) = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/bltz.txt",
    "content": "Instructions:\n  0: BLT 5 0 8 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 17 (1.88x reduction)\n  - Bus interactions: 13 -> 12 (1.08x reduction)\n  - Constraints: 25 -> 18 (1.39x reduction)\n\nSymbolic machine using 17 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  cmp_result_0\n  a_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 * cmp_result_0 + 4, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[a_msb_f_0 + 128, 0, 0, 0]\n\n// Algebraic 
constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(a__3_0 - a_msb_f_0) * (a_msb_f_0 + 256 - a__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * (a_msb_f_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__3_0 * (a_msb_f_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * (a__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (a__2_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (a__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (a__1_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (a__0_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (a__0_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/bnez.txt",
    "content": "Instructions:\n  0: BNE 5 0 8 1 1\n\nAPC advantage:\n  - Main columns: 26 -> 12 (2.17x reduction)\n  - Bus interactions: 11 -> 10 (1.10x reduction)\n  - Constraints: 11 -> 4 (2.75x reduction)\n\nSymbolic machine using 12 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  cmp_result_0\n  free_var_28\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 * cmp_result_0 + 4, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(1 - cmp_result_0) * (a__0_0 + a__1_0 + a__2_0 + a__3_0) = 0\nfree_var_28 * (a__0_0 + a__1_0 + a__2_0 + a__3_0) - cmp_result_0 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/j.txt",
    "content": "Instructions:\n  0: JAL 0 0 8 1 0\n\nAPC advantage:\n  - Main columns: 18 -> 2 (9.00x reduction)\n  - Bus interactions: 10 -> 2 (5.00x reduction)\n  - Constraints: 9 -> 1 (9.00x reduction)\n\nSymbolic machine using 2 unique main columns:\n  inner__from_state__timestamp_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, inner__from_state__timestamp_0]\nmult=is_valid * 1, args=[8, inner__from_state__timestamp_0 + 1]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/jr.txt",
    "content": "Instructions:\n  0: JAL 1 0 8 1 0\n\nAPC advantage:\n  - Main columns: 18 -> 2 (9.00x reduction)\n  - Bus interactions: 10 -> 2 (5.00x reduction)\n  - Constraints: 9 -> 1 (9.00x reduction)\n\nSymbolic machine using 2 unique main columns:\n  inner__from_state__timestamp_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, inner__from_state__timestamp_0]\nmult=is_valid * 1, args=[8, inner__from_state__timestamp_0 + 1]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/load_immediate.txt",
    "content": "Instructions:\n  0: ADD rd_ptr = 48, rs1_ptr = 0, rs2 = 216, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 36 -> 10 (3.60x reduction)\n  - Bus interactions: 20 -> 10 (2.00x reduction)\n  - Constraints: 22 -> 1 (22.00x reduction)\n\nSymbolic machine using 10 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 48, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 48, 216, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/mv.txt",
    "content": "Instructions:\n  0: ADD rd_ptr = 8, rs1_ptr = 5, rs2 = 0, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 36 -> 14 (2.57x reduction)\n  - Bus interactions: 20 -> 10 (2.00x reduction)\n  - Constraints: 22 -> 1 (22.00x reduction)\n\nSymbolic machine using 14 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/neg.txt",
    "content": "Instructions:\n  0: SUB rd_ptr = 8, rs1_ptr = 0, rs2 = 5, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 36 -> 20 (1.80x reduction)\n  - Bus interactions: 20 -> 16 (1.25x reduction)\n  - Constraints: 22 -> 5 (4.40x reduction)\n\nSymbolic machine using 20 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, 
args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_0, a__1_0, 0, 0]\nmult=is_valid * 1, args=[a__2_0, a__3_0, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_0 + 7864320 * c__0_0) * (7864320 * a__0_0 + 7864320 * c__0_0 + 1) = 0\n(30720 * a__0_0 + 7864320 * a__1_0 + 30720 * c__0_0 + 7864320 * c__1_0) * (30720 * a__0_0 + 7864320 * a__1_0 + 30720 * c__0_0 + 7864320 * c__1_0 + 1) = 0\n(120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 + 120 * c__0_0 + 30720 * c__1_0 + 7864320 * c__2_0) * (120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 + 120 * c__0_0 + 30720 * c__1_0 + 7864320 * c__2_0 + 1) = 0\n(943718400 * a__0_0 + 943718400 * c__0_0 - (120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 120 * c__1_0 + 30720 * c__2_0 + 7864320 * c__3_0)) * (943718400 * a__0_0 + 943718400 * c__0_0 - (120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 120 * c__1_0 + 30720 * c__2_0 + 7864320 * c__3_0 + 1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/not.txt",
    "content": "Instructions:\n  0: XOR rd_ptr = 8, rs1_ptr = 5, rs2 = 16777215, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 36 -> 14 (2.57x reduction)\n  - Bus interactions: 20 -> 10 (2.00x reduction)\n  - Constraints: 22 -> 1 (22.00x reduction)\n\nSymbolic machine using 14 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, 255 - a__0_0, 255 - a__1_0, 255 - a__2_0, 255 - a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, 255 - a__0_0, 255 - a__1_0, 255 - a__2_0, 255 - a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nis_valid * 
(is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/ret.txt",
    "content": "Instructions:\n  0: JALR 0 1 0 1 0\n\nAPC advantage:\n  - Main columns: 28 -> 11 (2.55x reduction)\n  - Bus interactions: 16 -> 8 (2.00x reduction)\n  - Constraints: 9 -> 4 (2.25x reduction)\n\nSymbolic machine using 11 unique main columns:\n  from_state__timestamp_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  to_pc_least_sig_bit_0\n  to_pc_limbs__0_0\n  to_pc_limbs__1_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[2 * to_pc_limbs__0_0 + 65536 * to_pc_limbs__1_0, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 1, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 1, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[to_pc_limbs__1_0, 14]\nmult=is_valid * 1, args=[to_pc_limbs__0_0, 15]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\n\n// Algebraic constraints:\nto_pc_least_sig_bit_0 * (to_pc_least_sig_bit_0 - 1) = 0\n(30720 * to_pc_least_sig_bit_0 + 61440 * to_pc_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0)) * (30720 * to_pc_least_sig_bit_0 + 61440 * to_pc_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 1)) = 0\n(943718400 * rs1_data__0_0 + 125829121 * to_pc_limbs__0_0 + 30720 * to_pc_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * to_pc_least_sig_bit_0)) * (943718400 * rs1_data__0_0 + 125829121 * to_pc_limbs__0_0 + 30720 * to_pc_limbs__1_0 - 
(120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * to_pc_least_sig_bit_0 + 1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/seqz.txt",
    "content": "Instructions:\n  0: SLTU rd_ptr = 8, rs1_ptr = 5, rs2 = 1, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 37 -> 16 (2.31x reduction)\n  - Bus interactions: 18 -> 10 (1.80x reduction)\n  - Constraints: 28 -> 4 (7.00x reduction)\n\nSymbolic machine using 16 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  inv_of_sum_37\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, cmp_result_0, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 
0\ncmp_result_0 * (b__0_0 + b__1_0 + b__2_0 + b__3_0) = 0\ninv_of_sum_37 * (b__0_0 + b__1_0 + b__2_0 + b__3_0) + cmp_result_0 - 1 * is_valid = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/sgtz.txt",
    "content": "Instructions:\n  0: SLT rd_ptr = 8, rs1_ptr = 0, rs2 = 5, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 37 -> 23 (1.61x reduction)\n  - Bus interactions: 18 -> 16 (1.12x reduction)\n  - Constraints: 28 -> 18 (1.56x reduction)\n\nSymbolic machine using 23 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  cmp_result_0\n  c_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, cmp_result_0, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 
12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[c_msb_f_0 + 128, 0, 0, 0]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(c__3_0 - c_msb_f_0) * (c_msb_f_0 + 256 - c__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * (c_msb_f_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__3_0 * (diff_val_0 - c_msb_f_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * (c__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (diff_val_0 - c__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (c__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (diff_val_0 - c__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (c__0_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (diff_val_0 - c__0_0 * (2 * cmp_result_0 - 1)) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n(1 - is_valid) * 
(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/sltz.txt",
    "content": "Instructions:\n  0: SLT rd_ptr = 8, rs1_ptr = 5, rs2 = 0, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 37 -> 23 (1.61x reduction)\n  - Bus interactions: 18 -> 16 (1.12x reduction)\n  - Constraints: 28 -> 18 (1.56x reduction)\n\nSymbolic machine using 23 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  b_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, cmp_result_0, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 
12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[b_msb_f_0 + 128, 0, 0, 0]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(b__3_0 - b_msb_f_0) * (b_msb_f_0 + 256 - b__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * (b_msb_f_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__3_0 * (b_msb_f_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * (b__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (b__2_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (b__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (b__1_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (b__0_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (b__0_0 * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n(1 - is_valid) * 
(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/pseudo_instructions/snez.txt",
    "content": "Instructions:\n  0: SLTU rd_ptr = 8, rs1_ptr = 0, rs2 = 5, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 37 -> 22 (1.68x reduction)\n  - Bus interactions: 18 -> 15 (1.20x reduction)\n  - Constraints: 28 -> 17 (1.65x reduction)\n\nSymbolic machine using 22 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  cmp_result_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, cmp_result_0, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 
12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * (c__3_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__3_0 * (diff_val_0 - c__3_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * (c__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * (diff_val_0 - c__2_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (c__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * (diff_val_0 - c__1_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (c__0_0 * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * (diff_val_0 - c__0_0 * (2 * cmp_result_0 - 1)) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_add_1.txt",
    "content": "Instructions:\n  0: ADD rd_ptr = 8, rs1_ptr = 8, rs2 = 1, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 36 -> 12 (3.00x reduction)\n  - Bus interactions: 20 -> 8 (2.50x reduction)\n  - Constraints: 22 -> 5 (4.40x reduction)\n\nSymbolic machine using 12 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_0, a__1_0, 0, 0]\nmult=is_valid * 1, args=[a__2_0, a__3_0, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_0 - (7864320 * writes_aux__prev_data__0_0 + 7864320 * is_valid)) * (7864320 * a__0_0 - (7864320 * writes_aux__prev_data__0_0 + 7864321)) = 0\n(30720 * a__0_0 + 7864320 * a__1_0 - (30720 * writes_aux__prev_data__0_0 + 7864320 * writes_aux__prev_data__1_0 + 30720 * is_valid)) * (30720 * a__0_0 + 7864320 * a__1_0 - (30720 * writes_aux__prev_data__0_0 + 7864320 * writes_aux__prev_data__1_0 + 30721)) = 0\n(120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 - (120 * 
writes_aux__prev_data__0_0 + 30720 * writes_aux__prev_data__1_0 + 7864320 * writes_aux__prev_data__2_0 + 120 * is_valid)) * (120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 - (120 * writes_aux__prev_data__0_0 + 30720 * writes_aux__prev_data__1_0 + 7864320 * writes_aux__prev_data__2_0 + 121)) = 0\n(943718400 * writes_aux__prev_data__0_0 + 120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 943718400 * is_valid - (120 * writes_aux__prev_data__1_0 + 30720 * writes_aux__prev_data__2_0 + 7864320 * writes_aux__prev_data__3_0 + 943718400 * a__0_0)) * (943718400 * writes_aux__prev_data__0_0 + 120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 943718399 - (120 * writes_aux__prev_data__1_0 + 30720 * writes_aux__prev_data__2_0 + 7864320 * writes_aux__prev_data__3_0 + 943718400 * a__0_0)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_and_0.txt",
    "content": "Instructions:\n  0: AND rd_ptr = 8, rs1_ptr = 0, rs2 = 5, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 36 -> 10 (3.60x reduction)\n  - Bus interactions: 20 -> 10 (2.00x reduction)\n  - Constraints: 22 -> 1 (22.00x reduction)\n\nSymbolic machine using 10 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, 0, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_beq.txt",
    "content": "Instructions:\n  0: BEQ 8 5 2 1 1\n\nAPC advantage:\n  - Main columns: 26 -> 16 (1.62x reduction)\n  - Bus interactions: 11 -> 10 (1.10x reduction)\n  - Constraints: 11 -> 7 (1.57x reduction)\n\nSymbolic machine using 16 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  free_var_30\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 - 2 * cmp_result_0, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\ncmp_result_0 * (a__0_0 - b__0_0) = 0\ncmp_result_0 * (a__1_0 - b__1_0) = 0\ncmp_result_0 * (a__2_0 - b__2_0) = 0\ncmp_result_0 * (a__3_0 - b__3_0) = 0\nfree_var_30 * ((a__0_0 - 
b__0_0) * (a__0_0 - b__0_0) + (a__1_0 - b__1_0) * (a__1_0 - b__1_0) + (a__2_0 - b__2_0) * (a__2_0 - b__2_0) + (a__3_0 - b__3_0) * (a__3_0 - b__3_0)) + cmp_result_0 - 1 * is_valid = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_bge.txt",
    "content": "Instructions:\n  0: BGE 8 5 2 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 22 (1.45x reduction)\n  - Bus interactions: 13 -> 12 (1.08x reduction)\n  - Constraints: 25 -> 19 (1.32x reduction)\n\nSymbolic machine using 22 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  a_msb_f_0\n  b_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 - 2 * cmp_result_0, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 
0]\nmult=is_valid * 1, args=[a_msb_f_0 + 128, b_msb_f_0 + 128, 0, 0]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(a__3_0 - a_msb_f_0) * (a_msb_f_0 + 256 - a__3_0) = 0\n(b__3_0 - b_msb_f_0) * (b_msb_f_0 + 256 - b__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * ((b_msb_f_0 - a_msb_f_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__3_0 * ((a_msb_f_0 - b_msb_f_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * ((b__2_0 - a__2_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__2_0 * ((a__2_0 - b__2_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__1_0 - a__1_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__1_0 * ((a__1_0 - b__1_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__0_0 - a__0_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__0_0 * ((a__0_0 - b__0_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 * is_valid - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (1 - cmp_result_0) = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_bgeu.txt",
    "content": "Instructions:\n  0: BGEU 8 5 2 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 20 (1.60x reduction)\n  - Bus interactions: 13 -> 11 (1.18x reduction)\n  - Constraints: 25 -> 17 (1.47x reduction)\n\nSymbolic machine using 20 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 - 2 * cmp_result_0, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\n\n// Algebraic 
constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * ((b__3_0 - a__3_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__3_0 * ((a__3_0 - b__3_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * ((b__2_0 - a__2_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__2_0 * ((a__2_0 - b__2_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__1_0 - a__1_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__1_0 * ((a__1_0 - b__1_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__0_0 - a__0_0) * (1 - 2 * cmp_result_0)) = 0\ndiff_marker__0_0 * ((a__0_0 - b__0_0) * (1 - 2 * cmp_result_0) + diff_val_0) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 * is_valid - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * (1 - cmp_result_0) = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_blt.txt",
    "content": "Instructions:\n  0: BLT 8 5 2 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 22 (1.45x reduction)\n  - Bus interactions: 13 -> 12 (1.08x reduction)\n  - Constraints: 25 -> 19 (1.32x reduction)\n\nSymbolic machine using 22 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  a_msb_f_0\n  b_msb_f_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 - 2 * cmp_result_0, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 
0]\nmult=is_valid * 1, args=[a_msb_f_0 + 128, b_msb_f_0 + 128, 0, 0]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(a__3_0 - a_msb_f_0) * (a_msb_f_0 + 256 - a__3_0) = 0\n(b__3_0 - b_msb_f_0) * (b_msb_f_0 + 256 - b__3_0) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * ((b_msb_f_0 - a_msb_f_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__3_0 * ((a_msb_f_0 - b_msb_f_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * ((b__2_0 - a__2_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * ((a__2_0 - b__2_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__1_0 - a__1_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * ((a__1_0 - b__1_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__0_0 - a__0_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * ((a__0_0 - b__0_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_bltu.txt",
    "content": "Instructions:\n  0: BLTU 8 5 2 1 1\n\nAPC advantage:\n  - Main columns: 32 -> 20 (1.60x reduction)\n  - Bus interactions: 13 -> 11 (1.18x reduction)\n  - Constraints: 25 -> 17 (1.47x reduction)\n\nSymbolic machine using 20 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  diff_marker__0_0\n  diff_marker__1_0\n  diff_marker__2_0\n  diff_marker__3_0\n  diff_val_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 - 2 * cmp_result_0, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0, args=[diff_val_0 - 1, 0, 0, 0]\n\n// Algebraic 
constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\ndiff_marker__3_0 * (diff_marker__3_0 - 1) = 0\n(1 - diff_marker__3_0) * ((b__3_0 - a__3_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__3_0 * ((a__3_0 - b__3_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__2_0 * (diff_marker__2_0 - 1) = 0\n(1 - (diff_marker__2_0 + diff_marker__3_0)) * ((b__2_0 - a__2_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__2_0 * ((a__2_0 - b__2_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__1_0 * (diff_marker__1_0 - 1) = 0\n(1 - (diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__1_0 - a__1_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__1_0 * ((a__1_0 - b__1_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\ndiff_marker__0_0 * (diff_marker__0_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * ((b__0_0 - a__0_0) * (2 * cmp_result_0 - 1)) = 0\ndiff_marker__0_0 * ((a__0_0 - b__0_0) * (2 * cmp_result_0 - 1) + diff_val_0) = 0\n(diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0 - 1) = 0\n(1 - (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0)) * cmp_result_0 = 0\n(1 - is_valid) * (diff_marker__0_0 + diff_marker__1_0 + diff_marker__2_0 + diff_marker__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_bne.txt",
    "content": "Instructions:\n  0: BNE 8 5 2 1 1\n\nAPC advantage:\n  - Main columns: 26 -> 16 (1.62x reduction)\n  - Bus interactions: 11 -> 10 (1.10x reduction)\n  - Constraints: 11 -> 7 (1.57x reduction)\n\nSymbolic machine using 16 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  cmp_result_0\n  free_var_30\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4 - 2 * cmp_result_0, from_state__timestamp_0 + 2]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Algebraic constraints:\ncmp_result_0 * (cmp_result_0 - 1) = 0\n(1 - cmp_result_0) * (a__0_0 - b__0_0) = 0\n(1 - cmp_result_0) * (a__1_0 - b__1_0) = 0\n(1 - cmp_result_0) * (a__2_0 - b__2_0) = 0\n(1 - cmp_result_0) * (a__3_0 - b__3_0) = 
0\nfree_var_30 * ((a__0_0 - b__0_0) * (a__0_0 - b__0_0) + (a__1_0 - b__1_0) * (a__1_0 - b__1_0) + (a__2_0 - b__2_0) * (a__2_0 - b__2_0) + (a__3_0 - b__3_0) * (a__3_0 - b__3_0)) - cmp_result_0 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_div.txt",
    "content": "Instructions:\n  0: DIV 8 7 5 1 0\n\nAPC advantage:\n  - Main columns: 59 -> 48 (1.23x reduction)\n  - Bus interactions: 25 -> 24 (1.04x reduction)\n  - Constraints: 64 -> 45 (1.42x reduction)\n\nSymbolic machine using 48 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  q__0_0\n  q__1_0\n  q__2_0\n  q__3_0\n  r__0_0\n  r__1_0\n  r__2_0\n  r__3_0\n  zero_divisor_0\n  r_zero_0\n  b_sign_0\n  c_sign_0\n  q_sign_0\n  sign_xor_0\n  c_sum_inv_0\n  r_sum_inv_0\n  r_prime__0_0\n  r_prime__1_0\n  r_prime__2_0\n  r_prime__3_0\n  r_inv__0_0\n  r_inv__1_0\n  r_inv__2_0\n  r_inv__3_0\n  lt_marker__0_0\n  lt_marker__1_0\n  lt_marker__2_0\n  lt_diff_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, q__0_0, q__1_0, q__2_0, q__3_0, from_state__timestamp_0 + 
2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * (1 - (zero_divisor_0 + r_zero_0)), args=[lt_diff_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[2 * b__3_0 - 256 * b_sign_0, 2 * c__3_0 - 256 * c_sign_0, 0, 0]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=is_valid * 1, args=[q__0_0, 7864320 * b__0_0 - (7864320 * c__0_0 * q__0_0 + 7864320 * r__0_0)]\nmult=is_valid * 1, args=[q__1_0, 30720 * b__0_0 + 7864320 * b__1_0 - (30720 * c__0_0 * q__0_0 + 7864320 * c__0_0 * q__1_0 + 7864320 * c__1_0 * q__0_0 + 30720 * r__0_0 + 7864320 * r__1_0)]\nmult=is_valid * 1, args=[q__2_0, 120 * b__0_0 + 30720 * b__1_0 + 7864320 * b__2_0 - (120 * c__0_0 * q__0_0 + 30720 * c__0_0 * q__1_0 + 30720 * c__1_0 * q__0_0 + 7864320 * c__0_0 * q__2_0 + 7864320 * c__1_0 * q__1_0 + 7864320 * c__2_0 * q__0_0 + 120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0)]\nmult=is_valid * 1, args=[q__3_0, 943718400 * c__0_0 * q__0_0 + 120 * b__1_0 + 30720 * b__2_0 + 7864320 * b__3_0 + 943718400 * r__0_0 - (120 * c__0_0 * q__1_0 + 120 * c__1_0 * q__0_0 + 30720 * c__0_0 * q__2_0 + 30720 * c__1_0 * q__1_0 + 30720 * c__2_0 * q__0_0 + 7864320 * c__0_0 * q__3_0 + 7864320 * c__1_0 * q__2_0 + 
7864320 * c__2_0 * q__1_0 + 7864320 * c__3_0 * q__0_0 + 943718400 * b__0_0 + 120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0)]\nmult=is_valid * 1, args=[r__0_0, 3686400 * c__0_0 * q__0_0 + 943718400 * c__0_0 * q__1_0 + 943718400 * c__1_0 * q__0_0 + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 120 * b__2_0 + 30720 * b__3_0 + 3686400 * r__0_0 + 943718400 * r__1_0 - (120 * c__0_0 * q__2_0 + 120 * c__1_0 * q__1_0 + 120 * c__2_0 * q__0_0 + 30720 * c__0_0 * q__3_0 + 30720 * c__1_0 * q__2_0 + 30720 * c__2_0 * q__1_0 + 30720 * c__3_0 * q__0_0 + 7864320 * c__1_0 * q__3_0 + 7864320 * c__2_0 * q__2_0 + 7864320 * c__3_0 * q__1_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 3686400 * b__0_0 + 943718400 * b__1_0 + 120 * r__2_0 + 30720 * r__3_0 + 7864321 * b_sign_0)]\nmult=is_valid * 1, args=[r__1_0, 14400 * c__0_0 * q__0_0 + 3686400 * c__0_0 * q__1_0 + 3686400 * c__1_0 * q__0_0 + 943718400 * c__0_0 * q__2_0 + 943718400 * c__1_0 * q__1_0 + 943718400 * c__2_0 * q__0_0 + (30720 * r_zero_0 - 30720) * (255 * b_sign_0) + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 120 * b__3_0 + 14400 * r__0_0 + 3686400 * r__1_0 + 943718400 * r__2_0 - (120 * c__0_0 * q__3_0 + 120 * c__1_0 * q__2_0 + 120 * c__2_0 * q__1_0 + 120 * c__3_0 * q__0_0 + 30720 * c__1_0 * q__3_0 + 30720 * c__2_0 * q__2_0 + 30720 * c__3_0 * q__1_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * q__0_0 * (255 * c_sign_0) + 7864320 * c__2_0 * q__3_0 + 7864320 * c__3_0 * q__2_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * q__1_0 * (255 * c_sign_0) + 14400 * b__0_0 + 3686400 * b__1_0 + 943718400 * b__2_0 + 120 * r__3_0 + 30721 * b_sign_0)]\nmult=is_valid * 1, args=[r__2_0, 14400 * c__0_0 * q__1_0 + 14400 * c__1_0 * q__0_0 + 3686400 * c__0_0 * q__2_0 + 3686400 * c__1_0 * q__1_0 + 3686400 * c__2_0 * q__0_0 + 943718400 * c__0_0 * q__3_0 + 943718400 * c__1_0 * q__2_0 + 943718400 * c__2_0 * q__1_0 + 943718400 * 
c__3_0 * q__0_0 + (120 * r_zero_0 - 120) * (255 * b_sign_0) + (30720 * r_zero_0 - 30720) * (255 * b_sign_0) + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 503316424 * b__0_0 + 14400 * r__1_0 + 3686400 * r__2_0 + 943718400 * r__3_0 - (503316424 * c__0_0 * q__0_0 + 120 * c__1_0 * q__3_0 + 120 * c__2_0 * q__2_0 + 120 * c__3_0 * q__1_0 + 120 * c__0_0 * (255 * q_sign_0) + 120 * q__0_0 * (255 * c_sign_0) + 30720 * c__2_0 * q__3_0 + 30720 * c__3_0 * q__2_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * q__0_0 * (255 * c_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 30720 * q__1_0 * (255 * c_sign_0) + 7864320 * c__3_0 * q__3_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * q__1_0 * (255 * c_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 7864320 * q__2_0 * (255 * c_sign_0) + 14400 * b__1_0 + 3686400 * b__2_0 + 943718400 * b__3_0 + 503316424 * r__0_0 + 121 * b_sign_0)]\nmult=is_valid * 1, args=[r__3_0, 14400 * c__0_0 * q__2_0 + 14400 * c__1_0 * q__1_0 + 14400 * c__2_0 * q__0_0 + 3686400 * c__0_0 * q__3_0 + 3686400 * c__1_0 * q__2_0 + 3686400 * c__2_0 * q__1_0 + 3686400 * c__3_0 * q__0_0 + 943718400 * c__1_0 * q__3_0 + 943718400 * c__2_0 * q__2_0 + 943718400 * c__3_0 * q__1_0 + 943718400 * c__0_0 * (255 * q_sign_0) + 943718400 * q__0_0 * (255 * c_sign_0) + (943718400 - 943718400 * r_zero_0) * (255 * b_sign_0) + (120 * r_zero_0 - 120) * (255 * b_sign_0) + (30720 * r_zero_0 - 30720) * (255 * b_sign_0) + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 442368000 * b__0_0 + 503316424 * b__1_0 + 14400 * r__2_0 + 3686400 * r__3_0 + 943718399 * b_sign_0 - (442368000 * c__0_0 * q__0_0 + 503316424 * c__0_0 * q__1_0 + 503316424 * c__1_0 * q__0_0 + 120 * c__2_0 * q__3_0 + 120 * c__3_0 * q__2_0 + 120 * c__0_0 * (255 * q_sign_0) + 120 * q__0_0 * (255 * c_sign_0) + 120 * c__1_0 * (255 * q_sign_0) + 120 * q__1_0 * (255 * c_sign_0) + 30720 * c__3_0 * q__3_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * 
q__0_0 * (255 * c_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 30720 * q__1_0 * (255 * c_sign_0) + 30720 * c__2_0 * (255 * q_sign_0) + 30720 * q__2_0 * (255 * c_sign_0) + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * q__1_0 * (255 * c_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 7864320 * q__2_0 * (255 * c_sign_0) + 7864320 * c__3_0 * (255 * q_sign_0) + 7864320 * q__3_0 * (255 * c_sign_0) + 14400 * b__2_0 + 3686400 * b__3_0 + 442368000 * r__0_0 + 503316424 * r__1_0)]\n\n// Algebraic constraints:\n(zero_divisor_0 + r_zero_0) * (zero_divisor_0 + r_zero_0 - 1) = 0\nzero_divisor_0 * (zero_divisor_0 - 1) = 0\nzero_divisor_0 * (q__0_0 - 255) = 0\nzero_divisor_0 * (q__1_0 - 255) = 0\nzero_divisor_0 * (q__2_0 - 255) = 0\nzero_divisor_0 * (q__3_0 - 255) = 0\n(1 * is_valid - zero_divisor_0) * ((c__0_0 + c__1_0 + c__2_0 + c__3_0) * c_sum_inv_0 - 1) = 0\nr_zero_0 * (r_zero_0 - 1) = 0\n(1 * is_valid - (zero_divisor_0 + r_zero_0)) * ((r__0_0 + r__1_0 + r__2_0 + r__3_0) * r_sum_inv_0 - 1) = 0\nb_sign_0 * (b_sign_0 - 1) = 0\nc_sign_0 * (c_sign_0 - 1) = 0\nb_sign_0 + c_sign_0 - (2 * b_sign_0 * c_sign_0 + sign_xor_0) = 0\nq_sign_0 * (q_sign_0 - 1) = 0\n(q__0_0 + q__1_0 + q__2_0 + q__3_0) * ((1 - zero_divisor_0) * (q_sign_0 - sign_xor_0)) = 0\n(q_sign_0 - sign_xor_0) * ((1 - zero_divisor_0) * q_sign_0) = 0\n(1 - sign_xor_0) * (r__0_0 - r_prime__0_0) = 0\nsign_xor_0 * ((7864320 * r__0_0 + 7864320 * r_prime__0_0) * (7864320 * r__0_0 + 7864320 * r_prime__0_0 + 1)) = 0\nsign_xor_0 * ((r_prime__0_0 - 256) * r_inv__0_0 - 1) = 0\nsign_xor_0 * ((7864320 * r__0_0 + 7864320 * r_prime__0_0 + 1) * r_prime__0_0) = 0\n(1 - sign_xor_0) * (r__1_0 - r_prime__1_0) = 0\nsign_xor_0 * ((7833600 * r__0_0 + 7833600 * r_prime__0_0 - (7864320 * r__1_0 + 7864320 * r_prime__1_0)) * (30720 * r__0_0 + 7864320 * r__1_0 + 30720 * r_prime__0_0 + 7864320 * r_prime__1_0 + 1)) = 0\nsign_xor_0 * ((r_prime__1_0 - 256) * r_inv__1_0 - 1) = 
0\nsign_xor_0 * ((30720 * r__0_0 + 7864320 * r__1_0 + 30720 * r_prime__0_0 + 7864320 * r_prime__1_0 + 1) * r_prime__1_0) = 0\n(1 - sign_xor_0) * (r__2_0 - r_prime__2_0) = 0\nsign_xor_0 * ((30600 * r__0_0 + 7833600 * r__1_0 + 30600 * r_prime__0_0 + 7833600 * r_prime__1_0 - (7864320 * r__2_0 + 7864320 * r_prime__2_0)) * (120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0 + 120 * r_prime__0_0 + 30720 * r_prime__1_0 + 7864320 * r_prime__2_0 + 1)) = 0\nsign_xor_0 * ((r_prime__2_0 - 256) * r_inv__2_0 - 1) = 0\nsign_xor_0 * ((120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0 + 120 * r_prime__0_0 + 30720 * r_prime__1_0 + 7864320 * r_prime__2_0 + 1) * r_prime__2_0) = 0\n(1 - sign_xor_0) * (r__3_0 - r_prime__3_0) = 0\nsign_xor_0 * ((943718520 * r__0_0 + 30600 * r__1_0 + 7833600 * r__2_0 + 943718520 * r_prime__0_0 + 30600 * r_prime__1_0 + 7833600 * r_prime__2_0 - (7864320 * r__3_0 + 7864320 * r_prime__3_0)) * (943718400 * r__0_0 + 943718400 * r_prime__0_0 - (120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0 + 120 * r_prime__1_0 + 30720 * r_prime__2_0 + 7864320 * r_prime__3_0 + 1))) = 0\nsign_xor_0 * ((r_prime__3_0 - 256) * r_inv__3_0 - 1) = 0\nsign_xor_0 * ((120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0 + 120 * r_prime__1_0 + 30720 * r_prime__2_0 + 7864320 * r_prime__3_0 + 1 - (943718400 * r__0_0 + 943718400 * r_prime__0_0)) * r_prime__3_0) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) = 0\n(lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) * (r_prime__3_0 * (2 * c_sign_0 - 1) + c__3_0 * (1 - 2 * c_sign_0)) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (lt_diff_0 - (r_prime__3_0 * (2 * c_sign_0 - 1) + c__3_0 * (1 - 2 * c_sign_0))) = 0\nlt_marker__2_0 * (lt_marker__2_0 - 1) = 0\n(lt_marker__0_0 + lt_marker__1_0) * (r_prime__2_0 * (2 * c_sign_0 - 1) + c__2_0 * (1 - 2 * c_sign_0)) = 0\nlt_marker__2_0 * 
(lt_diff_0 - (r_prime__2_0 * (2 * c_sign_0 - 1) + c__2_0 * (1 - 2 * c_sign_0))) = 0\nlt_marker__1_0 * (lt_marker__1_0 - 1) = 0\nlt_marker__0_0 * (r_prime__1_0 * (2 * c_sign_0 - 1) + c__1_0 * (1 - 2 * c_sign_0)) = 0\nlt_marker__1_0 * (lt_diff_0 - (r_prime__1_0 * (2 * c_sign_0 - 1) + c__1_0 * (1 - 2 * c_sign_0))) = 0\nlt_marker__0_0 * (lt_marker__0_0 - 1) = 0\nlt_marker__0_0 * (lt_diff_0 - (r_prime__0_0 * (2 * c_sign_0 - 1) + c__0_0 * (1 - 2 * c_sign_0))) = 0\nzero_divisor_0 * (c__0_0 + c__1_0 + c__2_0 + c__3_0) = 0\nr_zero_0 * (r__0_0 + r__1_0 + r__2_0 + r__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_divu.txt",
    "content": "Instructions:\n  0: DIVU 8 7 5 1 0\n\nAPC advantage:\n  - Main columns: 59 -> 37 (1.59x reduction)\n  - Bus interactions: 25 -> 23 (1.09x reduction)\n  - Constraints: 64 -> 25 (2.56x reduction)\n\nSymbolic machine using 37 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  q__0_0\n  q__1_0\n  q__2_0\n  q__3_0\n  r__0_0\n  r__1_0\n  r__2_0\n  r__3_0\n  zero_divisor_0\n  r_zero_0\n  q_sign_0\n  c_sum_inv_0\n  r_sum_inv_0\n  lt_marker__0_0\n  lt_marker__1_0\n  lt_marker__2_0\n  lt_diff_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, q__0_0, q__1_0, q__2_0, q__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * 
reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * (1 - (zero_divisor_0 + r_zero_0)), args=[lt_diff_0 - 1, 0, 0, 0]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=is_valid * 1, args=[q__0_0, 7864320 * b__0_0 - (7864320 * c__0_0 * q__0_0 + 7864320 * r__0_0)]\nmult=is_valid * 1, args=[q__1_0, 30720 * b__0_0 + 7864320 * b__1_0 - (30720 * c__0_0 * q__0_0 + 7864320 * c__0_0 * q__1_0 + 7864320 * c__1_0 * q__0_0 + 30720 * r__0_0 + 7864320 * r__1_0)]\nmult=is_valid * 1, args=[q__2_0, 120 * b__0_0 + 30720 * b__1_0 + 7864320 * b__2_0 - (120 * c__0_0 * q__0_0 + 30720 * c__0_0 * q__1_0 + 30720 * c__1_0 * q__0_0 + 7864320 * c__0_0 * q__2_0 + 7864320 * c__1_0 * q__1_0 + 7864320 * c__2_0 * q__0_0 + 120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0)]\nmult=is_valid * 1, args=[q__3_0, 943718400 * c__0_0 * q__0_0 + 120 * b__1_0 + 30720 * b__2_0 + 7864320 * b__3_0 + 943718400 * r__0_0 - (120 * c__0_0 * q__1_0 + 120 * c__1_0 * q__0_0 + 30720 * c__0_0 * q__2_0 + 30720 * c__1_0 * q__1_0 + 30720 * c__2_0 * q__0_0 + 7864320 * c__0_0 * q__3_0 + 7864320 * c__1_0 * q__2_0 + 7864320 * c__2_0 * q__1_0 + 7864320 * c__3_0 * q__0_0 + 943718400 * b__0_0 + 120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0)]\nmult=is_valid * 1, args=[r__0_0, 3686400 * c__0_0 * q__0_0 + 943718400 * c__0_0 * q__1_0 + 943718400 * c__1_0 * q__0_0 + 120 * 
b__2_0 + 30720 * b__3_0 + 3686400 * r__0_0 + 943718400 * r__1_0 - (120 * c__0_0 * q__2_0 + 120 * c__1_0 * q__1_0 + 120 * c__2_0 * q__0_0 + 30720 * c__0_0 * q__3_0 + 30720 * c__1_0 * q__2_0 + 30720 * c__2_0 * q__1_0 + 30720 * c__3_0 * q__0_0 + 7864320 * c__1_0 * q__3_0 + 7864320 * c__2_0 * q__2_0 + 7864320 * c__3_0 * q__1_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 3686400 * b__0_0 + 943718400 * b__1_0 + 120 * r__2_0 + 30720 * r__3_0)]\nmult=is_valid * 1, args=[r__1_0, 14400 * c__0_0 * q__0_0 + 3686400 * c__0_0 * q__1_0 + 3686400 * c__1_0 * q__0_0 + 943718400 * c__0_0 * q__2_0 + 943718400 * c__1_0 * q__1_0 + 943718400 * c__2_0 * q__0_0 + 120 * b__3_0 + 14400 * r__0_0 + 3686400 * r__1_0 + 943718400 * r__2_0 - (120 * c__0_0 * q__3_0 + 120 * c__1_0 * q__2_0 + 120 * c__2_0 * q__1_0 + 120 * c__3_0 * q__0_0 + 30720 * c__1_0 * q__3_0 + 30720 * c__2_0 * q__2_0 + 30720 * c__3_0 * q__1_0 + 30720 * c__0_0 * (255 * q_sign_0) + 7864320 * c__2_0 * q__3_0 + 7864320 * c__3_0 * q__2_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 14400 * b__0_0 + 3686400 * b__1_0 + 943718400 * b__2_0 + 120 * r__3_0)]\nmult=is_valid * 1, args=[r__2_0, 14400 * c__0_0 * q__1_0 + 14400 * c__1_0 * q__0_0 + 3686400 * c__0_0 * q__2_0 + 3686400 * c__1_0 * q__1_0 + 3686400 * c__2_0 * q__0_0 + 943718400 * c__0_0 * q__3_0 + 943718400 * c__1_0 * q__2_0 + 943718400 * c__2_0 * q__1_0 + 943718400 * c__3_0 * q__0_0 + 503316424 * b__0_0 + 14400 * r__1_0 + 3686400 * r__2_0 + 943718400 * r__3_0 - (503316424 * c__0_0 * q__0_0 + 120 * c__1_0 * q__3_0 + 120 * c__2_0 * q__2_0 + 120 * c__3_0 * q__1_0 + 120 * c__0_0 * (255 * q_sign_0) + 30720 * c__2_0 * q__3_0 + 30720 * c__3_0 * q__2_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 7864320 * c__3_0 * q__3_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 14400 * b__1_0 + 3686400 * b__2_0 + 943718400 * b__3_0 + 503316424 * r__0_0)]\nmult=is_valid * 1, 
args=[r__3_0, 14400 * c__0_0 * q__2_0 + 14400 * c__1_0 * q__1_0 + 14400 * c__2_0 * q__0_0 + 3686400 * c__0_0 * q__3_0 + 3686400 * c__1_0 * q__2_0 + 3686400 * c__2_0 * q__1_0 + 3686400 * c__3_0 * q__0_0 + 943718400 * c__1_0 * q__3_0 + 943718400 * c__2_0 * q__2_0 + 943718400 * c__3_0 * q__1_0 + 943718400 * c__0_0 * (255 * q_sign_0) + 442368000 * b__0_0 + 503316424 * b__1_0 + 14400 * r__2_0 + 3686400 * r__3_0 - (442368000 * c__0_0 * q__0_0 + 503316424 * c__0_0 * q__1_0 + 503316424 * c__1_0 * q__0_0 + 120 * c__2_0 * q__3_0 + 120 * c__3_0 * q__2_0 + 120 * c__0_0 * (255 * q_sign_0) + 120 * c__1_0 * (255 * q_sign_0) + 30720 * c__3_0 * q__3_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 30720 * c__2_0 * (255 * q_sign_0) + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 7864320 * c__3_0 * (255 * q_sign_0) + 14400 * b__2_0 + 3686400 * b__3_0 + 442368000 * r__0_0 + 503316424 * r__1_0)]\n\n// Algebraic constraints:\n(zero_divisor_0 + r_zero_0) * (zero_divisor_0 + r_zero_0 - 1) = 0\nzero_divisor_0 * (zero_divisor_0 - 1) = 0\nzero_divisor_0 * (q__0_0 - 255) = 0\nzero_divisor_0 * (q__1_0 - 255) = 0\nzero_divisor_0 * (q__2_0 - 255) = 0\nzero_divisor_0 * (q__3_0 - 255) = 0\n(1 * is_valid - zero_divisor_0) * ((c__0_0 + c__1_0 + c__2_0 + c__3_0) * c_sum_inv_0 - 1) = 0\nr_zero_0 * (r_zero_0 - 1) = 0\n(1 * is_valid - (zero_divisor_0 + r_zero_0)) * ((r__0_0 + r__1_0 + r__2_0 + r__3_0) * r_sum_inv_0 - 1) = 0\nq_sign_0 * (q_sign_0 - 1) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) = 0\n(lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) * (c__3_0 - r__3_0) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (r__3_0 + lt_diff_0 - c__3_0) = 0\nlt_marker__2_0 * (lt_marker__2_0 - 1) = 0\n(lt_marker__0_0 + lt_marker__1_0) * (c__2_0 - 
r__2_0) = 0\nlt_marker__2_0 * (r__2_0 + lt_diff_0 - c__2_0) = 0\nlt_marker__1_0 * (lt_marker__1_0 - 1) = 0\nlt_marker__0_0 * (c__1_0 - r__1_0) = 0\nlt_marker__1_0 * (r__1_0 + lt_diff_0 - c__1_0) = 0\nlt_marker__0_0 * (lt_marker__0_0 - 1) = 0\nlt_marker__0_0 * (r__0_0 + lt_diff_0 - c__0_0) = 0\nq_sign_0 * (1 - zero_divisor_0) = 0\nzero_divisor_0 * (c__0_0 + c__1_0 + c__2_0 + c__3_0) = 0\nr_zero_0 * (r__0_0 + r__1_0 + r__2_0 + r__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_loadb.txt",
    "content": "Instructions:\n  0: LOADB rd_rs2_ptr = 8, rs1_ptr = 2, imm = 3, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 36 -> 25 (1.44x reduction)\n  - Bus interactions: 18 -> 17 (1.06x reduction)\n  - Constraints: 18 -> 6 (3.00x reduction)\n\nSymbolic machine using 25 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  opcode_loadb_flag0_0\n  shift_most_sig_bit_0\n  data_most_sig_bit_0\n  shifted_read_data__0_0\n  shifted_read_data__1_0\n  shifted_read_data__2_0\n  shifted_read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, 
read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, opcode_loadb_flag0_0 * shifted_read_data__0_0 + (1 - opcode_loadb_flag0_0) * shifted_read_data__1_0, 255 * data_most_sig_bit_0, 255 * data_most_sig_bit_0, 255 * data_most_sig_bit_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[shifted_read_data__0_0 * opcode_loadb_flag0_0 + shifted_read_data__1_0 * (1 - opcode_loadb_flag0_0) - 128 * data_most_sig_bit_0, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_0 + 503316480 - (503316480 * mem_ptr_limbs__0_0 + 503316480 * opcode_loadb_flag0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 
17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nopcode_loadb_flag0_0 * (opcode_loadb_flag0_0 - 1) = 0\ndata_most_sig_bit_0 * (data_most_sig_bit_0 - 1) = 0\nshift_most_sig_bit_0 * (shift_most_sig_bit_0 - 1) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 92160 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 92161)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 817889279 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 817889278 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_loadb_imm0.txt",
    "content": "Instructions:\n  0: LOADB rd_rs2_ptr = 8, rs1_ptr = 2, imm = 0, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 36 -> 25 (1.44x reduction)\n  - Bus interactions: 18 -> 17 (1.06x reduction)\n  - Constraints: 18 -> 6 (3.00x reduction)\n\nSymbolic machine using 25 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  opcode_loadb_flag0_0\n  shift_most_sig_bit_0\n  data_most_sig_bit_0\n  shifted_read_data__0_0\n  shifted_read_data__1_0\n  shifted_read_data__2_0\n  shifted_read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, 
read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, opcode_loadb_flag0_0 * shifted_read_data__0_0 + (1 - opcode_loadb_flag0_0) * shifted_read_data__1_0, 255 * data_most_sig_bit_0, 255 * data_most_sig_bit_0, 255 * data_most_sig_bit_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[shifted_read_data__0_0 * opcode_loadb_flag0_0 + shifted_read_data__1_0 * (1 - opcode_loadb_flag0_0) - 128 * data_most_sig_bit_0, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_0 + 503316480 - (503316480 * mem_ptr_limbs__0_0 + 503316480 * opcode_loadb_flag0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 
17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nopcode_loadb_flag0_0 * (opcode_loadb_flag0_0 - 1) = 0\ndata_most_sig_bit_0 * (data_most_sig_bit_0 - 1) = 0\nshift_most_sig_bit_0 * (shift_most_sig_bit_0 - 1) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 1)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_loadb_x0.txt",
    "content": "Instructions:\n  0: LOADB rd_rs2_ptr = 0, rs1_ptr = 2, imm = 3, mem_as = 2, needs_write = 0, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 36 -> 19 (1.89x reduction)\n  - Bus interactions: 18 -> 13 (1.38x reduction)\n  - Constraints: 18 -> 6 (3.00x reduction)\n\nSymbolic machine using 19 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  opcode_loadb_flag0_0\n  shift_most_sig_bit_0\n  data_most_sig_bit_0\n  shifted_read_data__0_0\n  shifted_read_data__1_0\n  shifted_read_data__2_0\n  shifted_read_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 + opcode_loadb_flag0_0 - (2 * shift_most_sig_bit_0 + 1), 
shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[shifted_read_data__0_0 * opcode_loadb_flag0_0 + shifted_read_data__1_0 * (1 - opcode_loadb_flag0_0) - 128 * data_most_sig_bit_0, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_0 + 503316480 - (503316480 * mem_ptr_limbs__0_0 + 503316480 * opcode_loadb_flag0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Algebraic constraints:\nopcode_loadb_flag0_0 * (opcode_loadb_flag0_0 - 1) = 0\ndata_most_sig_bit_0 * (data_most_sig_bit_0 - 1) = 0\nshift_most_sig_bit_0 * (shift_most_sig_bit_0 - 1) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 92160 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 92161)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 817889279 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 
817889278 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_loadbu.txt",
    "content": "Instructions:\n  0: LOADBU rd_rs2_ptr = 8, rs1_ptr = 2, imm = 21, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 41 -> 27 (1.52x reduction)\n  - Bus interactions: 17 -> 16 (1.06x reduction)\n  - Constraints: 25 -> 15 (1.67x reduction)\n\nSymbolic machine using 27 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  flags__0_0\n  flags__1_0\n  flags__2_0\n  flags__3_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  write_data__0_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 2 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - flags__2_0 * (flags__2_0 - 1), read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 2 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 
+ flags__3_0 - 2) + 3 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - flags__2_0 * (flags__2_0 - 1), read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, write_data__0_0, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[503316480 * flags__2_0 * (flags__2_0 - 1) + 503316481 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316480 * flags__1_0 * flags__2_0 + 1006632960 * flags__0_0 * flags__2_0 + 1006632960 * flags__1_0 * flags__3_0 - (503316480 * flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1006632960 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316481 * flags__2_0 * flags__3_0 + 503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nflags__0_0 * ((flags__0_0 - 1) * (flags__0_0 - 2)) = 0\nflags__1_0 * ((flags__1_0 - 
1) * (flags__1_0 - 2)) = 0\nflags__2_0 * ((flags__2_0 - 1) * (flags__2_0 - 2)) = 0\nflags__3_0 * ((flags__3_0 - 1) * (flags__3_0 - 2)) = 0\n(flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 1 * is_valid) * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__2_0 * (flags__2_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1 * is_valid = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1)) * read_data__0_0 + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__1_0 + (1006632960 * flags__2_0 * (flags__2_0 - 1) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2)) * read_data__2_0 + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) - (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0)) * read_data__0_0 + write_data__0_0 - (flags__0_0 * flags__2_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__0_0 = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1)) * read_data__1_0 + 1006632960 * flags__2_0 * (flags__2_0 - 1) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) - flags__0_0 * flags__1_0) * read_data__1_0 - (flags__1_0 * flags__2_0 * read_data__0_0 + (flags__0_0 * flags__2_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__1_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__2_0 + flags__3_0 * (flags__0_0 + flags__1_0 + 
flags__2_0 + flags__3_0 - 2) * read_data__2_0 - ((flags__0_0 * flags__2_0 + flags__1_0 * flags__3_0) * read_data__0_0 + (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__2_0 * flags__3_0) * prev_data__2_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__3_0 + flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 - (flags__2_0 * flags__3_0 * read_data__0_0 + flags__0_0 * flags__2_0 * read_data__1_0 + (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0) * prev_data__3_0) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 645120 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 645121)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 314572810 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 314572811)) = 0\nflags__1_0 * (flags__1_0 - 1) + flags__2_0 * (flags__2_0 - 1) + 4 * flags__0_0 * flags__1_0 + 4 * flags__0_0 * flags__2_0 + 5 * flags__0_0 * flags__3_0 + 5 * flags__1_0 * flags__2_0 + 5 * flags__1_0 * flags__3_0 + 5 * flags__2_0 * flags__3_0 - (1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1 * is_valid) = 0\nflags__1_0 * flags__2_0 + 2 * flags__0_0 * flags__2_0 + 2 * flags__1_0 * flags__3_0 + 3 * flags__2_0 * flags__3_0 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_loadh.txt",
    "content": "Instructions:\n  0: LOADH rd_rs2_ptr = 8, rs1_ptr = 2, imm = 6, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 36 -> 24 (1.50x reduction)\n  - Bus interactions: 18 -> 17 (1.06x reduction)\n  - Constraints: 18 -> 5 (3.60x reduction)\n\nSymbolic machine using 24 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  shift_most_sig_bit_0\n  data_most_sig_bit_0\n  shifted_read_data__0_0\n  shifted_read_data__1_0\n  shifted_read_data__2_0\n  shifted_read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - 2 * shift_most_sig_bit_0, shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, 
mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - 2 * shift_most_sig_bit_0, shift_most_sig_bit_0 * shifted_read_data__2_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__0_0, shift_most_sig_bit_0 * shifted_read_data__3_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__1_0, shift_most_sig_bit_0 * shifted_read_data__0_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__2_0, shift_most_sig_bit_0 * shifted_read_data__1_0 + (1 - shift_most_sig_bit_0) * shifted_read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, shifted_read_data__0_0, shifted_read_data__1_0, 255 * data_most_sig_bit_0, 255 * data_most_sig_bit_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[shifted_read_data__1_0 - 128 * data_most_sig_bit_0, 7]\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[1006632960 * shift_most_sig_bit_0 - 503316480 * mem_ptr_limbs__0_0, 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\ndata_most_sig_bit_0 * (data_most_sig_bit_0 - 1) = 0\nshift_most_sig_bit_0 * (shift_most_sig_bit_0 - 
1) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 184320 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 184321)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 377487363 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 377487364)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_loadhu.txt",
    "content": "Instructions:\n  0: LOADHU rd_rs2_ptr = 0, rs1_ptr = 2, imm = 22, mem_as = 2, needs_write = 0, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 41 -> 18 (2.28x reduction)\n  - Bus interactions: 17 -> 12 (1.42x reduction)\n  - Constraints: 25 -> 9 (2.78x reduction)\n\nSymbolic machine using 18 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  flags__1_0\n  flags__2_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, 2 * flags__1_0 * (flags__1_0 + flags__2_0 - 2) + 3 * flags__2_0 * (flags__1_0 + flags__2_0 - 2) + mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - flags__2_0 * (flags__2_0 - 1), read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, 2 * flags__1_0 * (flags__1_0 + flags__2_0 - 2) + 3 * flags__2_0 * (flags__1_0 + flags__2_0 - 2) + mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - flags__2_0 * (flags__2_0 - 1), read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * 
rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[503316480 * flags__2_0 * (flags__2_0 - 1) + 503316481 * flags__2_0 * (flags__1_0 + flags__2_0 - 2) + 503316480 * flags__1_0 * flags__2_0 - (1006632960 * flags__1_0 * (flags__1_0 + flags__2_0 - 2) + 503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\n\n// Algebraic constraints:\nflags__1_0 * ((flags__1_0 - 1) * (flags__1_0 - 2)) = 0\nflags__2_0 * ((flags__2_0 - 1) * (flags__2_0 - 2)) = 0\n(flags__1_0 + flags__2_0 - 1 * is_valid) * (flags__1_0 + flags__2_0 - 2) = 0\n1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__2_0 * (flags__2_0 - 1) + flags__1_0 * (flags__1_0 + flags__2_0 - 2) + flags__2_0 * (flags__1_0 + flags__2_0 - 2) + 1 * is_valid = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 675840 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 675841)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 629145590 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 629145589 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) = 0\nflags__1_0 * (flags__1_0 - 1) + flags__2_0 * (flags__2_0 - 1) + 5 * flags__1_0 * flags__2_0 - (flags__1_0 * (flags__1_0 + flags__2_0 - 2) + flags__2_0 * (flags__1_0 + flags__2_0 - 2) + 2 * is_valid) = 0\nflags__1_0 * flags__2_0 = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_loadw.txt",
    "content": "Instructions:\n  0: LOADW rd_rs2_ptr = 8, rs1_ptr = 2, imm = 20, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 41 -> 22 (1.86x reduction)\n  - Bus interactions: 17 -> 16 (1.06x reduction)\n  - Constraints: 25 -> 3 (8.33x reduction)\n\nSymbolic machine using 22 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, 
args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 614400 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 614401)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 754974711 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 754974710 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_mul.txt",
    "content": "Instructions:\n  0: MUL 8 7 5 1 0\n\nAPC advantage:\n  - Main columns: 31 -> 24 (1.29x reduction)\n  - Bus interactions: 19 -> 18 (1.06x reduction)\n  - Constraints: 4 -> 1 (4.00x reduction)\n\nSymbolic machine using 24 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, 
args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=is_valid * 1, args=[a__0_0, 7864320 * a__0_0 - 7864320 * b__0_0 * c__0_0]\nmult=is_valid * 1, args=[a__1_0, 30720 * a__0_0 + 7864320 * a__1_0 - (30720 * b__0_0 * c__0_0 + 7864320 * b__0_0 * c__1_0 + 7864320 * b__1_0 * c__0_0)]\nmult=is_valid * 1, args=[a__2_0, 120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 - (120 * b__0_0 * c__0_0 + 30720 * b__0_0 * c__1_0 + 30720 * b__1_0 * c__0_0 + 7864320 * b__0_0 * c__2_0 + 7864320 * b__1_0 * c__1_0 + 7864320 * b__2_0 * c__0_0)]\nmult=is_valid * 1, args=[a__3_0, 943718400 * b__0_0 * c__0_0 + 120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 - (120 * b__0_0 * c__1_0 + 120 * b__1_0 * c__0_0 + 30720 * b__0_0 * c__2_0 + 30720 * b__1_0 * c__1_0 + 30720 * b__2_0 * c__0_0 + 7864320 * b__0_0 * c__3_0 + 7864320 * b__1_0 * c__2_0 + 7864320 * b__2_0 * c__1_0 + 7864320 * b__3_0 * c__0_0 + 943718400 * a__0_0)]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_rem.txt",
    "content": "Instructions:\n  0: REM 8 7 5 1 0\n\nAPC advantage:\n  - Main columns: 59 -> 48 (1.23x reduction)\n  - Bus interactions: 25 -> 24 (1.04x reduction)\n  - Constraints: 64 -> 45 (1.42x reduction)\n\nSymbolic machine using 48 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  q__0_0\n  q__1_0\n  q__2_0\n  q__3_0\n  r__0_0\n  r__1_0\n  r__2_0\n  r__3_0\n  zero_divisor_0\n  r_zero_0\n  b_sign_0\n  c_sign_0\n  q_sign_0\n  sign_xor_0\n  c_sum_inv_0\n  r_sum_inv_0\n  r_prime__0_0\n  r_prime__1_0\n  r_prime__2_0\n  r_prime__3_0\n  r_inv__0_0\n  r_inv__1_0\n  r_inv__2_0\n  r_inv__3_0\n  lt_marker__0_0\n  lt_marker__1_0\n  lt_marker__2_0\n  lt_diff_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, r__0_0, r__1_0, r__2_0, r__3_0, from_state__timestamp_0 + 
2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * (1 - (zero_divisor_0 + r_zero_0)), args=[lt_diff_0 - 1, 0, 0, 0]\nmult=is_valid * 1, args=[2 * b__3_0 - 256 * b_sign_0, 2 * c__3_0 - 256 * c_sign_0, 0, 0]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=is_valid * 1, args=[q__0_0, 7864320 * b__0_0 - (7864320 * c__0_0 * q__0_0 + 7864320 * r__0_0)]\nmult=is_valid * 1, args=[q__1_0, 30720 * b__0_0 + 7864320 * b__1_0 - (30720 * c__0_0 * q__0_0 + 7864320 * c__0_0 * q__1_0 + 7864320 * c__1_0 * q__0_0 + 30720 * r__0_0 + 7864320 * r__1_0)]\nmult=is_valid * 1, args=[q__2_0, 120 * b__0_0 + 30720 * b__1_0 + 7864320 * b__2_0 - (120 * c__0_0 * q__0_0 + 30720 * c__0_0 * q__1_0 + 30720 * c__1_0 * q__0_0 + 7864320 * c__0_0 * q__2_0 + 7864320 * c__1_0 * q__1_0 + 7864320 * c__2_0 * q__0_0 + 120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0)]\nmult=is_valid * 1, args=[q__3_0, 943718400 * c__0_0 * q__0_0 + 120 * b__1_0 + 30720 * b__2_0 + 7864320 * b__3_0 + 943718400 * r__0_0 - (120 * c__0_0 * q__1_0 + 120 * c__1_0 * q__0_0 + 30720 * c__0_0 * q__2_0 + 30720 * c__1_0 * q__1_0 + 30720 * c__2_0 * q__0_0 + 7864320 * c__0_0 * q__3_0 + 7864320 * c__1_0 * q__2_0 + 
7864320 * c__2_0 * q__1_0 + 7864320 * c__3_0 * q__0_0 + 943718400 * b__0_0 + 120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0)]\nmult=is_valid * 1, args=[r__0_0, 3686400 * c__0_0 * q__0_0 + 943718400 * c__0_0 * q__1_0 + 943718400 * c__1_0 * q__0_0 + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 120 * b__2_0 + 30720 * b__3_0 + 3686400 * r__0_0 + 943718400 * r__1_0 - (120 * c__0_0 * q__2_0 + 120 * c__1_0 * q__1_0 + 120 * c__2_0 * q__0_0 + 30720 * c__0_0 * q__3_0 + 30720 * c__1_0 * q__2_0 + 30720 * c__2_0 * q__1_0 + 30720 * c__3_0 * q__0_0 + 7864320 * c__1_0 * q__3_0 + 7864320 * c__2_0 * q__2_0 + 7864320 * c__3_0 * q__1_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 3686400 * b__0_0 + 943718400 * b__1_0 + 120 * r__2_0 + 30720 * r__3_0 + 7864321 * b_sign_0)]\nmult=is_valid * 1, args=[r__1_0, 14400 * c__0_0 * q__0_0 + 3686400 * c__0_0 * q__1_0 + 3686400 * c__1_0 * q__0_0 + 943718400 * c__0_0 * q__2_0 + 943718400 * c__1_0 * q__1_0 + 943718400 * c__2_0 * q__0_0 + (30720 * r_zero_0 - 30720) * (255 * b_sign_0) + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 120 * b__3_0 + 14400 * r__0_0 + 3686400 * r__1_0 + 943718400 * r__2_0 - (120 * c__0_0 * q__3_0 + 120 * c__1_0 * q__2_0 + 120 * c__2_0 * q__1_0 + 120 * c__3_0 * q__0_0 + 30720 * c__1_0 * q__3_0 + 30720 * c__2_0 * q__2_0 + 30720 * c__3_0 * q__1_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * q__0_0 * (255 * c_sign_0) + 7864320 * c__2_0 * q__3_0 + 7864320 * c__3_0 * q__2_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * q__1_0 * (255 * c_sign_0) + 14400 * b__0_0 + 3686400 * b__1_0 + 943718400 * b__2_0 + 120 * r__3_0 + 30721 * b_sign_0)]\nmult=is_valid * 1, args=[r__2_0, 14400 * c__0_0 * q__1_0 + 14400 * c__1_0 * q__0_0 + 3686400 * c__0_0 * q__2_0 + 3686400 * c__1_0 * q__1_0 + 3686400 * c__2_0 * q__0_0 + 943718400 * c__0_0 * q__3_0 + 943718400 * c__1_0 * q__2_0 + 943718400 * c__2_0 * q__1_0 + 943718400 * 
c__3_0 * q__0_0 + (120 * r_zero_0 - 120) * (255 * b_sign_0) + (30720 * r_zero_0 - 30720) * (255 * b_sign_0) + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 503316424 * b__0_0 + 14400 * r__1_0 + 3686400 * r__2_0 + 943718400 * r__3_0 - (503316424 * c__0_0 * q__0_0 + 120 * c__1_0 * q__3_0 + 120 * c__2_0 * q__2_0 + 120 * c__3_0 * q__1_0 + 120 * c__0_0 * (255 * q_sign_0) + 120 * q__0_0 * (255 * c_sign_0) + 30720 * c__2_0 * q__3_0 + 30720 * c__3_0 * q__2_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * q__0_0 * (255 * c_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 30720 * q__1_0 * (255 * c_sign_0) + 7864320 * c__3_0 * q__3_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * q__1_0 * (255 * c_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 7864320 * q__2_0 * (255 * c_sign_0) + 14400 * b__1_0 + 3686400 * b__2_0 + 943718400 * b__3_0 + 503316424 * r__0_0 + 121 * b_sign_0)]\nmult=is_valid * 1, args=[r__3_0, 14400 * c__0_0 * q__2_0 + 14400 * c__1_0 * q__1_0 + 14400 * c__2_0 * q__0_0 + 3686400 * c__0_0 * q__3_0 + 3686400 * c__1_0 * q__2_0 + 3686400 * c__2_0 * q__1_0 + 3686400 * c__3_0 * q__0_0 + 943718400 * c__1_0 * q__3_0 + 943718400 * c__2_0 * q__2_0 + 943718400 * c__3_0 * q__1_0 + 943718400 * c__0_0 * (255 * q_sign_0) + 943718400 * q__0_0 * (255 * c_sign_0) + (943718400 - 943718400 * r_zero_0) * (255 * b_sign_0) + (120 * r_zero_0 - 120) * (255 * b_sign_0) + (30720 * r_zero_0 - 30720) * (255 * b_sign_0) + (7864320 * r_zero_0 - 7864320) * (255 * b_sign_0) + 442368000 * b__0_0 + 503316424 * b__1_0 + 14400 * r__2_0 + 3686400 * r__3_0 + 943718399 * b_sign_0 - (442368000 * c__0_0 * q__0_0 + 503316424 * c__0_0 * q__1_0 + 503316424 * c__1_0 * q__0_0 + 120 * c__2_0 * q__3_0 + 120 * c__3_0 * q__2_0 + 120 * c__0_0 * (255 * q_sign_0) + 120 * q__0_0 * (255 * c_sign_0) + 120 * c__1_0 * (255 * q_sign_0) + 120 * q__1_0 * (255 * c_sign_0) + 30720 * c__3_0 * q__3_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * 
q__0_0 * (255 * c_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 30720 * q__1_0 * (255 * c_sign_0) + 30720 * c__2_0 * (255 * q_sign_0) + 30720 * q__2_0 * (255 * c_sign_0) + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * q__0_0 * (255 * c_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * q__1_0 * (255 * c_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 7864320 * q__2_0 * (255 * c_sign_0) + 7864320 * c__3_0 * (255 * q_sign_0) + 7864320 * q__3_0 * (255 * c_sign_0) + 14400 * b__2_0 + 3686400 * b__3_0 + 442368000 * r__0_0 + 503316424 * r__1_0)]\n\n// Algebraic constraints:\n(zero_divisor_0 + r_zero_0) * (zero_divisor_0 + r_zero_0 - 1) = 0\nzero_divisor_0 * (zero_divisor_0 - 1) = 0\nzero_divisor_0 * (q__0_0 - 255) = 0\nzero_divisor_0 * (q__1_0 - 255) = 0\nzero_divisor_0 * (q__2_0 - 255) = 0\nzero_divisor_0 * (q__3_0 - 255) = 0\n(1 * is_valid - zero_divisor_0) * ((c__0_0 + c__1_0 + c__2_0 + c__3_0) * c_sum_inv_0 - 1) = 0\nr_zero_0 * (r_zero_0 - 1) = 0\n(1 * is_valid - (zero_divisor_0 + r_zero_0)) * ((r__0_0 + r__1_0 + r__2_0 + r__3_0) * r_sum_inv_0 - 1) = 0\nb_sign_0 * (b_sign_0 - 1) = 0\nc_sign_0 * (c_sign_0 - 1) = 0\nb_sign_0 + c_sign_0 - (2 * b_sign_0 * c_sign_0 + sign_xor_0) = 0\nq_sign_0 * (q_sign_0 - 1) = 0\n(q__0_0 + q__1_0 + q__2_0 + q__3_0) * ((1 - zero_divisor_0) * (q_sign_0 - sign_xor_0)) = 0\n(q_sign_0 - sign_xor_0) * ((1 - zero_divisor_0) * q_sign_0) = 0\n(1 - sign_xor_0) * (r__0_0 - r_prime__0_0) = 0\nsign_xor_0 * ((7864320 * r__0_0 + 7864320 * r_prime__0_0) * (7864320 * r__0_0 + 7864320 * r_prime__0_0 + 1)) = 0\nsign_xor_0 * ((r_prime__0_0 - 256) * r_inv__0_0 - 1) = 0\nsign_xor_0 * ((7864320 * r__0_0 + 7864320 * r_prime__0_0 + 1) * r_prime__0_0) = 0\n(1 - sign_xor_0) * (r__1_0 - r_prime__1_0) = 0\nsign_xor_0 * ((7833600 * r__0_0 + 7833600 * r_prime__0_0 - (7864320 * r__1_0 + 7864320 * r_prime__1_0)) * (30720 * r__0_0 + 7864320 * r__1_0 + 30720 * r_prime__0_0 + 7864320 * r_prime__1_0 + 1)) = 0\nsign_xor_0 * ((r_prime__1_0 - 256) * r_inv__1_0 - 1) = 
0\nsign_xor_0 * ((30720 * r__0_0 + 7864320 * r__1_0 + 30720 * r_prime__0_0 + 7864320 * r_prime__1_0 + 1) * r_prime__1_0) = 0\n(1 - sign_xor_0) * (r__2_0 - r_prime__2_0) = 0\nsign_xor_0 * ((30600 * r__0_0 + 7833600 * r__1_0 + 30600 * r_prime__0_0 + 7833600 * r_prime__1_0 - (7864320 * r__2_0 + 7864320 * r_prime__2_0)) * (120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0 + 120 * r_prime__0_0 + 30720 * r_prime__1_0 + 7864320 * r_prime__2_0 + 1)) = 0\nsign_xor_0 * ((r_prime__2_0 - 256) * r_inv__2_0 - 1) = 0\nsign_xor_0 * ((120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0 + 120 * r_prime__0_0 + 30720 * r_prime__1_0 + 7864320 * r_prime__2_0 + 1) * r_prime__2_0) = 0\n(1 - sign_xor_0) * (r__3_0 - r_prime__3_0) = 0\nsign_xor_0 * ((943718520 * r__0_0 + 30600 * r__1_0 + 7833600 * r__2_0 + 943718520 * r_prime__0_0 + 30600 * r_prime__1_0 + 7833600 * r_prime__2_0 - (7864320 * r__3_0 + 7864320 * r_prime__3_0)) * (943718400 * r__0_0 + 943718400 * r_prime__0_0 - (120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0 + 120 * r_prime__1_0 + 30720 * r_prime__2_0 + 7864320 * r_prime__3_0 + 1))) = 0\nsign_xor_0 * ((r_prime__3_0 - 256) * r_inv__3_0 - 1) = 0\nsign_xor_0 * ((120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0 + 120 * r_prime__1_0 + 30720 * r_prime__2_0 + 7864320 * r_prime__3_0 + 1 - (943718400 * r__0_0 + 943718400 * r_prime__0_0)) * r_prime__3_0) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) = 0\n(lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) * (r_prime__3_0 * (2 * c_sign_0 - 1) + c__3_0 * (1 - 2 * c_sign_0)) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (lt_diff_0 - (r_prime__3_0 * (2 * c_sign_0 - 1) + c__3_0 * (1 - 2 * c_sign_0))) = 0\nlt_marker__2_0 * (lt_marker__2_0 - 1) = 0\n(lt_marker__0_0 + lt_marker__1_0) * (r_prime__2_0 * (2 * c_sign_0 - 1) + c__2_0 * (1 - 2 * c_sign_0)) = 0\nlt_marker__2_0 * 
(lt_diff_0 - (r_prime__2_0 * (2 * c_sign_0 - 1) + c__2_0 * (1 - 2 * c_sign_0))) = 0\nlt_marker__1_0 * (lt_marker__1_0 - 1) = 0\nlt_marker__0_0 * (r_prime__1_0 * (2 * c_sign_0 - 1) + c__1_0 * (1 - 2 * c_sign_0)) = 0\nlt_marker__1_0 * (lt_diff_0 - (r_prime__1_0 * (2 * c_sign_0 - 1) + c__1_0 * (1 - 2 * c_sign_0))) = 0\nlt_marker__0_0 * (lt_marker__0_0 - 1) = 0\nlt_marker__0_0 * (lt_diff_0 - (r_prime__0_0 * (2 * c_sign_0 - 1) + c__0_0 * (1 - 2 * c_sign_0))) = 0\nzero_divisor_0 * (c__0_0 + c__1_0 + c__2_0 + c__3_0) = 0\nr_zero_0 * (r__0_0 + r__1_0 + r__2_0 + r__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_remu.txt",
    "content": "Instructions:\n  0: REMU 8 7 5 1 0\n\nAPC advantage:\n  - Main columns: 59 -> 37 (1.59x reduction)\n  - Bus interactions: 25 -> 23 (1.09x reduction)\n  - Constraints: 64 -> 25 (2.56x reduction)\n\nSymbolic machine using 37 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  q__0_0\n  q__1_0\n  q__2_0\n  q__3_0\n  r__0_0\n  r__1_0\n  r__2_0\n  r__3_0\n  zero_divisor_0\n  r_zero_0\n  q_sign_0\n  c_sum_inv_0\n  r_sum_inv_0\n  lt_marker__0_0\n  lt_marker__1_0\n  lt_marker__2_0\n  lt_diff_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, r__0_0, r__1_0, r__2_0, r__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * 
reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * (1 - (zero_divisor_0 + r_zero_0)), args=[lt_diff_0 - 1, 0, 0, 0]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=is_valid * 1, args=[q__0_0, 7864320 * b__0_0 - (7864320 * c__0_0 * q__0_0 + 7864320 * r__0_0)]\nmult=is_valid * 1, args=[q__1_0, 30720 * b__0_0 + 7864320 * b__1_0 - (30720 * c__0_0 * q__0_0 + 7864320 * c__0_0 * q__1_0 + 7864320 * c__1_0 * q__0_0 + 30720 * r__0_0 + 7864320 * r__1_0)]\nmult=is_valid * 1, args=[q__2_0, 120 * b__0_0 + 30720 * b__1_0 + 7864320 * b__2_0 - (120 * c__0_0 * q__0_0 + 30720 * c__0_0 * q__1_0 + 30720 * c__1_0 * q__0_0 + 7864320 * c__0_0 * q__2_0 + 7864320 * c__1_0 * q__1_0 + 7864320 * c__2_0 * q__0_0 + 120 * r__0_0 + 30720 * r__1_0 + 7864320 * r__2_0)]\nmult=is_valid * 1, args=[q__3_0, 943718400 * c__0_0 * q__0_0 + 120 * b__1_0 + 30720 * b__2_0 + 7864320 * b__3_0 + 943718400 * r__0_0 - (120 * c__0_0 * q__1_0 + 120 * c__1_0 * q__0_0 + 30720 * c__0_0 * q__2_0 + 30720 * c__1_0 * q__1_0 + 30720 * c__2_0 * q__0_0 + 7864320 * c__0_0 * q__3_0 + 7864320 * c__1_0 * q__2_0 + 7864320 * c__2_0 * q__1_0 + 7864320 * c__3_0 * q__0_0 + 943718400 * b__0_0 + 120 * r__1_0 + 30720 * r__2_0 + 7864320 * r__3_0)]\nmult=is_valid * 1, args=[r__0_0, 3686400 * c__0_0 * q__0_0 + 943718400 * c__0_0 * q__1_0 + 943718400 * c__1_0 * q__0_0 + 120 * 
b__2_0 + 30720 * b__3_0 + 3686400 * r__0_0 + 943718400 * r__1_0 - (120 * c__0_0 * q__2_0 + 120 * c__1_0 * q__1_0 + 120 * c__2_0 * q__0_0 + 30720 * c__0_0 * q__3_0 + 30720 * c__1_0 * q__2_0 + 30720 * c__2_0 * q__1_0 + 30720 * c__3_0 * q__0_0 + 7864320 * c__1_0 * q__3_0 + 7864320 * c__2_0 * q__2_0 + 7864320 * c__3_0 * q__1_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 3686400 * b__0_0 + 943718400 * b__1_0 + 120 * r__2_0 + 30720 * r__3_0)]\nmult=is_valid * 1, args=[r__1_0, 14400 * c__0_0 * q__0_0 + 3686400 * c__0_0 * q__1_0 + 3686400 * c__1_0 * q__0_0 + 943718400 * c__0_0 * q__2_0 + 943718400 * c__1_0 * q__1_0 + 943718400 * c__2_0 * q__0_0 + 120 * b__3_0 + 14400 * r__0_0 + 3686400 * r__1_0 + 943718400 * r__2_0 - (120 * c__0_0 * q__3_0 + 120 * c__1_0 * q__2_0 + 120 * c__2_0 * q__1_0 + 120 * c__3_0 * q__0_0 + 30720 * c__1_0 * q__3_0 + 30720 * c__2_0 * q__2_0 + 30720 * c__3_0 * q__1_0 + 30720 * c__0_0 * (255 * q_sign_0) + 7864320 * c__2_0 * q__3_0 + 7864320 * c__3_0 * q__2_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 14400 * b__0_0 + 3686400 * b__1_0 + 943718400 * b__2_0 + 120 * r__3_0)]\nmult=is_valid * 1, args=[r__2_0, 14400 * c__0_0 * q__1_0 + 14400 * c__1_0 * q__0_0 + 3686400 * c__0_0 * q__2_0 + 3686400 * c__1_0 * q__1_0 + 3686400 * c__2_0 * q__0_0 + 943718400 * c__0_0 * q__3_0 + 943718400 * c__1_0 * q__2_0 + 943718400 * c__2_0 * q__1_0 + 943718400 * c__3_0 * q__0_0 + 503316424 * b__0_0 + 14400 * r__1_0 + 3686400 * r__2_0 + 943718400 * r__3_0 - (503316424 * c__0_0 * q__0_0 + 120 * c__1_0 * q__3_0 + 120 * c__2_0 * q__2_0 + 120 * c__3_0 * q__1_0 + 120 * c__0_0 * (255 * q_sign_0) + 30720 * c__2_0 * q__3_0 + 30720 * c__3_0 * q__2_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 7864320 * c__3_0 * q__3_0 + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 14400 * b__1_0 + 3686400 * b__2_0 + 943718400 * b__3_0 + 503316424 * r__0_0)]\nmult=is_valid * 1, 
args=[r__3_0, 14400 * c__0_0 * q__2_0 + 14400 * c__1_0 * q__1_0 + 14400 * c__2_0 * q__0_0 + 3686400 * c__0_0 * q__3_0 + 3686400 * c__1_0 * q__2_0 + 3686400 * c__2_0 * q__1_0 + 3686400 * c__3_0 * q__0_0 + 943718400 * c__1_0 * q__3_0 + 943718400 * c__2_0 * q__2_0 + 943718400 * c__3_0 * q__1_0 + 943718400 * c__0_0 * (255 * q_sign_0) + 442368000 * b__0_0 + 503316424 * b__1_0 + 14400 * r__2_0 + 3686400 * r__3_0 - (442368000 * c__0_0 * q__0_0 + 503316424 * c__0_0 * q__1_0 + 503316424 * c__1_0 * q__0_0 + 120 * c__2_0 * q__3_0 + 120 * c__3_0 * q__2_0 + 120 * c__0_0 * (255 * q_sign_0) + 120 * c__1_0 * (255 * q_sign_0) + 30720 * c__3_0 * q__3_0 + 30720 * c__0_0 * (255 * q_sign_0) + 30720 * c__1_0 * (255 * q_sign_0) + 30720 * c__2_0 * (255 * q_sign_0) + 7864320 * c__0_0 * (255 * q_sign_0) + 7864320 * c__1_0 * (255 * q_sign_0) + 7864320 * c__2_0 * (255 * q_sign_0) + 7864320 * c__3_0 * (255 * q_sign_0) + 14400 * b__2_0 + 3686400 * b__3_0 + 442368000 * r__0_0 + 503316424 * r__1_0)]\n\n// Algebraic constraints:\n(zero_divisor_0 + r_zero_0) * (zero_divisor_0 + r_zero_0 - 1) = 0\nzero_divisor_0 * (zero_divisor_0 - 1) = 0\nzero_divisor_0 * (q__0_0 - 255) = 0\nzero_divisor_0 * (q__1_0 - 255) = 0\nzero_divisor_0 * (q__2_0 - 255) = 0\nzero_divisor_0 * (q__3_0 - 255) = 0\n(1 * is_valid - zero_divisor_0) * ((c__0_0 + c__1_0 + c__2_0 + c__3_0) * c_sum_inv_0 - 1) = 0\nr_zero_0 * (r_zero_0 - 1) = 0\n(1 * is_valid - (zero_divisor_0 + r_zero_0)) * ((r__0_0 + r__1_0 + r__2_0 + r__3_0) * r_sum_inv_0 - 1) = 0\nq_sign_0 * (q_sign_0 - 1) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) = 0\n(lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0) * (c__3_0 - r__3_0) = 0\n(1 - (zero_divisor_0 + r_zero_0 + lt_marker__0_0 + lt_marker__1_0 + lt_marker__2_0)) * (r__3_0 + lt_diff_0 - c__3_0) = 0\nlt_marker__2_0 * (lt_marker__2_0 - 1) = 0\n(lt_marker__0_0 + lt_marker__1_0) * (c__2_0 - 
r__2_0) = 0\nlt_marker__2_0 * (r__2_0 + lt_diff_0 - c__2_0) = 0\nlt_marker__1_0 * (lt_marker__1_0 - 1) = 0\nlt_marker__0_0 * (c__1_0 - r__1_0) = 0\nlt_marker__1_0 * (r__1_0 + lt_diff_0 - c__1_0) = 0\nlt_marker__0_0 * (lt_marker__0_0 - 1) = 0\nlt_marker__0_0 * (r__0_0 + lt_diff_0 - c__0_0) = 0\nq_sign_0 * (1 - zero_divisor_0) = 0\nzero_divisor_0 * (c__0_0 + c__1_0 + c__2_0 + c__3_0) = 0\nr_zero_0 * (r__0_0 + r__1_0 + r__2_0 + r__3_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_sll.txt",
    "content": "Instructions:\n  0: SLL rd_ptr = 68, rs1_ptr = 40, rs2 = 3, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 53 -> 18 (2.94x reduction)\n  - Bus interactions: 24 -> 16 (1.50x reduction)\n  - Constraints: 76 -> 1 (76.00x reduction)\n\nSymbolic machine using 18 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 40, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 40, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 68, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 68, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[7864320 * a__0_0 - 62914560 * b__0_0, 3]\nmult=is_valid * 1, args=[30720 * a__0_0 + 7864320 * a__1_0 - (245760 * b__0_0 + 62914560 * b__1_0), 3]\nmult=is_valid * 1, args=[120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 - (960 * b__0_0 + 245760 * b__1_0 + 62914560 * b__2_0), 3]\nmult=is_valid * 1, args=[120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 - (943718400 * a__0_0 + 503316484 * b__0_0 + 960 * b__1_0 + 245760 * b__2_0 + 62914560 * b__3_0), 3]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, 
args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_0, a__1_0, 0, 0]\nmult=is_valid * 1, args=[a__2_0, a__3_0, 0, 0]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_sll_by_8.txt",
    "content": "Instructions:\n  0: SLL rd_ptr = 68, rs1_ptr = 40, rs2 = 8, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 53 -> 14 (3.79x reduction)\n  - Bus interactions: 24 -> 10 (2.40x reduction)\n  - Constraints: 76 -> 1 (76.00x reduction)\n\nSymbolic machine using 14 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 40, a__1_0, a__2_0, a__3_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 40, a__1_0, a__2_0, a__3_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 68, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 68, 0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_sra.txt",
    "content": "Instructions:\n  0: SRA rd_ptr = 68, rs1_ptr = 40, rs2 = 3, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 53 -> 40 (1.32x reduction)\n  - Bus interactions: 24 -> 22 (1.09x reduction)\n  - Constraints: 76 -> 35 (2.17x reduction)\n\nSymbolic machine using 40 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  bit_multiplier_right_0\n  b_sign_0\n  bit_shift_marker__0_0\n  bit_shift_marker__1_0\n  bit_shift_marker__2_0\n  bit_shift_marker__3_0\n  bit_shift_marker__4_0\n  bit_shift_marker__5_0\n  bit_shift_marker__6_0\n  limb_shift_marker__0_0\n  limb_shift_marker__1_0\n  limb_shift_marker__2_0\n  bit_shift_carry__0_0\n  bit_shift_carry__1_0\n  bit_shift_carry__2_0\n  bit_shift_carry__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 40, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 40, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 3, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 3, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 68, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, 
writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 68, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[bit_shift_carry__0_0, 7 - (7 * bit_shift_marker__0_0 + 6 * bit_shift_marker__1_0 + 5 * bit_shift_marker__2_0 + 4 * bit_shift_marker__3_0 + 3 * bit_shift_marker__4_0 + 2 * bit_shift_marker__5_0 + bit_shift_marker__6_0)]\nmult=is_valid * 1, args=[bit_shift_carry__1_0, 7 - (7 * bit_shift_marker__0_0 + 6 * bit_shift_marker__1_0 + 5 * bit_shift_marker__2_0 + 4 * bit_shift_marker__3_0 + 3 * bit_shift_marker__4_0 + 2 * bit_shift_marker__5_0 + bit_shift_marker__6_0)]\nmult=is_valid * 1, args=[bit_shift_carry__2_0, 7 - (7 * bit_shift_marker__0_0 + 6 * bit_shift_marker__1_0 + 5 * bit_shift_marker__2_0 + 4 * bit_shift_marker__3_0 + 3 * bit_shift_marker__4_0 + 2 * bit_shift_marker__5_0 + bit_shift_marker__6_0)]\nmult=is_valid * 1, args=[bit_shift_carry__3_0, 7 - (7 * bit_shift_marker__0_0 + 6 * bit_shift_marker__1_0 + 5 * bit_shift_marker__2_0 + 4 * bit_shift_marker__3_0 + 3 * bit_shift_marker__4_0 + 2 * bit_shift_marker__5_0 + bit_shift_marker__6_0)]\nmult=is_valid * 1, args=[503316481 * limb_shift_marker__0_0 - (62914560 * c__0_0 + 440401920 * bit_shift_marker__0_0 + 377487360 * bit_shift_marker__1_0 + 314572800 * bit_shift_marker__2_0 + 251658240 * bit_shift_marker__3_0 + 188743680 * bit_shift_marker__4_0 + 125829120 * bit_shift_marker__5_0 + 62914560 * bit_shift_marker__6_0 + 1006632960 * limb_shift_marker__1_0 + 503316480 * limb_shift_marker__2_0 + 62914561), 3]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * 
reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[b__3_0, 128, b__3_0 + 128 - 256 * b_sign_0, 1]\nmult=is_valid * 1, args=[a__0_0, a__1_0, 0, 0]\nmult=is_valid * 1, args=[a__2_0, a__3_0, 0, 0]\n\n// Algebraic constraints:\nbit_shift_marker__0_0 * (bit_shift_marker__0_0 - 1) = 0\nbit_shift_marker__0_0 * (bit_multiplier_right_0 - 1) = 0\nbit_shift_marker__1_0 * (bit_shift_marker__1_0 - 1) = 0\nbit_shift_marker__1_0 * (bit_multiplier_right_0 - 2) = 0\nbit_shift_marker__2_0 * (bit_shift_marker__2_0 - 1) = 0\nbit_shift_marker__2_0 * (bit_multiplier_right_0 - 4) = 0\nbit_shift_marker__3_0 * (bit_shift_marker__3_0 - 1) = 0\nbit_shift_marker__3_0 * (bit_multiplier_right_0 - 8) = 0\nbit_shift_marker__4_0 * (bit_shift_marker__4_0 - 1) = 0\nbit_shift_marker__4_0 * (bit_multiplier_right_0 - 16) = 0\nbit_shift_marker__5_0 * (bit_shift_marker__5_0 - 1) = 0\nbit_shift_marker__5_0 * (bit_multiplier_right_0 - 32) = 0\nbit_shift_marker__6_0 * (bit_shift_marker__6_0 - 1) = 0\nbit_shift_marker__6_0 * (bit_multiplier_right_0 - 64) = 0\n(1 - (bit_shift_marker__0_0 + bit_shift_marker__1_0 + bit_shift_marker__2_0 + bit_shift_marker__3_0 + bit_shift_marker__4_0 + bit_shift_marker__5_0 + bit_shift_marker__6_0)) * (bit_shift_marker__0_0 + bit_shift_marker__1_0 + bit_shift_marker__2_0 + bit_shift_marker__3_0 + bit_shift_marker__4_0 + bit_shift_marker__5_0 + bit_shift_marker__6_0) = 0\n(1 * is_valid - (bit_shift_marker__0_0 + bit_shift_marker__1_0 + bit_shift_marker__2_0 + bit_shift_marker__3_0 + bit_shift_marker__4_0 + bit_shift_marker__5_0 + bit_shift_marker__6_0)) * 
(bit_multiplier_right_0 - 128) = 0\nlimb_shift_marker__0_0 * (limb_shift_marker__0_0 - 1) = 0\nlimb_shift_marker__0_0 * (a__0_0 * bit_multiplier_right_0 + bit_shift_carry__0_0 - (b__0_0 + 256 * bit_shift_carry__1_0)) = 0\nlimb_shift_marker__0_0 * (a__1_0 * bit_multiplier_right_0 + bit_shift_carry__1_0 - (b__1_0 + 256 * bit_shift_carry__2_0)) = 0\nlimb_shift_marker__0_0 * (a__2_0 * bit_multiplier_right_0 + bit_shift_carry__2_0 - (b__2_0 + 256 * bit_shift_carry__3_0)) = 0\nlimb_shift_marker__0_0 * (a__3_0 * bit_multiplier_right_0 + bit_shift_carry__3_0 - (256 * b_sign_0 * (bit_multiplier_right_0 - 1) + b__3_0)) = 0\nlimb_shift_marker__1_0 * (limb_shift_marker__1_0 - 1) = 0\nlimb_shift_marker__1_0 * (a__0_0 * bit_multiplier_right_0 + bit_shift_carry__1_0 - (b__1_0 + 256 * bit_shift_carry__2_0)) = 0\nlimb_shift_marker__1_0 * (a__1_0 * bit_multiplier_right_0 + bit_shift_carry__2_0 - (b__2_0 + 256 * bit_shift_carry__3_0)) = 0\nlimb_shift_marker__1_0 * (a__2_0 * bit_multiplier_right_0 + bit_shift_carry__3_0 - (256 * b_sign_0 * (bit_multiplier_right_0 - 1) + b__3_0)) = 0\nlimb_shift_marker__2_0 * (limb_shift_marker__2_0 - 1) = 0\nlimb_shift_marker__2_0 * (a__0_0 * bit_multiplier_right_0 + bit_shift_carry__2_0 - (b__2_0 + 256 * bit_shift_carry__3_0)) = 0\nlimb_shift_marker__2_0 * (a__1_0 * bit_multiplier_right_0 + bit_shift_carry__3_0 - (256 * b_sign_0 * (bit_multiplier_right_0 - 1) + b__3_0)) = 0\n(1 - (limb_shift_marker__0_0 + limb_shift_marker__1_0 + limb_shift_marker__2_0)) * (limb_shift_marker__0_0 + limb_shift_marker__1_0 + limb_shift_marker__2_0) = 0\n(1 - (limb_shift_marker__0_0 + limb_shift_marker__1_0 + limb_shift_marker__2_0)) * (a__0_0 * bit_multiplier_right_0 + bit_shift_carry__3_0 - (256 * b_sign_0 * (bit_multiplier_right_0 - 1) + b__3_0)) = 0\n(1 - (limb_shift_marker__0_0 + limb_shift_marker__1_0 + limb_shift_marker__2_0)) * (a__1_0 - 255 * b_sign_0) = 0\nb_sign_0 * (b_sign_0 - 1) = 0\n(a__2_0 - 255 * b_sign_0) * (1 - (limb_shift_marker__0_0 + 
limb_shift_marker__1_0)) = 0\n(a__3_0 - 255 * b_sign_0) * (1 - limb_shift_marker__0_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_srl.txt",
    "content": "Instructions:\n  0: SRL rd_ptr = 68, rs1_ptr = 40, rs2 = 25, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 53 -> 15 (3.53x reduction)\n  - Bus interactions: 24 -> 11 (2.18x reduction)\n  - Constraints: 76 -> 2 (38.00x reduction)\n\nSymbolic machine using 15 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 40, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 40, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 68, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 68, a__0_0, 0, 0, 0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_0, 0, 0, 0]\n\n// Algebraic 
constraints:\n(b__3_0 - 2 * a__0_0) * (b__3_0 - (2 * a__0_0 + 1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_storeb.txt",
    "content": "Instructions:\n  0: STOREB rd_rs2_ptr = 8, rs1_ptr = 2, imm = 3, mem_as = 2, needs_write = 1, imm_sign = 0\n\nAPC advantage:\n  - Main columns: 41 -> 30 (1.37x reduction)\n  - Bus interactions: 17 -> 16 (1.06x reduction)\n  - Constraints: 25 -> 15 (1.67x reduction)\n\nSymbolic machine using 30 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  flags__0_0\n  flags__1_0\n  flags__2_0\n  flags__3_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  write_data__0_0\n  write_data__1_0\n  write_data__2_0\n  write_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - (flags__1_0 * flags__2_0 + 2 * flags__0_0 * flags__2_0 + 2 * flags__1_0 * flags__3_0 + 3 * flags__2_0 * flags__3_0), prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, 
write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - (flags__1_0 * flags__2_0 + 2 * flags__0_0 * flags__2_0 + 2 * flags__1_0 * flags__3_0 + 3 * flags__2_0 * flags__3_0), write_data__0_0, write_data__1_0, write_data__2_0, write_data__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[503316480 * flags__2_0 * (flags__2_0 - 1) + 503316481 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316480 * flags__1_0 * flags__2_0 + 1006632960 * flags__0_0 * flags__2_0 + 1006632960 * flags__1_0 * flags__3_0 - (503316480 * flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 1006632960 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 503316481 * flags__2_0 * flags__3_0 + 503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nflags__0_0 * ((flags__0_0 - 1) * (flags__0_0 - 2)) = 0\nflags__1_0 * ((flags__1_0 - 1) * (flags__1_0 - 2)) = 0\nflags__2_0 * ((flags__2_0 - 1) * (flags__2_0 - 2)) = 0\nflags__3_0 * ((flags__3_0 - 1) * (flags__3_0 - 2)) = 0\n(flags__0_0 + 
flags__1_0 + flags__2_0 + flags__3_0 - 1 * is_valid) * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__2_0 * (flags__2_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__3_0 * (flags__3_0 - 1)) * read_data__0_0 + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__1_0 + (1006632960 * flags__2_0 * (flags__2_0 - 1) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2)) * read_data__2_0 + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) - (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0)) * read_data__0_0 + write_data__0_0 - (flags__0_0 * flags__2_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__0_0 = 0\n(1006632960 * flags__0_0 * (flags__0_0 - 1) + 1006632960 * flags__1_0 * (flags__1_0 - 1)) * read_data__1_0 + 1006632960 * flags__2_0 * (flags__2_0 - 1) * read_data__3_0 + (flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) - flags__0_0 * flags__1_0) * read_data__1_0 + write_data__1_0 - (flags__1_0 * flags__2_0 * read_data__0_0 + (flags__0_0 * flags__2_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__3_0 + flags__2_0 * flags__3_0) * prev_data__1_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__2_0 + flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__2_0 + write_data__2_0 - ((flags__0_0 * flags__2_0 + flags__1_0 * flags__3_0) * read_data__0_0 + (flags__0_0 * 
flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__2_0 * flags__3_0) * prev_data__2_0) = 0\n1006632960 * flags__0_0 * (flags__0_0 - 1) * read_data__3_0 + flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) * read_data__3_0 + write_data__3_0 - (flags__2_0 * flags__3_0 * read_data__0_0 + flags__0_0 * flags__2_0 * read_data__1_0 + (flags__0_0 * flags__1_0 + flags__0_0 * flags__3_0 + flags__1_0 * flags__2_0 + flags__1_0 * flags__3_0) * prev_data__3_0) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 92160 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 92161)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 817889279 * is_valid - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 + 817889278 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0)) = 0\nflags__1_0 * (flags__1_0 - 1) + flags__2_0 * (flags__2_0 - 1) + 4 * flags__0_0 * flags__1_0 + 4 * flags__0_0 * flags__2_0 + 5 * flags__0_0 * flags__3_0 + 5 * flags__1_0 * flags__2_0 + 5 * flags__1_0 * flags__3_0 + 5 * flags__2_0 * flags__3_0 - (1006632960 * flags__3_0 * (flags__3_0 - 1) + flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__3_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 5 * is_valid) = 0\nflags__2_0 * (flags__2_0 - 1) - (flags__0_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 2 * flags__1_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2) + 3 * flags__2_0 * (flags__0_0 + flags__1_0 + flags__2_0 + flags__3_0 - 2)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_storeh.txt",
    "content": "Instructions:\n  0: STOREH rd_rs2_ptr = 8, rs1_ptr = 2, imm = 6, mem_as = 2, needs_write = 1, imm_sign = 1\n\nAPC advantage:\n  - Main columns: 41 -> 28 (1.46x reduction)\n  - Bus interactions: 17 -> 16 (1.06x reduction)\n  - Constraints: 25 -> 13 (1.92x reduction)\n\nSymbolic machine using 28 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  flags__1_0\n  flags__2_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  write_data__0_0\n  write_data__1_0\n  write_data__2_0\n  write_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - (flags__1_0 * flags__2_0 + 2 * flags__2_0), prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0 - 
(flags__1_0 * flags__2_0 + 2 * flags__2_0), write_data__0_0, write_data__1_0, write_data__2_0, write_data__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[503316480 * flags__2_0 * (flags__2_0 - 1) + 503316481 * flags__2_0 * (flags__1_0 + flags__2_0 - 1) + 503316480 * flags__1_0 * flags__2_0 + 503316480 * flags__2_0 + 503316480 - (1006632960 * flags__1_0 * (flags__1_0 + flags__2_0 - 1) + 503316480 * mem_ptr_limbs__0_0 + 503316480 * flags__1_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\nflags__1_0 * ((flags__1_0 - 1) * (flags__1_0 - 2)) = 0\nflags__2_0 * ((flags__2_0 - 1) * (flags__2_0 - 2)) = 0\n(flags__1_0 + flags__2_0) * (flags__1_0 + flags__2_0 - 1) = 0\n1006632960 * flags__1_0 * (flags__1_0 - 1) + 1006632960 * flags__2_0 * (flags__2_0 - 1) + flags__1_0 * (flags__1_0 + flags__2_0 - 1) + flags__2_0 * (flags__1_0 + flags__2_0 - 1) + flags__1_0 + flags__2_0 - 1 * is_valid = 0\n1006632960 * flags__1_0 * (flags__1_0 - 1) * read_data__0_0 + (flags__1_0 + flags__2_0 - 1) * read_data__1_0 + (1006632960 * flags__2_0 * (flags__2_0 - 1) + flags__1_0 * (flags__1_0 + flags__2_0 - 1)) * read_data__2_0 + 
flags__2_0 * (flags__1_0 + flags__2_0 - 1) * read_data__3_0 + write_data__0_0 - (flags__1_0 * read_data__0_0 + (flags__1_0 * flags__2_0 + flags__2_0) * prev_data__0_0) = 0\n1006632960 * flags__1_0 * (flags__1_0 - 1) * read_data__1_0 + 1006632960 * flags__2_0 * (flags__2_0 - 1) * read_data__3_0 + write_data__1_0 - (flags__1_0 * flags__2_0 * read_data__0_0 + flags__1_0 * read_data__1_0 + flags__2_0 * prev_data__1_0) = 0\nwrite_data__2_0 - (flags__2_0 * read_data__0_0 + (flags__1_0 * flags__2_0 + flags__1_0) * prev_data__2_0) = 0\nwrite_data__3_0 - (flags__2_0 * read_data__1_0 + (flags__1_0 * flags__2_0 + flags__1_0) * prev_data__3_0) = 0\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 184320 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 184321)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 377456642 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 377456643)) = 0\nflags__1_0 * (flags__1_0 - 1) + flags__2_0 * (flags__2_0 - 1) + 5 * flags__1_0 * flags__2_0 + 3 * flags__1_0 + 3 * flags__2_0 - (flags__1_0 * (flags__1_0 + flags__2_0 - 1) + flags__2_0 * (flags__1_0 + flags__2_0 - 1) + 3 * is_valid) = 0\nflags__2_0 * (flags__2_0 - 1) + 1 * is_valid - (2 * flags__1_0 * (flags__1_0 + flags__2_0 - 1) + 3 * flags__2_0 * (flags__1_0 + flags__2_0 - 1) + flags__1_0 + flags__2_0) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_storew.txt",
    "content": "Instructions:\n  0: STOREW rd_rs2_ptr = 8, rs1_ptr = 2, imm = 4, mem_as = 2, needs_write = 1, imm_sign = 1\n\nAPC advantage:\n  - Main columns: 41 -> 22 (1.86x reduction)\n  - Bus interactions: 17 -> 16 (1.06x reduction)\n  - Constraints: 25 -> 3 (8.33x reduction)\n\nSymbolic machine using 22 unique main columns:\n  from_state__timestamp_0\n  rs1_data__0_0\n  rs1_data__1_0\n  rs1_data__2_0\n  rs1_data__3_0\n  rs1_aux_cols__base__prev_timestamp_0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0\n  read_data_aux__base__prev_timestamp_0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  mem_ptr_limbs__0_0\n  mem_ptr_limbs__1_0\n  write_base_aux__prev_timestamp_0\n  write_base_aux__timestamp_lt_aux__lower_decomp__0_0\n  read_data__0_0\n  read_data__1_0\n  read_data__2_0\n  read_data__3_0\n  prev_data__0_0\n  prev_data__1_0\n  prev_data__2_0\n  prev_data__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, rs1_aux_cols__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 2, rs1_data__0_0, rs1_data__1_0, rs1_data__2_0, rs1_data__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, read_data_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, prev_data__0_0, prev_data__1_0, prev_data__2_0, prev_data__3_0, write_base_aux__prev_timestamp_0]\nmult=is_valid * 1, args=[2, mem_ptr_limbs__0_0 + 65536 * mem_ptr_limbs__1_0, read_data__0_0, read_data__1_0, read_data__2_0, read_data__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, 
args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * rs1_aux_cols__base__prev_timestamp_0 + 15360 * rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[-(503316480 * mem_ptr_limbs__0_0), 14]\nmult=is_valid * 1, args=[mem_ptr_limbs__1_0, 13]\nmult=is_valid * 1, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * read_data_aux__base__prev_timestamp_0 + 15360 * read_data_aux__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * write_base_aux__prev_timestamp_0 + 15360 * write_base_aux__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Algebraic constraints:\n(30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 122880 * is_valid)) * (30720 * mem_ptr_limbs__0_0 - (30720 * rs1_data__0_0 + 7864320 * rs1_data__1_0 + 122881)) = 0\n(943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 251627521 * is_valid)) * (943718400 * rs1_data__0_0 + 30720 * mem_ptr_limbs__1_0 - (120 * rs1_data__1_0 + 30720 * rs1_data__2_0 + 7864320 * rs1_data__3_0 + 943718400 * mem_ptr_limbs__0_0 + 251627522)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_sub.txt",
    "content": "Instructions:\n  0: SUB rd_ptr = 8, rs1_ptr = 7, rs2 = 5, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 36 -> 24 (1.50x reduction)\n  - Bus interactions: 20 -> 16 (1.25x reduction)\n  - Constraints: 22 -> 5 (4.40x reduction)\n\nSymbolic machine using 24 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 
12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_0, a__1_0, 0, 0]\nmult=is_valid * 1, args=[a__2_0, a__3_0, 0, 0]\n\n// Algebraic constraints:\n(7864320 * b__0_0 - (7864320 * a__0_0 + 7864320 * c__0_0)) * (7864320 * b__0_0 - (7864320 * a__0_0 + 7864320 * c__0_0 + 1)) = 0\n(30720 * b__0_0 + 7864320 * b__1_0 - (30720 * a__0_0 + 7864320 * a__1_0 + 30720 * c__0_0 + 7864320 * c__1_0)) * (30720 * b__0_0 + 7864320 * b__1_0 - (30720 * a__0_0 + 7864320 * a__1_0 + 30720 * c__0_0 + 7864320 * c__1_0 + 1)) = 0\n(120 * b__0_0 + 30720 * b__1_0 + 7864320 * b__2_0 - (120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 + 120 * c__0_0 + 30720 * c__1_0 + 7864320 * c__2_0)) * (120 * b__0_0 + 30720 * b__1_0 + 7864320 * b__2_0 - (120 * a__0_0 + 30720 * a__1_0 + 7864320 * a__2_0 + 120 * c__0_0 + 30720 * c__1_0 + 7864320 * c__2_0 + 1)) = 0\n(943718400 * a__0_0 + 120 * b__1_0 + 30720 * b__2_0 + 7864320 * b__3_0 + 943718400 * c__0_0 - (120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 943718400 * b__0_0 + 120 * c__1_0 + 30720 * c__2_0 + 7864320 * c__3_0)) * (943718400 * a__0_0 + 120 * b__1_0 + 30720 * b__2_0 + 7864320 * b__3_0 + 943718400 * c__0_0 - (120 * a__1_0 + 30720 * a__2_0 + 7864320 * a__3_0 + 943718400 * b__0_0 + 120 * c__1_0 + 30720 * c__2_0 + 7864320 * c__3_0 + 1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/single_instructions/single_xor.txt",
    "content": "Instructions:\n  0: XOR rd_ptr = 8, rs1_ptr = 7, rs2 = 5, rs2_as = 1\n\nAPC advantage:\n  - Main columns: 36 -> 24 (1.50x reduction)\n  - Bus interactions: 20 -> 18 (1.11x reduction)\n  - Constraints: 22 -> 1 (22.00x reduction)\n\nSymbolic machine using 24 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  b__0_0\n  b__1_0\n  b__2_0\n  b__3_0\n  c__0_0\n  c__1_0\n  c__2_0\n  c__3_0\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[4, from_state__timestamp_0 + 3]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 7, b__0_0, b__1_0, b__2_0, b__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 5, c__0_0, c__1_0, c__2_0, c__3_0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 8, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0 + 2]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 
12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[b__0_0, c__0_0, a__0_0, 1]\nmult=is_valid * 1, args=[b__1_0, c__1_0, a__1_0, 1]\nmult=is_valid * 1, args=[b__2_0, c__2_0, a__2_0, 1]\nmult=is_valid * 1, args=[b__3_0, c__3_0, a__3_0, 1]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/superblocks/beq0_fallthrough.txt",
    "content": "Instructions:\n  0: BEQ 8 0 40 1 1\n  4: ADD rd_ptr = 9, rs1_ptr = 9, rs2 = 1, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 62 -> 21 (2.95x reduction)\n  - Bus interactions: 31 -> 16 (1.94x reduction)\n  - Constraints: 33 -> 6 (5.50x reduction)\n\nSymbolic machine using 21 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  a__0_0\n  a__1_0\n  a__2_0\n  a__3_0\n  reads_aux__0__base__prev_timestamp_1\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1\n  writes_aux__prev_data__0_1\n  writes_aux__prev_data__1_1\n  writes_aux__prev_data__2_1\n  writes_aux__prev_data__3_1\n  a__0_1\n  a__1_1\n  a__2_1\n  a__3_1\n  free_var_64\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[8, from_state__timestamp_0 + 5]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, a__0_0, a__1_0, a__2_0, a__3_0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 9, writes_aux__prev_data__0_1, writes_aux__prev_data__1_1, writes_aux__prev_data__2_1, writes_aux__prev_data__3_1, reads_aux__0__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 9, a__0_1, a__1_1, a__2_1, a__3_1, from_state__timestamp_0 + 4]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, 
args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_1 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_1, a__1_1, 0, 0]\nmult=is_valid * 1, args=[a__2_1, a__3_1, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_1 - (7864320 * writes_aux__prev_data__0_1 + 7864320 * is_valid)) * (7864320 * a__0_1 - (7864320 * writes_aux__prev_data__0_1 + 7864321)) = 0\n(30720 * a__0_1 + 7864320 * a__1_1 - (30720 * writes_aux__prev_data__0_1 + 7864320 * writes_aux__prev_data__1_1 + 30720 * is_valid)) * (30720 * a__0_1 + 7864320 * a__1_1 - (30720 * writes_aux__prev_data__0_1 + 7864320 * writes_aux__prev_data__1_1 + 30721)) = 0\n(120 * a__0_1 + 30720 * a__1_1 + 7864320 * a__2_1 - (120 * writes_aux__prev_data__0_1 + 30720 * writes_aux__prev_data__1_1 + 7864320 * writes_aux__prev_data__2_1 + 120 * is_valid)) * (120 * a__0_1 + 30720 * a__1_1 + 7864320 * a__2_1 - (120 * writes_aux__prev_data__0_1 + 30720 * writes_aux__prev_data__1_1 + 7864320 * writes_aux__prev_data__2_1 + 121)) = 0\n(943718400 * writes_aux__prev_data__0_1 + 120 * a__1_1 + 30720 * a__2_1 + 7864320 * a__3_1 + 943718400 * is_valid - (120 * writes_aux__prev_data__1_1 + 30720 * writes_aux__prev_data__2_1 + 7864320 * writes_aux__prev_data__3_1 + 943718400 * a__0_1)) * (943718400 * writes_aux__prev_data__0_1 + 120 * a__1_1 + 30720 * a__2_1 + 7864320 * a__3_1 + 943718399 - (120 * writes_aux__prev_data__1_1 + 30720 * writes_aux__prev_data__2_1 + 7864320 * writes_aux__prev_data__3_1 + 943718400 * a__0_1)) = 0\nfree_var_64 * (a__0_0 + a__1_0 + a__2_0 + a__3_0) - 1 * 
is_valid = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/superblocks/beq0_jump.txt",
    "content": "Instructions:\n   0: BEQ 8 0 40 1 1\n  40: ADD rd_ptr = 9, rs1_ptr = 9, rs2 = 1, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 62 -> 16 (3.88x reduction)\n  - Bus interactions: 31 -> 16 (1.94x reduction)\n  - Constraints: 33 -> 5 (6.60x reduction)\n\nSymbolic machine using 16 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__1__base__prev_timestamp_0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0\n  reads_aux__0__base__prev_timestamp_1\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1\n  writes_aux__prev_data__0_1\n  writes_aux__prev_data__1_1\n  writes_aux__prev_data__2_1\n  writes_aux__prev_data__3_1\n  a__0_1\n  a__1_1\n  a__2_1\n  a__3_1\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[44, from_state__timestamp_0 + 5]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 8, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 8, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__1__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0 + 1]\nmult=is_valid * -1, args=[1, 9, writes_aux__prev_data__0_1, writes_aux__prev_data__1_1, writes_aux__prev_data__2_1, writes_aux__prev_data__3_1, reads_aux__0__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 9, a__0_1, a__1_1, a__2_1, a__3_1, from_state__timestamp_0 + 4]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * 
reads_aux__1__base__prev_timestamp_0 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_0 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_1 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 15360), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_1, a__1_1, 0, 0]\nmult=is_valid * 1, args=[a__2_1, a__3_1, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_1 - (7864320 * writes_aux__prev_data__0_1 + 7864320 * is_valid)) * (7864320 * a__0_1 - (7864320 * writes_aux__prev_data__0_1 + 7864321)) = 0\n(30720 * a__0_1 + 7864320 * a__1_1 - (30720 * writes_aux__prev_data__0_1 + 7864320 * writes_aux__prev_data__1_1 + 30720 * is_valid)) * (30720 * a__0_1 + 7864320 * a__1_1 - (30720 * writes_aux__prev_data__0_1 + 7864320 * writes_aux__prev_data__1_1 + 30721)) = 0\n(120 * a__0_1 + 30720 * a__1_1 + 7864320 * a__2_1 - (120 * writes_aux__prev_data__0_1 + 30720 * writes_aux__prev_data__1_1 + 7864320 * writes_aux__prev_data__2_1 + 120 * is_valid)) * (120 * a__0_1 + 30720 * a__1_1 + 7864320 * a__2_1 - (120 * writes_aux__prev_data__0_1 + 30720 * writes_aux__prev_data__1_1 + 7864320 * writes_aux__prev_data__2_1 + 121)) = 0\n(943718400 * writes_aux__prev_data__0_1 + 120 * a__1_1 + 30720 * a__2_1 + 7864320 * a__3_1 + 943718400 * is_valid - (120 * writes_aux__prev_data__1_1 + 30720 * writes_aux__prev_data__2_1 + 7864320 * writes_aux__prev_data__3_1 + 943718400 * a__0_1)) * (943718400 * writes_aux__prev_data__0_1 + 120 * a__1_1 + 30720 * a__2_1 + 7864320 * a__3_1 + 943718399 - (120 * writes_aux__prev_data__1_1 + 30720 * writes_aux__prev_data__2_1 + 7864320 * writes_aux__prev_data__3_1 + 943718400 * a__0_1)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/superblocks/beq_fallthrough.txt",
    "content": "Instructions:\n  0: ADD rd_ptr = 10, rs1_ptr = 0, rs2 = 33, rs2_as = 0\n  4: BEQ 8 10 40 1 1\n  8: ADD rd_ptr = 9, rs1_ptr = 9, rs2 = 1, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 98 -> 27 (3.63x reduction)\n  - Bus interactions: 51 -> 20 (2.55x reduction)\n  - Constraints: 55 -> 6 (9.17x reduction)\n\nSymbolic machine using 27 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  reads_aux__0__base__prev_timestamp_1\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1\n  a__0_1\n  a__1_1\n  a__2_1\n  a__3_1\n  reads_aux__0__base__prev_timestamp_2\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2\n  writes_aux__prev_data__0_2\n  writes_aux__prev_data__1_2\n  writes_aux__prev_data__2_2\n  writes_aux__prev_data__3_2\n  a__0_2\n  a__1_2\n  a__2_2\n  a__3_2\n  free_var_103\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[12, from_state__timestamp_0 + 8]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 10, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 8, a__0_1, a__1_1, a__2_1, a__3_1, reads_aux__0__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 8, a__0_1, a__1_1, a__2_1, a__3_1, from_state__timestamp_0 + 3]\nmult=is_valid * 1, args=[1, 10, 33, 0, 0, 0, from_state__timestamp_0 + 4]\nmult=is_valid * -1, args=[1, 9, writes_aux__prev_data__0_2, 
writes_aux__prev_data__1_2, writes_aux__prev_data__2_2, writes_aux__prev_data__3_2, reads_aux__0__base__prev_timestamp_2]\nmult=is_valid * 1, args=[1, 9, a__0_2, a__1_2, a__2_2, a__3_2, from_state__timestamp_0 + 7]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_1 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 30720), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_2 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_2, a__1_2, 0, 0]\nmult=is_valid * 1, args=[a__2_2, a__3_2, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_2 - (7864320 * writes_aux__prev_data__0_2 + 7864320 * is_valid)) * (7864320 * a__0_2 - (7864320 * writes_aux__prev_data__0_2 + 7864321)) = 0\n(30720 * a__0_2 + 7864320 * a__1_2 - (30720 * writes_aux__prev_data__0_2 + 7864320 * writes_aux__prev_data__1_2 + 30720 * is_valid)) * (30720 * a__0_2 + 7864320 * a__1_2 - (30720 * writes_aux__prev_data__0_2 + 7864320 * writes_aux__prev_data__1_2 + 30721)) = 0\n(120 * a__0_2 + 30720 * a__1_2 + 7864320 * a__2_2 - (120 * writes_aux__prev_data__0_2 
+ 30720 * writes_aux__prev_data__1_2 + 7864320 * writes_aux__prev_data__2_2 + 120 * is_valid)) * (120 * a__0_2 + 30720 * a__1_2 + 7864320 * a__2_2 - (120 * writes_aux__prev_data__0_2 + 30720 * writes_aux__prev_data__1_2 + 7864320 * writes_aux__prev_data__2_2 + 121)) = 0\n(943718400 * writes_aux__prev_data__0_2 + 120 * a__1_2 + 30720 * a__2_2 + 7864320 * a__3_2 + 943718400 * is_valid - (120 * writes_aux__prev_data__1_2 + 30720 * writes_aux__prev_data__2_2 + 7864320 * writes_aux__prev_data__3_2 + 943718400 * a__0_2)) * (943718400 * writes_aux__prev_data__0_2 + 120 * a__1_2 + 30720 * a__2_2 + 7864320 * a__3_2 + 943718399 - (120 * writes_aux__prev_data__1_2 + 30720 * writes_aux__prev_data__2_2 + 7864320 * writes_aux__prev_data__3_2 + 943718400 * a__0_2)) = 0\nfree_var_103 * ((a__0_1 - 33) * (a__0_1 - 33) + a__1_1 + a__2_1 + a__3_1) - 1 * is_valid = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/superblocks/beq_jump.txt",
    "content": "Instructions:\n   0: ADD rd_ptr = 10, rs1_ptr = 0, rs2 = 33, rs2_as = 0\n   4: BEQ 8 10 40 1 1\n  44: ADD rd_ptr = 9, rs1_ptr = 9, rs2 = 1, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 98 -> 22 (4.45x reduction)\n  - Bus interactions: 51 -> 20 (2.55x reduction)\n  - Constraints: 55 -> 5 (11.00x reduction)\n\nSymbolic machine using 22 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  reads_aux__0__base__prev_timestamp_1\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1\n  reads_aux__0__base__prev_timestamp_2\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2\n  writes_aux__prev_data__0_2\n  writes_aux__prev_data__1_2\n  writes_aux__prev_data__2_2\n  writes_aux__prev_data__3_2\n  a__0_2\n  a__1_2\n  a__2_2\n  a__3_2\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[48, from_state__timestamp_0 + 8]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 10, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, writes_aux__base__prev_timestamp_0]\nmult=is_valid * -1, args=[1, 8, 33, 0, 0, 0, reads_aux__0__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 8, 33, 0, 0, 0, from_state__timestamp_0 + 3]\nmult=is_valid * 1, args=[1, 10, 33, 0, 0, 0, from_state__timestamp_0 + 4]\nmult=is_valid * -1, args=[1, 9, writes_aux__prev_data__0_2, writes_aux__prev_data__1_2, writes_aux__prev_data__2_2, writes_aux__prev_data__3_2, 
reads_aux__0__base__prev_timestamp_2]\nmult=is_valid * 1, args=[1, 9, a__0_2, a__1_2, a__2_2, a__3_2, from_state__timestamp_0 + 7]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_1 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 30720), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_2 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 61440), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_2, a__1_2, 0, 0]\nmult=is_valid * 1, args=[a__2_2, a__3_2, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_2 - (7864320 * writes_aux__prev_data__0_2 + 7864320 * is_valid)) * (7864320 * a__0_2 - (7864320 * writes_aux__prev_data__0_2 + 7864321)) = 0\n(30720 * a__0_2 + 7864320 * a__1_2 - (30720 * writes_aux__prev_data__0_2 + 7864320 * writes_aux__prev_data__1_2 + 30720 * is_valid)) * (30720 * a__0_2 + 7864320 * a__1_2 - (30720 * writes_aux__prev_data__0_2 + 7864320 * writes_aux__prev_data__1_2 + 30721)) = 0\n(120 * a__0_2 + 30720 * a__1_2 + 7864320 * a__2_2 - (120 * writes_aux__prev_data__0_2 + 30720 * writes_aux__prev_data__1_2 + 7864320 * writes_aux__prev_data__2_2 + 120 * 
is_valid)) * (120 * a__0_2 + 30720 * a__1_2 + 7864320 * a__2_2 - (120 * writes_aux__prev_data__0_2 + 30720 * writes_aux__prev_data__1_2 + 7864320 * writes_aux__prev_data__2_2 + 121)) = 0\n(943718400 * writes_aux__prev_data__0_2 + 120 * a__1_2 + 30720 * a__2_2 + 7864320 * a__3_2 + 943718400 * is_valid - (120 * writes_aux__prev_data__1_2 + 30720 * writes_aux__prev_data__2_2 + 7864320 * writes_aux__prev_data__3_2 + 943718400 * a__0_2)) * (943718400 * writes_aux__prev_data__0_2 + 120 * a__1_2 + 30720 * a__2_2 + 7864320 * a__3_2 + 943718399 - (120 * writes_aux__prev_data__1_2 + 30720 * writes_aux__prev_data__2_2 + 7864320 * writes_aux__prev_data__3_2 + 943718400 * a__0_2)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/apc_snapshots/superblocks/many_blocks.txt",
    "content": "Instructions:\n   0: ADD rd_ptr = 10, rs1_ptr = 0, rs2 = 10, rs2_as = 0\n   4: BNE 10 11 100 1 1\n   8: ADD rd_ptr = 12, rs1_ptr = 11, rs2 = 5, rs2_as = 0\n  12: BEQ 8 12 60 1 1\n  72: ADD rd_ptr = 9, rs1_ptr = 9, rs2 = 1, rs2_as = 0\n\nAPC advantage:\n  - Main columns: 160 -> 30 (5.33x reduction)\n  - Bus interactions: 82 -> 28 (2.93x reduction)\n  - Constraints: 88 -> 5 (17.60x reduction)\n\nSymbolic machine using 30 unique main columns:\n  from_state__timestamp_0\n  reads_aux__0__base__prev_timestamp_0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__base__prev_timestamp_0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_0\n  writes_aux__prev_data__0_0\n  writes_aux__prev_data__1_0\n  writes_aux__prev_data__2_0\n  writes_aux__prev_data__3_0\n  reads_aux__1__base__prev_timestamp_1\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_1\n  writes_aux__base__prev_timestamp_2\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0_2\n  writes_aux__prev_data__0_2\n  writes_aux__prev_data__1_2\n  writes_aux__prev_data__2_2\n  writes_aux__prev_data__3_2\n  reads_aux__0__base__prev_timestamp_3\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_3\n  reads_aux__0__base__prev_timestamp_4\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_4\n  writes_aux__prev_data__0_4\n  writes_aux__prev_data__1_4\n  writes_aux__prev_data__2_4\n  writes_aux__prev_data__3_4\n  a__0_4\n  a__1_4\n  a__2_4\n  a__3_4\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=is_valid * -1, args=[0, from_state__timestamp_0]\nmult=is_valid * 1, args=[76, from_state__timestamp_0 + 13]\n\n// Bus 1 (MEMORY):\nmult=is_valid * -1, args=[1, 0, 0, 0, 0, 0, reads_aux__0__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 0, 0, 0, 0, 0, from_state__timestamp_0]\nmult=is_valid * -1, args=[1, 10, writes_aux__prev_data__0_0, writes_aux__prev_data__1_0, writes_aux__prev_data__2_0, writes_aux__prev_data__3_0, 
writes_aux__base__prev_timestamp_0]\nmult=is_valid * 1, args=[1, 10, 10, 0, 0, 0, from_state__timestamp_0 + 3]\nmult=is_valid * -1, args=[1, 11, 10, 0, 0, 0, reads_aux__1__base__prev_timestamp_1]\nmult=is_valid * 1, args=[1, 11, 10, 0, 0, 0, from_state__timestamp_0 + 5]\nmult=is_valid * -1, args=[1, 12, writes_aux__prev_data__0_2, writes_aux__prev_data__1_2, writes_aux__prev_data__2_2, writes_aux__prev_data__3_2, writes_aux__base__prev_timestamp_2]\nmult=is_valid * -1, args=[1, 8, 15, 0, 0, 0, reads_aux__0__base__prev_timestamp_3]\nmult=is_valid * 1, args=[1, 8, 15, 0, 0, 0, from_state__timestamp_0 + 8]\nmult=is_valid * 1, args=[1, 12, 15, 0, 0, 0, from_state__timestamp_0 + 9]\nmult=is_valid * -1, args=[1, 9, writes_aux__prev_data__0_4, writes_aux__prev_data__1_4, writes_aux__prev_data__2_4, writes_aux__prev_data__3_4, reads_aux__0__base__prev_timestamp_4]\nmult=is_valid * 1, args=[1, 9, a__0_4, a__1_4, a__2_4, a__3_4, from_state__timestamp_0 + 12]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_0 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_0 + 15360 - 15360 * from_state__timestamp_0, 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_0, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_0 + 15360 * writes_aux__base__timestamp_lt_aux__lower_decomp__0_0 - (15360 * from_state__timestamp_0 + 15360), 12]\nmult=is_valid * 1, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_1, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__1__base__prev_timestamp_1 + 15360 * reads_aux__1__base__timestamp_lt_aux__lower_decomp__0_1 - (15360 * from_state__timestamp_0 + 46080), 12]\nmult=is_valid * 1, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0_2, 17]\nmult=is_valid * 1, args=[15360 * writes_aux__base__prev_timestamp_2 + 15360 * 
writes_aux__base__timestamp_lt_aux__lower_decomp__0_2 - (15360 * from_state__timestamp_0 + 92160), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_3, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_3 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_3 - (15360 * from_state__timestamp_0 + 107520), 12]\nmult=is_valid * 1, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_4, 17]\nmult=is_valid * 1, args=[15360 * reads_aux__0__base__prev_timestamp_4 + 15360 * reads_aux__0__base__timestamp_lt_aux__lower_decomp__0_4 - (15360 * from_state__timestamp_0 + 138240), 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid * 1, args=[a__0_4, a__1_4, 0, 0]\nmult=is_valid * 1, args=[a__2_4, a__3_4, 0, 0]\n\n// Algebraic constraints:\n(7864320 * a__0_4 - (7864320 * writes_aux__prev_data__0_4 + 7864320 * is_valid)) * (7864320 * a__0_4 - (7864320 * writes_aux__prev_data__0_4 + 7864321)) = 0\n(30720 * a__0_4 + 7864320 * a__1_4 - (30720 * writes_aux__prev_data__0_4 + 7864320 * writes_aux__prev_data__1_4 + 30720 * is_valid)) * (30720 * a__0_4 + 7864320 * a__1_4 - (30720 * writes_aux__prev_data__0_4 + 7864320 * writes_aux__prev_data__1_4 + 30721)) = 0\n(120 * a__0_4 + 30720 * a__1_4 + 7864320 * a__2_4 - (120 * writes_aux__prev_data__0_4 + 30720 * writes_aux__prev_data__1_4 + 7864320 * writes_aux__prev_data__2_4 + 120 * is_valid)) * (120 * a__0_4 + 30720 * a__1_4 + 7864320 * a__2_4 - (120 * writes_aux__prev_data__0_4 + 30720 * writes_aux__prev_data__1_4 + 7864320 * writes_aux__prev_data__2_4 + 121)) = 0\n(943718400 * writes_aux__prev_data__0_4 + 120 * a__1_4 + 30720 * a__2_4 + 7864320 * a__3_4 + 943718400 * is_valid - (120 * writes_aux__prev_data__1_4 + 30720 * writes_aux__prev_data__2_4 + 7864320 * writes_aux__prev_data__3_4 + 943718400 * a__0_4)) * (943718400 * writes_aux__prev_data__0_4 + 120 * a__1_4 + 30720 * a__2_4 + 7864320 * a__3_4 + 943718399 - (120 * writes_aux__prev_data__1_4 + 30720 * 
writes_aux__prev_data__2_4 + 7864320 * writes_aux__prev_data__3_4 + 943718400 * a__0_4)) = 0\nis_valid * (is_valid - 1) = 0"
  },
  {
    "path": "openvm-riscv/tests/common/mod.rs",
    "content": "use openvm_instructions::instruction::Instruction;\nuse openvm_sdk::config::SdkVmConfig;\nuse openvm_stark_sdk::p3_baby_bear::BabyBear;\nuse powdr_autoprecompiles::blocks::SuperBlock;\nuse powdr_openvm::extraction_utils::OriginalVmConfig;\nuse powdr_openvm::test_utils;\nuse powdr_openvm_riscv::{ExtendedVmConfig, RiscvISA};\nuse powdr_openvm_riscv_hints_circuit::HintsExtension;\nuse std::path::Path;\n\npub fn original_vm_config() -> OriginalVmConfig<RiscvISA> {\n    let sdk_vm_config = SdkVmConfig::builder()\n        .system(Default::default())\n        .rv32i(Default::default())\n        .rv32m(Default::default())\n        .io(Default::default())\n        .build();\n\n    let ext_vm_config = ExtendedVmConfig {\n        sdk: sdk_vm_config,\n        hints: HintsExtension,\n    };\n    OriginalVmConfig::new(ext_vm_config)\n}\n\npub mod apc_builder_utils {\n    use super::*;\n\n    // This code is not dead, but somehow the compiler thinks so.\n    #[allow(dead_code)]\n    pub fn compile(superblock: SuperBlock<Instruction<BabyBear>>) -> String {\n        let original_config = original_vm_config();\n        test_utils::compile_apc::<RiscvISA>(&original_config, superblock)\n    }\n\n    // This code is not dead, but somehow the compiler thinks so.\n    #[allow(dead_code)]\n    pub fn assert_machine_output(\n        program: SuperBlock<Instruction<BabyBear>>,\n        module_name: &str,\n        test_name: &str,\n    ) {\n        let snapshot_dir = Path::new(env!(\"CARGO_MANIFEST_DIR\"))\n            .join(\"tests\")\n            .join(\"apc_snapshots\");\n        let original_config = original_vm_config();\n        test_utils::assert_apc_machine_output::<RiscvISA>(\n            &original_config,\n            program,\n            &snapshot_dir,\n            module_name,\n            test_name,\n        );\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/tests/machine_extraction.rs",
    "content": "use std::{fs, io, path::Path};\nmod common;\n\nuse itertools::Itertools;\nuse powdr_openvm_riscv::DEFAULT_DEGREE_BOUND;\nuse pretty_assertions::assert_eq;\n\nuse crate::common::original_vm_config;\n\n#[test]\nfn extract_machine() {\n    let original_config = original_vm_config();\n    let airs = original_config.airs(DEFAULT_DEGREE_BOUND).unwrap();\n    let bus_map = original_config.bus_map();\n    let rendered = airs\n        .airs_by_name()\n        .map(|(machine_name, air)| format!(\"# {machine_name}\\n{}\", air.render(&bus_map)))\n        .join(\"\\n\\n\\n\");\n\n    let path = Path::new(env!(\"CARGO_MANIFEST_DIR\"))\n        .join(\"tests\")\n        .join(\"openvm_constraints.txt\");\n    match fs::read_to_string(&path) {\n        // Snapshot exists, compare it with the extracted constraints\n        Ok(expected) => {\n            assert_eq!(rendered, expected)\n        }\n\n        // Snapshot does not exist, create it\n        Err(err) if err.kind() == io::ErrorKind::NotFound => {\n            if let Some(parent) = path.parent() {\n                fs::create_dir_all(parent).unwrap();\n            }\n            fs::write(&path, &rendered).unwrap();\n            panic!(\"Created new snapshot at {path:?}. Inspect it, then rerun the tests.\");\n        }\n\n        Err(_) => panic!(),\n    }\n}\n"
  },
  {
    "path": "openvm-riscv/tests/openvm_constraints.txt",
    "content": "# VmAirWrapper<Rv32BaseAluAdapterAir, BaseAluCoreAir<4, 8>\nSymbolic machine using 36 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rd_ptr\n  rs1_ptr\n  rs2\n  rs2_as\n  reads_aux__0__base__prev_timestamp\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__base__prev_timestamp\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__prev_data__0\n  writes_aux__prev_data__1\n  writes_aux__prev_data__2\n  writes_aux__prev_data__3\n  a__0\n  a__1\n  a__2\n  a__3\n  b__0\n  b__1\n  b__2\n  b__3\n  c__0\n  c__1\n  c__2\n  c__3\n  opcode_add_flag\n  opcode_sub_flag\n  opcode_xor_flag\n  opcode_or_flag\n  opcode_and_flag\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag), args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[from_state__pc + 4, from_state__timestamp + 3]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * (0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag), args=[1, rs1_ptr, b__0, b__1, b__2, b__3, reads_aux__0__base__prev_timestamp]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[1, rs1_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 0]\nmult=2013265920 * rs2_as, args=[rs2_as, rs2, c__0, c__1, c__2, c__3, reads_aux__1__base__prev_timestamp]\nmult=rs2_as, args=[rs2_as, rs2, c__0, c__1, c__2, c__3, from_state__timestamp + 1]\nmult=2013265920 * (0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag), args=[1, rd_ptr, 
writes_aux__prev_data__0, writes_aux__prev_data__1, writes_aux__prev_data__2, writes_aux__prev_data__3, writes_aux__base__prev_timestamp]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[1, rd_ptr, a__0, a__1, a__2, a__3, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[from_state__pc, 512 + (0 + opcode_add_flag * 0 + opcode_sub_flag * 1 + opcode_xor_flag * 2 + opcode_or_flag * 3 + opcode_and_flag * 4), rd_ptr, rs1_ptr, rs2, 1, rs2_as, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=rs2_as, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=rs2_as, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[(1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__0 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * b__0, (1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__0 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * c__0, opcode_xor_flag * a__0 + opcode_or_flag * (2 * a__0 - b__0 - c__0) + opcode_and_flag * (b__0 + c__0 - 2 * a__0), 1]\nmult=0 + opcode_add_flag + 
opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[(1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__1 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * b__1, (1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__1 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * c__1, opcode_xor_flag * a__1 + opcode_or_flag * (2 * a__1 - b__1 - c__1) + opcode_and_flag * (b__1 + c__1 - 2 * a__1), 1]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[(1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__2 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * b__2, (1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__2 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * c__2, opcode_xor_flag * a__2 + opcode_or_flag * (2 * a__2 - b__2 - c__2) + opcode_and_flag * (b__2 + c__2 - 2 * a__2), 1]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag, args=[(1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__3 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * b__3, (1 - (opcode_xor_flag + opcode_or_flag + opcode_and_flag)) * a__3 + (opcode_xor_flag + opcode_or_flag + opcode_and_flag) * c__3, opcode_xor_flag * a__3 + opcode_or_flag * (2 * a__3 - b__3 - c__3) + opcode_and_flag * (b__3 + c__3 - 2 * a__3), 1]\nmult=0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag - rs2_as, args=[c__0, c__1, 0, 0]\n\n// Algebraic constraints:\nopcode_add_flag * (opcode_add_flag - 1) = 0\nopcode_sub_flag * (opcode_sub_flag - 1) = 0\nopcode_xor_flag * (opcode_xor_flag - 1) = 0\nopcode_or_flag * (opcode_or_flag - 1) = 0\nopcode_and_flag * (opcode_and_flag - 1) = 0\n(0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag) * (0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag - 1) = 
0\nopcode_add_flag * (2005401601 * (b__0 + c__0 - a__0 + 0) * (2005401601 * (b__0 + c__0 - a__0 + 0) - 1)) = 0\nopcode_sub_flag * (2005401601 * (a__0 + c__0 - b__0 + 0) * (2005401601 * (a__0 + c__0 - b__0 + 0) - 1)) = 0\nopcode_add_flag * (2005401601 * (b__1 + c__1 - a__1 + 2005401601 * (b__0 + c__0 - a__0 + 0)) * (2005401601 * (b__1 + c__1 - a__1 + 2005401601 * (b__0 + c__0 - a__0 + 0)) - 1)) = 0\nopcode_sub_flag * (2005401601 * (a__1 + c__1 - b__1 + 2005401601 * (a__0 + c__0 - b__0 + 0)) * (2005401601 * (a__1 + c__1 - b__1 + 2005401601 * (a__0 + c__0 - b__0 + 0)) - 1)) = 0\nopcode_add_flag * (2005401601 * (b__2 + c__2 - a__2 + 2005401601 * (b__1 + c__1 - a__1 + 2005401601 * (b__0 + c__0 - a__0 + 0))) * (2005401601 * (b__2 + c__2 - a__2 + 2005401601 * (b__1 + c__1 - a__1 + 2005401601 * (b__0 + c__0 - a__0 + 0))) - 1)) = 0\nopcode_sub_flag * (2005401601 * (a__2 + c__2 - b__2 + 2005401601 * (a__1 + c__1 - b__1 + 2005401601 * (a__0 + c__0 - b__0 + 0))) * (2005401601 * (a__2 + c__2 - b__2 + 2005401601 * (a__1 + c__1 - b__1 + 2005401601 * (a__0 + c__0 - b__0 + 0))) - 1)) = 0\nopcode_add_flag * (2005401601 * (b__3 + c__3 - a__3 + 2005401601 * (b__2 + c__2 - a__2 + 2005401601 * (b__1 + c__1 - a__1 + 2005401601 * (b__0 + c__0 - a__0 + 0)))) * (2005401601 * (b__3 + c__3 - a__3 + 2005401601 * (b__2 + c__2 - a__2 + 2005401601 * (b__1 + c__1 - a__1 + 2005401601 * (b__0 + c__0 - a__0 + 0)))) - 1)) = 0\nopcode_sub_flag * (2005401601 * (a__3 + c__3 - b__3 + 2005401601 * (a__2 + c__2 - b__2 + 2005401601 * (a__1 + c__1 - b__1 + 2005401601 * (a__0 + c__0 - b__0 + 0)))) * (2005401601 * (a__3 + c__3 - b__3 + 2005401601 * (a__2 + c__2 - b__2 + 2005401601 * (a__1 + c__1 - b__1 + 2005401601 * (a__0 + c__0 - b__0 + 0)))) - 1)) = 0\nrs2_as * (rs2_as - 1) = 0\n(1 - rs2_as) * (rs2 - (c__0 + c__1 * 256 + c__2 * 65536)) = 0\n(1 - rs2_as) * (c__2 - c__3) = 0\n(1 - rs2_as) * (c__2 * (255 - c__2)) = 0\n(0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag) * 
(from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nrs2_as * (0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag - 1) = 0\nrs2_as * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 - (0 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_add_flag + opcode_sub_flag + opcode_xor_flag + opcode_or_flag + opcode_and_flag) * (from_state__timestamp + 2 - writes_aux__base__prev_timestamp - 1 - (0 + writes_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + writes_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32BaseAluAdapterAir, LessThanCoreAir<4, 8>\nSymbolic machine using 37 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rd_ptr\n  rs1_ptr\n  rs2\n  rs2_as\n  reads_aux__0__base__prev_timestamp\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__base__prev_timestamp\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__prev_data__0\n  writes_aux__prev_data__1\n  writes_aux__prev_data__2\n  writes_aux__prev_data__3\n  b__0\n  b__1\n  b__2\n  b__3\n  c__0\n  c__1\n  c__2\n  c__3\n  cmp_result\n  opcode_slt_flag\n  opcode_sltu_flag\n  b_msb_f\n  c_msb_f\n  diff_marker__0\n  diff_marker__1\n  diff_marker__2\n  diff_marker__3\n  diff_val\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_slt_flag + opcode_sltu_flag), args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[from_state__pc + 4, 
from_state__timestamp + 3]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * (0 + opcode_slt_flag + opcode_sltu_flag), args=[1, rs1_ptr, b__0, b__1, b__2, b__3, reads_aux__0__base__prev_timestamp]\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[1, rs1_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 0]\nmult=2013265920 * rs2_as, args=[rs2_as, rs2, c__0, c__1, c__2, c__3, reads_aux__1__base__prev_timestamp]\nmult=rs2_as, args=[rs2_as, rs2, c__0, c__1, c__2, c__3, from_state__timestamp + 1]\nmult=2013265920 * (0 + opcode_slt_flag + opcode_sltu_flag), args=[1, rd_ptr, writes_aux__prev_data__0, writes_aux__prev_data__1, writes_aux__prev_data__2, writes_aux__prev_data__3, writes_aux__base__prev_timestamp]\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[1, rd_ptr, cmp_result, 0, 0, 0, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[from_state__pc, 0 + opcode_slt_flag * 0 + opcode_sltu_flag * 1 + 520, rd_ptr, rs1_ptr, rs2, 1, rs2_as, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=rs2_as, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=rs2_as, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=0 + opcode_slt_flag + opcode_sltu_flag, args=[b_msb_f + 128 * opcode_slt_flag, c_msb_f + 128 * opcode_slt_flag, 0, 0]\nmult=0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0, args=[diff_val - 1, 0, 0, 0]\nmult=0 + opcode_slt_flag + opcode_sltu_flag - rs2_as, args=[c__0, c__1, 0, 0]\n\n// Algebraic 
constraints:\nopcode_slt_flag * (opcode_slt_flag - 1) = 0\nopcode_sltu_flag * (opcode_sltu_flag - 1) = 0\n(0 + opcode_slt_flag + opcode_sltu_flag) * (0 + opcode_slt_flag + opcode_sltu_flag - 1) = 0\ncmp_result * (cmp_result - 1) = 0\n(b__3 - b_msb_f) * (256 - (b__3 - b_msb_f)) = 0\n(c__3 - c_msb_f) * (256 - (c__3 - c_msb_f)) = 0\ndiff_marker__3 * (diff_marker__3 - 1) = 0\n(1 - (0 + diff_marker__3)) * ((c_msb_f - b_msb_f) * (2 * cmp_result - 1)) = 0\ndiff_marker__3 * (diff_val - (c_msb_f - b_msb_f) * (2 * cmp_result - 1)) = 0\ndiff_marker__2 * (diff_marker__2 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2)) * ((c__2 - b__2) * (2 * cmp_result - 1)) = 0\ndiff_marker__2 * (diff_val - (c__2 - b__2) * (2 * cmp_result - 1)) = 0\ndiff_marker__1 * (diff_marker__1 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2 + diff_marker__1)) * ((c__1 - b__1) * (2 * cmp_result - 1)) = 0\ndiff_marker__1 * (diff_val - (c__1 - b__1) * (2 * cmp_result - 1)) = 0\ndiff_marker__0 * (diff_marker__0 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0)) * ((c__0 - b__0) * (2 * cmp_result - 1)) = 0\ndiff_marker__0 * (diff_val - (c__0 - b__0) * (2 * cmp_result - 1)) = 0\n(0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0) * (0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0)) * cmp_result = 0\nrs2_as * (rs2_as - 1) = 0\n(1 - rs2_as) * (rs2 - (c__0 + c__1 * 256 + c__2 * 65536)) = 0\n(1 - rs2_as) * (c__2 - c__3) = 0\n(1 - rs2_as) * (c__2 * (255 - c__2)) = 0\n(0 + opcode_slt_flag + opcode_sltu_flag) * (from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nrs2_as * (0 + opcode_slt_flag + opcode_sltu_flag - 1) = 0\nrs2_as * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 
- (0 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_slt_flag + opcode_sltu_flag) * (from_state__timestamp + 2 - writes_aux__base__prev_timestamp - 1 - (0 + writes_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + writes_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32BaseAluAdapterAir, ShiftCoreAir<4, 8>\nSymbolic machine using 53 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rd_ptr\n  rs1_ptr\n  rs2\n  rs2_as\n  reads_aux__0__base__prev_timestamp\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__base__prev_timestamp\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__prev_data__0\n  writes_aux__prev_data__1\n  writes_aux__prev_data__2\n  writes_aux__prev_data__3\n  a__0\n  a__1\n  a__2\n  a__3\n  b__0\n  b__1\n  b__2\n  b__3\n  c__0\n  c__1\n  c__2\n  c__3\n  opcode_sll_flag\n  opcode_srl_flag\n  opcode_sra_flag\n  bit_multiplier_left\n  bit_multiplier_right\n  b_sign\n  bit_shift_marker__0\n  bit_shift_marker__1\n  bit_shift_marker__2\n  bit_shift_marker__3\n  bit_shift_marker__4\n  bit_shift_marker__5\n  bit_shift_marker__6\n  bit_shift_marker__7\n  limb_shift_marker__0\n  limb_shift_marker__1\n  limb_shift_marker__2\n  limb_shift_marker__3\n  bit_shift_carry__0\n  bit_shift_carry__1\n  bit_shift_carry__2\n  bit_shift_carry__3\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag), args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[from_state__pc + 4, from_state__timestamp + 3]\n\n// Bus 1 
(MEMORY):\nmult=2013265920 * (0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag), args=[1, rs1_ptr, b__0, b__1, b__2, b__3, reads_aux__0__base__prev_timestamp]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[1, rs1_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 0]\nmult=2013265920 * rs2_as, args=[rs2_as, rs2, c__0, c__1, c__2, c__3, reads_aux__1__base__prev_timestamp]\nmult=rs2_as, args=[rs2_as, rs2, c__0, c__1, c__2, c__3, from_state__timestamp + 1]\nmult=2013265920 * (0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag), args=[1, rd_ptr, writes_aux__prev_data__0, writes_aux__prev_data__1, writes_aux__prev_data__2, writes_aux__prev_data__3, writes_aux__base__prev_timestamp]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[1, rd_ptr, a__0, a__1, a__2, a__3, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[from_state__pc, 517 + (0 + opcode_sll_flag * 0 + opcode_srl_flag * 1 + opcode_sra_flag * 2), rd_ptr, rs1_ptr, rs2, 1, rs2_as, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[(c__0 - (0 + 0 * limb_shift_marker__0 + 1 * limb_shift_marker__1 + 2 * limb_shift_marker__2 + 3 * limb_shift_marker__3) * 8 - (0 + 0 * bit_shift_marker__0 + 1 * bit_shift_marker__1 + 2 * bit_shift_marker__2 + 3 * bit_shift_marker__3 + 4 * bit_shift_marker__4 + 5 * bit_shift_marker__5 + 6 * bit_shift_marker__6 + 7 * bit_shift_marker__7)) * 1950351361, 3]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[bit_shift_carry__0, 0 + 0 * bit_shift_marker__0 + 1 * bit_shift_marker__1 + 2 * bit_shift_marker__2 + 3 * bit_shift_marker__3 + 4 * bit_shift_marker__4 + 5 * bit_shift_marker__5 + 6 * bit_shift_marker__6 + 7 * bit_shift_marker__7]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[bit_shift_carry__1, 0 + 0 * bit_shift_marker__0 + 1 * bit_shift_marker__1 + 2 * 
bit_shift_marker__2 + 3 * bit_shift_marker__3 + 4 * bit_shift_marker__4 + 5 * bit_shift_marker__5 + 6 * bit_shift_marker__6 + 7 * bit_shift_marker__7]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[bit_shift_carry__2, 0 + 0 * bit_shift_marker__0 + 1 * bit_shift_marker__1 + 2 * bit_shift_marker__2 + 3 * bit_shift_marker__3 + 4 * bit_shift_marker__4 + 5 * bit_shift_marker__5 + 6 * bit_shift_marker__6 + 7 * bit_shift_marker__7]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[bit_shift_carry__3, 0 + 0 * bit_shift_marker__0 + 1 * bit_shift_marker__1 + 2 * bit_shift_marker__2 + 3 * bit_shift_marker__3 + 4 * bit_shift_marker__4 + 5 * bit_shift_marker__5 + 6 * bit_shift_marker__6 + 7 * bit_shift_marker__7]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=rs2_as, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=rs2_as, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=opcode_sra_flag, args=[b__3, 128, b__3 + 128 - 2 * (b_sign * 128), 1]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[a__0, a__1, 0, 0]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag, args=[a__2, a__3, 0, 0]\nmult=0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag - rs2_as, args=[c__0, c__1, 0, 0]\n\n// Algebraic constraints:\nopcode_sll_flag * (opcode_sll_flag - 1) = 0\nopcode_srl_flag * (opcode_srl_flag - 1) = 0\nopcode_sra_flag * (opcode_sra_flag - 1) = 0\n(0 + opcode_sll_flag + opcode_srl_flag + 
opcode_sra_flag) * (0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag - 1) = 0\nbit_shift_marker__0 * (bit_shift_marker__0 - 1) = 0\nbit_shift_marker__0 * (bit_multiplier_left - 1 * opcode_sll_flag) = 0\nbit_shift_marker__0 * (bit_multiplier_right - 1 * (opcode_srl_flag + opcode_sra_flag)) = 0\nbit_shift_marker__1 * (bit_shift_marker__1 - 1) = 0\nbit_shift_marker__1 * (bit_multiplier_left - 2 * opcode_sll_flag) = 0\nbit_shift_marker__1 * (bit_multiplier_right - 2 * (opcode_srl_flag + opcode_sra_flag)) = 0\nbit_shift_marker__2 * (bit_shift_marker__2 - 1) = 0\nbit_shift_marker__2 * (bit_multiplier_left - 4 * opcode_sll_flag) = 0\nbit_shift_marker__2 * (bit_multiplier_right - 4 * (opcode_srl_flag + opcode_sra_flag)) = 0\nbit_shift_marker__3 * (bit_shift_marker__3 - 1) = 0\nbit_shift_marker__3 * (bit_multiplier_left - 8 * opcode_sll_flag) = 0\nbit_shift_marker__3 * (bit_multiplier_right - 8 * (opcode_srl_flag + opcode_sra_flag)) = 0\nbit_shift_marker__4 * (bit_shift_marker__4 - 1) = 0\nbit_shift_marker__4 * (bit_multiplier_left - 16 * opcode_sll_flag) = 0\nbit_shift_marker__4 * (bit_multiplier_right - 16 * (opcode_srl_flag + opcode_sra_flag)) = 0\nbit_shift_marker__5 * (bit_shift_marker__5 - 1) = 0\nbit_shift_marker__5 * (bit_multiplier_left - 32 * opcode_sll_flag) = 0\nbit_shift_marker__5 * (bit_multiplier_right - 32 * (opcode_srl_flag + opcode_sra_flag)) = 0\nbit_shift_marker__6 * (bit_shift_marker__6 - 1) = 0\nbit_shift_marker__6 * (bit_multiplier_left - 64 * opcode_sll_flag) = 0\nbit_shift_marker__6 * (bit_multiplier_right - 64 * (opcode_srl_flag + opcode_sra_flag)) = 0\nbit_shift_marker__7 * (bit_shift_marker__7 - 1) = 0\nbit_shift_marker__7 * (bit_multiplier_left - 128 * opcode_sll_flag) = 0\nbit_shift_marker__7 * (bit_multiplier_right - 128 * (opcode_srl_flag + opcode_sra_flag)) = 0\n(0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag) * (0 + bit_shift_marker__0 + bit_shift_marker__1 + bit_shift_marker__2 + bit_shift_marker__3 + bit_shift_marker__4 + 
bit_shift_marker__5 + bit_shift_marker__6 + bit_shift_marker__7 - 1) = 0\nlimb_shift_marker__0 * (limb_shift_marker__0 - 1) = 0\nlimb_shift_marker__0 * (a__0 * opcode_sll_flag - (0 + b__0 * bit_multiplier_left - 256 * bit_shift_carry__0 * opcode_sll_flag)) = 0\nlimb_shift_marker__0 * (a__0 * bit_multiplier_right - (bit_shift_carry__1 * (opcode_srl_flag + opcode_sra_flag) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__0 - bit_shift_carry__0))) = 0\nlimb_shift_marker__0 * (a__1 * opcode_sll_flag - (bit_shift_carry__0 * opcode_sll_flag + b__1 * bit_multiplier_left - 256 * bit_shift_carry__1 * opcode_sll_flag)) = 0\nlimb_shift_marker__0 * (a__1 * bit_multiplier_right - (bit_shift_carry__2 * (opcode_srl_flag + opcode_sra_flag) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__1 - bit_shift_carry__1))) = 0\nlimb_shift_marker__0 * (a__2 * opcode_sll_flag - (bit_shift_carry__1 * opcode_sll_flag + b__2 * bit_multiplier_left - 256 * bit_shift_carry__2 * opcode_sll_flag)) = 0\nlimb_shift_marker__0 * (a__2 * bit_multiplier_right - (bit_shift_carry__3 * (opcode_srl_flag + opcode_sra_flag) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__2 - bit_shift_carry__2))) = 0\nlimb_shift_marker__0 * (a__3 * opcode_sll_flag - (bit_shift_carry__2 * opcode_sll_flag + b__3 * bit_multiplier_left - 256 * bit_shift_carry__3 * opcode_sll_flag)) = 0\nlimb_shift_marker__0 * (a__3 * bit_multiplier_right - (b_sign * (bit_multiplier_right - 1) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__3 - bit_shift_carry__3))) = 0\nlimb_shift_marker__1 * (limb_shift_marker__1 - 1) = 0\nlimb_shift_marker__1 * (a__0 * opcode_sll_flag) = 0\nlimb_shift_marker__1 * (a__0 * bit_multiplier_right - (bit_shift_carry__2 * (opcode_srl_flag + opcode_sra_flag) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__1 - bit_shift_carry__1))) = 0\nlimb_shift_marker__1 * (a__1 * opcode_sll_flag - (0 + b__0 * bit_multiplier_left - 256 * bit_shift_carry__0 * opcode_sll_flag)) = 0\nlimb_shift_marker__1 * (a__1 * 
bit_multiplier_right - (bit_shift_carry__3 * (opcode_srl_flag + opcode_sra_flag) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__2 - bit_shift_carry__2))) = 0\nlimb_shift_marker__1 * (a__2 * opcode_sll_flag - (bit_shift_carry__0 * opcode_sll_flag + b__1 * bit_multiplier_left - 256 * bit_shift_carry__1 * opcode_sll_flag)) = 0\nlimb_shift_marker__1 * (a__2 * bit_multiplier_right - (b_sign * (bit_multiplier_right - 1) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__3 - bit_shift_carry__3))) = 0\nlimb_shift_marker__1 * (a__3 * opcode_sll_flag - (bit_shift_carry__1 * opcode_sll_flag + b__2 * bit_multiplier_left - 256 * bit_shift_carry__2 * opcode_sll_flag)) = 0\nlimb_shift_marker__1 * (a__3 * (opcode_srl_flag + opcode_sra_flag) - b_sign * 255) = 0\nlimb_shift_marker__2 * (limb_shift_marker__2 - 1) = 0\nlimb_shift_marker__2 * (a__0 * opcode_sll_flag) = 0\nlimb_shift_marker__2 * (a__0 * bit_multiplier_right - (bit_shift_carry__3 * (opcode_srl_flag + opcode_sra_flag) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__2 - bit_shift_carry__2))) = 0\nlimb_shift_marker__2 * (a__1 * opcode_sll_flag) = 0\nlimb_shift_marker__2 * (a__1 * bit_multiplier_right - (b_sign * (bit_multiplier_right - 1) * 256 + (opcode_srl_flag + opcode_sra_flag) * (b__3 - bit_shift_carry__3))) = 0\nlimb_shift_marker__2 * (a__2 * opcode_sll_flag - (0 + b__0 * bit_multiplier_left - 256 * bit_shift_carry__0 * opcode_sll_flag)) = 0\nlimb_shift_marker__2 * (a__2 * (opcode_srl_flag + opcode_sra_flag) - b_sign * 255) = 0\nlimb_shift_marker__2 * (a__3 * opcode_sll_flag - (bit_shift_carry__0 * opcode_sll_flag + b__1 * bit_multiplier_left - 256 * bit_shift_carry__1 * opcode_sll_flag)) = 0\nlimb_shift_marker__2 * (a__3 * (opcode_srl_flag + opcode_sra_flag) - b_sign * 255) = 0\nlimb_shift_marker__3 * (limb_shift_marker__3 - 1) = 0\nlimb_shift_marker__3 * (a__0 * opcode_sll_flag) = 0\nlimb_shift_marker__3 * (a__0 * bit_multiplier_right - (b_sign * (bit_multiplier_right - 1) * 256 + (opcode_srl_flag + 
opcode_sra_flag) * (b__3 - bit_shift_carry__3))) = 0\nlimb_shift_marker__3 * (a__1 * opcode_sll_flag) = 0\nlimb_shift_marker__3 * (a__1 * (opcode_srl_flag + opcode_sra_flag) - b_sign * 255) = 0\nlimb_shift_marker__3 * (a__2 * opcode_sll_flag) = 0\nlimb_shift_marker__3 * (a__2 * (opcode_srl_flag + opcode_sra_flag) - b_sign * 255) = 0\nlimb_shift_marker__3 * (a__3 * opcode_sll_flag - (0 + b__0 * bit_multiplier_left - 256 * bit_shift_carry__0 * opcode_sll_flag)) = 0\nlimb_shift_marker__3 * (a__3 * (opcode_srl_flag + opcode_sra_flag) - b_sign * 255) = 0\n(0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag) * (0 + limb_shift_marker__0 + limb_shift_marker__1 + limb_shift_marker__2 + limb_shift_marker__3 - 1) = 0\nb_sign * (b_sign - 1) = 0\n(1 - opcode_sra_flag) * b_sign = 0\nrs2_as * (rs2_as - 1) = 0\n(1 - rs2_as) * (rs2 - (c__0 + c__1 * 256 + c__2 * 65536)) = 0\n(1 - rs2_as) * (c__2 - c__3) = 0\n(1 - rs2_as) * (c__2 * (255 - c__2)) = 0\n(0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag) * (from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nrs2_as * (0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag - 1) = 0\nrs2_as * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 - (0 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_sll_flag + opcode_srl_flag + opcode_sra_flag) * (from_state__timestamp + 2 - writes_aux__base__prev_timestamp - 1 - (0 + writes_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + writes_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32BranchAdapterAir, BranchEqualCoreAir<4>\nSymbolic machine using 26 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rs1_ptr\n  rs2_ptr\n  reads_aux__0__base__prev_timestamp\n  
reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  a__0\n  a__1\n  a__2\n  a__3\n  b__0\n  b__1\n  b__2\n  b__3\n  cmp_result\n  imm\n  opcode_beq_flag\n  opcode_bne_flag\n  diff_inv_marker__0\n  diff_inv_marker__1\n  diff_inv_marker__2\n  diff_inv_marker__3\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_beq_flag + opcode_bne_flag), args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[from_state__pc + cmp_result * imm + (1 - cmp_result) * 4, from_state__timestamp + 2]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * (0 + opcode_beq_flag + opcode_bne_flag), args=[1, rs1_ptr, a__0, a__1, a__2, a__3, reads_aux__0__base__prev_timestamp]\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[1, rs1_ptr, a__0, a__1, a__2, a__3, from_state__timestamp + 0]\nmult=2013265920 * (0 + opcode_beq_flag + opcode_bne_flag), args=[1, rs2_ptr, b__0, b__1, b__2, b__3, reads_aux__1__base__prev_timestamp]\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[1, rs2_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 1]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[from_state__pc, 0 + opcode_beq_flag * 0 + opcode_bne_flag * 1 + 544, rs1_ptr, rs2_ptr, imm, 1, 1, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_beq_flag + opcode_bne_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Algebraic constraints:\nopcode_beq_flag * (opcode_beq_flag - 1) = 
0\nopcode_bne_flag * (opcode_bne_flag - 1) = 0\n(0 + opcode_beq_flag + opcode_bne_flag) * (0 + opcode_beq_flag + opcode_bne_flag - 1) = 0\ncmp_result * (cmp_result - 1) = 0\n(cmp_result * opcode_beq_flag + (1 - cmp_result) * opcode_bne_flag) * (a__0 - b__0) = 0\n(cmp_result * opcode_beq_flag + (1 - cmp_result) * opcode_bne_flag) * (a__1 - b__1) = 0\n(cmp_result * opcode_beq_flag + (1 - cmp_result) * opcode_bne_flag) * (a__2 - b__2) = 0\n(cmp_result * opcode_beq_flag + (1 - cmp_result) * opcode_bne_flag) * (a__3 - b__3) = 0\n(0 + opcode_beq_flag + opcode_bne_flag) * (cmp_result * opcode_beq_flag + (1 - cmp_result) * opcode_bne_flag + (a__0 - b__0) * diff_inv_marker__0 + (a__1 - b__1) * diff_inv_marker__1 + (a__2 - b__2) * diff_inv_marker__2 + (a__3 - b__3) * diff_inv_marker__3 - 1) = 0\n(0 + opcode_beq_flag + opcode_bne_flag) * (from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_beq_flag + opcode_bne_flag) * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 - (0 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32BranchAdapterAir, BranchLessThanCoreAir<4, 8>\nSymbolic machine using 32 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rs1_ptr\n  rs2_ptr\n  reads_aux__0__base__prev_timestamp\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  a__0\n  a__1\n  a__2\n  a__3\n  b__0\n  b__1\n  b__2\n  b__3\n  cmp_result\n  imm\n  opcode_blt_flag\n  opcode_bltu_flag\n  opcode_bge_flag\n  opcode_bgeu_flag\n  a_msb_f\n  b_msb_f\n  cmp_lt\n  diff_marker__0\n  
diff_marker__1\n  diff_marker__2\n  diff_marker__3\n  diff_val\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag), args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[from_state__pc + cmp_result * imm + (1 - cmp_result) * 4, from_state__timestamp + 2]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * (0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag), args=[1, rs1_ptr, a__0, a__1, a__2, a__3, reads_aux__0__base__prev_timestamp]\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[1, rs1_ptr, a__0, a__1, a__2, a__3, from_state__timestamp + 0]\nmult=2013265920 * (0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag), args=[1, rs2_ptr, b__0, b__1, b__2, b__3, reads_aux__1__base__prev_timestamp]\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[1, rs2_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 1]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[from_state__pc, 0 + opcode_blt_flag * 0 + opcode_bltu_flag * 1 + opcode_bge_flag * 2 + opcode_bgeu_flag * 3 + 549, rs1_ptr, rs2_ptr, imm, 1, 1, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 
(BITWISE_LOOKUP):\nmult=0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag, args=[a_msb_f + 128 * (opcode_blt_flag + opcode_bge_flag), b_msb_f + 128 * (opcode_blt_flag + opcode_bge_flag), 0, 0]\nmult=0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0, args=[diff_val - 1, 0, 0, 0]\n\n// Algebraic constraints:\nopcode_blt_flag * (opcode_blt_flag - 1) = 0\nopcode_bltu_flag * (opcode_bltu_flag - 1) = 0\nopcode_bge_flag * (opcode_bge_flag - 1) = 0\nopcode_bgeu_flag * (opcode_bgeu_flag - 1) = 0\n(0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag) * (0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag - 1) = 0\ncmp_result * (cmp_result - 1) = 0\ncmp_lt - (cmp_result * (opcode_blt_flag + opcode_bltu_flag) + (1 - cmp_result) * (opcode_bge_flag + opcode_bgeu_flag)) = 0\n(a__3 - a_msb_f) * (256 - (a__3 - a_msb_f)) = 0\n(b__3 - b_msb_f) * (256 - (b__3 - b_msb_f)) = 0\ndiff_marker__3 * (diff_marker__3 - 1) = 0\n(1 - (0 + diff_marker__3)) * ((b_msb_f - a_msb_f) * (2 * cmp_lt - 1)) = 0\ndiff_marker__3 * (diff_val - (b_msb_f - a_msb_f) * (2 * cmp_lt - 1)) = 0\ndiff_marker__2 * (diff_marker__2 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2)) * ((b__2 - a__2) * (2 * cmp_lt - 1)) = 0\ndiff_marker__2 * (diff_val - (b__2 - a__2) * (2 * cmp_lt - 1)) = 0\ndiff_marker__1 * (diff_marker__1 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2 + diff_marker__1)) * ((b__1 - a__1) * (2 * cmp_lt - 1)) = 0\ndiff_marker__1 * (diff_val - (b__1 - a__1) * (2 * cmp_lt - 1)) = 0\ndiff_marker__0 * (diff_marker__0 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0)) * ((b__0 - a__0) * (2 * cmp_lt - 1)) = 0\ndiff_marker__0 * (diff_val - (b__0 - a__0) * (2 * cmp_lt - 1)) = 0\n(0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0) * (0 + diff_marker__3 + diff_marker__2 + diff_marker__1 + diff_marker__0 - 1) = 0\n(1 - (0 + diff_marker__3 + diff_marker__2 + 
diff_marker__1 + diff_marker__0)) * cmp_lt = 0\n(0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag) * (from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_blt_flag + opcode_bltu_flag + opcode_bge_flag + opcode_bgeu_flag) * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 - (0 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32CondRdWriteAdapterAir, Rv32JalLuiCoreAir>\nSymbolic machine using 18 unique main columns:\n  inner__from_state__pc\n  inner__from_state__timestamp\n  inner__rd_ptr\n  inner__rd_aux_cols__base__prev_timestamp\n  inner__rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0\n  inner__rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1\n  inner__rd_aux_cols__prev_data__0\n  inner__rd_aux_cols__prev_data__1\n  inner__rd_aux_cols__prev_data__2\n  inner__rd_aux_cols__prev_data__3\n  needs_write\n  imm\n  rd_data__0\n  rd_data__1\n  rd_data__2\n  rd_data__3\n  is_jal\n  is_lui\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(is_lui + is_jal), args=[inner__from_state__pc, inner__from_state__timestamp]\nmult=is_lui + is_jal, args=[inner__from_state__pc + is_lui * 4 + is_jal * imm, inner__from_state__timestamp + 1]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * needs_write, args=[1, inner__rd_ptr, inner__rd_aux_cols__prev_data__0, inner__rd_aux_cols__prev_data__1, inner__rd_aux_cols__prev_data__2, inner__rd_aux_cols__prev_data__3, inner__rd_aux_cols__base__prev_timestamp]\nmult=needs_write, args=[1, inner__rd_ptr, rd_data__0, rd_data__1, rd_data__2, rd_data__3, inner__from_state__timestamp]\n\n// Bus 2 (PC_LOOKUP):\nmult=is_lui + is_jal, args=[inner__from_state__pc, 560 + (is_lui * 1 + is_jal * 0), inner__rd_ptr, 0, imm, 1, 0, needs_write, 0]\n\n// Bus 3 
(VARIABLE_RANGE_CHECKER):\nmult=needs_write, args=[inner__rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=needs_write, args=[inner__rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_lui + is_jal, args=[rd_data__0, rd_data__1, 0, 0]\nmult=is_lui + is_jal, args=[rd_data__2, rd_data__3, 0, 0]\nmult=is_jal, args=[rd_data__3, 192, rd_data__3 + 192, 1]\n\n// Algebraic constraints:\nis_lui * (is_lui - 1) = 0\nis_jal * (is_jal - 1) = 0\n(is_lui + is_jal) * (is_lui + is_jal - 1) = 0\nis_lui * rd_data__0 = 0\nis_lui * (0 + rd_data__1 * 1 + rd_data__2 * 256 + rd_data__3 * 65536 - imm * 16) = 0\nis_jal * (rd_data__0 + (0 + rd_data__1 * 1 + rd_data__2 * 256 + rd_data__3 * 65536) * 256 - (inner__from_state__pc + 4)) = 0\nneeds_write * (needs_write - 1) = 0\n(1 - (is_lui + is_jal)) * needs_write = 0\nneeds_write * (inner__from_state__timestamp - inner__rd_aux_cols__base__prev_timestamp - 1 - (0 + inner__rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0 * 1 + inner__rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32JalrAdapterAir, Rv32JalrCoreAir>\nSymbolic machine using 28 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rs1_ptr\n  rs1_aux_cols__base__prev_timestamp\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1\n  rd_ptr\n  rd_aux_cols__base__prev_timestamp\n  rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0\n  rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1\n  rd_aux_cols__prev_data__0\n  rd_aux_cols__prev_data__1\n  rd_aux_cols__prev_data__2\n  rd_aux_cols__prev_data__3\n  needs_write\n  imm\n  rs1_data__0\n  rs1_data__1\n  rs1_data__2\n  rs1_data__3\n  rd_data__0\n  rd_data__1\n  rd_data__2\n  is_valid\n  to_pc_least_sig_bit\n  to_pc_limbs__0\n  to_pc_limbs__1\n  imm_sign\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-is_valid, args=[from_state__pc, 
from_state__timestamp]\nmult=is_valid, args=[to_pc_limbs__0 * 2 + to_pc_limbs__1 * 65536, from_state__timestamp + 2]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * is_valid, args=[1, rs1_ptr, rs1_data__0, rs1_data__1, rs1_data__2, rs1_data__3, rs1_aux_cols__base__prev_timestamp]\nmult=is_valid, args=[1, rs1_ptr, rs1_data__0, rs1_data__1, rs1_data__2, rs1_data__3, from_state__timestamp + 0]\nmult=2013265920 * needs_write, args=[1, rd_ptr, rd_aux_cols__prev_data__0, rd_aux_cols__prev_data__1, rd_aux_cols__prev_data__2, rd_aux_cols__prev_data__3, rd_aux_cols__base__prev_timestamp]\nmult=needs_write, args=[1, rd_ptr, from_state__pc + 4 - (0 + rd_data__0 * 256 + rd_data__1 * 65536 + rd_data__2 * 16777216), rd_data__0, rd_data__1, rd_data__2, from_state__timestamp + 1]\n\n// Bus 2 (PC_LOOKUP):\nmult=is_valid, args=[from_state__pc, 565 + 0, rd_ptr, rs1_ptr, imm, 1, 0, needs_write, imm_sign]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid, args=[rd_data__1, 8]\nmult=is_valid, args=[rd_data__2, 6]\nmult=is_valid, args=[to_pc_limbs__1, 14]\nmult=is_valid, args=[to_pc_limbs__0, 15]\nmult=is_valid, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=is_valid, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=needs_write, args=[rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=needs_write, args=[rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid, args=[from_state__pc + 4 - (0 + rd_data__0 * 256 + rd_data__1 * 65536 + rd_data__2 * 16777216), rd_data__0, 0, 0]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0\nimm_sign * (imm_sign - 1) = 0\nto_pc_least_sig_bit * (to_pc_least_sig_bit - 1) = 0\nis_valid * ((rs1_data__0 + rs1_data__1 * 256 + imm - to_pc_limbs__0 * 2 - to_pc_least_sig_bit) * 2013235201 * ((rs1_data__0 + rs1_data__1 * 256 + imm - to_pc_limbs__0 * 2 - to_pc_least_sig_bit) * 2013235201 - 1)) = 0\nis_valid * ((rs1_data__2 + rs1_data__3 * 256 + imm_sign * 
65535 + (rs1_data__0 + rs1_data__1 * 256 + imm - to_pc_limbs__0 * 2 - to_pc_least_sig_bit) * 2013235201 - to_pc_limbs__1) * 2013235201 * ((rs1_data__2 + rs1_data__3 * 256 + imm_sign * 65535 + (rs1_data__0 + rs1_data__1 * 256 + imm - to_pc_limbs__0 * 2 - to_pc_least_sig_bit) * 2013235201 - to_pc_limbs__1) * 2013235201 - 1)) = 0\nneeds_write * (needs_write - 1) = 0\n(1 - is_valid) * needs_write = 0\nis_valid * (from_state__timestamp + 0 - rs1_aux_cols__base__prev_timestamp - 1 - (0 + rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0 * 1 + rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nneeds_write * (from_state__timestamp + 1 - rd_aux_cols__base__prev_timestamp - 1 - (0 + rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0 * 1 + rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32LoadStoreAdapterAir, LoadSignExtendCoreAir<4, 8>\nSymbolic machine using 36 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rs1_ptr\n  rs1_data__0\n  rs1_data__1\n  rs1_data__2\n  rs1_data__3\n  rs1_aux_cols__base__prev_timestamp\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1\n  rd_rs2_ptr\n  read_data_aux__base__prev_timestamp\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__1\n  imm\n  imm_sign\n  mem_ptr_limbs__0\n  mem_ptr_limbs__1\n  mem_as\n  write_base_aux__prev_timestamp\n  write_base_aux__timestamp_lt_aux__lower_decomp__0\n  write_base_aux__timestamp_lt_aux__lower_decomp__1\n  needs_write\n  opcode_loadb_flag0\n  opcode_loadb_flag1\n  opcode_loadh_flag\n  shift_most_sig_bit\n  data_most_sig_bit\n  shifted_read_data__0\n  shifted_read_data__1\n  shifted_read_data__2\n  shifted_read_data__3\n  prev_data__0\n  prev_data__1\n  prev_data__2\n  prev_data__3\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag), 
args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[from_state__pc + 4, from_state__timestamp + 3]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag), args=[1, rs1_ptr, rs1_data__0, rs1_data__1, rs1_data__2, rs1_data__3, rs1_aux_cols__base__prev_timestamp]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[1, rs1_ptr, rs1_data__0, rs1_data__1, rs1_data__2, rs1_data__3, from_state__timestamp + 0]\nmult=2013265920 * (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag), args=[(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * mem_as + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * 1, (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * (mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * rd_rs2_ptr - (shift_most_sig_bit * 2 + opcode_loadb_flag1), shift_most_sig_bit * shifted_read_data__2 + (1 - shift_most_sig_bit) * shifted_read_data__0, shift_most_sig_bit * shifted_read_data__3 + (1 - shift_most_sig_bit) * shifted_read_data__1, shift_most_sig_bit * shifted_read_data__0 + (1 - shift_most_sig_bit) * shifted_read_data__2, shift_most_sig_bit * shifted_read_data__1 + (1 - shift_most_sig_bit) * shifted_read_data__3, read_data_aux__base__prev_timestamp]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * mem_as + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * 1, (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * (mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * rd_rs2_ptr - (shift_most_sig_bit * 2 + opcode_loadb_flag1), shift_most_sig_bit * shifted_read_data__2 + (1 - 
shift_most_sig_bit) * shifted_read_data__0, shift_most_sig_bit * shifted_read_data__3 + (1 - shift_most_sig_bit) * shifted_read_data__1, shift_most_sig_bit * shifted_read_data__0 + (1 - shift_most_sig_bit) * shifted_read_data__2, shift_most_sig_bit * shifted_read_data__1 + (1 - shift_most_sig_bit) * shifted_read_data__3, from_state__timestamp + 1]\nmult=2013265920 * needs_write, args=[(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * 1 + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * mem_as, (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * rd_rs2_ptr + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * (mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) - 0, prev_data__0, prev_data__1, prev_data__2, prev_data__3, write_base_aux__prev_timestamp]\nmult=needs_write, args=[(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * 1 + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * mem_as, (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * rd_rs2_ptr + (1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * (mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) - 0, (opcode_loadh_flag + opcode_loadb_flag0) * shifted_read_data__0 + opcode_loadb_flag1 * shifted_read_data__1, shifted_read_data__1 * opcode_loadh_flag + (opcode_loadb_flag0 + opcode_loadb_flag1) * (data_most_sig_bit * 255), data_most_sig_bit * 255, data_most_sig_bit * 255, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[from_state__pc, (opcode_loadb_flag0 + opcode_loadb_flag1) * 6 + opcode_loadh_flag * 7 + 528, rd_rs2_ptr, rs1_ptr, imm, 1, mem_as, needs_write, imm_sign]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[shifted_read_data__0 * opcode_loadb_flag0 + shifted_read_data__1 * opcode_loadb_flag1 + shifted_read_data__1 
* opcode_loadh_flag - data_most_sig_bit * 128, 7]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[(mem_ptr_limbs__0 - (shift_most_sig_bit * 2 + opcode_loadb_flag1 + 0)) * 1509949441, 14]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[mem_ptr_limbs__1, 13]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=needs_write, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0, 17]\nmult=needs_write, args=[write_base_aux__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Algebraic constraints:\nopcode_loadb_flag0 * (opcode_loadb_flag0 - 1) = 0\nopcode_loadb_flag1 * (opcode_loadb_flag1 - 1) = 0\nopcode_loadh_flag * (opcode_loadh_flag - 1) = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - 1) = 0\ndata_most_sig_bit * (data_most_sig_bit - 1) = 0\nshift_most_sig_bit * (shift_most_sig_bit - 1) = 0\nneeds_write * (needs_write - 1) = 0\nneeds_write * (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - 1) = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - needs_write) * (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - 1) = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - needs_write) * rd_rs2_ptr = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * (from_state__timestamp + 0 - rs1_aux_cols__base__prev_timestamp - 1 - (0 + 
rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0 * 1 + rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * ((rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 * ((rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 - 1)) = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * (imm_sign * (imm_sign - 1)) = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * ((rs1_data__2 + rs1_data__3 * 256 + imm_sign * 65535 + (rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 - mem_ptr_limbs__1) * 2013235201 * ((rs1_data__2 + rs1_data__3 * 256 + imm_sign * 65535 + (rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 - mem_ptr_limbs__1) * 2013235201 - 1)) = 0\n(mem_as - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * 2) * (mem_as - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * 2 - 1) * (mem_as - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * 2 - 2) = 0\n(1 - (0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag)) * mem_as = 0\n(0 + opcode_loadb_flag0 + opcode_loadb_flag1 + opcode_loadh_flag) * (from_state__timestamp + 1 - read_data_aux__base__prev_timestamp - 1 - (0 + read_data_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + read_data_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nneeds_write * (from_state__timestamp + 2 - write_base_aux__prev_timestamp - 1 - (0 + write_base_aux__timestamp_lt_aux__lower_decomp__0 * 1 + write_base_aux__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32LoadStoreAdapterAir, LoadStoreCoreAir<4>\nSymbolic machine using 41 unique main 
columns:\n  from_state__pc\n  from_state__timestamp\n  rs1_ptr\n  rs1_data__0\n  rs1_data__1\n  rs1_data__2\n  rs1_data__3\n  rs1_aux_cols__base__prev_timestamp\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0\n  rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1\n  rd_rs2_ptr\n  read_data_aux__base__prev_timestamp\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__0\n  read_data_aux__base__timestamp_lt_aux__lower_decomp__1\n  imm\n  imm_sign\n  mem_ptr_limbs__0\n  mem_ptr_limbs__1\n  mem_as\n  write_base_aux__prev_timestamp\n  write_base_aux__timestamp_lt_aux__lower_decomp__0\n  write_base_aux__timestamp_lt_aux__lower_decomp__1\n  needs_write\n  flags__0\n  flags__1\n  flags__2\n  flags__3\n  is_valid\n  is_load\n  read_data__0\n  read_data__1\n  read_data__2\n  read_data__3\n  prev_data__0\n  prev_data__1\n  prev_data__2\n  prev_data__3\n  write_data__0\n  write_data__1\n  write_data__2\n  write_data__3\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-is_valid, args=[from_state__pc, from_state__timestamp]\nmult=is_valid, args=[from_state__pc + 4, from_state__timestamp + 3]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * is_valid, args=[1, rs1_ptr, rs1_data__0, rs1_data__1, rs1_data__2, rs1_data__3, rs1_aux_cols__base__prev_timestamp]\nmult=is_valid, args=[1, rs1_ptr, rs1_data__0, rs1_data__1, rs1_data__2, rs1_data__3, from_state__timestamp + 0]\nmult=2013265920 * is_valid, args=[is_load * mem_as + (1 - is_load) * 1, is_load * (mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) + (1 - is_load) * rd_rs2_ptr - ((0 + flags__0 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 1 + (0 + flags__2 * (flags__2 - 1) * 1006632961 + flags__1 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 2 + (0 + flags__2 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 3), read_data__0, read_data__1, read_data__2, read_data__3, read_data_aux__base__prev_timestamp]\nmult=is_valid, args=[is_load * mem_as + (1 - is_load) * 1, is_load * 
(mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) + (1 - is_load) * rd_rs2_ptr - ((0 + flags__0 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 1 + (0 + flags__2 * (flags__2 - 1) * 1006632961 + flags__1 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 2 + (0 + flags__2 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 3), read_data__0, read_data__1, read_data__2, read_data__3, from_state__timestamp + 1]\nmult=2013265920 * needs_write, args=[is_load * 1 + (1 - is_load) * mem_as, is_load * rd_rs2_ptr + (1 - is_load) * (mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) - ((0 + flags__1 * flags__2) * 1 + (0 + flags__0 * flags__2 + flags__1 * flags__3) * 2 + (0 + flags__2 * flags__3) * 3), prev_data__0, prev_data__1, prev_data__2, prev_data__3, write_base_aux__prev_timestamp]\nmult=needs_write, args=[is_load * 1 + (1 - is_load) * mem_as, is_load * rd_rs2_ptr + (1 - is_load) * (mem_ptr_limbs__0 + mem_ptr_limbs__1 * 65536) - ((0 + flags__1 * flags__2) * 1 + (0 + flags__0 * flags__2 + flags__1 * flags__3) * 2 + (0 + flags__2 * flags__3) * 3), write_data__0, write_data__1, write_data__2, write_data__3, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=is_valid, args=[from_state__pc, 528 + ((0 + flags__0 * (flags__0 - 1) * 1006632961) * 0 + (0 + flags__1 * (flags__1 - 1) * 1006632961 + flags__2 * (flags__2 - 1) * 1006632961) * 2 + (0 + flags__3 * (flags__3 - 1) * 1006632961 + flags__0 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920 + flags__1 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920 + flags__2 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 1 + (0 + flags__3 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 3 + (0 + flags__0 * flags__1 + flags__0 * flags__2) * 4 + (0 + flags__0 * flags__3 + flags__1 * flags__2 + flags__1 * flags__3 + flags__2 * flags__3) * 5), rd_rs2_ptr, rs1_ptr, imm, 1, mem_as, needs_write, imm_sign]\n\n// Bus 
3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=is_valid, args=[rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=is_valid, args=[(mem_ptr_limbs__0 - ((0 + flags__0 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 1 + (0 + flags__2 * (flags__2 - 1) * 1006632961 + flags__1 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 2 + (0 + flags__2 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * 3 + ((0 + flags__1 * flags__2) * 1 + (0 + flags__0 * flags__2 + flags__1 * flags__3) * 2 + (0 + flags__2 * flags__3) * 3))) * 1509949441, 14]\nmult=is_valid, args=[mem_ptr_limbs__1, 13]\nmult=is_valid, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=is_valid, args=[read_data_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=needs_write, args=[write_base_aux__timestamp_lt_aux__lower_decomp__0, 17]\nmult=needs_write, args=[write_base_aux__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0\nflags__0 * ((flags__0 - 1) * (flags__0 - 2)) = 0\nflags__1 * ((flags__1 - 1) * (flags__1 - 2)) = 0\nflags__2 * ((flags__2 - 1) * (flags__2 - 2)) = 0\nflags__3 * ((flags__3 - 1) * (flags__3 - 2)) = 0\n(0 + flags__0 + flags__1 + flags__2 + flags__3) * ((0 + flags__0 + flags__1 + flags__2 + flags__3 - 1) * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2)) = 0\n(0 + flags__0 + flags__1 + flags__2 + flags__3 - 1) * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * is_valid = 0\nis_load - (0 + flags__0 * (flags__0 - 1) * 1006632961 + flags__1 * (flags__1 - 1) * 1006632961 + flags__2 * (flags__2 - 1) * 1006632961 + flags__3 * (flags__3 - 1) * 1006632961 + flags__0 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920 + flags__1 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920 + flags__2 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 
2013265920) = 0\nis_load * (is_valid - 1) = 0\nwrite_data__0 - ((0 + flags__0 * (flags__0 - 1) * 1006632961 + flags__1 * (flags__1 - 1) * 1006632961 + flags__3 * (flags__3 - 1) * 1006632961) * read_data__0 + (0 + flags__0 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * read_data__1 + (0 + flags__2 * (flags__2 - 1) * 1006632961 + flags__1 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * read_data__2 + (0 + flags__2 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * read_data__3 + ((0 + flags__3 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920 + flags__0 * flags__1 + flags__0 * flags__3) * read_data__0 + (0 + flags__0 * flags__2 + flags__1 * flags__2 + flags__1 * flags__3 + flags__2 * flags__3) * prev_data__0)) = 0\nwrite_data__1 - ((0 + flags__0 * (flags__0 - 1) * 1006632961 + flags__1 * (flags__1 - 1) * 1006632961) * read_data__1 + (0 + flags__2 * (flags__2 - 1) * 1006632961) * read_data__3 + ((0 + flags__1 * flags__2) * read_data__0 + (0 + flags__3 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920 + flags__0 * flags__1) * read_data__1 + (0 + flags__0 * flags__2 + flags__0 * flags__3 + flags__1 * flags__3 + flags__2 * flags__3) * prev_data__1)) = 0\nwrite_data__2 - ((0 + flags__0 * (flags__0 - 1) * 1006632961) * read_data__2 + ((0 + flags__0 * flags__2 + flags__1 * flags__3) * read_data__0 + (0 + flags__3 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * read_data__2 + (0 + flags__0 * flags__1 + flags__0 * flags__3 + flags__1 * flags__2 + flags__2 * flags__3) * prev_data__2)) = 0\nwrite_data__3 - ((0 + flags__0 * (flags__0 - 1) * 1006632961) * read_data__3 + ((0 + flags__2 * flags__3) * read_data__0 + (0 + flags__0 * flags__2) * read_data__1 + (0 + flags__3 * (0 + flags__0 + flags__1 + flags__2 + flags__3 - 2) * 2013265920) * read_data__3 + (0 + flags__0 * flags__1 + flags__0 * flags__3 + flags__1 * flags__2 + flags__1 * flags__3) * prev_data__3)) = 
0\nneeds_write * (needs_write - 1) = 0\nneeds_write * (is_valid - 1) = 0\n(is_valid - needs_write) * (is_load - 1) = 0\n(is_valid - needs_write) * rd_rs2_ptr = 0\nis_valid * (from_state__timestamp + 0 - rs1_aux_cols__base__prev_timestamp - 1 - (0 + rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__0 * 1 + rs1_aux_cols__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nis_valid * ((rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 * ((rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 - 1)) = 0\nis_valid * (imm_sign * (imm_sign - 1)) = 0\nis_valid * ((rs1_data__2 + rs1_data__3 * 256 + imm_sign * 65535 + (rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 - mem_ptr_limbs__1) * 2013235201 * ((rs1_data__2 + rs1_data__3 * 256 + imm_sign * 65535 + (rs1_data__0 + rs1_data__1 * 256 + imm - mem_ptr_limbs__0) * 2013235201 - mem_ptr_limbs__1) * 2013235201 - 1)) = 0\n(mem_as - (is_valid - is_load) * 2) * (mem_as - (is_valid - is_load) * 2 - 1) * (mem_as - (is_valid - is_load) * 2 - 2) = 0\n(1 - is_valid) * mem_as = 0\nis_valid * (from_state__timestamp + 1 - read_data_aux__base__prev_timestamp - 1 - (0 + read_data_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + read_data_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nneeds_write * (from_state__timestamp + 2 - write_base_aux__prev_timestamp - 1 - (0 + write_base_aux__timestamp_lt_aux__lower_decomp__0 * 1 + write_base_aux__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32MultAdapterAir, DivRemCoreAir<4, 8>\nSymbolic machine using 59 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rd_ptr\n  rs1_ptr\n  rs2_ptr\n  reads_aux__0__base__prev_timestamp\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  
reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__base__prev_timestamp\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__prev_data__0\n  writes_aux__prev_data__1\n  writes_aux__prev_data__2\n  writes_aux__prev_data__3\n  b__0\n  b__1\n  b__2\n  b__3\n  c__0\n  c__1\n  c__2\n  c__3\n  q__0\n  q__1\n  q__2\n  q__3\n  r__0\n  r__1\n  r__2\n  r__3\n  zero_divisor\n  r_zero\n  b_sign\n  c_sign\n  q_sign\n  sign_xor\n  c_sum_inv\n  r_sum_inv\n  r_prime__0\n  r_prime__1\n  r_prime__2\n  r_prime__3\n  r_inv__0\n  r_inv__1\n  r_inv__2\n  r_inv__3\n  lt_marker__0\n  lt_marker__1\n  lt_marker__2\n  lt_marker__3\n  lt_diff\n  opcode_div_flag\n  opcode_divu_flag\n  opcode_rem_flag\n  opcode_remu_flag\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag), args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[from_state__pc + 4, from_state__timestamp + 3]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * (0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag), args=[1, rs1_ptr, b__0, b__1, b__2, b__3, reads_aux__0__base__prev_timestamp]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[1, rs1_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 0]\nmult=2013265920 * (0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag), args=[1, rs2_ptr, c__0, c__1, c__2, c__3, reads_aux__1__base__prev_timestamp]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[1, rs2_ptr, c__0, c__1, c__2, c__3, from_state__timestamp + 1]\nmult=2013265920 * (0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag), args=[1, rd_ptr, writes_aux__prev_data__0, writes_aux__prev_data__1, writes_aux__prev_data__2, writes_aux__prev_data__3, 
writes_aux__base__prev_timestamp]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[1, rd_ptr, (opcode_div_flag + opcode_divu_flag) * q__0 + (1 - (opcode_div_flag + opcode_divu_flag)) * r__0, (opcode_div_flag + opcode_divu_flag) * q__1 + (1 - (opcode_div_flag + opcode_divu_flag)) * r__1, (opcode_div_flag + opcode_divu_flag) * q__2 + (1 - (opcode_div_flag + opcode_divu_flag)) * r__2, (opcode_div_flag + opcode_divu_flag) * q__3 + (1 - (opcode_div_flag + opcode_divu_flag)) * r__3, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[from_state__pc, 0 + opcode_div_flag * 0 + opcode_divu_flag * 1 + opcode_rem_flag * 2 + opcode_remu_flag * 3 + 596, rd_ptr, rs1_ptr, rs2_ptr, 1, 0, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=opcode_div_flag + opcode_rem_flag, args=[2 * (b__3 - b_sign * 128), 2 * (c__3 - c_sign * 128), 0, 0]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - (zero_divisor + r_zero), args=[lt_diff - 
1, 0, 0, 0]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[q__0, (0 + (r__0 + c__0 * q__0) - b__0) * 2005401601]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[q__1, ((0 + (r__0 + c__0 * q__0) - b__0) * 2005401601 + (r__1 + c__0 * q__1 + c__1 * q__0) - b__1) * 2005401601]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[q__2, (((0 + (r__0 + c__0 * q__0) - b__0) * 2005401601 + (r__1 + c__0 * q__1 + c__1 * q__0) - b__1) * 2005401601 + (r__2 + c__0 * q__2 + c__1 * q__1 + c__2 * q__0) - b__2) * 2005401601]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[q__3, ((((0 + (r__0 + c__0 * q__0) - b__0) * 2005401601 + (r__1 + c__0 * q__1 + c__1 * q__0) - b__1) * 2005401601 + (r__2 + c__0 * q__2 + c__1 * q__1 + c__2 * q__0) - b__2) * 2005401601 + (r__3 + c__0 * q__3 + c__1 * q__2 + c__2 * q__1 + c__3 * q__0) - b__3) * 2005401601]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[r__0, (((((0 + (r__0 + c__0 * q__0) - b__0) * 2005401601 + (r__1 + c__0 * q__1 + c__1 * q__0) - b__1) * 2005401601 + (r__2 + c__0 * q__2 + c__1 * q__1 + c__2 * q__0) - b__2) * 2005401601 + (r__3 + c__0 * q__3 + c__1 * q__2 + c__2 * q__1 + c__3 * q__0) - b__3) * 2005401601 + (0 + c__1 * q__3 + c__2 * q__2 + c__3 * q__1) + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[r__1, ((((((0 + (r__0 + c__0 * q__0) - b__0) * 2005401601 + (r__1 + c__0 * q__1 + c__1 * q__0) - b__1) * 2005401601 + (r__2 + c__0 * q__2 + c__1 * q__1 + c__2 * q__0) - b__2) * 2005401601 + (r__3 + c__0 * q__3 + c__1 * q__2 + c__2 * q__1 + c__3 * q__0) - b__3) * 2005401601 + (0 + c__1 * q__3 + c__2 * q__2 + c__3 * q__1) + (0 + c__0 * (q_sign * 
255) + q__0 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601 + (0 + c__2 * q__3 + c__3 * q__2) + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255) + c__1 * (q_sign * 255) + q__1 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[r__2, (((((((0 + (r__0 + c__0 * q__0) - b__0) * 2005401601 + (r__1 + c__0 * q__1 + c__1 * q__0) - b__1) * 2005401601 + (r__2 + c__0 * q__2 + c__1 * q__1 + c__2 * q__0) - b__2) * 2005401601 + (r__3 + c__0 * q__3 + c__1 * q__2 + c__2 * q__1 + c__3 * q__0) - b__3) * 2005401601 + (0 + c__1 * q__3 + c__2 * q__2 + c__3 * q__1) + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601 + (0 + c__2 * q__3 + c__3 * q__2) + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255) + c__1 * (q_sign * 255) + q__1 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601 + (0 + c__3 * q__3) + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255) + c__1 * (q_sign * 255) + q__1 * (c_sign * 255) + c__2 * (q_sign * 255) + q__2 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601]\nmult=0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag, args=[r__3, ((((((((0 + (r__0 + c__0 * q__0) - b__0) * 2005401601 + (r__1 + c__0 * q__1 + c__1 * q__0) - b__1) * 2005401601 + (r__2 + c__0 * q__2 + c__1 * q__1 + c__2 * q__0) - b__2) * 2005401601 + (r__3 + c__0 * q__3 + c__1 * q__2 + c__2 * q__1 + c__3 * q__0) - b__3) * 2005401601 + (0 + c__1 * q__3 + c__2 * q__2 + c__3 * q__1) + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601 + (0 + c__2 * q__3 + c__3 * q__2) + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255) + c__1 * (q_sign * 255) + q__1 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601 + (0 + c__3 * q__3) + (0 + 
c__0 * (q_sign * 255) + q__0 * (c_sign * 255) + c__1 * (q_sign * 255) + q__1 * (c_sign * 255) + c__2 * (q_sign * 255) + q__2 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601 + 0 + (0 + c__0 * (q_sign * 255) + q__0 * (c_sign * 255) + c__1 * (q_sign * 255) + q__1 * (c_sign * 255) + c__2 * (q_sign * 255) + q__2 * (c_sign * 255) + c__3 * (q_sign * 255) + q__3 * (c_sign * 255)) + (1 - r_zero) * (b_sign * 255) - b_sign * 255) * 2005401601]\n\n// Algebraic constraints:\nopcode_div_flag * (opcode_div_flag - 1) = 0\nopcode_divu_flag * (opcode_divu_flag - 1) = 0\nopcode_rem_flag * (opcode_rem_flag - 1) = 0\nopcode_remu_flag * (opcode_remu_flag - 1) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag) * (0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - 1) = 0\n(zero_divisor + r_zero) * (zero_divisor + r_zero - 1) = 0\nzero_divisor * (zero_divisor - 1) = 0\nzero_divisor * c__0 = 0\nzero_divisor * (q__0 - 255) = 0\nzero_divisor * c__1 = 0\nzero_divisor * (q__1 - 255) = 0\nzero_divisor * c__2 = 0\nzero_divisor * (q__2 - 255) = 0\nzero_divisor * c__3 = 0\nzero_divisor * (q__3 - 255) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - zero_divisor) * (0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - zero_divisor - 1) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - zero_divisor) * ((0 + c__0 + c__1 + c__2 + c__3) * c_sum_inv - 1) = 0\nr_zero * (r_zero - 1) = 0\nr_zero * r__0 = 0\nr_zero * r__1 = 0\nr_zero * r__2 = 0\nr_zero * r__3 = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - (zero_divisor + r_zero)) * (0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - (zero_divisor + r_zero) - 1) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag - (zero_divisor + r_zero)) * ((0 + r__0 + r__1 + r__2 + r__3) * r_sum_inv 
- 1) = 0\nb_sign * (b_sign - 1) = 0\nc_sign * (c_sign - 1) = 0\n(1 - (opcode_div_flag + opcode_rem_flag)) * b_sign = 0\n(1 - (opcode_div_flag + opcode_rem_flag)) * c_sign = 0\nb_sign + c_sign - 2 * b_sign * c_sign - sign_xor = 0\nq_sign * (q_sign - 1) = 0\n(0 + q__0 + q__1 + q__2 + q__3) * ((1 - zero_divisor) * (q_sign - sign_xor)) = 0\n(q_sign - sign_xor) * ((1 - zero_divisor) * q_sign) = 0\n(1 - sign_xor) * (r__0 - r_prime__0) = 0\nsign_xor * (((0 + r__0 + r_prime__0) * 2005401601 - 0) * ((0 + r__0 + r_prime__0) * 2005401601 - 1)) = 0\nsign_xor * ((r_prime__0 - 256) * r_inv__0 - 1) = 0\nsign_xor * ((1 - (0 + r__0 + r_prime__0) * 2005401601) * r_prime__0) = 0\n(1 - sign_xor) * (r__1 - r_prime__1) = 0\nsign_xor * ((((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 - (0 + r__0 + r_prime__0) * 2005401601) * (((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 - 1)) = 0\nsign_xor * ((r_prime__1 - 256) * r_inv__1 - 1) = 0\nsign_xor * ((1 - ((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601) * r_prime__1) = 0\n(1 - sign_xor) * (r__2 - r_prime__2) = 0\nsign_xor * (((((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 + r__2 + r_prime__2) * 2005401601 - ((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601) * ((((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 + r__2 + r_prime__2) * 2005401601 - 1)) = 0\nsign_xor * ((r_prime__2 - 256) * r_inv__2 - 1) = 0\nsign_xor * ((1 - (((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 + r__2 + r_prime__2) * 2005401601) * r_prime__2) = 0\n(1 - sign_xor) * (r__3 - r_prime__3) = 0\nsign_xor * ((((((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 + r__2 + r_prime__2) * 2005401601 + r__3 + r_prime__3) * 2005401601 - (((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 + r__2 + r_prime__2) * 2005401601) * (((((0 + r__0 + r_prime__0) * 2005401601 + r__1 + 
r_prime__1) * 2005401601 + r__2 + r_prime__2) * 2005401601 + r__3 + r_prime__3) * 2005401601 - 1)) = 0\nsign_xor * ((r_prime__3 - 256) * r_inv__3 - 1) = 0\nsign_xor * ((1 - ((((0 + r__0 + r_prime__0) * 2005401601 + r__1 + r_prime__1) * 2005401601 + r__2 + r_prime__2) * 2005401601 + r__3 + r_prime__3) * 2005401601) * r_prime__3) = 0\nlt_marker__3 * (lt_marker__3 - 1) = 0\n(1 - (zero_divisor + r_zero + lt_marker__3)) * (r_prime__3 * (2 * c_sign - 1) + c__3 * (1 - 2 * c_sign)) = 0\nlt_marker__3 * (lt_diff - (r_prime__3 * (2 * c_sign - 1) + c__3 * (1 - 2 * c_sign))) = 0\nlt_marker__2 * (lt_marker__2 - 1) = 0\n(1 - (zero_divisor + r_zero + lt_marker__3 + lt_marker__2)) * (r_prime__2 * (2 * c_sign - 1) + c__2 * (1 - 2 * c_sign)) = 0\nlt_marker__2 * (lt_diff - (r_prime__2 * (2 * c_sign - 1) + c__2 * (1 - 2 * c_sign))) = 0\nlt_marker__1 * (lt_marker__1 - 1) = 0\n(1 - (zero_divisor + r_zero + lt_marker__3 + lt_marker__2 + lt_marker__1)) * (r_prime__1 * (2 * c_sign - 1) + c__1 * (1 - 2 * c_sign)) = 0\nlt_marker__1 * (lt_diff - (r_prime__1 * (2 * c_sign - 1) + c__1 * (1 - 2 * c_sign))) = 0\nlt_marker__0 * (lt_marker__0 - 1) = 0\n(1 - (zero_divisor + r_zero + lt_marker__3 + lt_marker__2 + lt_marker__1 + lt_marker__0)) * (r_prime__0 * (2 * c_sign - 1) + c__0 * (1 - 2 * c_sign)) = 0\nlt_marker__0 * (lt_diff - (r_prime__0 * (2 * c_sign - 1) + c__0 * (1 - 2 * c_sign))) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag) * (zero_divisor + r_zero + lt_marker__3 + lt_marker__2 + lt_marker__1 + lt_marker__0 - 1) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag) * (from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag) * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 - (0 + 
reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_div_flag + opcode_divu_flag + opcode_rem_flag + opcode_remu_flag) * (from_state__timestamp + 2 - writes_aux__base__prev_timestamp - 1 - (0 + writes_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + writes_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32MultAdapterAir, MulHCoreAir<4, 8>\nSymbolic machine using 39 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rd_ptr\n  rs1_ptr\n  rs2_ptr\n  reads_aux__0__base__prev_timestamp\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__base__prev_timestamp\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__prev_data__0\n  writes_aux__prev_data__1\n  writes_aux__prev_data__2\n  writes_aux__prev_data__3\n  a__0\n  a__1\n  a__2\n  a__3\n  b__0\n  b__1\n  b__2\n  b__3\n  c__0\n  c__1\n  c__2\n  c__3\n  a_mul__0\n  a_mul__1\n  a_mul__2\n  a_mul__3\n  b_ext\n  c_ext\n  opcode_mulh_flag\n  opcode_mulhsu_flag\n  opcode_mulhu_flag\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-(0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag), args=[from_state__pc, from_state__timestamp]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[from_state__pc + 4, from_state__timestamp + 3]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * (0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag), args=[1, rs1_ptr, b__0, b__1, b__2, b__3, reads_aux__0__base__prev_timestamp]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[1, rs1_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 0]\nmult=2013265920 * (0 + 
opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag), args=[1, rs2_ptr, c__0, c__1, c__2, c__3, reads_aux__1__base__prev_timestamp]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[1, rs2_ptr, c__0, c__1, c__2, c__3, from_state__timestamp + 1]\nmult=2013265920 * (0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag), args=[1, rd_ptr, writes_aux__prev_data__0, writes_aux__prev_data__1, writes_aux__prev_data__2, writes_aux__prev_data__3, writes_aux__base__prev_timestamp]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[1, rd_ptr, a__0, a__1, a__2, a__3, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[from_state__pc, 593 + (0 + opcode_mulh_flag * 0 + opcode_mulhsu_flag * 1 + opcode_mulhu_flag * 2), rd_ptr, rs1_ptr, rs2_ptr, 1, 0, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=opcode_mulh_flag + opcode_mulhsu_flag, args=[2 * (b__3 - b_ext * 465814468 * 128), (opcode_mulh_flag + 1) * (c__3 - c_ext * 465814468 * 128), 0, 0]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=0 + opcode_mulh_flag + 
opcode_mulhsu_flag + opcode_mulhu_flag, args=[a_mul__0, 2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0)]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[a_mul__1, 2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0) + (0 + b__0 * c__1 + b__1 * c__0) - a_mul__1)]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[a_mul__2, 2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0) + (0 + b__0 * c__1 + b__1 * c__0) - a_mul__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a_mul__2)]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[a_mul__3, 2005401601 * (2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0) + (0 + b__0 * c__1 + b__1 * c__0) - a_mul__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a_mul__2) + (0 + b__0 * c__3 + b__1 * c__2 + b__2 * c__1 + b__3 * c__0) - a_mul__3)]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[a__0, 2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0) + (0 + b__0 * c__1 + b__1 * c__0) - a_mul__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a_mul__2) + (0 + b__0 * c__3 + b__1 * c__2 + b__2 * c__1 + b__3 * c__0) - a_mul__3) + (0 + b__1 * c__3 + b__2 * c__2 + b__3 * c__1) + (0 + b__0 * c_ext + c__0 * b_ext) - a__0)]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[a__1, 2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0) + (0 + b__0 * c__1 + b__1 * c__0) - a_mul__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a_mul__2) + (0 + b__0 * c__3 + b__1 * c__2 + b__2 * c__1 + b__3 * c__0) - a_mul__3) + (0 + b__1 * c__3 + b__2 * c__2 + b__3 * c__1) + (0 + b__0 * c_ext + c__0 * b_ext) - a__0) + (0 + b__2 * c__3 + b__3 * c__2) + (0 + b__0 * c_ext + c__0 * b_ext + b__1 * c_ext + c__1 * b_ext) - a__1)]\nmult=0 + opcode_mulh_flag + 
opcode_mulhsu_flag + opcode_mulhu_flag, args=[a__2, 2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0) + (0 + b__0 * c__1 + b__1 * c__0) - a_mul__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a_mul__2) + (0 + b__0 * c__3 + b__1 * c__2 + b__2 * c__1 + b__3 * c__0) - a_mul__3) + (0 + b__1 * c__3 + b__2 * c__2 + b__3 * c__1) + (0 + b__0 * c_ext + c__0 * b_ext) - a__0) + (0 + b__2 * c__3 + b__3 * c__2) + (0 + b__0 * c_ext + c__0 * b_ext + b__1 * c_ext + c__1 * b_ext) - a__1) + (0 + b__3 * c__3) + (0 + b__0 * c_ext + c__0 * b_ext + b__1 * c_ext + c__1 * b_ext + b__2 * c_ext + c__2 * b_ext) - a__2)]\nmult=0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag, args=[a__3, 2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a_mul__0) + (0 + b__0 * c__1 + b__1 * c__0) - a_mul__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a_mul__2) + (0 + b__0 * c__3 + b__1 * c__2 + b__2 * c__1 + b__3 * c__0) - a_mul__3) + (0 + b__1 * c__3 + b__2 * c__2 + b__3 * c__1) + (0 + b__0 * c_ext + c__0 * b_ext) - a__0) + (0 + b__2 * c__3 + b__3 * c__2) + (0 + b__0 * c_ext + c__0 * b_ext + b__1 * c_ext + c__1 * b_ext) - a__1) + (0 + b__3 * c__3) + (0 + b__0 * c_ext + c__0 * b_ext + b__1 * c_ext + c__1 * b_ext + b__2 * c_ext + c__2 * b_ext) - a__2) + 0 + (0 + b__0 * c_ext + c__0 * b_ext + b__1 * c_ext + c__1 * b_ext + b__2 * c_ext + c__2 * b_ext + b__3 * c_ext + c__3 * b_ext) - a__3)]\n\n// Algebraic constraints:\nopcode_mulh_flag * (opcode_mulh_flag - 1) = 0\nopcode_mulhsu_flag * (opcode_mulhsu_flag - 1) = 0\nopcode_mulhu_flag * (opcode_mulhu_flag - 1) = 0\n(0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag) * (0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag - 1) = 0\nb_ext * 465814468 * (b_ext * 465814468 - 1) = 0\nc_ext * 465814468 * (c_ext * 465814468 - 1) = 0\nopcode_mulhu_flag * (b_ext 
* 465814468) = 0\n(opcode_mulhu_flag + opcode_mulhsu_flag) * (c_ext * 465814468) = 0\n(0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag) * (from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag) * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 - (0 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n(0 + opcode_mulh_flag + opcode_mulhsu_flag + opcode_mulhu_flag) * (from_state__timestamp + 2 - writes_aux__base__prev_timestamp - 1 - (0 + writes_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + writes_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32MultAdapterAir, MultiplicationCoreAir<4, 8>\nSymbolic machine using 31 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rd_ptr\n  rs1_ptr\n  rs2_ptr\n  reads_aux__0__base__prev_timestamp\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__0__base__timestamp_lt_aux__lower_decomp__1\n  reads_aux__1__base__prev_timestamp\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__0\n  reads_aux__1__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__base__prev_timestamp\n  writes_aux__base__timestamp_lt_aux__lower_decomp__0\n  writes_aux__base__timestamp_lt_aux__lower_decomp__1\n  writes_aux__prev_data__0\n  writes_aux__prev_data__1\n  writes_aux__prev_data__2\n  writes_aux__prev_data__3\n  a__0\n  a__1\n  a__2\n  a__3\n  b__0\n  b__1\n  b__2\n  b__3\n  c__0\n  c__1\n  c__2\n  c__3\n  is_valid\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-is_valid, args=[from_state__pc, from_state__timestamp]\nmult=is_valid, args=[from_state__pc + 4, from_state__timestamp + 3]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * is_valid, args=[1, rs1_ptr, b__0, b__1, 
b__2, b__3, reads_aux__0__base__prev_timestamp]\nmult=is_valid, args=[1, rs1_ptr, b__0, b__1, b__2, b__3, from_state__timestamp + 0]\nmult=2013265920 * is_valid, args=[1, rs2_ptr, c__0, c__1, c__2, c__3, reads_aux__1__base__prev_timestamp]\nmult=is_valid, args=[1, rs2_ptr, c__0, c__1, c__2, c__3, from_state__timestamp + 1]\nmult=2013265920 * is_valid, args=[1, rd_ptr, writes_aux__prev_data__0, writes_aux__prev_data__1, writes_aux__prev_data__2, writes_aux__prev_data__3, writes_aux__base__prev_timestamp]\nmult=is_valid, args=[1, rd_ptr, a__0, a__1, a__2, a__3, from_state__timestamp + 2]\n\n// Bus 2 (PC_LOOKUP):\nmult=is_valid, args=[from_state__pc, 592 + 0, rd_ptr, rs1_ptr, rs2_ptr, 1, 0, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=is_valid, args=[reads_aux__0__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=is_valid, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=is_valid, args=[reads_aux__1__base__timestamp_lt_aux__lower_decomp__1, 12]\nmult=is_valid, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__0, 17]\nmult=is_valid, args=[writes_aux__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 7 (TUPLE_RANGE_CHECKER_256_2048):\nmult=is_valid, args=[a__0, 2005401601 * (0 + (0 + b__0 * c__0) - a__0)]\nmult=is_valid, args=[a__1, 2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a__0) + (0 + b__0 * c__1 + b__1 * c__0) - a__1)]\nmult=is_valid, args=[a__2, 2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a__0) + (0 + b__0 * c__1 + b__1 * c__0) - a__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a__2)]\nmult=is_valid, args=[a__3, 2005401601 * (2005401601 * (2005401601 * (2005401601 * (0 + (0 + b__0 * c__0) - a__0) + (0 + b__0 * c__1 + b__1 * c__0) - a__1) + (0 + b__0 * c__2 + b__1 * c__1 + b__2 * c__0) - a__2) + (0 + b__0 * c__3 + b__1 * c__2 + b__2 * c__1 + b__3 * c__0) - a__3)]\n\n// Algebraic constraints:\nis_valid * 
(is_valid - 1) = 0\nis_valid * (from_state__timestamp + 0 - reads_aux__0__base__prev_timestamp - 1 - (0 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__0__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nis_valid * (from_state__timestamp + 1 - reads_aux__1__base__prev_timestamp - 1 - (0 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__0 * 1 + reads_aux__1__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\nis_valid * (from_state__timestamp + 2 - writes_aux__base__prev_timestamp - 1 - (0 + writes_aux__base__timestamp_lt_aux__lower_decomp__0 * 1 + writes_aux__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0\n\n\n# VmAirWrapper<Rv32RdWriteAdapterAir, Rv32AuipcCoreAir>\nSymbolic machine using 20 unique main columns:\n  from_state__pc\n  from_state__timestamp\n  rd_ptr\n  rd_aux_cols__base__prev_timestamp\n  rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0\n  rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1\n  rd_aux_cols__prev_data__0\n  rd_aux_cols__prev_data__1\n  rd_aux_cols__prev_data__2\n  rd_aux_cols__prev_data__3\n  is_valid\n  imm_limbs__0\n  imm_limbs__1\n  imm_limbs__2\n  pc_limbs__0\n  pc_limbs__1\n  rd_data__0\n  rd_data__1\n  rd_data__2\n  rd_data__3\n\n// Bus 0 (EXECUTION_BRIDGE):\nmult=-is_valid, args=[from_state__pc, from_state__timestamp]\nmult=is_valid, args=[from_state__pc + 4, from_state__timestamp + 1]\n\n// Bus 1 (MEMORY):\nmult=2013265920 * is_valid, args=[1, rd_ptr, rd_aux_cols__prev_data__0, rd_aux_cols__prev_data__1, rd_aux_cols__prev_data__2, rd_aux_cols__prev_data__3, rd_aux_cols__base__prev_timestamp]\nmult=is_valid, args=[1, rd_ptr, rd_data__0, rd_data__1, rd_data__2, rd_data__3, from_state__timestamp]\n\n// Bus 2 (PC_LOOKUP):\nmult=is_valid, args=[from_state__pc, 576 + 0, rd_ptr, 0, 0 + imm_limbs__0 * 1 + imm_limbs__1 * 256 + imm_limbs__2 * 65536, 1, 0, 0, 0]\n\n// Bus 3 (VARIABLE_RANGE_CHECKER):\nmult=is_valid, args=[rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0, 
17]\nmult=is_valid, args=[rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1, 12]\n\n// Bus 6 (BITWISE_LOOKUP):\nmult=is_valid, args=[rd_data__0, rd_data__1, 0, 0]\nmult=is_valid, args=[rd_data__2, rd_data__3, 0, 0]\nmult=is_valid, args=[imm_limbs__0, imm_limbs__1, 0, 0]\nmult=is_valid, args=[imm_limbs__2, pc_limbs__0, 0, 0]\nmult=is_valid, args=[pc_limbs__1, (from_state__pc - (rd_data__0 + (0 + pc_limbs__0 * 256 + pc_limbs__1 * 65536))) * 2013265801 * 4, 0, 0]\n\n// Algebraic constraints:\nis_valid * (is_valid - 1) = 0\nis_valid * (2005401601 * (pc_limbs__0 + imm_limbs__0 - rd_data__1 + 0) * (2005401601 * (pc_limbs__0 + imm_limbs__0 - rd_data__1 + 0) - 1)) = 0\nis_valid * (2005401601 * (pc_limbs__1 + imm_limbs__1 - rd_data__2 + 2005401601 * (pc_limbs__0 + imm_limbs__0 - rd_data__1 + 0)) * (2005401601 * (pc_limbs__1 + imm_limbs__1 - rd_data__2 + 2005401601 * (pc_limbs__0 + imm_limbs__0 - rd_data__1 + 0)) - 1)) = 0\nis_valid * (2005401601 * ((from_state__pc - (rd_data__0 + (0 + pc_limbs__0 * 256 + pc_limbs__1 * 65536))) * 2013265801 + imm_limbs__2 - rd_data__3 + 2005401601 * (pc_limbs__1 + imm_limbs__1 - rd_data__2 + 2005401601 * (pc_limbs__0 + imm_limbs__0 - rd_data__1 + 0))) * (2005401601 * ((from_state__pc - (rd_data__0 + (0 + pc_limbs__0 * 256 + pc_limbs__1 * 65536))) * 2013265801 + imm_limbs__2 - rd_data__3 + 2005401601 * (pc_limbs__1 + imm_limbs__1 - rd_data__2 + 2005401601 * (pc_limbs__0 + imm_limbs__0 - rd_data__1 + 0))) - 1)) = 0\nis_valid * (from_state__timestamp - rd_aux_cols__base__prev_timestamp - 1 - (0 + rd_aux_cols__base__timestamp_lt_aux__lower_decomp__0 * 1 + rd_aux_cols__base__timestamp_lt_aux__lower_decomp__1 * 131072)) = 0"
  },
  {
    "path": "riscv-elf/Cargo.toml",
    "content": "[package]\nname = \"powdr-riscv-elf\"\ndescription = \"powdr RISCV ELF utils\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[dependencies]\npowdr-isa-utils.workspace = true\npowdr-riscv-types.workspace = true\npowdr-syscalls.workspace = true\n\ngimli = \"0.31\"\ngoblin = \"0.8\"\nitertools.workspace = true\nlog.workspace = true\nraki = \"0.1.4\"\nthiserror = \"1.0\"\ntracing.workspace = true\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n\n[[bin]]\nname = \"elf-labels\"\npath = \"src/bin/elf-labels.rs\"\n"
  },
  {
    "path": "riscv-elf/src/bin/elf-labels.rs",
    "content": "#![allow(clippy::print_stdout)]\n\nuse goblin::elf::{\n    header::{EI_CLASS, ELFCLASS32, ELFCLASS64},\n    Elf,\n};\nuse powdr_riscv_elf::{load_elf, rv64};\nuse std::env;\nuse std::fs;\nuse std::panic;\nuse std::path::Path;\nuse std::process;\n\nfn main() {\n    let args: Vec<String> = env::args().collect();\n\n    if args.len() != 2 {\n        eprintln!(\"Usage: {} <elf-file>\", args[0]);\n        process::exit(1);\n    }\n\n    let elf_path = Path::new(&args[1]);\n\n    if !elf_path.exists() {\n        eprintln!(\"Error: File '{}' does not exist\", elf_path.display());\n        process::exit(1);\n    }\n\n    // Read the file to check if it's 32-bit or 64-bit\n    let file_buffer = match fs::read(elf_path) {\n        Ok(buffer) => buffer,\n        Err(e) => {\n            eprintln!(\"Error reading file: {e}\");\n            process::exit(1);\n        }\n    };\n\n    let elf = match Elf::parse(&file_buffer) {\n        Ok(elf) => elf,\n        Err(e) => {\n            eprintln!(\"Error parsing ELF header: {e}\");\n            process::exit(1);\n        }\n    };\n\n    match elf.header.e_ident[EI_CLASS] {\n        ELFCLASS32 => {\n            // The load_elf function panics on errors, so we catch it\n            let result = panic::catch_unwind(|| load_elf(elf_path));\n\n            match result {\n                Ok(program) => {\n                    println!(\n                        \"RV32 ELF file analyzed successfully: {}\",\n                        elf_path.display()\n                    );\n                    println!();\n                    print_elf_info_32(&program);\n                }\n                Err(_) => {\n                    eprintln!(\"Error loading RV32 ELF file: The file may be corrupted or not a valid RISC-V ELF\");\n                    process::exit(1);\n                }\n            }\n        }\n        ELFCLASS64 => {\n            // The load_elf_rv64 function panics on errors, so we catch it\n            let result = 
panic::catch_unwind(|| rv64::compute_jumpdests(elf_path));\n\n            match result {\n                Ok(labels) => {\n                    println!(\n                        \"RV64 ELF file analyzed successfully: {}\",\n                        elf_path.display()\n                    );\n                    println!();\n                    print_elf_info_64(&labels);\n                }\n                Err(_) => {\n                    eprintln!(\"Error loading RV64 ELF file: The file may be corrupted or not a valid RISC-V ELF\");\n                    process::exit(1);\n                }\n            }\n        }\n        _ => {\n            eprintln!(\"Unsupported ELF class\");\n            process::exit(1);\n        }\n    }\n}\n\nfn print_elf_info_32(program: &powdr_riscv_elf::ElfProgram) {\n    // Get text labels from the program\n    let text_labels = program.text_labels();\n\n    if text_labels.is_empty() {\n        println!(\"No text labels found in the ELF file.\");\n    } else {\n        println!(\"Text labels found: {}\", text_labels.len());\n        println!();\n        println!(\"{:<16}\", \"Address\");\n        println!(\"{}\", \"-\".repeat(16));\n\n        // Text labels are already sorted in BTreeSet\n        for address in text_labels {\n            println!(\"0x{address:08x}\");\n        }\n    }\n\n    // Report on debug symbols\n    let debug_info = program.debug_info();\n    println!();\n    println!(\"Debug information:\");\n\n    // Since we can't iterate over SymbolTable directly, we'll use text_labels\n    // and look up each address\n    let mut symbol_count = 0;\n    let mut function_symbols = Vec::new();\n\n    for &addr in text_labels {\n        if let Some(name) = debug_info.symbols.try_get_one(addr) {\n            symbol_count += 1;\n            // Simple heuristic for functions: doesn't start with $ or contain .\n            if !name.starts_with(\"$\") && !name.contains(\".\") {\n                function_symbols.push((addr, 
name));\n            }\n        }\n    }\n\n    println!(\"  Symbols at text label addresses: {symbol_count}\");\n    println!(\"  Function symbols: {}\", function_symbols.len());\n\n    if !function_symbols.is_empty() {\n        println!();\n        println!(\"Function symbols:\");\n        println!(\"{:<16} {:<40}\", \"Address\", \"Symbol\");\n        println!(\"{}\", \"-\".repeat(60));\n\n        for (address, name) in function_symbols {\n            println!(\"0x{address:08x}      {name}\");\n        }\n    }\n\n    // Also show notes if available\n    if !debug_info.notes.is_empty() {\n        println!();\n        println!(\"Debug notes:\");\n        let mut notes: Vec<_> = debug_info.notes.iter().collect();\n        notes.sort_by_key(|(addr, _)| *addr);\n\n        for (addr, note) in notes {\n            println!(\"0x{addr:08x}: {note}\");\n        }\n    }\n}\n\nfn print_elf_info_64(labels: &rv64::Rv64Labels) {\n    println!(\"Entry point: 0x{:016x}\", labels.entry_point);\n    println!(\"PC base: 0x{:016x}\", labels.pc_base);\n    println!();\n\n    if labels.jumpdests.is_empty() {\n        println!(\"No text labels or jump destinations found.\");\n    } else {\n        println!(\n            \"Text labels and jump destinations found: {}\",\n            labels.jumpdests.len()\n        );\n        println!();\n\n        // Show all labels with symbols if available\n        println!(\"{:<20} {:<40}\", \"Address\", \"Symbol (if available)\");\n        println!(\"{}\", \"-\".repeat(60));\n\n        for &addr in &labels.jumpdests {\n            // Find symbol name if available\n            let symbol = labels\n                .symbols\n                .iter()\n                .find(|(sym_addr, _)| *sym_addr == addr)\n                .map(|(_, name)| name.as_str())\n                .unwrap_or(\"\");\n\n            println!(\"0x{addr:016x}  {symbol}\");\n        }\n\n        // Summary of symbols\n        println!();\n        println!(\"Summary:\");\n        
println!(\"  Total labels/jumpdests: {}\", labels.jumpdests.len());\n        println!(\"  Named symbols: {}\", labels.symbols.len());\n        println!(\n            \"  Jumpdests without symbols: {}\",\n            labels.jumpdests_with_debug_info.len()\n        );\n\n        // Show function-like symbols separately\n        let function_symbols: Vec<_> = labels\n            .symbols\n            .iter()\n            .filter(|(_, name)| !name.starts_with(\"$\") && !name.contains(\".\"))\n            .collect();\n\n        if !function_symbols.is_empty() {\n            println!(\"  Function symbols: {}\", function_symbols.len());\n        }\n\n        // Show label to address map\n        println!();\n        println!(\"=== Label to Address Map ===\");\n        println!(\"{:<40} {:<20}\", \"Label\", \"Address\");\n        println!(\"{}\", \"-\".repeat(60));\n\n        let mut sorted_symbols = labels.symbols.clone();\n        sorted_symbols.sort_by(|a, b| a.1.cmp(&b.1));\n\n        for (addr, name) in sorted_symbols {\n            println!(\"{name:<40} 0x{addr:016x}\");\n        }\n\n        // Show jumpdests that are not labels\n        println!();\n        println!(\"=== Jump Destinations Without Symbols ===\");\n        println!(\n            \"{:<20} {:<20} {:<40}\",\n            \"Target Address\", \"From Address\", \"Instruction\"\n        );\n        println!(\"{}\", \"-\".repeat(80));\n\n        let mut sorted_jumpdests: Vec<_> = labels.jumpdests_with_debug_info.iter().collect();\n        sorted_jumpdests.sort_by_key(|(addr, _)| *addr);\n\n        for (target_addr, sources) in sorted_jumpdests {\n            for source in sources {\n                println!(\n                    \"0x{:016x}  0x{:016x}  {}\",\n                    target_addr, source.from_addr, source.instruction\n                );\n            }\n        }\n\n        println!();\n        println!(\"PC Base: 0x{:016x}\", labels.pc_base);\n    }\n}\n"
  },
  {
    "path": "riscv-elf/src/debug_info.rs",
    "content": "use std::{\n    borrow::Cow,\n    collections::{BTreeMap, BTreeSet, HashMap},\n    path::Path,\n};\n\nuse gimli::{\n    read::AttributeValue, DebuggingInformationEntry, Dwarf, EndianSlice, LittleEndian, Operation,\n    Unit, UnitRef,\n};\nuse goblin::elf::{\n    sym::{STT_FUNC, STT_OBJECT},\n    Elf, SectionHeader,\n};\nuse itertools::Itertools;\n\nuse super::AddressMap;\n\ntype Reader<'a> = EndianSlice<'a, LittleEndian>;\n\n#[derive(thiserror::Error, Debug)]\npub enum Error {\n    #[error(\"no debug information available\")]\n    NoDebugInfo,\n    #[error(\"DIE tree traversal skipped a level\")]\n    UnexpectedLevel,\n    #[error(\"failed to parse debug information: {0}\")]\n    Parsing(#[from] gimli::Error),\n}\n\n/// Debug information extracted from the ELF file.\n#[derive(Default)]\npub struct DebugInfo {\n    /// List of source files: (directory, file name).\n    pub file_list: Vec<(String, String)>,\n    /// Relates addresses to source locations.\n    pub source_locations: Vec<SourceLocationInfo>,\n    /// Maps addresses to symbol names.\n    pub symbols: SymbolTable,\n    /// Human readable notes about an address\n    pub notes: HashMap<u32, String>,\n}\n\n#[derive(Debug)]\npub struct SourceLocationInfo {\n    pub address: u32,\n    pub file: u64,\n    pub line: u64,\n    pub col: u64,\n}\n\nimpl DebugInfo {\n    /// Extracts debug information from the ELF file, if available.\n    pub fn new(\n        elf: &Elf,\n        file_buffer: &[u8],\n        address_map: &AddressMap,\n        is_data_addr: &dyn Fn(u32) -> bool,\n        jump_targets: &BTreeSet<u32>,\n    ) -> Result<Self, Error> {\n        let dwarf = load_dwarf_sections(elf, file_buffer)?;\n\n        let mut file_list = Vec::new();\n        let mut source_locations = Vec::new();\n        let mut notes = HashMap::new();\n\n        // Read the ELF symbol table, to be joined with symbols from the DWARF.\n        let mut symbols = read_symbol_table(elf);\n\n        // Iterate over the 
compilation units:\n        let mut units_iter = dwarf.units();\n        while let Some(unit) = units_iter.next()? {\n            let unit = dwarf.unit(unit)?;\n            // Shadows the Unit with a reference to itself, because it is more\n            // convenient to work with a UnitRef.\n            let unit = UnitRef::new(&dwarf, &unit);\n\n            // Read the source locations for this compilation unit.\n            let file_index_delta =\n                read_source_locations(unit, &mut file_list, &mut source_locations)?;\n\n            read_unit_symbols(\n                &dwarf,\n                unit,\n                file_index_delta,\n                is_data_addr,\n                jump_targets,\n                &mut symbols,\n                &mut notes,\n            )?;\n        }\n\n        // Filter out the source locations that are not in the text section\n        filter_locations_in_text(&mut source_locations, address_map);\n\n        // Deduplicate the symbols\n        dedup_names(&mut symbols);\n\n        // Index by address, not by name.\n        let symbols = SymbolTable(\n            symbols\n                .into_iter()\n                .map(|(name, address)| (address, name))\n                .into_group_map()\n                .into_iter()\n                .collect(),\n        );\n\n        Ok(DebugInfo {\n            file_list,\n            source_locations,\n            symbols,\n            notes,\n        })\n    }\n}\n\n/// Reads the source locations for a compilation unit.\nfn read_source_locations(\n    unit: UnitRef<Reader>,\n    file_list: &mut Vec<(String, String)>,\n    source_locations: &mut Vec<SourceLocationInfo>,\n) -> Result<u64, gimli::Error> {\n    // Traverse all the line locations for the compilation unit.\n    let base_dir = Path::new(\n        unit.comp_dir\n            .map(|s| s.to_string())\n            .transpose()?\n            .unwrap_or(\"\"),\n    );\n    let file_idx_delta = file_list.len() as u64;\n    if let 
Some(line_program) = unit.line_program.clone() {\n        // Get the source file listing\n        for file_entry in line_program.header().file_names() {\n            let directory = file_entry\n                .directory(line_program.header())\n                .map(|attr| as_str(unit, attr))\n                .transpose()?\n                .unwrap_or(\"\");\n\n            // This unwrap can not panic because both base_dir and\n            // directory have been validated as UTF-8 strings.\n            let directory = base_dir\n                .join(directory)\n                .into_os_string()\n                .into_string()\n                .unwrap();\n\n            let path = as_str(unit, file_entry.path_name())?;\n\n            file_list.push((directory, path.to_owned()));\n        }\n\n        // Get the locations indexed by address\n        let mut rows = line_program.rows();\n        while let Some((_, row)) = rows.next_row()? {\n            // End markers point to the address after the end, so we skip them.\n            if row.prologue_end() || row.end_sequence() {\n                continue;\n            }\n\n            source_locations.push(SourceLocationInfo {\n                address: row.address() as u32,\n                file: row.file_index() + file_idx_delta,\n                line: match row.line() {\n                    None => 0,\n                    Some(v) => v.get(),\n                },\n                col: match row.column() {\n                    gimli::ColumnType::LeftEdge => 0,\n                    gimli::ColumnType::Column(v) => v.get(),\n                },\n            })\n        }\n    }\n\n    Ok(file_idx_delta)\n}\n\n/// Traverse the tree in which the information about the compilation\n/// unit is stored and extract function and variable names.\nfn read_unit_symbols(\n    dwarf: &Dwarf<Reader>,\n    unit: UnitRef<Reader>,\n    file_idx_delta: u64,\n    is_data_addr: &dyn Fn(u32) -> bool,\n    jump_targets: &BTreeSet<u32>,\n    symbols: 
&mut Vec<(String, u32)>,\n    notes: &mut HashMap<u32, String>,\n) -> Result<(), Error> {\n    // To simplify the algorithm, we start the name stack with a placeholder value.\n    let mut full_name = vec![None];\n    let mut entries = unit.entries();\n    while let Some((level_delta, entry)) = entries.next_dfs()? {\n        // Get the entry name as a human readable string (this is used in a comment)\n        let name = find_attr(entry, gimli::DW_AT_name)\n            .map(|name| unit.attr_string(name).map(|s| s.to_string_lossy()))\n            .transpose()?;\n\n        match level_delta {\n            delta if delta > 1 => return Err(Error::UnexpectedLevel),\n            1 => (),\n            _ => {\n                full_name.truncate((full_name.len() as isize + level_delta - 1) as usize);\n            }\n        }\n        full_name.push(name);\n\n        match entry.tag() {\n            // This is the entry for a function or method.\n            gimli::DW_TAG_subprogram => {\n                let attr = find_attr(entry, gimli::DW_AT_linkage_name);\n                let Some(linkage_name) = attr.map(|ln| unit.attr_string(ln)).transpose()? else {\n                    // This function has no linkage name in DWARF, so it\n                    // must be in ELFs symbol table.\n                    continue;\n                };\n\n                let start_addresses = get_function_start(dwarf, &unit, entry)?;\n                let name = linkage_name.to_string()?;\n                for address in start_addresses {\n                    if jump_targets.contains(&address) {\n                        symbols.push((name.to_owned(), address));\n                    }\n                }\n            }\n            // This is the entry for a variable.\n            gimli::DW_TAG_variable => {\n                let Some(address) = get_static_var_address(&unit, entry)? 
else {\n                    continue;\n                };\n\n                if !is_data_addr(address) {\n                    continue;\n                }\n\n                if full_name.last().is_some() {\n                    // The human readable name of the variable is available,\n                    // so we assemble a pretty note to go into the comment.\n                    let mut file_line = None;\n                    if let Some(AttributeValue::FileIndex(file_idx)) =\n                        find_attr(entry, gimli::DW_AT_decl_file)\n                    {\n                        if let Some(AttributeValue::Udata(line)) =\n                            find_attr(entry, gimli::DW_AT_decl_line)\n                        {\n                            file_line = Some((file_idx + file_idx_delta, line));\n                        }\n                    }\n\n                    let value = format!(\n                        \"{}{}\",\n                        full_name\n                            .iter()\n                            .map(|s| match s {\n                                Some(s) => s,\n                                None => &Cow::Borrowed(\"?\"),\n                            })\n                            .join(\"::\"),\n                        if let Some((file, line)) = file_line {\n                            format!(\" at file {file} line {line}\")\n                        } else {\n                            String::new()\n                        }\n                    );\n\n                    notes.insert(address, value);\n                }\n\n                // The variable symbol name is only used as a fallback\n                // in case there is no pretty note.\n                if let Some(linkage_name) = find_attr(entry, gimli::DW_AT_linkage_name)\n                    .map(|ln| unit.attr_string(ln))\n                    .transpose()?\n                {\n                    symbols.push((linkage_name.to_string()?.to_owned(), address));\n     
           }\n            }\n            _ => {}\n        };\n    }\n\n    Ok(())\n}\n\nfn load_dwarf_sections<'a>(elf: &Elf, file_buffer: &'a [u8]) -> Result<Dwarf<Reader<'a>>, Error> {\n    // Index the sections by their names:\n    let debug_sections: HashMap<&str, &SectionHeader> = elf\n        .section_headers\n        .iter()\n        .filter_map(|shdr| {\n            elf.shdr_strtab\n                .get_at(shdr.sh_name)\n                .map(|name| (name, shdr))\n        })\n        .collect();\n\n    if debug_sections.is_empty() {\n        return Err(Error::NoDebugInfo);\n    }\n\n    // Load the DWARF sections:\n    Ok(gimli::Dwarf::load(move |section| {\n        Ok::<_, ()>(Reader::new(\n            debug_sections\n                .get(section.name())\n                .map(|shdr| {\n                    &file_buffer[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize]\n                })\n                .unwrap_or(&[]),\n            Default::default(),\n        ))\n    })\n    .unwrap())\n}\n\n/// This function linear searches for an attribute of an entry.\n///\n/// My first idea was to iterate over the attribute list once, matching for all\n/// attributes I was interested in. But then I figured out this operation is\n/// N*M, where N is the number of attributes in the list and M is the number of\n/// attributes I am interested in. So doing the inverse is easier and has the\n/// same complexity. 
Since it is hard to tell in practice which one is faster, I\n/// went with the easier approach.\nfn find_attr<'a>(\n    entry: &DebuggingInformationEntry<Reader<'a>>,\n    attr_type: gimli::DwAt,\n) -> Option<AttributeValue<Reader<'a>>> {\n    let mut attrs = entry.attrs();\n    while let Some(attr) = attrs.next().unwrap() {\n        if attr.name() == attr_type {\n            return Some(attr.value());\n        }\n    }\n    None\n}\n\nfn as_str<'a>(\n    unit: UnitRef<Reader<'a>>,\n    attr: AttributeValue<Reader<'a>>,\n) -> Result<&'a str, gimli::Error> {\n    unit.attr_string(attr)?.to_string()\n}\n\nfn get_static_var_address(\n    unit: &Unit<Reader>,\n    entry: &DebuggingInformationEntry<Reader>,\n) -> Result<Option<u32>, gimli::Error> {\n    let Some(attr) = find_attr(entry, gimli::DW_AT_location) else {\n        // No location available\n        return Ok(None);\n    };\n\n    let AttributeValue::Exprloc(address) = attr else {\n        // Not an static variable\n        return Ok(None);\n    };\n\n    // Do the magic to find the variable address\n    let mut ops = address.operations(unit.encoding());\n    let first_op = ops.next()?;\n    let second_op = ops.next()?;\n    let (Some(Operation::Address { address }), None) = (first_op, second_op) else {\n        // The address is not a constant\n        return Ok(None);\n    };\n\n    Ok(Some(address as u32))\n}\n\nfn get_function_start(\n    dwarf: &Dwarf<Reader>,\n    unit: &Unit<Reader>,\n    entry: &DebuggingInformationEntry<Reader>,\n) -> Result<Vec<u32>, gimli::Error> {\n    let mut ret = Vec::new();\n\n    if let Some(low_pc) = find_attr(entry, gimli::DW_AT_low_pc)\n        .map(|val| dwarf.attr_address(unit, val))\n        .transpose()?\n        .flatten()\n    {\n        ret.push(low_pc as u32);\n    }\n\n    if let Some(ranges) = find_attr(entry, gimli::DW_AT_ranges)\n        .map(|val| dwarf.attr_ranges_offset(unit, val))\n        .transpose()?\n        .flatten()\n    {\n        let mut iter = 
dwarf.ranges(unit, ranges)?;\n        while let Some(range) = iter.next()? {\n            ret.push(range.begin as u32);\n        }\n    }\n\n    Ok(ret)\n}\n\n/// Filter out source locations that are not in a text section.\nfn filter_locations_in_text(locations: &mut Vec<SourceLocationInfo>, address_map: &AddressMap) {\n    locations.sort_unstable_by_key(|loc| loc.address);\n\n    let mut done_idx = 0;\n    for (&start_addr, &header) in address_map.0.iter() {\n        // Remove all entries that are in between done and the start address.\n        let start_idx = find_first_idx(&locations[done_idx..], start_addr) + done_idx;\n        locations.drain(done_idx..start_idx);\n\n        // The end address is one past the last byte of the section.\n        let end_addr = start_addr + header.p_memsz as u32;\n        done_idx += find_first_idx(&locations[done_idx..], end_addr);\n    }\n}\n\nfn find_first_idx(slice: &[SourceLocationInfo], addr: u32) -> usize {\n    match slice.binary_search_by_key(&addr, |loc| loc.address) {\n        Ok(mut idx) => {\n            while idx > 0 && slice[idx - 1].address == addr {\n                idx -= 1;\n            }\n            idx\n        }\n        Err(idx) => idx,\n    }\n}\n\n/// Index the symbols by their addresses.\n#[derive(Default)]\npub struct SymbolTable(BTreeMap<u32, Vec<String>>);\n\nimpl SymbolTable {\n    pub fn new(elf: &Elf) -> SymbolTable {\n        let mut symbols = read_symbol_table(elf);\n\n        dedup_names(&mut symbols);\n\n        SymbolTable(\n            symbols\n                .into_iter()\n                .map(|(name, addr)| (addr, name.to_string()))\n                .into_group_map()\n                .into_iter()\n                .collect(),\n        )\n    }\n\n    fn default_label(addr: u32) -> Cow<'static, str> {\n        Cow::Owned(format!(\"__.L{addr:08x}\"))\n    }\n\n    /// Get a symbol, if the address has one.\n    pub fn try_get_one(&self, addr: u32) -> Option<&str> {\n        self.0\n            
.get(&addr)\n            .and_then(|v| v.first().map(|s| s.as_str()))\n    }\n\n    /// Get a symbol, or a default label formed from the address value.\n    pub fn get_one(&self, addr: u32) -> Cow<'_, str> {\n        match self.try_get_one(addr) {\n            Some(s) => Cow::Borrowed(s),\n            None => Self::default_label(addr),\n        }\n    }\n\n    /// Get all symbol, or a default label formed from the address value.\n    pub fn get_all(&self, addr: u32) -> impl Iterator<Item = Cow<'_, str>> {\n        static EMPTY: Vec<String> = Vec::new();\n        let elems = self.0.get(&addr).unwrap_or(&EMPTY);\n        let default = if elems.is_empty() {\n            Some(Self::default_label(addr))\n        } else {\n            None\n        };\n        elems\n            .iter()\n            .map(|s| Cow::Borrowed(s.as_str()))\n            .chain(default)\n    }\n\n    /// Returns a reference to the raw symbol table, mapping addresses to symbol names.\n    pub fn table(&self) -> &BTreeMap<u32, Vec<String>> {\n        &self.0\n    }\n\n    /// Returns a symbol at the address or at the first address before this one that has a symbol.\n    /// Also returns the offset of the provided address relative to that symbol.\n    pub fn try_get_one_or_preceding(&self, addr: u64) -> Option<(&str, u32)> {\n        let addr = u32::try_from(addr).unwrap();\n        self.0\n            .range(..=addr)\n            .last()\n            .and_then(|(a, v)| v.first().map(|s| (s.as_str(), addr - a)))\n    }\n\n    /// Return the inner table\n    pub fn into_table(self) -> BTreeMap<u32, Vec<String>> {\n        self.0\n    }\n\n    pub fn from_table(table: BTreeMap<u32, Vec<String>>) -> Self {\n        Self(table)\n    }\n}\n\nfn read_symbol_table(elf: &Elf) -> Vec<(String, u32)> {\n    elf.syms\n        .iter()\n        .filter_map(|sym| {\n            // We only care about global symbols that have string names, and are\n            // either functions or variables.\n            if 
sym.st_name != 0 && (sym.st_type() == STT_OBJECT || sym.st_type() == STT_FUNC) {\n                Some((elf.strtab[sym.st_name].to_owned(), sym.st_value as u32))\n            } else {\n                None\n            }\n        })\n        .collect()\n}\n\n/// Deduplicates by removing identical entries and appending the address to\n/// repeated names. The vector ends up sorted.\nfn dedup_names(symbols: &mut Vec<(String, u32)>) {\n    while dedup_names_pass(symbols) {}\n}\n\n/// Deduplicates the names of the symbols by appending one level of address to\n/// the name.\n///\n/// Returns `true` if the names were deduplicated.\nfn dedup_names_pass(symbols: &mut Vec<(String, u32)>) -> bool {\n    symbols.sort_unstable();\n    symbols.dedup();\n\n    let mut deduplicated = false;\n    let mut iter = symbols.iter_mut();\n\n    // The first different name defines a group, which ends on the next\n    // different name. The whole group is deduplicated if it contains more than\n    // one element.\n    let mut next_group = iter.next().map(|(name, address)| (name, *address));\n    while let Some((group_name, group_address)) = next_group {\n        let mut group_deduplicated = false;\n        next_group = None;\n\n        // Find duplicates and update names in the group\n        for (name, address) in &mut iter {\n            if name == group_name {\n                group_deduplicated = true;\n                deduplicated = true;\n                *name = format!(\"{name}_{address:08x}\");\n            } else {\n                next_group = Some((name, *address));\n                break;\n            }\n        }\n\n        // If there were duplicates in the group, update the group leader, too.\n        if group_deduplicated {\n            *group_name = format!(\"{group_name}_{group_address:08x}\");\n        }\n    }\n\n    deduplicated\n}\n\n#[cfg(test)]\nmod tests {\n    #[test]\n    fn dedup_names() {\n        let mut symbols = vec![\n            (\"baz\".to_string(), 
0x8000),\n            (\"bar\".to_string(), 0x3000),\n            (\"foo\".to_string(), 0x1000),\n            (\"bar\".to_string(), 0x5000),\n            (\"foo\".to_string(), 0x2000),\n            (\"baz\".to_string(), 0x7000),\n            (\"baz\".to_string(), 0x9000),\n            (\"doo\".to_string(), 0x0042),\n            (\"baz\".to_string(), 0xa000),\n            (\"baz\".to_string(), 0x6000),\n            (\"bar\".to_string(), 0x4000),\n        ];\n\n        super::dedup_names(&mut symbols);\n\n        let expected = vec![\n            (\"bar_00003000\".to_string(), 0x3000),\n            (\"bar_00004000\".to_string(), 0x4000),\n            (\"bar_00005000\".to_string(), 0x5000),\n            (\"baz_00006000\".to_string(), 0x6000),\n            (\"baz_00007000\".to_string(), 0x7000),\n            (\"baz_00008000\".to_string(), 0x8000),\n            (\"baz_00009000\".to_string(), 0x9000),\n            (\"baz_0000a000\".to_string(), 0xa000),\n            (\"doo\".to_string(), 0x0042),\n            (\"foo_00001000\".to_string(), 0x1000),\n            (\"foo_00002000\".to_string(), 0x2000),\n        ];\n        assert_eq!(symbols, expected);\n\n        let mut symbols = vec![\n            (\"john\".to_string(), 0x42),\n            (\"john\".to_string(), 0x87),\n            (\"john\".to_string(), 0x1aa),\n            (\"john_000001aa\".to_string(), 0x1aa),\n            (\"john_00000042\".to_string(), 0x103),\n            (\"john_00000087\".to_string(), 0x103),\n        ];\n\n        super::dedup_names(&mut symbols);\n\n        let expected = vec![\n            (\"john_00000042_00000042\".to_string(), 0x42),\n            (\"john_00000042_00000103\".to_string(), 0x103),\n            (\"john_00000087_00000087\".to_string(), 0x87),\n            (\"john_00000087_00000103\".to_string(), 0x103),\n            (\"john_000001aa\".to_string(), 0x1aa),\n        ];\n\n        assert_eq!(symbols, expected);\n    }\n}\n"
  },
  {
    "path": "riscv-elf/src/lib.rs",
    "content": "use std::{\n    cell::Cell,\n    cmp::Ordering,\n    collections::{btree_map::Entry, BTreeMap, BTreeSet},\n    fs,\n    path::Path,\n};\n\nuse goblin::elf::{\n    header::{EI_CLASS, EI_DATA, ELFCLASS32, ELFDATA2LSB, EM_RISCV, ET_DYN},\n    program_header::PT_LOAD,\n    reloc::{R_RISCV_32, R_RISCV_HI20, R_RISCV_RELATIVE},\n    Elf, ProgramHeader,\n};\nuse itertools::{Either, Itertools};\nuse powdr_isa_utils::SingleDataValue;\nuse powdr_syscalls::Syscall;\nuse raki::{\n    decode::Decode,\n    instruction::{Extensions, Instruction as Ins, OpcodeKind as Op},\n    Isa,\n};\n\nuse powdr_riscv_types::{\n    self, InstructionArgs, MemEntry, Register, RiscVProgram, SourceFileInfo, Statement,\n};\n\npub mod debug_info;\npub mod rv64;\n\nuse self::debug_info::{DebugInfo, SymbolTable};\n\n/// The program header type (p_type) for Powdr prover data segments.\npub const PT_POWDR_PROVER_DATA: u32 = 0x600000da;\n\npub struct ElfProgram {\n    dbg: DebugInfo,\n    data_map: BTreeMap<u32, Data>,\n    text_labels: BTreeSet<u32>,\n    instructions: Vec<HighLevelInsn>,\n    prover_data_bounds: (u32, u32),\n    entry_point: u32,\n}\n\npub fn load_elf(file_name: &Path) -> ElfProgram {\n    log::info!(\"Loading ELF file: {}\", file_name.display());\n    let file_buffer = fs::read(file_name).unwrap();\n    load_elf_from_buffer(&file_buffer)\n}\n\npub fn load_elf_from_buffer(file_buffer: &[u8]) -> ElfProgram {\n    let elf = Elf::parse(file_buffer).unwrap();\n\n    // Assert the file is 32 bits.\n    assert_eq!(\n        elf.header.e_ident[EI_CLASS], ELFCLASS32,\n        \"Only 32-bit ELF files are supported!\"\n    );\n\n    // Assert the file is little-endian.\n    assert_eq!(\n        elf.header.e_ident[EI_DATA], ELFDATA2LSB,\n        \"Only little-endian ELF files are supported!\"\n    );\n\n    // Assert the file contains RISC-V code.\n    assert_eq!(\n        elf.header.e_machine, EM_RISCV,\n        \"Only RISC-V ELF files are supported!\"\n    );\n\n    // Assert this 
is either a PIE file, or that we have the relocation symbols\n    // available. This is needed because we have to lift all the references to\n    // code addresses into labels.\n    assert!(\n        elf.header.e_type == ET_DYN || !elf.shdr_relocs.is_empty(),\n        \"We can only translate PIE ELFs (-pie) or ELFs with relocation symbols (--emit-relocs).\"\n    );\n\n    // Map of addresses into memory sections, so we can know what address belong\n    // in what section.\n    let mut address_map = AddressMap(BTreeMap::new());\n    let mut prover_data_bounds = None;\n    for ph in elf.program_headers.iter() {\n        match ph.p_type {\n            PT_LOAD => {\n                address_map.0.insert(ph.p_vaddr as u32, ph);\n            }\n            PT_POWDR_PROVER_DATA => {\n                assert_eq!(\n                    prover_data_bounds, None,\n                    \"Only one prover data segment is supported!\"\n                );\n                prover_data_bounds =\n                    Some((ph.p_vaddr as u32, ph.p_vaddr as u32 + ph.p_memsz as u32));\n            }\n            _ => {}\n        }\n    }\n\n    // If no prover data segment was provided, make it empty.\n    let prover_data_bounds = prover_data_bounds.unwrap_or((0, 0));\n\n    // Set of R_RISCV_HI20 relocations, needed in non-PIE code to identify\n    // loading of absolute addresses to text.\n    let text_rellocs_set: BTreeSet<u32> = elf\n        .shdr_relocs\n        .iter()\n        .flat_map(|(_, r)| r.iter())\n        .filter(|r| r.r_type == R_RISCV_HI20)\n        .map(|r| r.r_offset as u32)\n        .collect();\n\n    // Keep a list of referenced text addresses, so we can generate the labels.\n    let mut referenced_text_addrs = BTreeSet::from([elf.entry as u32]);\n\n    // Find the text addresses referenced from text sections and load the data sections.\n    let mut data_map = BTreeMap::new();\n    for (&addr, &p) in address_map.0.iter() {\n        let section_data = 
&file_buffer[p.p_offset as usize..(p.p_offset + p.p_filesz) as usize];\n\n        if p.is_executable() {\n            search_text_addrs(\n                addr,\n                section_data,\n                &address_map,\n                &text_rellocs_set,\n                &mut referenced_text_addrs,\n            );\n        } else {\n            load_data_section(addr, section_data, &mut data_map);\n        }\n    }\n\n    // Lift all the references to text addresses in data sections, and add them\n    // to the set. How to do this depends on whether the file is PIE or not.\n    (if elf.header.e_type == ET_DYN {\n        pie_relocate_data_sections\n    } else {\n        static_relocate_data_sections\n    })(\n        &elf,\n        &address_map,\n        &mut data_map,\n        &mut referenced_text_addrs,\n    );\n\n    // Load all the text sections.\n    let mut lifted_text_sections = Vec::new();\n    for (&addr, &p) in address_map.0.iter().filter(|(_, p)| p.is_executable()) {\n        let section_data = &file_buffer[p.p_offset as usize..(p.p_offset + p.p_filesz) as usize];\n        let insns = lift_instructions(\n            addr,\n            section_data,\n            &address_map,\n            &text_rellocs_set,\n            &referenced_text_addrs,\n        );\n        if !insns.is_empty() {\n            lifted_text_sections.push(insns);\n        }\n    }\n\n    // Sort text sections by address and flatten them.\n    lifted_text_sections.sort_by_key(|insns| insns[0].loc.address);\n    let lifted_text_sections = lifted_text_sections\n        .into_iter()\n        .flatten()\n        .collect::<Vec<_>>();\n\n    // Try loading the debug information.\n    let debug_info = match debug_info::DebugInfo::new(\n        &elf,\n        file_buffer,\n        &address_map,\n        &|key| data_map.contains_key(&key),\n        &referenced_text_addrs,\n    ) {\n        Ok(debug_info) => {\n            log::info!(\"Debug information loaded successfully.\");\n            
debug_info\n        }\n        Err(err) => {\n            match err {\n                debug_info::Error::NoDebugInfo => {\n                    log::info!(\"No DWARF debug information found.\")\n                }\n                err => {\n                    log::warn!(\"Error reading DWARF debug information: {err}\")\n                }\n            }\n            log::info!(\"Falling back to using ELF symbol table.\");\n\n            DebugInfo {\n                symbols: SymbolTable::new(&elf),\n                ..Default::default()\n            }\n        }\n    };\n\n    ElfProgram {\n        dbg: debug_info,\n        data_map,\n        text_labels: referenced_text_addrs,\n        instructions: lifted_text_sections,\n        entry_point: elf.entry as u32,\n        prover_data_bounds,\n    }\n}\n\nfn pie_relocate_data_sections(\n    elf: &Elf,\n    address_map: &AddressMap,\n    data_map: &mut BTreeMap<u32, Data>,\n    referenced_text_addrs: &mut BTreeSet<u32>,\n) {\n    // In PIE files, we can read the dynamic relocation table.\n    for r in elf.dynrelas.iter() {\n        let addr = r.r_offset as u32;\n        if !address_map.is_in_data_section(addr) {\n            unimplemented!(\"We assumed all dynamic relocations were data relocations!\");\n        }\n\n        // We only support the R_RISCV_RELATIVE relocation type:\n        assert_eq!(r.r_type, R_RISCV_RELATIVE, \"Unsupported relocation type!\");\n\n        let data_value = r.r_addend.unwrap() as u32;\n\n        if address_map.is_in_text_section(data_value) {\n            data_map.insert(addr, Data::TextLabel(data_value));\n\n            // We also need to add the referenced address to the list of text\n            // addresses, so we can generate the label.\n            referenced_text_addrs.insert(data_value);\n        } else {\n            data_map.insert(addr, Data::Value(data_value));\n        }\n    }\n\n    assert_eq!(elf.dynrels.len(), 0, \"Unsupported relocation type!\");\n}\n\nfn 
static_relocate_data_sections(\n    elf: &Elf,\n    address_map: &AddressMap,\n    data_map: &mut BTreeMap<u32, Data>,\n    referenced_text_addrs: &mut BTreeSet<u32>,\n) {\n    // In non-PIE files, we need to use the linking relocation table.\n    for r in elf.shdr_relocs.iter().flat_map(|(_, relocs)| relocs.iter()) {\n        let addr = r.r_offset as u32;\n        if !address_map.is_in_data_section(addr) {\n            // Relocation of the text section has already been handled in instruction lifting.\n            continue;\n        }\n\n        // We only support the R_RISCV_32 relocation type for the data section:\n        assert_eq!(r.r_type, R_RISCV_32, \"Unsupported relocation type!\");\n\n        let Entry::Occupied(mut entry) = data_map.entry(r.r_offset as u32) else {\n            panic!(\"Unexpected 0 in relocated data entry!\");\n        };\n\n        let Data::Value(original_addr) = *entry.get() else {\n            panic!(\"Related entry already replaced with a label!\");\n        };\n\n        if address_map.is_in_text_section(original_addr) {\n            entry.insert(Data::TextLabel(original_addr));\n\n            // We also need to add the referenced address to the list of text\n            // addresses, so we can generate the label.\n            referenced_text_addrs.insert(original_addr);\n        }\n    }\n}\n\nimpl ElfProgram {\n    pub fn debug_info(&self) -> &DebugInfo {\n        &self.dbg\n    }\n\n    pub fn text_labels(&self) -> &BTreeSet<u32> {\n        &self.text_labels\n    }\n}\n\nimpl RiscVProgram for ElfProgram {\n    fn take_source_files_info(&mut self) -> impl Iterator<Item = SourceFileInfo<'_>> {\n        self.dbg\n            .file_list\n            .iter()\n            .enumerate()\n            .map(|(id, (dir, file))| SourceFileInfo {\n                // +1 because files are indexed from 1\n                id: id as u32 + 1,\n                file,\n                dir,\n            })\n    }\n\n    fn take_initial_mem(&mut self) 
-> impl Iterator<Item = MemEntry> {\n        self.data_map.iter().map(|(addr, data)| {\n            let value = match data {\n                Data::TextLabel(label) => {\n                    SingleDataValue::LabelReference(self.dbg.symbols.get_one(*label).into())\n                }\n                Data::Value(value) => SingleDataValue::Value(*value),\n            };\n\n            let label = self\n                .dbg\n                .notes\n                .get(addr)\n                .map(|note| note.as_str())\n                .or_else(|| self.dbg.symbols.try_get_one(*addr))\n                .map(|s| s.to_string());\n\n            MemEntry {\n                label,\n                addr: *addr,\n                value,\n            }\n        })\n    }\n\n    fn take_executable_statements(\n        &mut self,\n    ) -> impl Iterator<Item = Statement<'_, impl AsRef<str>, impl InstructionArgs>> {\n        // In the output, the precedence is labels, locations, and then instructions.\n        // We merge the 3 iterators with this operations: merge(labels, merge(locs, instructions)), where each is sorted by address.\n\n        // First the inner merge: locs and instructions.\n        let locs = self.dbg.source_locations.iter();\n        let instructions = self.instructions.iter();\n        let locs_and_instructions = locs\n            .map(|loc| (Cell::new(0), loc))\n            .merge_join_by(instructions, |next_loc, next_insn| {\n                assert!(\n                    next_loc.1.address >= next_insn.loc.address,\n                    \"Debug location {:08x} doesn't match instruction address!\",\n                    next_loc.1.address\n                );\n                if next_loc.1.address < next_insn.loc.address + next_insn.loc.size {\n                    next_loc.0.set(next_insn.loc.address);\n                    true\n                } else {\n                    false\n                }\n            })\n            .map(|result| match result {\n         
       // Extract the address from the Either, for easier comparison in the next step.\n                Either::Left((address, loc)) => (address.get(), Either::Left(loc)),\n                Either::Right(insn) => (insn.loc.address, Either::Right(insn)),\n            });\n\n        // Now the outer merge: labels and locs_and_instructions.\n        let labels = self.text_labels.iter();\n        labels\n            .merge_join_by(\n                locs_and_instructions,\n                |&label_addr, (right_addr, _)| match label_addr.cmp(right_addr) {\n                    Ordering::Less => panic!(\"Label {label_addr:08x} doesn't match exact address!\"),\n                    Ordering::Equal => true,\n                    Ordering::Greater => false,\n                },\n            )\n            .flat_map(|result| -> Box<dyn Iterator<Item = _>> {\n                match result {\n                    Either::Left(label) => {\n                        Box::new(self.dbg.symbols.get_all(*label).map(Statement::Label))\n                    }\n                    Either::Right((_, Either::Left(loc))) => {\n                        Box::new(std::iter::once(Statement::DebugLoc {\n                            file: loc.file,\n                            line: loc.line,\n                            col: loc.col,\n                        }))\n                    }\n                    Either::Right((_, Either::Right(insn))) => {\n                        Box::new(std::iter::once(Statement::Instruction {\n                            op: insn.op,\n                            args: WrappedArgs {\n                                args: &insn.args,\n                                symbol_table: &self.dbg.symbols,\n                            },\n                        }))\n                    }\n                }\n            })\n    }\n\n    fn prover_data_bounds(&self) -> (u32, u32) {\n        self.prover_data_bounds\n    }\n\n    fn start_function(&self) -> impl AsRef<str> {\n        
self.dbg.symbols.get_one(self.entry_point)\n    }\n}\n\n/// The instruction arguments for code generation. Needs the symbol table to\n/// translate addresses to labels in the output code.\nstruct WrappedArgs<'a> {\n    args: &'a HighLevelArgs,\n    symbol_table: &'a SymbolTable,\n}\n\nimpl InstructionArgs for WrappedArgs<'_> {\n    type Error = String;\n\n    fn l(&self) -> Result<impl AsRef<str>, Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::CodeLabel(addr),\n                rd: None,\n                rs1: None,\n                rs2: None,\n            } => Ok(self.symbol_table.get_one(*addr).to_string()),\n            _ => Err(format!(\"Expected: label, got {:?}\", self.args)),\n        }\n    }\n\n    fn r(&self) -> Result<Register, Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::None,\n                rd: None,\n                rs1: Some(rs1),\n                rs2: None,\n            } => Ok(Register::new(*rs1 as u8)),\n            _ => Err(format!(\"Expected: rs1, got {:?}\", self.args)),\n        }\n    }\n\n    fn rri(&self) -> Result<(Register, Register, u32), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::Value(imm),\n                rd: Some(rd),\n                rs1: Some(rs1),\n                rs2: None,\n            } => Ok((\n                Register::new(*rd as u8),\n                Register::new(*rs1 as u8),\n                *imm as u32,\n            )),\n            _ => Err(format!(\"Expected: rd, rs1, imm, got {:?}\", self.args)),\n        }\n    }\n\n    fn rrr(&self) -> Result<(Register, Register, Register), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::None,\n                rd: Some(rd),\n                rs1: Some(rs1),\n                rs2: Some(rs2),\n            } => Ok((\n       
         Register::new(*rd as u8),\n                Register::new(*rs1 as u8),\n                Register::new(*rs2 as u8),\n            )),\n            _ => Err(format!(\"Expected: rd, rs1, rs2, got {:?}\", self.args)),\n        }\n    }\n\n    fn rrr2(&self) -> Result<(Register, Register, Register), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::None,\n                rd: Some(rd),\n                rs1: Some(rs1),\n                rs2: Some(rs2),\n            } => Ok((\n                Register::new(*rd as u8),\n                Register::new(*rs2 as u8),\n                Register::new(*rs1 as u8),\n            )),\n            _ => Err(format!(\"Expected: rd, rs2, rs1, got {:?}\", self.args)),\n        }\n    }\n\n    fn ri(&self) -> Result<(Register, u32), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::Value(imm),\n                rd: Some(rd),\n                rs1: None,\n                rs2: None,\n            } => Ok((Register::new(*rd as u8), *imm as u32)),\n            _ => Err(format!(\"Expected: rd, imm, got {:?}\", self.args)),\n        }\n    }\n\n    fn rr(&self) -> Result<(Register, Register), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::None,\n                rd: Some(rd),\n                rs1: Some(rs1),\n                rs2: None,\n            } => Ok((Register::new(*rd as u8), Register::new(*rs1 as u8))),\n            _ => Err(format!(\"Expected: rd, rs1, got {:?}\", self.args)),\n        }\n    }\n\n    fn rrl(\n        &self,\n    ) -> Result<(Register, Register, impl AsRef<str>), <Self as InstructionArgs>::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::CodeLabel(addr),\n                rd: None,\n                rs1: Some(rs1),\n                rs2: Some(rs2),\n            } => Ok((\n      
          Register::new(*rs1 as u8),\n                Register::new(*rs2 as u8),\n                self.symbol_table.get_one(*addr).to_string(),\n            )),\n            _ => Err(format!(\"Expected: rs1, rs2, label, got {:?}\", self.args)),\n        }\n    }\n\n    fn rl(&self) -> Result<(Register, impl AsRef<str>), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::CodeLabel(addr),\n                rd: None,\n                rs1: Some(rs1),\n                rs2: None,\n            } => Ok((\n                Register::new(*rs1 as u8),\n                self.symbol_table.get_one(*addr).to_string(),\n            )),\n            HighLevelArgs {\n                imm: HighLevelImmediate::CodeLabel(addr),\n                rd: Some(rd),\n                rs1: None,\n                rs2: None,\n            } => Ok((\n                Register::new(*rd as u8),\n                self.symbol_table.get_one(*addr).into(),\n            )),\n            _ => Err(format!(\"Expected: {{rs1|rd}}, label, got {:?}\", self.args)),\n        }\n    }\n\n    fn rro(&self) -> Result<(Register, Register, u32), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::Value(imm),\n                rd: Some(rd),\n                rs1: Some(rs1),\n                rs2: None,\n            } => Ok((\n                Register::new(*rd as u8),\n                Register::new(*rs1 as u8),\n                *imm as u32,\n            )),\n            HighLevelArgs {\n                imm: HighLevelImmediate::Value(imm),\n                rd: None,\n                rs1: Some(rs1),\n                rs2: Some(rs2),\n            } => Ok((\n                Register::new(*rs2 as u8),\n                Register::new(*rs1 as u8),\n                *imm as u32,\n            )),\n            _ => Err(format!(\n                \"Expected: {{rd, rs1 | rs2, rs1}}, imm, got {:?}\",\n                
self.args\n            )),\n        }\n    }\n\n    fn empty(&self) -> Result<(), Self::Error> {\n        match self.args {\n            HighLevelArgs {\n                imm: HighLevelImmediate::None,\n                rd: None,\n                rs1: None,\n                rs2: None,\n            } => Ok(()),\n            _ => Err(format!(\"Expected: no args, got {:?}\", self.args)),\n        }\n    }\n}\n\n/// Indexes the program sections by their virtual address.\n///\n/// Allows for querying if an address is in a data or text section.\npub struct AddressMap<'a>(BTreeMap<u32, &'a ProgramHeader>);\n\nimpl AddressMap<'_> {\n    fn is_in_data_section(&self, addr: u32) -> bool {\n        self.get_section_of_addr(addr)\n            .is_some_and(|section| !section.is_executable())\n    }\n\n    fn is_in_text_section(&self, addr: u32) -> bool {\n        self.get_section_of_addr(addr)\n            .is_some_and(ProgramHeader::is_executable)\n    }\n\n    fn get_section_of_addr(&self, addr: u32) -> Option<&ProgramHeader> {\n        // Get the latest section that starts before the address.\n        let section = self\n            .0\n            .range(..=addr)\n            .next_back()\n            .map(|(_, &section)| section)?;\n\n        if addr > section.p_vaddr as u32 + section.p_memsz as u32 {\n            // The address is after the end of the section.\n            None\n        } else {\n            Some(section)\n        }\n    }\n}\n\n#[derive(Debug)]\nenum Data {\n    TextLabel(u32),\n    Value(u32),\n}\n\nfn load_data_section(mut addr: u32, data: &[u8], data_map: &mut BTreeMap<u32, Data>) {\n    for word in data.chunks(4) {\n        let mut padded = [0; 4];\n        padded[..word.len()].copy_from_slice(word);\n\n        let value = u32::from_le_bytes(padded);\n        if value != 0 {\n            data_map.insert(addr, Data::Value(value));\n        } else {\n            // We don't need to store zero values, as they are implicit.\n        }\n\n        addr += 
4;\n    }\n}\n\nenum UnimpOrInstruction {\n    Unimp16,\n    Unimp32,\n    Instruction(Ins),\n}\n\nimpl UnimpOrInstruction {\n    fn len(&self) -> u32 {\n        match self {\n            UnimpOrInstruction::Unimp16 => 2,\n            UnimpOrInstruction::Unimp32 => 4,\n            UnimpOrInstruction::Instruction(ins) => match ins.extension {\n                Extensions::C => 2,\n                _ => 4,\n            },\n        }\n    }\n}\n\nstruct MaybeInstruction {\n    address: u32,\n    insn: UnimpOrInstruction,\n}\n\n#[derive(Debug)]\nenum HighLevelImmediate {\n    None,\n    CodeLabel(u32),\n    Value(i32),\n}\n\n#[derive(Debug)]\nstruct HighLevelArgs {\n    rd: Option<u32>,\n    rs1: Option<u32>,\n    rs2: Option<u32>,\n    imm: HighLevelImmediate,\n}\n\n/// The default args are all empty.\nimpl Default for HighLevelArgs {\n    fn default() -> Self {\n        HighLevelArgs {\n            rd: None,\n            rs1: None,\n            rs2: None,\n            imm: HighLevelImmediate::None,\n        }\n    }\n}\n\n#[derive(Debug)]\nstruct Location {\n    address: u32,\n    size: u32,\n}\n\n#[derive(Debug)]\nstruct HighLevelInsn {\n    loc: Location,\n    op: &'static str,\n    args: HighLevelArgs,\n}\n\nenum ReadOrWrite<'a, T> {\n    Read(&'a T),\n    Write(&'a mut T),\n}\n\nstruct InstructionLifter<'a> {\n    rellocs_set: &'a BTreeSet<u32>,\n    address_map: &'a AddressMap<'a>,\n    referenced_text_addrs: ReadOrWrite<'a, BTreeSet<u32>>,\n}\n\nimpl InstructionLifter<'_> {\n    fn composed_immediate(\n        &self,\n        hi: i32,\n        lo: i32,\n        rd_ui: usize,\n        rd_addi: usize,\n        insn2_addr: u32,\n        is_address: bool,\n    ) -> Option<(&'static str, HighLevelArgs)> {\n        let immediate = hi.wrapping_add(lo);\n\n        let is_ref_to_text = is_address && self.address_map.is_in_text_section(immediate as u32) &&\n            // This is very sad: sometimes the global pointer lands in the\n            // middle of the text 
section, so we have to make an exception when\n            // setting the gp (x3).\n            rd_addi != 3;\n\n        let (op, imm) = if is_ref_to_text {\n            // If rd_ui != rd_addi, we don't set rd_ui, thus our behavior is not\n            // conformant, but it is probably fine for compiler generated code,\n            // and it has worked so far.\n            (\"la\", HighLevelImmediate::CodeLabel(immediate as u32))\n        } else if rd_ui == rd_addi {\n            if let ReadOrWrite::Read(referenced_text_addrs) = &self.referenced_text_addrs {\n                if referenced_text_addrs.contains(&insn2_addr) {\n                    // We can't join the two instructions because there is a\n                    // jump to the second. Let each one be handled separately.\n                    return None;\n                }\n            }\n            (\"li\", HighLevelImmediate::Value(immediate))\n        } else {\n            // This pair of instructions leaks rd_ui. Since this is not a\n            // reference to text, we can afford to be more conformant and handle\n            // each instruction separately.\n            return None;\n        };\n\n        Some((\n            op,\n            HighLevelArgs {\n                rd: Some(rd_ui as u32),\n                imm,\n                ..Default::default()\n            },\n        ))\n    }\n}\n\nimpl TwoOrOneMapper<MaybeInstruction, HighLevelInsn> for InstructionLifter<'_> {\n    fn try_map_two(\n        &mut self,\n        insn1: &MaybeInstruction,\n        insn2: &MaybeInstruction,\n    ) -> Option<HighLevelInsn> {\n        use UnimpOrInstruction::Instruction as I;\n\n        let loc = Location {\n            address: insn1.address,\n            size: insn1.insn.len() + insn2.insn.len(),\n        };\n        let insn2_addr = insn2.address;\n        let (I(insn1), I(insn2)) = (&insn1.insn, &insn2.insn) else {\n            return None;\n        };\n\n        let result = match (insn1, insn2) {\n         
   (\n                // li rd, immediate\n                Ins {\n                    opc: Op::LUI,\n                    rd: Some(rd_lui),\n                    imm: Some(hi),\n                    ..\n                },\n                Ins {\n                    opc: Op::ADDI,\n                    rd: Some(rd_addi),\n                    rs1: Some(rs1_addi),\n                    imm: Some(lo),\n                    ..\n                },\n            ) if rd_lui == rs1_addi => {\n                // Sometimes, in non-PIE code, this pair of instructions is used\n                // to load an address into a register. We must check if this is\n                // the case, and if the address points to a text section, we\n                // must load it from a label.\n                let is_address = self.rellocs_set.contains(&loc.address);\n                let (op, args) =\n                    self.composed_immediate(*hi, *lo, *rd_lui, *rd_addi, insn2_addr, is_address)?;\n\n                HighLevelInsn { op, args, loc }\n            }\n            (\n                // inline-able system call:\n                //   addi t0, x0, immediate\n                //   ecall\n                Ins {\n                    opc: Op::ADDI,\n                    rd: Some(5),\n                    rs1: Some(0),\n                    imm: Some(opcode),\n                    ..\n                },\n                Ins { opc: Op::ECALL, .. 
},\n            ) => {\n                // If this is not a know system call, we just let the executor deal with the problem.\n                let syscall = u8::try_from(*opcode)\n                    .ok()\n                    .and_then(|opcode| Syscall::try_from(opcode).ok())?;\n\n                HighLevelInsn {\n                    loc,\n                    op: syscall.name(),\n                    args: Default::default(),\n                }\n            }\n            (\n                // All other double instructions we can lift start with auipc.\n                Ins {\n                    opc: Op::AUIPC,\n                    rd: Some(rd_auipc),\n                    imm: Some(hi),\n                    ..\n                },\n                insn2,\n            ) => {\n                let hi = hi.wrapping_add(loc.address as i32);\n                match insn2 {\n                    // la rd, symbol\n                    Ins {\n                        opc: Op::ADDI,\n                        rd: Some(rd_addi),\n                        rs1: Some(rs1_addi),\n                        imm: Some(lo),\n                        ..\n                    } if rd_auipc == rs1_addi => {\n                        // AUIPC obviously always refer to an address.\n                        const IS_ADDRESS: bool = true;\n                        let (op, args) = self.composed_immediate(\n                            hi, *lo, *rd_auipc, *rd_addi, insn2_addr, IS_ADDRESS,\n                        )?;\n\n                        HighLevelInsn { op, args, loc }\n                    }\n                    // l{b|h|w}[u] rd, symbol\n                    Ins {\n                        opc: l_op,\n                        rd: Some(rd_l),\n                        rs1: Some(rs1_l),\n                        rs2: None,\n                        imm: Some(lo),\n                        ..\n                    } if matches!(l_op, Op::LB | Op::LH | Op::LW | Op::LBU | Op::LHU)\n                        && 
rd_auipc == rd_l\n                        && rd_l == rs1_l =>\n                    {\n                        // We don't support code introspection, so it is better\n                        // to panic if this is the case:\n                        let addr = hi.wrapping_add(*lo);\n                        assert!(!self.address_map.is_in_text_section(addr as u32));\n\n                        HighLevelInsn {\n                            op: l_op.to_string(),\n                            args: HighLevelArgs {\n                                rd: Some(*rd_l as u32),\n                                rs1: Some(0), // this is x0 because the entire address is in the immediate\n                                imm: HighLevelImmediate::Value(addr),\n                                ..Default::default()\n                            },\n                            loc,\n                        }\n                    }\n                    // s{b|h|w} rd, symbol, rt\n                    Ins {\n                        opc: l_op,\n                        rd: None,\n                        rs1: Some(rt_l),\n                        rs2: Some(_),\n                        imm: Some(lo),\n                        ..\n                    } if matches!(l_op, Op::SB | Op::SH | Op::SW) && rd_auipc == rt_l => {\n                        // We don't support code modification, so it is better\n                        // to panic if this is the case:\n                        let addr = hi.wrapping_add(*lo);\n                        assert!(!self.address_map.is_in_text_section(addr as u32));\n\n                        // Otherwise, this is a data store instruction. 
To be\n                        // more conformant, it is better to let two\n                        // instructions be handled separately.\n                        return None;\n                    }\n                    // call offset\n                    Ins {\n                        opc: Op::JALR,\n                        rd: Some(link_reg),\n                        rs1: Some(hi_reg),\n                        rs2: None,\n                        imm: Some(lo),\n                        ..\n                    } if rd_auipc == hi_reg && hi_reg == link_reg => HighLevelInsn {\n                        op: \"jal\",\n                        args: HighLevelArgs {\n                            imm: HighLevelImmediate::CodeLabel(hi.wrapping_add(*lo) as u32),\n                            rd: Some(*link_reg as u32),\n                            ..Default::default()\n                        },\n                        loc,\n                    },\n                    // tail offset\n                    Ins {\n                        opc: Op::JALR,\n                        rd: Some(0),\n                        rs1: Some(6),\n                        rs2: None,\n                        imm: Some(lo),\n                        ..\n                    } if *rd_auipc == 6 => HighLevelInsn {\n                        op: \"tail\",\n                        args: HighLevelArgs {\n                            imm: HighLevelImmediate::CodeLabel(hi.wrapping_add(*lo) as u32),\n                            ..Default::default()\n                        },\n                        loc,\n                    },\n                    _ => {\n                        panic!(\n                            \"Unexpected instruction after AUIPC: {insn2:?} at {:08x}\",\n                            loc.address\n                        );\n                    }\n                }\n            }\n            _ => return None,\n        };\n\n        // TODO: implement here other kinds of RISC-V fusions as 
optimization.\n\n        if let (ReadOrWrite::Write(refs), HighLevelImmediate::CodeLabel(addr)) =\n            (&mut self.referenced_text_addrs, &result.args.imm)\n        {\n            refs.insert(*addr);\n        }\n\n        Some(result)\n    }\n\n    fn map_one(&mut self, insn: MaybeInstruction) -> HighLevelInsn {\n        let loc = Location {\n            address: insn.address,\n            size: insn.insn.len(),\n        };\n        let UnimpOrInstruction::Instruction(insn) = insn.insn else {\n            return HighLevelInsn {\n                op: \"unimp\",\n                args: Default::default(),\n                loc,\n            };\n        };\n\n        let mut imm = match insn.opc {\n            // All jump instructions that have an address as immediate\n            Op::JAL | Op::BEQ | Op::BNE | Op::BLT | Op::BGE | Op::BLTU | Op::BGEU => {\n                let addr = (insn.imm.unwrap() + loc.address as i32) as u32;\n                if let ReadOrWrite::Write(refs) = &mut self.referenced_text_addrs {\n                    refs.insert(addr);\n                }\n\n                HighLevelImmediate::CodeLabel(addr)\n            }\n            // We currently only support standalone jalr if offset is zero\n            Op::JALR => {\n                assert!(\n                    insn.imm.unwrap() == 0,\n                    \"jalr with non-zero offset is not supported\"\n                );\n\n                HighLevelImmediate::Value(0)\n            }\n            // LUI is special because the decoder already shifts the immediate,\n            // but the code gen expects it unshifted, so we have to undo.\n            Op::LUI => HighLevelImmediate::Value(insn.imm.unwrap() >> 12),\n            // We don't support arbitrary AUIPCs, but it is trivial to transform\n            // one to an LI. 
If it passed the two-by-two transformation and got\n            // here, this is a reference to data, so it is safe to transform it.\n            Op::AUIPC => {\n                return HighLevelInsn {\n                    op: \"li\",\n                    args: HighLevelArgs {\n                        rd: insn.rd.map(|x| x as u32),\n                        imm: HighLevelImmediate::Value(\n                            insn.imm.unwrap().wrapping_add(loc.address as i32),\n                        ),\n                        ..Default::default()\n                    },\n                    loc,\n                };\n            }\n            // All other instructions, which have the immediate as a value\n            _ => match insn.imm {\n                Some(imm) => HighLevelImmediate::Value(imm),\n                None => HighLevelImmediate::None,\n            },\n        };\n\n        // The acquire and release bits of an atomic instructions are decoded as\n        // the immediate value, but we don't need the bits and an immediate is\n        // not expected, so we must remove it.\n        if let Extensions::A = insn.extension {\n            imm = HighLevelImmediate::None;\n        }\n\n        // TODO: lift other instructions to their pseudoinstructions,\n        // because they can have simplified implementations (like the\n        // branch-zero variants and add to x0).\n\n        HighLevelInsn {\n            op: insn.opc.to_string(),\n            args: HighLevelArgs {\n                rd: insn.rd.map(|x| x as u32),\n                rs1: insn.rs1.map(|x| x as u32),\n                rs2: insn.rs2.map(|x| x as u32),\n                imm,\n            },\n            loc,\n        }\n    }\n}\n\n/// Find all the references to text addresses in the instructions and add them\n/// to the set.\nfn search_text_addrs(\n    base_addr: u32,\n    data: &[u8],\n    address_map: &AddressMap,\n    rellocs_set: &BTreeSet<u32>,\n    referenced_text_addrs: &mut BTreeSet<u32>,\n) {\n  
  try_map_two_by_two(\n        RiscVInstructionIterator::new(base_addr, data),\n        InstructionLifter {\n            rellocs_set,\n            address_map,\n            referenced_text_addrs: ReadOrWrite::Write(referenced_text_addrs),\n        },\n    );\n}\n\n/// Lift the instructions back to higher-level instructions.\n///\n/// Turn addresses into labels and merge instructions into\n/// pseudoinstructions.\nfn lift_instructions(\n    base_addr: u32,\n    data: &[u8],\n    address_map: &AddressMap,\n    rellocs_set: &BTreeSet<u32>,\n    referenced_text_addrs: &BTreeSet<u32>,\n) -> Vec<HighLevelInsn> {\n    try_map_two_by_two(\n        RiscVInstructionIterator::new(base_addr, data),\n        InstructionLifter {\n            rellocs_set,\n            address_map,\n            referenced_text_addrs: ReadOrWrite::Read(referenced_text_addrs),\n        },\n    )\n}\n\nstruct RiscVInstructionIterator<'a> {\n    curr_address: u32,\n    remaining_data: &'a [u8],\n}\n\nimpl RiscVInstructionIterator<'_> {\n    fn new(base_addr: u32, data: &[u8]) -> RiscVInstructionIterator<'_> {\n        RiscVInstructionIterator {\n            curr_address: base_addr,\n            remaining_data: data,\n        }\n    }\n}\n\nimpl Iterator for RiscVInstructionIterator<'_> {\n    type Item = MaybeInstruction;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.remaining_data.is_empty() {\n            return None;\n        }\n\n        // Decide if the next instruction is 32 bits or 16 bits (\"C\" extension):\n        let advance;\n        let maybe_insn;\n        if self.remaining_data[0] & 0b11 == 0b11 {\n            // 32 bits\n            advance = 4;\n            let insn = u32::from_le_bytes(\n                self.remaining_data[0..4]\n                    .try_into()\n                    .expect(\"Not enough bytes to complete a 32-bit instruction\"),\n            )\n            .decode(Isa::Rv32);\n\n            // When C extension is disabled, both LLVM and GNU 
binutils uses the\n            // privileged instruction CSRRW to represent the `unimp` mnemonic.\n            // https://groups.google.com/a/groups.riscv.org/g/sw-dev/c/Xu6UmcIAKIk/m/piJEHdBlAAAJ\n            //\n            // We must handle this case here.\n\n            let insn = if let Ok(insn) = insn {\n                if matches!(insn.opc, Op::CSRRW) {\n                    UnimpOrInstruction::Unimp32\n                } else {\n                    UnimpOrInstruction::Instruction(insn)\n                }\n            } else {\n                UnimpOrInstruction::Unimp32\n            };\n\n            maybe_insn = MaybeInstruction {\n                address: self.curr_address,\n                insn,\n            };\n        } else {\n            // 16 bits\n            advance = 2;\n            let bin_instruction = u16::from_le_bytes(\n                self.remaining_data[0..2]\n                    .try_into()\n                    .expect(\"Not enough bytes to complete a 16-bit instruction\"),\n            );\n            maybe_insn = MaybeInstruction {\n                address: self.curr_address,\n                insn: match bin_instruction.decode(Isa::Rv32) {\n                    Ok(c_insn) => UnimpOrInstruction::Instruction(to_32bit_equivalent(c_insn)),\n                    Err(raki::decode::DecodingError::IllegalInstruction) => {\n                        // Although not a real RISC-V instruction, sometimes 0x0000\n                        // is used on purpose as an illegal instruction (it even has\n                        // its own mnemonic \"unimp\"), so we support it here.\n                        // Otherwise, there is something more fishy going on, and we\n                        // panic.\n\n                        // TODO: maybe we should just emit `unimp` for every unknown.\n                        assert_eq!(\n                            bin_instruction, 0,\n                            \"Failed to decode 16-bit instruction at {:08x}\",\n           
                 self.curr_address\n                        );\n                        UnimpOrInstruction::Unimp16\n                    }\n                    Err(err) => panic!(\n                        \"Unexpected decoding error at {:08x}: {err:?}\",\n                        self.curr_address\n                    ),\n                },\n            };\n        }\n\n        // Advance the address and the data\n        self.curr_address += advance;\n        self.remaining_data = &self.remaining_data[advance as usize..];\n\n        Some(maybe_insn)\n    }\n}\n\n/// Translates an extension \"C\" instruction to the equivalent 32-bit instruction.\nfn to_32bit_equivalent(mut insn: Ins) -> Ins {\n    let new_opc = match insn.opc {\n        Op::C_LW => Op::LW,\n        Op::C_SW => Op::SW,\n        Op::C_NOP => {\n            return Ins {\n                opc: Op::ADDI,\n                rd: Some(0),\n                rs1: Some(0),\n                ..insn\n            }\n        }\n        Op::C_ADDI | Op::C_ADDI16SP => Op::ADDI,\n        Op::C_ADDI4SPN => {\n            return Ins {\n                opc: Op::ADDI,\n                rs1: Some(2), // add to x2 (stack pointer)\n                ..insn\n            };\n        }\n        Op::C_LI => {\n            return Ins {\n                opc: Op::ADDI,\n                rs1: Some(0),\n                ..insn\n            }\n        }\n        Op::C_JAL => {\n            return Ins {\n                opc: Op::JAL,\n                rd: Some(1), // output to x1 (return address)\n                ..insn\n            };\n        }\n        Op::C_LUI => Op::LUI,\n        Op::C_SRLI => Op::SRLI,\n        Op::C_SRAI => Op::SRAI,\n        Op::C_ANDI => Op::ANDI,\n        Op::C_SUB => Op::SUB,\n        Op::C_XOR => Op::XOR,\n        Op::C_OR => Op::OR,\n        Op::C_AND => Op::AND,\n        Op::C_J => {\n            return Ins {\n                opc: Op::JAL,\n                rd: Some(0), // discard output\n                ..insn\n   
         };\n        }\n        Op::C_BEQZ => {\n            return Ins {\n                opc: Op::BEQ,\n                rs2: Some(0), // compare with zero\n                ..insn\n            };\n        }\n        Op::C_BNEZ => {\n            return Ins {\n                opc: Op::BNE,\n                rs2: Some(0), // compare with zero\n                ..insn\n            };\n        }\n        Op::C_SLLI => Op::SLLI,\n        Op::C_LWSP => {\n            return Ins {\n                opc: Op::LW,\n                rs1: Some(2), // load relative to x2 (stack pointer)\n                ..insn\n            };\n        }\n        Op::C_JR => {\n            return Ins {\n                opc: Op::JALR,\n                // discard the return address:\n                rd: Some(0),\n                // There is a binary value for rs2 in C.JR (set to 0), which is\n                // returned by the decoder, but there isn't an equivalent to the\n                // expanded JALR instruction, so we must set None here:\n                rs2: None,\n                imm: Some(0),\n                ..insn\n            };\n        }\n        Op::C_MV => {\n            return Ins {\n                opc: Op::ADD,\n                rs1: Some(0), // add to zero\n                ..insn\n            };\n        }\n        Op::C_EBREAK => Op::EBREAK,\n        Op::C_JALR => {\n            return Ins {\n                opc: Op::JALR,\n                // output to x1 (return address):\n                rd: Some(1),\n                // There is a binary value for rs2 in C.JALR (set to 0), which\n                // is returned by the decoder, but there isn't an equivalent to\n                // the expanded JALR instruction, so we must set None here:\n                rs2: None,\n                imm: Some(0), // jump to the exact address\n                ..insn\n            };\n        }\n        Op::C_ADD => Op::ADD,\n        Op::C_SWSP => {\n            return Ins {\n                opc: 
Op::SW,\n                rs1: Some(2), // store relative to x2 (stack pointer)\n                ..insn\n            };\n        }\n        Op::C_LD | Op::C_SD | Op::C_ADDIW | Op::C_SUBW | Op::C_ADDW | Op::C_LDSP | Op::C_SDSP => {\n            unreachable!(\"not a riscv32 instruction\")\n        }\n        _ => unreachable!(\"not a RISC-V \\\"C\\\" extension instruction\"),\n    };\n\n    insn.opc = new_opc;\n    insn\n}\n\n/// Helper trait for function `try_map_two_by_two`.\n///\n/// Provides the methods to try to map two elements into one first, and one to\n/// one as fallback.\ntrait TwoOrOneMapper<E, R> {\n    /// Tries to map two elements into one. If it fails, `map_one` is called.\n    fn try_map_two(&mut self, first: &E, second: &E) -> Option<R>;\n    /// Maps one element individually. This one can not fail.\n    fn map_one(&mut self, element: E) -> R;\n}\n\n/// Takes an iterator, and maps the elements two by two. If fails, maps\n/// individually.\n///\n/// TODO: this would be more elegant as a generator, but they are unstable.\nfn try_map_two_by_two<E, R>(\n    input: impl Iterator<Item = E>,\n    mut mapper: impl TwoOrOneMapper<E, R>,\n) -> Vec<R> {\n    let mut result = Vec::new();\n    let mut iter = input.peekable();\n\n    while let Some(first) = iter.next() {\n        if let Some(second) = iter.peek() {\n            if let Some(mapped) = mapper.try_map_two(&first, second) {\n                result.push(mapped);\n                iter.next();\n            } else {\n                result.push(mapper.map_one(first));\n            }\n        } else {\n            result.push(mapper.map_one(first));\n        }\n    }\n\n    result\n}\n"
  },
  {
    "path": "riscv-elf/src/rv64.rs",
    "content": "use std::collections::BTreeSet;\nuse std::fs;\nuse std::path::Path;\n\nuse goblin::elf::{\n    header::{EI_CLASS, EI_DATA, ELFCLASS64, ELFDATA2LSB, EM_RISCV},\n    Elf,\n};\nuse raki::{decode::Decode, instruction::OpcodeKind as Op, Isa};\n\n/// Information about a jump destination\n#[derive(Debug, Clone)]\npub struct JumpDest {\n    /// The instruction address that generates this jump\n    pub from_addr: u64,\n    /// The instruction that generates this jump\n    pub instruction: String,\n}\n\n/// Minimal RV64 ELF program representation for label/jumpdest collection\npub struct Rv64Labels {\n    /// All text labels and jump destinations\n    pub jumpdests: BTreeSet<u64>,\n    /// Entry point address\n    pub entry_point: u64,\n    /// Symbol table for debugging\n    pub symbols: Vec<(u64, String)>,\n    /// Jump destinations that are not symbols (address -> source instructions)\n    pub jumpdests_with_debug_info: BTreeMap<u64, Vec<JumpDest>>,\n    /// PC base (lowest executable address)\n    pub pc_base: u64,\n}\n\npub fn compute_jumpdests(file_name: &Path) -> Rv64Labels {\n    log::info!(\"Loading RV64 ELF file: {}\", file_name.display());\n    let file_buffer = fs::read(file_name).unwrap();\n    compute_jumpdests_from_buffer(&file_buffer)\n}\n\npub fn compute_jumpdests_from_buffer(file_buffer: &[u8]) -> Rv64Labels {\n    let elf = Elf::parse(file_buffer).unwrap();\n\n    // Verify it's a 64-bit RISC-V ELF\n    assert_eq!(\n        elf.header.e_ident[EI_CLASS], ELFCLASS64,\n        \"Only 64-bit ELF files are supported by rv64 module!\"\n    );\n    assert_eq!(\n        elf.header.e_ident[EI_DATA], ELFDATA2LSB,\n        \"Only little-endian ELF files are supported!\"\n    );\n    assert_eq!(\n        elf.header.e_machine, EM_RISCV,\n        \"Only RISC-V ELF files are supported!\"\n    );\n\n    let mut jumpdests = BTreeSet::new();\n    let mut jumpdests_with_debug_info = BTreeMap::new();\n\n    // Add entry point\n    
jumpdests.insert(elf.entry);\n\n    // Find PC base (lowest executable address)\n    let pc_base = elf\n        .program_headers\n        .iter()\n        .filter(|ph| ph.is_executable())\n        .map(|ph| ph.p_vaddr)\n        .min()\n        .unwrap_or(0);\n\n    // Collect symbols that are in text sections\n    let mut symbols = Vec::new();\n    let mut symbol_addrs = BTreeSet::new();\n    for sym in elf.syms.iter() {\n        if sym.st_value != 0 {\n            // Check if this symbol is in an executable section\n            let in_text = elf.program_headers.iter().any(|ph| {\n                ph.is_executable()\n                    && sym.st_value >= ph.p_vaddr\n                    && sym.st_value < ph.p_vaddr + ph.p_memsz\n            });\n\n            if in_text {\n                jumpdests.insert(sym.st_value);\n                symbol_addrs.insert(sym.st_value);\n                if let Some(name) = elf.strtab.get_at(sym.st_name) {\n                    symbols.push((sym.st_value, name.to_string()));\n                }\n            }\n        }\n    }\n\n    // Scan text sections for jump destinations\n    for ph in elf.program_headers.iter() {\n        if ph.is_executable() {\n            let seg = &file_buffer[ph.p_offset as usize..(ph.p_offset + ph.p_filesz) as usize];\n            scan_for_jump_targets(\n                ph.p_vaddr,\n                seg,\n                &mut jumpdests,\n                &mut jumpdests_with_debug_info,\n                &symbol_addrs,\n            );\n        }\n    }\n\n    Rv64Labels {\n        jumpdests,\n        entry_point: elf.entry,\n        symbols,\n        jumpdests_with_debug_info,\n        pc_base,\n    }\n}\n\nuse std::collections::BTreeMap;\n\nfn scan_for_jump_targets(\n    base_addr: u64,\n    data: &[u8],\n    jumpdests: &mut BTreeSet<u64>,\n    jumpdests_with_debug_info: &mut BTreeMap<u64, Vec<JumpDest>>,\n    label_addrs: &BTreeSet<u64>,\n) {\n    data.chunks(4)\n        // Cast to [u8; 4]\n        
.map(|data| data.try_into().unwrap())\n        .inspect(|data: &[u8; 4]| {\n            assert!(data[0] & 0b11 == 0b11, \"Expected 32-bit instruction\");\n        })\n        .map(u32::from_le_bytes)\n        // Decode the instruction bytes\n        .map(|insn_bytes| {\n            insn_bytes\n                .decode(Isa::Rv64)\n                .expect(\"Failed to decode instruction\")\n        })\n        // Remember the `rs1` and `imm` of the previous instruction if it was AUIPC, used to propagate it to the next JALR\n        .scan(None, |previous_if_auipc, insn| {\n            let previous_auipc_rs1 = std::mem::replace(\n                previous_if_auipc,\n                matches!(insn.opc, Op::AUIPC).then_some((insn.rs1, insn.imm)),\n            );\n            Some((insn, previous_auipc_rs1))\n        })\n        .enumerate()\n        .for_each(|(instruction_index, (insn, previous_if_auipc))| {\n            let addr = base_addr + (instruction_index * 4) as u64;\n\n            // Check for jump/branch instructions\n            match insn.opc {\n                Op::JAL => {\n                    // JAL has a PC-relative immediate\n                    if let Some(imm) = insn.imm {\n                        let target = (addr as i64 + imm as i64) as u64;\n                        jumpdests.insert(target);\n\n                        // Track non-symbol jumpdests\n                        if !label_addrs.contains(&target) {\n                            let jump_info = JumpDest {\n                                from_addr: addr,\n                                instruction: format!(\n                                    \"jal {}, 0x{:x}\",\n                                    insn.rd\n                                        .map(|r| format!(\"x{r}\"))\n                                        .unwrap_or_else(|| \"?\".to_string()),\n                                    target\n                                ),\n                            };\n                            
jumpdests_with_debug_info\n                                .entry(target)\n                                .or_default()\n                                .push(jump_info);\n                        }\n                    }\n                }\n                Op::BEQ | Op::BNE | Op::BLT | Op::BGE | Op::BLTU | Op::BGEU => {\n                    // Conditional branches have PC-relative immediates\n                    if let Some(imm) = insn.imm {\n                        let target = (addr as i64 + imm as i64) as u64;\n                        jumpdests.insert(target);\n\n                        // Track non-symbol jumpdests\n                        if !label_addrs.contains(&target) {\n                            let jump_info = JumpDest {\n                                from_addr: addr,\n                                instruction: format!(\n                                    \"{} {}, {}, 0x{:x}\",\n                                    format!(\"{:?}\", insn.opc).to_lowercase(),\n                                    insn.rs1\n                                        .map(|r| format!(\"x{r}\"))\n                                        .unwrap_or_else(|| \"?\".to_string()),\n                                    insn.rs2\n                                        .map(|r| format!(\"x{r}\"))\n                                        .unwrap_or_else(|| \"?\".to_string()),\n                                    target\n                                ),\n                            };\n                            jumpdests_with_debug_info\n                                .entry(target)\n                                .or_default()\n                                .push(jump_info);\n                        }\n                    }\n                }\n                Op::JALR => {\n                    if let Some((rs1, imm)) = previous_if_auipc {\n                        // JALR with a preceding AUIPC\n                        if insn.rd == rs1 {\n                            // This 
is an AUIPC+JALR pair, we can resolve it statically\n                            if let (Some(auipc_imm), Some(jalr_imm)) = (imm, insn.imm) {\n                                let target =\n                                    (addr as i64 + auipc_imm as i64 + jalr_imm as i64) as u64;\n                                jumpdests.insert(target);\n\n                                // Track non-symbol jumpdests\n                                if !label_addrs.contains(&target) {\n                                    let jump_info = JumpDest {\n                                        from_addr: addr,\n                                        instruction: format!(\"auipc+jalr -> 0x{target:x}\"),\n                                    };\n                                    jumpdests_with_debug_info\n                                        .entry(target)\n                                        .or_default()\n                                        .push(jump_info);\n                                }\n                            }\n                        }\n                    } else {\n                        // Standalone JALR without preceding AUIPC\n                        // These are dynamic jumps we can't resolve statically:\n                        // - Return instructions (jalr x0, x1, 0)\n                        // - Indirect calls through function pointers\n                        // - Computed jumps (switch statements, vtables)\n                        // We just note their existence for completeness\n\n                        let rs1_str = insn\n                            .rs1\n                            .map(|r| format!(\"x{r}\"))\n                            .unwrap_or_else(|| \"?\".to_string());\n                        let rd_str = insn\n                            .rd\n                            .map(|r| format!(\"x{r}\"))\n                            .unwrap_or_else(|| \"?\".to_string());\n                        let imm = insn.imm.unwrap_or(0);\n\n        
                // Only log if it's not a standard return (jalr x0, x1, 0)\n                        if !(insn.rd == Some(0) && insn.rs1 == Some(1) && imm == 0) {\n                            tracing::debug!(\n                                \"Note: Dynamic jump at 0x{addr:x}: jalr {rd_str}, {rs1_str}, {imm}\",\n                            );\n                        }\n                    }\n                }\n                _ => {}\n            };\n        });\n}\n"
  },
  {
    "path": "riscv-types/Cargo.toml",
    "content": "[package]\nname = \"powdr-riscv-types\"\ndescription = \"powdr RISCV types and traits\"\nversion.workspace = true\nedition.workspace = true\nlicense.workspace = true\nhomepage.workspace = true\nrepository.workspace = true\n\n[dependencies]\npowdr-isa-utils.workspace = true\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "riscv-types/src/lib.rs",
    "content": "use powdr_isa_utils::SingleDataValue;\nuse std::fmt;\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub struct Register {\n    value: u8,\n}\n\nimpl Register {\n    pub fn new(value: u8) -> Self {\n        Self { value }\n    }\n\n    pub fn is_zero(&self) -> bool {\n        self.value == 0\n    }\n\n    pub fn addr(&self) -> u8 {\n        self.value\n    }\n}\n\n/// List of machine registers, declared in the asm machine.\n/// NOTE: the bootloader expects the PC to be the last register in this list.\npub const REGISTER_NAMES: [&str; 3] = [\"main::query_arg_1\", \"main::query_arg_2\", \"main::pc\"];\n\n/// These are the names of the RISCV registers that are stored in memory.\npub const REGISTER_MEMORY_NAMES: [&str; 37] = [\n    \"x0\",\n    \"x1\",\n    \"x2\",\n    \"x3\",\n    \"x4\",\n    \"x5\",\n    \"x6\",\n    \"x7\",\n    \"x8\",\n    \"x9\",\n    \"x10\",\n    \"x11\",\n    \"x12\",\n    \"x13\",\n    \"x14\",\n    \"x15\",\n    \"x16\",\n    \"x17\",\n    \"x18\",\n    \"x19\",\n    \"x20\",\n    \"x21\",\n    \"x22\",\n    \"x23\",\n    \"x24\",\n    \"x25\",\n    \"x26\",\n    \"x27\",\n    \"x28\",\n    \"x29\",\n    \"x30\",\n    \"x31\",\n    \"tmp1\",\n    \"tmp2\",\n    \"tmp3\",\n    \"tmp4\",\n    \"lr_sc_reservation\",\n];\n\nimpl fmt::Display for Register {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"{}\", REGISTER_MEMORY_NAMES[self.value as usize])\n    }\n}\n\nimpl From<&str> for Register {\n    fn from(s: &str) -> Self {\n        REGISTER_MEMORY_NAMES\n            .iter()\n            .position(|&name| name == s)\n            .map(|value| Self::new(value as u8))\n            .unwrap_or_else(|| panic!(\"Invalid register\"))\n    }\n}\n\npub enum Statement<'a, L: AsRef<str>, A: InstructionArgs> {\n    DebugLoc { file: u64, line: u64, col: u64 },\n    Label(L),\n    Instruction { op: &'a str, args: A },\n}\n\npub struct MemEntry {\n    pub label: Option<String>,\n    pub addr: u32,\n    
pub value: SingleDataValue,\n}\n\npub struct SourceFileInfo<'a> {\n    pub id: u32,\n    pub dir: &'a str,\n    pub file: &'a str,\n}\n\n/// A RISC-V program that can be translated to POWDR ASM.\npub trait RiscVProgram {\n    /// Takes the listing of source files, to be used in the debug statements.\n    fn take_source_files_info(&mut self) -> impl Iterator<Item = SourceFileInfo<'_>>;\n\n    /// Takes the initial memory snapshot.\n    fn take_initial_mem(&mut self) -> impl Iterator<Item = MemEntry>;\n\n    /// Takes the executable statements and labels.\n    fn take_executable_statements(\n        &mut self,\n    ) -> impl Iterator<Item = Statement<'_, impl AsRef<str>, impl InstructionArgs>>;\n\n    /// Returns the addresses of the start and end of prover data.\n    fn prover_data_bounds(&self) -> (u32, u32);\n\n    /// The name of the function that should be called to start the program.\n    fn start_function(&self) -> impl AsRef<str>;\n}\n\npub trait InstructionArgs {\n    type Error: fmt::Display;\n\n    fn l(&self) -> Result<impl AsRef<str>, Self::Error>;\n    fn r(&self) -> Result<Register, Self::Error>;\n    fn rri(&self) -> Result<(Register, Register, u32), Self::Error>;\n    /// Returns the usual rd, rs1, rs2\n    fn rrr(&self) -> Result<(Register, Register, Register), Self::Error>;\n    /// Special case used in amo* instructions, returning rd, rs2, rs1\n    fn rrr2(&self) -> Result<(Register, Register, Register), Self::Error>;\n    fn ri(&self) -> Result<(Register, u32), Self::Error>;\n    fn rr(&self) -> Result<(Register, Register), Self::Error>;\n    fn rrl(&self) -> Result<(Register, Register, impl AsRef<str>), Self::Error>;\n    fn rl(&self) -> Result<(Register, impl AsRef<str>), Self::Error>;\n    fn rro(&self) -> Result<(Register, Register, u32), Self::Error>;\n    fn empty(&self) -> Result<(), Self::Error>;\n}\n"
  },
  {
    "path": "rust-toolchain.toml",
    "content": "[toolchain]\nchannel = \"nightly-2025-10-01\"\n"
  },
  {
    "path": "scripts/analyze_nightly.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nNightly regression analyzer for benchmark results.\n\nThis script analyzes the latest nightly benchmark results and compares them\nto the previous nightly run. It reports any performance regressions in\nAPC (autoprecompile) configurations only, ignoring manual precompile results.\n\nResults are fetched from: https://github.com/powdr-labs/bench-results/tree/gh-pages/results\n\"\"\"\n\nimport argparse\nfrom datetime import date\nimport json\nimport re\nimport sys\nfrom dataclasses import dataclass\nfrom io import StringIO\nfrom typing import Optional\nfrom urllib.request import urlopen, Request\nfrom urllib.error import URLError, HTTPError\n\nimport pandas as pd\n\n\nGITHUB_API_BASE = \"https://api.github.com/repos/powdr-labs/bench-results\"\nRAW_CONTENT_BASE = \"https://raw.githubusercontent.com/powdr-labs/bench-results/gh-pages\"\n\n# Benchmarks to analyze\nBENCHMARKS = [\"keccak\", \"sha256\", \"pairing\", \"u256\", \"matmul\", \"ecc\", \"ecrecover\", \"reth\"]\n\n# Date pattern for result directories (YYYY-MM-DD-HHMM)\nDATE_PATTERN = re.compile(r\"^\\d{4}-\\d{2}-\\d{2}-\\d{4}$\")\n\n# Pattern to extract APC count from config name (e.g., \"apc030\" -> 30)\nAPC_PATTERN = re.compile(r\"apc(\\d+)\")\n\n\ndef is_apc_config(config: str) -> bool:\n    \"\"\"Check if a config uses APCs (apc count > 0).\"\"\"\n    match = APC_PATTERN.search(config)\n    if match:\n        return int(match.group(1)) > 0\n    return False\n\n\n@dataclass\nclass BenchmarkResult:\n    \"\"\"Holds the best result for a benchmark.\"\"\"\n    benchmark: str\n    best_config: str\n    best_time_ms: float\n    all_results: dict[str, float]\n\n\n@dataclass\nclass ComparisonResult:\n    \"\"\"Holds the comparison between two benchmark runs.\"\"\"\n    benchmark: str\n    latest_time_ms: float\n    latest_config: str\n    previous_time_ms: float\n    previous_config: str\n    change_percent: float\n    is_regression: bool\n    config_changed: bool  # 
True if best config differs between runs\n\n\ndef fetch_url(url: str, headers: Optional[dict] = None) -> str:\n    \"\"\"Fetch content from a URL.\n\n    Raises:\n        URLError: If the URL cannot be reached.\n        HTTPError: If the server returns an error status code.\n    \"\"\"\n    req = Request(url)\n    if headers:\n        for key, value in headers.items():\n            req.add_header(key, value)\n\n    with urlopen(req, timeout=60) as response:\n        return response.read().decode('utf-8')\n\n\ndef get_results_directories() -> list[str]:\n    \"\"\"Get list of result directories from GitHub, sorted by date descending.\n\n    Raises:\n        URLError: If the GitHub API cannot be reached.\n        HTTPError: If the GitHub API returns an error status code.\n        json.JSONDecodeError: If the API response is not valid JSON.\n    \"\"\"\n    url = f\"{GITHUB_API_BASE}/contents/results?ref=gh-pages\"\n    headers = {\"Accept\": \"application/vnd.github.v3+json\"}\n\n    content = fetch_url(url, headers)\n    entries = json.loads(content)\n\n    # Filter to only date-formatted directories\n    dirs = [\n        entry[\"name\"] for entry in entries\n        if entry[\"type\"] == \"dir\" and DATE_PATTERN.match(entry[\"name\"])\n    ]\n\n    # Sort by date descending (lexicographic works for YYYY-MM-DD-HHMM format)\n    dirs.sort(reverse=True)\n    return dirs\n\n\ndef fetch_benchmark_results(run_dir: str, benchmark: str) -> Optional[BenchmarkResult]:\n    \"\"\"Fetch and parse results for a specific benchmark from a run.\"\"\"\n    url = f\"{RAW_CONTENT_BASE}/results/{run_dir}/{benchmark}/basic_metrics.csv\"\n\n    try:\n        content = fetch_url(url)\n    except (URLError, HTTPError) as e:\n        print(f\"Warning: Could not fetch {benchmark} results from {run_dir}: {e}\", file=sys.stderr)\n        return None\n\n    try:\n        df = pd.read_csv(StringIO(content))\n        all_results: dict[str, float] = {\n            str(row['filename']): 
float(row['total_proof_time_ms'])\n            for _, row in df.iterrows()\n        }\n\n        # Only consider APC configs (apc count > 0), ignoring manual and baseline (apc000)\n        apc_results = {k: v for k, v in all_results.items() if is_apc_config(k)}\n\n        if not apc_results:\n            return None\n\n        # Find the best (lowest) total_proof_time_ms among APC configs\n        best_config = min(apc_results, key=lambda k: apc_results[k])\n        best_time = apc_results[best_config]\n\n        return BenchmarkResult(\n            benchmark=benchmark,\n            best_config=best_config,\n            best_time_ms=best_time,\n            all_results=all_results\n        )\n    except (KeyError, ValueError) as e:\n        print(f\"Warning: Malformed CSV for {benchmark} in {run_dir}: {e}\", file=sys.stderr)\n        return None\n\n\ndef compare_results(\n    latest: BenchmarkResult,\n    previous: BenchmarkResult,\n    regression_threshold: float = 0.0\n) -> ComparisonResult:\n    \"\"\"Compare latest results to previous results.\"\"\"\n    if previous.best_time_ms == 0:\n        change_percent = 0.0\n        is_regression = False\n    else:\n        change_percent = (\n            (latest.best_time_ms - previous.best_time_ms) / previous.best_time_ms\n        ) * 100\n        is_regression = change_percent > regression_threshold\n\n    # Check if best config changed\n    config_changed = latest.best_config != previous.best_config\n\n    return ComparisonResult(\n        benchmark=latest.benchmark,\n        latest_time_ms=latest.best_time_ms,\n        latest_config=latest.best_config,\n        previous_time_ms=previous.best_time_ms,\n        previous_config=previous.best_config,\n        change_percent=change_percent,\n        is_regression=is_regression,\n        config_changed=config_changed,\n    )\n\n\ndef print_error_report(error_msg: str) -> None:\n    \"\"\"Print a minimal error report to stdout.\"\"\"\n    print(\"# Nightly Benchmark 
Comparison Report\")\n    print(\"\")\n    print(\"## Errors\")\n    print(\"\")\n    print(f\"- {error_msg}\")\n\n\ndef format_change_percent(change: float) -> str:\n    \"\"\"Format a percentage change with appropriate sign.\"\"\"\n    if change == 0.0:\n        return \"0.0%\"\n    elif change > 0:\n        return f\"+{change:.1f}%\"\n    else:\n        return f\"{change:.1f}%\"\n\n\ndef format_report(\n    latest_run: str,\n    previous_run: str,\n    comparisons: list[ComparisonResult],\n    errors: list[str],\n    warnings: list[str]\n) -> str:\n    \"\"\"Format the comparison report as markdown.\"\"\"\n    lines: list[str] = []\n\n    def add_table_section(title: str, items: list[ComparisonResult]) -> None:\n        \"\"\"Add a markdown table section for comparison results.\"\"\"\n        if not items:\n            return\n        lines.append(f\"## {title}\")\n        lines.append(\"\")\n        lines.append(\"| Benchmark | Latest (ms) | Previous (ms) | Change |\")\n        lines.append(\"|-----------|-------------|---------------|--------|\")\n        for r in items:\n            lines.append(\n                f\"| {r.benchmark} | {r.latest_time_ms:.0f} ({r.latest_config}) | \"\n                f\"{r.previous_time_ms:.0f} ({r.previous_config}) | \"\n                f\"{format_change_percent(r.change_percent)} |\"\n            )\n        lines.append(\"\")\n\n    lines.append(\"# Nightly Benchmark Comparison Report\")\n    lines.append(\"\")\n    lines.append(f\"**Latest run:** {latest_run}\")\n    lines.append(f\"**Previous run:** {previous_run}\")\n    lines.append(\"\")\n\n    if errors:\n        lines.append(\"## Errors\")\n        lines.append(\"\")\n        for error in errors:\n            lines.append(f\"- {error}\")\n        lines.append(\"\")\n\n    if warnings:\n        lines.append(\"## Warnings\")\n        lines.append(\"\")\n        for warning in warnings:\n            lines.append(f\"- {warning}\")\n        lines.append(\"\")\n\n    
regressions = [c for c in comparisons if c.is_regression]\n    improvements = [c for c in comparisons if c.change_percent < 0]\n    stable = [c for c in comparisons if not c.is_regression and c.change_percent >= 0]\n\n    add_table_section(\"Regressions\", regressions)\n    add_table_section(\"Improvements\", improvements)\n    add_table_section(\"Stable\", stable)\n\n    return \"\\n\".join(lines)\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Analyze nightly benchmark results and report regressions.\"\n    )\n    parser.add_argument(\n        \"--regression-threshold\",\n        type=float,\n        default=0.0,\n        help=\"Percentage threshold above which a change is considered a regression (default: 0.0)\"\n    )\n    parser.add_argument(\n        \"--latest\",\n        type=str,\n        help=\"Specific run directory to use as latest (default: auto-detect)\"\n    )\n    parser.add_argument(\n        \"--previous\",\n        type=str,\n        help=\"Specific run directory to use as previous (default: auto-detect)\"\n    )\n    parser.add_argument(\n        \"--benchmarks\",\n        type=str,\n        nargs=\"+\",\n        default=BENCHMARKS,\n        help=f\"Benchmarks to analyze (default: {' '.join(BENCHMARKS)})\"\n    )\n    parser.add_argument(\n        \"--output-format\",\n        choices=[\"markdown\", \"json\"],\n        default=\"markdown\",\n        help=\"Output format (default: markdown)\"\n    )\n    args = parser.parse_args()\n\n    # Get result directories\n    print(\"Fetching results directories...\", file=sys.stderr)\n    try:\n        result_dirs = get_results_directories()\n    except (URLError, HTTPError) as e:\n        print_error_report(f\"Could not fetch results directories: {e}\")\n        sys.exit(1)\n    except json.JSONDecodeError as e:\n        print_error_report(f\"Failed to parse GitHub API response: {e}\")\n        sys.exit(1)\n\n    if len(result_dirs) < 2:\n        print_error_report(\"Need 
at least 2 result directories to compare\")\n        sys.exit(1)\n\n    # Find today's run (must exist unless --latest is provided)\n    if args.latest:\n        latest_run = args.latest\n    else:\n        today = date.today().strftime(\"%Y-%m-%d\")\n        today_runs = [d for d in result_dirs if d.startswith(today)]\n        if not today_runs:\n            print_error_report(f\"No results found for today ({today})\")\n            sys.exit(1)\n        latest_run = today_runs[0]  # Most recent run today (dirs are sorted descending)\n\n    # Find previous run (most recent run that's not the latest)\n    if args.previous:\n        previous_run = args.previous\n    else:\n        previous_runs = [d for d in result_dirs if d != latest_run]\n        if not previous_runs:\n            print_error_report(\"No previous run found to compare against\")\n            sys.exit(1)\n        previous_run = previous_runs[0]\n\n    print(f\"Comparing {latest_run} (latest) vs {previous_run} (previous)\", file=sys.stderr)\n\n    # Fetch results for each benchmark\n    comparisons = []\n    errors = []\n    warnings = []\n\n    for benchmark in args.benchmarks:\n        print(f\"Analyzing {benchmark}...\", file=sys.stderr)\n\n        latest_result = fetch_benchmark_results(latest_run, benchmark)\n        previous_result = fetch_benchmark_results(previous_run, benchmark)\n\n        if latest_result is None:\n            errors.append(f\"{benchmark}: No APC results found in latest run\")\n            continue\n\n        if previous_result is None:\n            errors.append(f\"{benchmark}: No APC results found in previous run\")\n            continue\n\n        comparison = compare_results(\n            latest_result,\n            previous_result,\n            args.regression_threshold\n        )\n        comparisons.append(comparison)\n\n        # Check for config changes\n        if comparison.config_changed:\n            warnings.append(\n                f\"{benchmark}: Best APC 
config changed from {comparison.previous_config} \"\n                f\"to {comparison.latest_config}\"\n            )\n\n    # Generate report\n    if args.output_format == \"json\":\n        output = {\n            \"latest_run\": latest_run,\n            \"previous_run\": previous_run,\n            \"comparisons\": [\n                {\n                    \"benchmark\": c.benchmark,\n                    \"latest_time_ms\": c.latest_time_ms,\n                    \"latest_config\": c.latest_config,\n                    \"previous_time_ms\": c.previous_time_ms,\n                    \"previous_config\": c.previous_config,\n                    \"change_percent\": c.change_percent,\n                    \"is_regression\": c.is_regression,\n                    \"config_changed\": c.config_changed,\n                }\n                for c in comparisons\n            ],\n            \"errors\": errors,\n            \"warnings\": warnings,\n            \"has_regressions\": any(c.is_regression for c in comparisons),\n            \"has_errors\": len(errors) > 0,\n            \"has_warnings\": len(warnings) > 0,\n        }\n        print(json.dumps(output, indent=2))\n    else:\n        report = format_report(latest_run, previous_run, comparisons, errors, warnings)\n        print(report)\n\n    # Exit with error code if there are regressions or errors\n    has_regressions = any(c.is_regression for c in comparisons)\n    has_errors = len(errors) > 0\n    has_warnings = len(warnings) > 0\n\n    if has_errors:\n        print(\"\\nErrors were encountered during analysis.\", file=sys.stderr)\n        sys.exit(2)\n\n    if has_regressions:\n        print(\"\\nRegressions detected!\", file=sys.stderr)\n        sys.exit(1)\n\n    if has_warnings:\n        print(\"\\nWarnings were generated (see report).\", file=sys.stderr)\n\n    print(\"\\nNo regressions detected.\", file=sys.stderr)\n    sys.exit(0)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "scripts/update-dep.sh",
    "content": "#!/bin/bash\n\n# Script to update openvm or stark-backend git revision hashes across the repository.\n#\n# Usage:\n#   ./scripts/update-dep.sh openvm <new-rev>\n#   ./scripts/update-dep.sh stark-backend <new-rev>\n#\n# Examples:\n#   ./scripts/update-dep.sh openvm v1.5.0-powdr\n#   ./scripts/update-dep.sh stark-backend v1.3.0-powdr\n\nset -e\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nREPO_ROOT=\"$(cd \"${SCRIPT_DIR}/..\" && pwd)\"\n\nDEP_TYPE=\"$1\"\nNEW_REV=\"$2\"\n\nusage() {\n    echo \"Usage: $0 <openvm|stark-backend> <new-rev>\"\n    echo \"\"\n    echo \"Examples:\"\n    echo \"  $0 openvm v1.5.0-powdr\"\n    echo \"  $0 stark-backend v1.3.0-powdr\"\n    echo \"\"\n    echo \"This script updates all git revision references for the specified dependency.\"\n    exit 1\n}\n\nif [[ -z \"$DEP_TYPE\" ]] || [[ -z \"$NEW_REV\" ]]; then\n    usage\nfi\n\ncase \"$DEP_TYPE\" in\n    openvm)\n        GREP_PATTERN='powdr-labs/openvm.git'\n        GIT_URL='https://github.com/powdr-labs/openvm.git'\n        ;;\n    stark-backend)\n        GREP_PATTERN='powdr-labs/stark-backend.git'\n        GIT_URL='https://github.com/powdr-labs/stark-backend.git'\n        ;;\n    *)\n        echo \"Error: Unknown dependency type '$DEP_TYPE'\"\n        echo \"\"\n        usage\n        ;;\nesac\n\necho \"Updating $DEP_TYPE dependencies to: $NEW_REV\"\necho \"\"\n\n# Find all Cargo.toml files with the specified git dependencies\n# Store in an array to safely handle paths with spaces\nCARGO_FILES=()\nwhile IFS= read -r file; do\n    [[ -n \"$file\" ]] && CARGO_FILES+=(\"$file\")\ndone < <(find \"$REPO_ROOT\" -name \"Cargo.toml\" -exec grep -l \"$GREP_PATTERN\" {} \\; 2>/dev/null || true)\n\nif [[ ${#CARGO_FILES[@]} -eq 0 ]]; then\n    echo \"No Cargo.toml files with $DEP_TYPE dependencies found.\"\n    exit 0\nfi\n\nfor file in \"${CARGO_FILES[@]}\"; do\n    echo \"Updating $file\"\n    \n    # Update revisions\n    # Match: rev = \"...\" after the 
git URL\n    sed -i -E 's|(git = \"'\"$GIT_URL\"'\", rev = \")[^\"]+(\")|'\"\\1${NEW_REV}\\2|g\" \"$file\"\ndone\n\necho \"\"\necho \"Done! Updated the following files:\"\nfor file in \"${CARGO_FILES[@]}\"; do\n    echo \"  - ${file#\"$REPO_ROOT\"/}\"\ndone\n\necho \"\"\necho \"Please review the changes and run 'cargo check' to verify.\"\n"
  },
  {
    "path": "syscalls/Cargo.toml",
    "content": "[package]\nname = \"powdr-syscalls\"\ndescription = \"powdr syscalls\"\nversion = { workspace = true }\nedition = { workspace = true }\nlicense = { workspace = true }\nhomepage = { workspace = true }\nrepository = { workspace = true }\n\n[dependencies]\n\n[lints]\nworkspace = true\n\n[lib]\nbench = false # See https://github.com/bheisler/criterion.rs/issues/458\n"
  },
  {
    "path": "syscalls/src/lib.rs",
    "content": "#![no_std]\n\nmacro_rules! syscalls {\n    ($(($num:expr, $identifier:ident, $name:expr, $input_count:expr, $output_count:expr)),* $(,)?) => {\n        /// We use repr(u8) to make sure the enum discriminant will fit into the\n        /// 12 bits of the immediate field of the `addi` instruction,\n        #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]\n        #[repr(u8)]\n        pub enum Syscall {\n            $($identifier = $num),*\n        }\n\n        impl Syscall {\n            pub const fn name(&self) -> &'static str {\n                match self {\n                    $(Syscall::$identifier => $name),*\n                }\n            }\n\n            pub const fn arity(&self) -> (u32, u32) {\n                match self {\n                    $(Syscall::$identifier => ($input_count, $output_count)),*\n                }\n            }\n        }\n\n        impl core::fmt::Display for Syscall {\n            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {\n                write!(f, \"{}\", match self {\n                    $(Syscall::$identifier => $name),*\n                })\n            }\n        }\n\n        impl core::str::FromStr for Syscall {\n            type Err = ();\n            fn from_str(input: &str) -> Result<Self, Self::Err> {\n                match input {\n                    $($name => Ok(Syscall::$identifier)),*,\n                    _ => Err(()),\n                }\n            }\n        }\n\n        impl From<Syscall> for u8 {\n            fn from(syscall: Syscall) -> Self {\n                syscall as Self\n            }\n        }\n\n        impl core::convert::TryFrom<u8> for Syscall {\n            type Error = ();\n            fn try_from(value: u8) -> Result<Self, Self::Error> {\n                match value {\n                    $($num => Ok(Syscall::$identifier)),*,\n                    _ => Err(()),\n                }\n            }\n        }\n    }\n}\n\n// Generate `Syscall` enum 
with supported syscalls and their numbers.\nsyscalls!(\n    (1, Input, \"input\", 2, 1),\n    (2, Output, \"output\", 2, 0),\n    (3, PoseidonGL, \"poseidon_gl\", 1, 0),\n    (4, Affine256, \"affine_256\", 4, 0),\n    (5, EcAdd, \"ec_add\", 3, 0),\n    (6, EcDouble, \"ec_double\", 2, 0),\n    (7, KeccakF, \"keccakf\", 2, 0),\n    (8, Mod256, \"mod_256\", 3, 0),\n    (9, Halt, \"halt\", 0, 0),\n    (10, Poseidon2GL, \"poseidon2_gl\", 2, 0),\n    (11, NativeHash, \"native_hash\", 1, 0),\n    (12, CommitPublic, \"commit_public\", 2, 0),\n    (13, InvertGL, \"invert_gl\", 2, 2),\n    (14, SplitGLVec, \"split_gl_vec\", 2, 0),\n    (15, MergeGL, \"merge_gl\", 3, 0),\n);\n"
  }
]