[
  {
    "path": ".editorconfig",
    "content": "root=true\n\n[*]\nindent_style = space\nindent_size = 2\nend_of_line = lf\ninsert_final_newline = true\n\n[*.py]\nindent_size = 4\n"
  },
  {
    "path": ".gitattributes",
    "content": "* text=auto eol=lf\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: \"\\U0001F41C Bug report\"\nabout: Report a reproducible bug.\ntitle: ''\nlabels: bug\nassignees: ''\n---\n\n### Subject of the issue\n\n<!-- Describe your issue here. -->\n\n### Your environment\n\n<!--\n* Please provide the output of `algokit doctor` command response,\n* This will give us a good idea about your environment\n-->\n\n### Steps to reproduce\n\n1.\n2.\n\n### Expected behaviour\n\n### Actual behaviour\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: \"\\U0001F514 Feature Request\"\nabout: Suggestions for how we can improve the algorand platform.\ntitle: ''\nlabels: enhancement\nassignees: ''\n---\n\n## Problem\n\n<!-- What is the problem that we’re trying to solve? -->\n\n## Solution\n\n<!-- Do you have a potential/suggested solution? Document more than one if possible. -->\n\n### Proposal\n\n<!-- Describe the solution you’d like in detail. -->\n\n### Pros and Cons\n\n<!-- What are the advantages and disadvantages of this solution? -->\n\n## Dependencies\n\n<!-- Does the solution have any team or design dependencies? -->\n"
  },
  {
    "path": ".github/actions/build-binaries/linux/action.yaml",
    "content": "name: \"Build Linux Binary Artifacts\"\ndescription: \"Build Linux specific pyinstaller binary artifacts\"\ninputs:\n  package_name:\n    description: \"The name of the package\"\n    required: true\n  artifacts_dir:\n    description: \"The directory to write artifacts you want to publish\"\n    required: true\n  version:\n    description: \"The version to use for this artifact\"\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Build binary\n      shell: bash\n      run: |\n        poetry run poe package_unix\n\n    - name: Package binary artifact\n      shell: bash\n      run: |\n        cd dist/algokit/\n        echo snap > ./_internal/algokit/resources/distribution-method\n        tar -zcf ${{ inputs.artifacts_dir }}/${{ inputs.package_name }}-snap.tar.gz *\n        cd ../..\n\n    - name: Upload binary artifact\n      uses: actions/upload-artifact@v4\n      with:\n        name: ${{ inputs.package_name }}-snap\n        path: ${{ inputs.artifacts_dir }}/${{ inputs.package_name }}-snap.tar.gz\n"
  },
  {
    "path": ".github/actions/build-binaries/macos/action.yaml",
    "content": "name: \"Build macOS Binary Artifacts\"\ndescription: \"Build macOS specific pyinstaller binary artifacts\"\ninputs:\n  package_name:\n    description: \"The name of the package\"\n    required: true\n  artifacts_dir:\n    description: \"The directory to write artifacts you want to publish\"\n    required: true\n  version:\n    description: \"The version to use for this artifact\"\n  with_codesign:\n    description: \"Flag to determine if we should sign the binary\"\n    required: true\n  apple_team_id:\n    description: \"The Apple Team ID\"\n    required: true\n  apple_bundle_id:\n    description: \"The bundle ID to be used for packaging and notarisation\"\n    required: true\n  apple_cert_id:\n    description: \"The Apple Developer ID certificate ID\"\n    required: true\n  apple_notary_user:\n    description: \"The Apple user to notarise the package\"\n    required: true\n  apple_notary_password:\n    description: \"The Apple password to notarise the package\"\n    required: true\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Build binary\n      shell: bash\n      run: |\n        export APPLE_CERT_ID=\"${{ inputs.with_codesign == 'true' && inputs.apple_cert_id || '' }}\"\n        export APPLE_BUNDLE_ID=\"${{ inputs.with_codesign == 'true' && inputs.apple_bundle_id || format('beta.{0}', inputs.apple_bundle_id) }}\"\n        poetry run poe package_mac\n      env:\n        APPLE_CERT_ID: ${{ inputs.with_codesign == 'true' && inputs.apple_cert_id || '' }}\n        APPLE_BUNDLE_ID: ${{ inputs.with_codesign == 'true' && inputs.apple_bundle_id || format('beta.{0}', inputs.apple_bundle_id) }}\n\n    - name: Add metadata to binary\n      shell: bash\n      run: |\n        echo brew > ${{ github.workspace }}/dist/algokit/_internal/algokit/resources/distribution-method\n\n    # Workaround an issue with PyInstaller where Python.framework was incorrectly signed during the build\n    - name: Codesign python.framework\n      if: ${{ 
inputs.with_codesign == 'true' }}\n      shell: bash\n      run: |\n        codesign --force --sign \"${{ inputs.apple_cert_id }}\" --timestamp \"${{ github.workspace }}/dist/algokit/_internal/Python.framework\"\n\n    - name: Notarize\n      if: ${{ inputs.with_codesign == 'true' }}\n      uses: lando/notarize-action@v2\n      with:\n        appstore-connect-team-id: ${{ inputs.apple_team_id }}\n        appstore-connect-username: ${{ inputs.apple_notary_user }}\n        appstore-connect-password: ${{ inputs.apple_notary_password }}\n        primary-bundle-id: ${{ inputs.apple_bundle_id }}\n        product-path: \"${{ github.workspace }}/dist/algokit\"\n        tool: notarytool\n        verbose: true\n\n    - name: Package binary artifact\n      shell: bash\n      run: |\n        cd dist/algokit/\n        tar -zcf ${{ inputs.artifacts_dir }}/${{ inputs.package_name }}-brew.tar.gz *\n        cd ../..\n\n    - name: Upload binary artifact\n      uses: actions/upload-artifact@v4\n      with:\n        name: ${{ inputs.package_name }}\n        path: ${{ inputs.artifacts_dir }}/${{ inputs.package_name }}-brew.tar.gz\n"
  },
  {
    "path": ".github/actions/build-binaries/windows/action.yaml",
    "content": "name: \"Build Windows Binary Artifacts\"\ndescription: \"Build Windows specific pyinstaller binary artifacts\"\ninputs:\n  package_name:\n    description: \"The name of the package\"\n    required: true\n  version:\n    description: \"The version to use for this artifact\"\n  artifacts_dir:\n    description: \"The directory to write artifacts you want to publish\"\n    required: true\n  with_codesign:\n    description: \"Flag to determine if we should sign the binary\"\n    required: true\n  azure_tenant_id:\n    description: \"The Microsoft Entra tenant (directory) ID.\"\n    required: true\n  azure_client_id:\n    description: \"The client (application) ID of an App Registration in the tenant.\"\n    required: true\n  azure_client_secret:\n    description: \"A client secret that was generated for the App Registration.\"\n    required: true\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Configure build environment\n      shell: pwsh\n      run: |\n        # Find Windows SDK tools dynamically (works on both Windows 2022 and 2025)\n        $sdkPath = \"C:\\Program Files (x86)\\Windows Kits\\10\\bin\"\n        \n        # First, try to find makepri and makeappx in PATH\n        $makepriCmd = Get-Command makepri -ErrorAction SilentlyContinue\n        $makeappxCmd = Get-Command makeappx -ErrorAction SilentlyContinue\n        \n        if (-not $makepriCmd -or -not $makeappxCmd) {\n          Write-Host \"SDK tools not in PATH, searching for Windows SDK...\"\n          \n          if (Test-Path $sdkPath) {\n            # Find the latest SDK version\n            $latestVersion = Get-ChildItem $sdkPath -Directory | \n              Where-Object { $_.Name -match '^\\d+\\.\\d+\\.\\d+\\.\\d+$' } | \n              Sort-Object { [Version]$_.Name } -Descending | \n              Select-Object -First 1\n            \n            if ($latestVersion) {\n              $sdkBinPath = Join-Path $latestVersion.FullName \"x64\"\n              Write-Host \"Adding 
Windows SDK to PATH: $sdkBinPath\"\n              echo \"$sdkBinPath\" >> $env:GITHUB_PATH\n              \n              # Verify tools exist\n              $makepriPath = Join-Path $sdkBinPath \"makepri.exe\"\n              $makeappxPath = Join-Path $sdkBinPath \"makeappx.exe\"\n              \n              if (-not (Test-Path $makepriPath)) {\n                throw \"makepri.exe not found at $makepriPath\"\n              }\n              if (-not (Test-Path $makeappxPath)) {\n                throw \"makeappx.exe not found at $makeappxPath\"\n              }\n              \n              Write-Host \"✓ Found makepri.exe at: $makepriPath\"\n              Write-Host \"✓ Found makeappx.exe at: $makeappxPath\"\n            } else {\n              throw \"No Windows SDK versions found in $sdkPath\"\n            }\n          } else {\n            throw \"Windows SDK not found at $sdkPath\"\n          }\n        } else {\n          Write-Host \"✓ SDK tools already in PATH\"\n          Write-Host \"  makepri: $($makepriCmd.Source)\"\n          Write-Host \"  makeappx: $($makeappxCmd.Source)\"\n        }\n        \n        # Set environment variables\n        echo 'BINARY_BUILD_DIR=dist\\algokit' >> $env:GITHUB_ENV\n        echo 'WINGET_INSTALLER=${{ inputs.artifacts_dir }}\\${{ inputs.package_name }}-winget.msix' >> $env:GITHUB_ENV\n\n    - name: Build binary\n      shell: bash\n      run: |\n        poetry run poe package_windows\n\n    - name: Add metadata to binary\n      shell: bash\n      run: |\n        echo winget > '${{ env.BINARY_BUILD_DIR }}\\_internal\\algokit\\resources\\distribution-method'\n\n    - name: Sign executable\n      if: ${{ inputs.with_codesign == 'true' }}\n      uses: azure/trusted-signing-action@v0.3.20\n      with:\n        azure-tenant-id: ${{ inputs.azure_tenant_id }}\n        azure-client-id: ${{ inputs.azure_client_id }}\n        azure-client-secret: ${{ inputs.azure_client_secret }}\n        endpoint: 
https://weu.codesigning.azure.net/\n        trusted-signing-account-name: algokit-signing\n        certificate-profile-name: algokit\n        files-folder: ${{ env.BINARY_BUILD_DIR }}\n        files-folder-filter: exe\n        file-digest: SHA256\n        timestamp-rfc3161: http://timestamp.acs.microsoft.com\n        timestamp-digest: SHA256\n\n    - name: Build winget installer\n      shell: pwsh\n      run: |\n        & .\\scripts\\winget\\build-installer.ps1 `\n          -binaryDir '${{ env.BINARY_BUILD_DIR }}' `\n          -releaseVersion '${{ inputs.version }}' `\n          -outputFile '${{ env.WINGET_INSTALLER }}'\n\n    - name: Sign winget installer\n      if: ${{ inputs.with_codesign == 'true' }}\n      uses: azure/trusted-signing-action@v0.3.20\n      with:\n        azure-tenant-id: ${{ inputs.azure_tenant_id }}\n        azure-client-id: ${{ inputs.azure_client_id }}\n        azure-client-secret: ${{ inputs.azure_client_secret }}\n        endpoint: https://weu.codesigning.azure.net/\n        trusted-signing-account-name: algokit-signing\n        certificate-profile-name: algokit\n        files-folder: ${{ env.WINGET_INSTALLER }}\n        files-folder-filter: msix\n        file-digest: SHA256\n        timestamp-rfc3161: http://timestamp.acs.microsoft.com\n        timestamp-digest: SHA256\n\n    - name: Upload winget artifact\n      uses: actions/upload-artifact@v4\n      with:\n        name: ${{ inputs.package_name }}-winget\n        path: ${{ env.WINGET_INSTALLER }}\n        if-no-files-found: error\n"
  },
  {
    "path": ".github/actions/install-apple-dev-id-cert/action.yaml",
    "content": "name: \"Install Apple Developer ID certificate\"\ndescription: \"Install Apple Developer ID certificate to macos-build keychain\"\ninputs:\n  cert_data:\n    description: \"Base64 string represents the Apple developer ID certificate\"\n    required: true\n  cert_password:\n    description: \"The password to unlock the Apple developer ID certificate\"\n    required: true\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Install cert\n      shell: bash\n      env:\n        APPLE_CERT_DATA: ${{ inputs.cert_data }}\n        APPLE_CERT_PASSWORD: ${{ inputs.cert_password }}\n      run: |\n        # Export certs\n        echo \"$APPLE_CERT_DATA\" | base64 --decode > /tmp/certs.p12\n\n        # Create keychain\n        security create-keychain -p actions macos-build.keychain\n        security default-keychain -s macos-build.keychain\n        security unlock-keychain -p actions macos-build.keychain\n        security set-keychain-settings -t 3600 -u macos-build.keychain\n        echo \"Keychain created\"\n\n        # Import certs to keychain\n        security import /tmp/certs.p12 -k ~/Library/Keychains/macos-build.keychain -P \"$APPLE_CERT_PASSWORD\" -T /usr/bin/codesign -T /usr/bin/productsign\n        echo \"Cert imported\"\n\n        # Key signing\n        security set-key-partition-list -S apple-tool:,apple: -s -k actions macos-build.keychain\n        echo \"Key signed\"\n\n        # Delete temp file\n        rm /tmp/certs.p12\n        echo \"Done\"\n"
  },
  {
    "path": ".github/actions/setup-poetry/action.yaml",
    "content": "name: \"Python Poetry Action\"\ndescription: \"An action to setup Poetry\"\nruns:\n  using: \"composite\"\n  steps:\n    # A workaround for pipx isn't installed on M1 runner.\n    # We should remove it after this issue is resolved.\n    # https://github.com/actions/runner-images/issues/9256\n    - if: ${{ runner.os == 'macOS' && runner.arch == 'ARM64' }}\n      run: |\n        pip install poetry\n        pip install poetry-plugin-export\n      shell: bash\n\n    - if: ${{ runner.os != 'macOS' || runner.arch != 'ARM64' }}\n      run: |\n        pip install --user pipx\n        pipx ensurepath\n        pipx install poetry ${{ runner.os == 'macOS' && '--python \"$Python_ROOT_DIR/bin/python\"' || '' }}\n        pipx inject poetry poetry-plugin-export\n      shell: bash\n\n    - name: Get full Python version\n      id: full-python-version\n      shell: bash\n      run: echo \"full_version=$(python -c 'import sys; print(\".\".join(map(str, sys.version_info[:3])))')\" >> $GITHUB_OUTPUT\n\n    - name: Setup poetry cache\n      uses: actions/cache@v4\n      with:\n        path: ./.venv\n        key: venv-${{ hashFiles('poetry.lock') }}-${{ runner.os }}-${{ runner.arch }}-${{ steps.full-python-version.outputs.full_version }}\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: \"pip\"\n    directory: \"/\"\n    schedule:\n      interval: \"weekly\"\n    commit-message:\n      prefix: \"chore(deps)\"\n    groups:\n      all:\n        patterns:\n          - \"*\"\n        update-types:\n          - \"minor\"\n          - \"patch\"\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "Fixes #\n\n## Proposed Changes\n\n  -\n  -\n  -\n"
  },
  {
    "path": ".github/workflows/build-binaries.yaml",
    "content": "name: Build, Test and Publish Pyinstaller Binaries\n\non:\n  workflow_call:\n    inputs:\n      production_release:\n        required: true\n        type: string\n      python_version:\n        required: true\n        type: string\n      release_version:\n        required: false\n        type: string\n\njobs:\n  build-binaries:\n    runs-on: ${{ matrix.os }}\n    strategy:\n      matrix:\n        # macos-14 is the Apple Silicon M1 runner (mac os 14)\n        # macos-15-intel is the last available Intel Mac runner (mac os 15)\n        # See https://github.com/actions/runner-images?tab=readme-ov-file#available-images\n        os: [ubuntu-22.04, windows-latest, macos-15-intel, macos-14]\n\n    steps:\n      - name: Set signing condition\n        id: signing\n        run: |\n          # Allow signing on:\n          # 1. Main branch non-PR events when production_release is true OR\n          # 2. Main branch CRON triggered events\n          if [[ \"${{ github.event_name }}\" != \"pull_request\" && \\\n                \"${{ github.ref_name }}\" == \"main\" && \\\n                (\"${{ inputs.production_release }}\" == \"true\" || \"${{ github.event_name }}\" == \"schedule\") ]]; then\n            echo \"allowed=true\" >> $GITHUB_OUTPUT\n          else\n            echo \"allowed=false\" >> $GITHUB_OUTPUT\n          fi\n        shell: bash\n\n      - name: Checkout source code\n        uses: actions/checkout@v4\n        with:\n          ref: ${{ inputs.release_version != '' && format('v{0}', inputs.release_version) || '' }}\n\n      - name: Set up Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: ${{ inputs.python_version }}\n\n      - name: Set up Poetry\n        uses: ./.github/actions/setup-poetry\n\n      - name: Install dependencies\n        run: poetry install --no-interaction\n\n      - name: Configure build environment\n        shell: bash\n        run: |\n          artifacts_dir=\"${{ github.workspace }}${{ 
runner.os == 'Windows' && '\\dist\\artifacts' || '/dist/artifacts' }}\"\n          mkdir -p $artifacts_dir\n          package_name_version=\"${{ inputs.release_version != '' && format('-{0}', inputs.release_version) || '' }}\"\n          package_name=\"algokit${package_name_version}-${{ runner.os }}_${{ runner.arch }}\"\n          echo \"PACKAGE_NAME=`echo $package_name | tr '[:upper:]' '[:lower:]'`\" >> $GITHUB_ENV\n          echo \"ARTIFACTS_DIR=${artifacts_dir}\" >> $GITHUB_ENV\n\n      # GitHub doesn't support expressions in the uses block\n      - name: Build windows binary\n        if: ${{ runner.os == 'Windows' }}\n        uses: ./.github/actions/build-binaries/windows\n        with:\n          package_name: ${{ env.PACKAGE_NAME }}\n          version: ${{ inputs.release_version }}\n          artifacts_dir: ${{ env.ARTIFACTS_DIR }}\n          with_codesign: ${{ steps.signing.outputs.allowed }}\n          azure_tenant_id: ${{ steps.signing.outputs.allowed == 'true' && secrets.AZURE_TENANT_ID || '' }}\n          azure_client_id: ${{ steps.signing.outputs.allowed == 'true' && secrets.AZURE_CLIENT_ID || '' }}\n          azure_client_secret: ${{ steps.signing.outputs.allowed == 'true' && secrets.AZURE_CLIENT_SECRET || '' }}\n\n      - name: Build linux binary\n        if: ${{ runner.os == 'Linux' }}\n        uses: ./.github/actions/build-binaries/linux\n        with:\n          package_name: ${{ env.PACKAGE_NAME }}\n          version: ${{ inputs.release_version }}\n          artifacts_dir: ${{ env.ARTIFACTS_DIR }}\n\n      - name: Install Apple Developer Id Cert\n        if: ${{ runner.os == 'macOS' && steps.signing.outputs.allowed == 'true' }}\n        uses: ./.github/actions/install-apple-dev-id-cert\n        with:\n          cert_data: ${{ secrets.APPLE_CERT_DATA }}\n          cert_password: ${{ secrets.APPLE_CERT_PASSWORD }}\n\n      - name: Build macOS binary\n        if: ${{ runner.os == 'macOS' }}\n        uses: ./.github/actions/build-binaries/macos\n      
  with:\n          package_name: ${{ env.PACKAGE_NAME }}\n          version: ${{ inputs.release_version }}\n          artifacts_dir: ${{ env.ARTIFACTS_DIR }}\n          with_codesign: ${{ steps.signing.outputs.allowed }}\n          apple_team_id: ${{ steps.signing.outputs.allowed == 'true' && secrets.APPLE_TEAM_ID || '' }}\n          apple_bundle_id: ${{ inputs.production_release == 'true' && vars.APPLE_BUNDLE_ID || format('beta.{0}', vars.APPLE_BUNDLE_ID) }}\n          apple_cert_id: ${{ steps.signing.outputs.allowed == 'true' && secrets.APPLE_CERT_ID || '' }}\n          apple_notary_user: ${{ steps.signing.outputs.allowed == 'true' && secrets.APPLE_NOTARY_USER || '' }}\n          apple_notary_password: ${{ steps.signing.outputs.allowed == 'true' && secrets.APPLE_NOTARY_PASSWORD || '' }}\n\n      - name: Add binary to path\n        run: |\n          echo \"${{ github.workspace }}${{ runner.os == 'Windows' && '\\dist\\algokit' || '/dist/algokit' }}\" >> $GITHUB_PATH\n        shell: bash\n\n      - name: Run portability tests\n        if: ${{ runner.os == 'Windows' }}\n        run: |\n          git config --global user.email \"actions@github.com\" && git config --global user.name \"github-actions\"\n          poetry run pytest tests/ -m pyinstaller_binary_tests --log-cli-level=INFO\n        shell: cmd\n\n      - name: Run portability tests\n        if: ${{ runner.os != 'Windows' }}\n        run: |\n          git config --global user.email \"actions@github.com\" && git config --global user.name \"github-actions\"\n          poetry run pytest tests/ -m pyinstaller_binary_tests --log-cli-level=INFO\n        shell: bash\n\n      # softprops/action-gh-release doesn't support the \\ character in paths\n      - name: Adjust artifacts directory for softprops/action-gh-release\n        if: ${{ runner.os == 'Windows' }}\n        shell: pwsh\n        run: |\n          $adjusted = '${{ env.ARTIFACTS_DIR }}' -replace '\\\\','/'\n          echo \"ARTIFACTS_DIR=$adjusted\" >> 
$env:GITHUB_ENV\n\n      - name: Append artifacts to release\n        if: ${{ inputs.production_release == 'true' }}\n        uses: softprops/action-gh-release@v1\n        with:\n          fail_on_unmatched_files: true\n          files: |\n            ${{ env.ARTIFACTS_DIR }}/*.*\n          tag_name: ${{ format('v{0}', inputs.release_version) }}\n          prerelease: ${{ contains(inputs.release_version, 'beta') }}\n"
  },
  {
    "path": ".github/workflows/build-python.yaml",
    "content": "name: Build, Test and Publish Python\n\non: [workflow_call]\n\njobs:\n  build-python:\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [\"ubuntu-latest\", \"macos-latest\", \"windows-latest\"]\n        python: [\"3.10\", \"3.11\", \"3.12\", \"3.13\", \"3.14\"]\n    runs-on: ${{ matrix.os }}\n    steps:\n      - name: Checkout source code\n        uses: actions/checkout@v4\n\n      - name: Set up Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: ${{ matrix.python }}\n\n      - name: Set up Node.js\n        uses: actions/setup-node@v4\n        with:\n          node-version: 20\n\n      - name: Set up Poetry\n        uses: ./.github/actions/setup-poetry\n\n      - name: Install dependencies\n        # TODO: remove fixed pipx dependency once 3.12 compatibility is addressed\n        # track here -> https://github.com/crytic/tealer/pull/209\n        run: poetry install --no-interaction --without docs && pipx install tealer==0.1.2\n\n      - name: Install PuyaPy\n        if: ${{ matrix.python == '3.12' }}\n        run: pipx install puyapy\n\n      - name: pytest\n        shell: bash\n        if: ${{ !(matrix.python == '3.12' && matrix.os == 'ubuntu-latest') }}\n        # git config is needed due to several tests relying on e2e copier invocation and copier relies on git during `copy` command\n        run: |\n          set -o pipefail\n          git config --global user.email \"actions@github.com\" && git config --global user.name \"github-actions\"\n          poetry run pytest -n auto\n        id: pytest\n\n      - name: pytest + coverage\n        shell: bash\n        if: matrix.python == '3.12' && matrix.os == 'ubuntu-latest'\n        env:\n          COVERAGE_CORE: sysmon\n        # git config is needed due to several tests relying on e2e copier invocation and copier relies on git during `copy` command\n        run: |\n          set -o pipefail\n          git config --global user.email 
\"actions@github.com\" && git config --global user.name \"github-actions\"\n          poetry run pytest -n auto --junitxml=pytest-junit.xml --cov-report=term-missing:skip-covered --cov=src | tee pytest-coverage.txt\n        id: pytest-cov\n\n      - name: Upload received snapshots (in case of failure)\n        if: failure() && (steps.pytest.outcome == 'failure' || steps.pytest-cov.outcome == 'failure')\n        uses: actions/upload-artifact@v4\n        with:\n          name: test-artifacts-${{ matrix.os }}-python${{ matrix.python }}\n          path: tests/**/*.received.txt\n\n      - name: pytest coverage comment - using Python 3.12 on ubuntu-latest\n        if: matrix.python == '3.12' && matrix.os == 'ubuntu-latest'\n        continue-on-error: true # forks fail to add a comment, so continue any way\n        uses: MishaKav/pytest-coverage-comment@main\n        with:\n          pytest-coverage-path: ./pytest-coverage.txt\n          junitxml-path: ./pytest-junit.xml\n\n      - name: Build Wheel\n        run: poetry build --format wheel\n"
  },
  {
    "path": ".github/workflows/cd.yaml",
    "content": "name: Continuous Delivery of Python package\n\non:\n  push:\n    branches:\n      - main\n    paths-ignore:\n      - \"docs/**\"\n      - \"**.md\"\n      - \".vscode/**\"\n      - \".idea/**\"\n      - \".gitignore\"\n      - \".editorconfig\"\n      - \".pre-commit-config.yaml\"\n      - \".github/**\"\n      - \"tests/**\"\n      - \"scripts/**\"\n  workflow_dispatch:\n    inputs:\n      production_release:\n        description: \"Production release?\"\n        required: true\n        default: \"true\"\n\nconcurrency: release\n\npermissions:\n  contents: write\n  packages: read\n\njobs:\n  ci-check-python:\n    name: Check Python\n    uses: ./.github/workflows/check-python.yaml\n\n  ci-build-python:\n    name: Build Python\n    uses: ./.github/workflows/build-python.yaml\n    needs: ci-check-python\n\n  release:\n    name: Release wheels to pypi\n    needs: ci-build-python\n    runs-on: ubuntu-latest\n    outputs:\n      release_version: ${{ steps.get_release_version.outputs.RELEASE_VERSION }}\n    steps:\n      - name: Generate bot token\n        uses: actions/create-github-app-token@v1\n        id: app_token\n        with:\n          app-id: ${{ secrets.BOT_ID }}\n          private-key: ${{ secrets.BOT_SK }}\n\n      - uses: actions/checkout@v4\n        with:\n          # Fetch entire repository history so we can determine version number from it\n          fetch-depth: 0\n          token: ${{ steps.app_token.outputs.token }}\n\n      - name: Set up Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: \"3.10\"\n\n      - name: Set up Poetry\n        uses: ./.github/actions/setup-poetry\n\n      - name: Install dependencies\n        run: poetry install --no-interaction --no-root\n\n      - name: Get branch name\n        shell: bash\n        run: echo \"branch=${GITHUB_REF#refs/heads/}\" >> $GITHUB_OUTPUT\n        id: get_branch\n\n      - name: Set Git user as GitHub actions\n        run: git config --global 
user.email \"179917785+engineering-ci[bot]@users.noreply.github.com\" && git config --global user.name \"engineering-ci[bot]\"\n\n      - name: Create Continuous Deployment - Feature Branch\n        if: steps.get_branch.outputs.branch != 'main' && inputs.production_release != 'true'\n        run: |\n          poetry run semantic-release \\\n            -v DEBUG \\\n            --prerelease \\\n            --patch \\\n            --define=prerelease_tag=beta+${{ steps.get_branch.outputs.branch }} \\\n            --define=branch=${{ steps.get_branch.outputs.branch }} \\\n            publish\n          release_version_tag=\"$(git describe $(git rev-list --tags --max-count=1))\"\n          gh release edit --prerelease $release_version_tag\n          echo \"RELEASE_VERSION=${release_version_tag:1}\" >> $GITHUB_ENV\n        env:\n          GH_TOKEN: ${{ steps.app_token.outputs.token }}\n          REPOSITORY_USERNAME: __token__\n          REPOSITORY_PASSWORD: ${{ secrets.PYPI_API_KEY }}\n\n      - name: Create Continuous Deployment - Beta (non-prod)\n        if: steps.get_branch.outputs.branch == 'main' && inputs.production_release != 'true'\n        run: |\n          poetry run semantic-release \\\n            -v DEBUG \\\n            --prerelease \\\n            --define=branch=main \\\n            publish\n          release_version=\"$(poetry run semantic-release print-version --current)\"\n          gh release edit --prerelease \"v$release_version\"\n          echo \"RELEASE_VERSION=$release_version\" >> $GITHUB_ENV\n        env:\n          GH_TOKEN: ${{ steps.app_token.outputs.token }}\n          REPOSITORY_USERNAME: __token__\n          REPOSITORY_PASSWORD: ${{ secrets.PYPI_API_KEY }}\n\n      - name: Create Continuous Deployment - Production\n        if: steps.get_branch.outputs.branch == 'main' && inputs.production_release == 'true'\n        run: |\n          poetry run semantic-release \\\n            -v DEBUG \\\n            --define=version_source=\"commit\" 
\\\n            --define=patch_without_tag=true \\\n            --define=upload_to_repository=true \\\n            --define=branch=main \\\n            publish\n          release_version=\"$(poetry run semantic-release print-version --current)\"\n          echo \"RELEASE_VERSION=$release_version\" >> $GITHUB_ENV\n        env:\n          GH_TOKEN: ${{ steps.app_token.outputs.token }}\n          REPOSITORY_USERNAME: __token__\n          REPOSITORY_PASSWORD: ${{ secrets.PYPI_API_KEY }}\n\n      - name: Get release version\n        shell: bash\n        run: echo \"RELEASE_VERSION=$RELEASE_VERSION\" >> $GITHUB_OUTPUT\n        id: get_release_version\n\n      - name: Upload artifact\n        uses: actions/upload-artifact@v4\n        with:\n          name: algokit-cli\n          path: dist/algokit*-py3-none-any.whl\n          if-no-files-found: error\n\n  build-and-upload-binaries:\n    name: Build and Upload Binaries\n    if: ${{ github.ref_name == 'main' }}\n    uses: ./.github/workflows/build-binaries.yaml\n    needs: release\n    with:\n      production_release: ${{ inputs.production_release }}\n      python_version: \"3.12\"\n      release_version: ${{ needs.release.outputs.release_version }}\n    secrets: inherit\n\n  cd-publish-release-packages:\n    name: Release binaries via distribution channels\n    needs:\n      - release\n      - build-and-upload-binaries\n    if: ${{ github.ref_name == 'main' && inputs.production_release == 'true' }} # Might want to adjust this to publish (pre-release) on merge as well.\n    uses: ./.github/workflows/publish-release-packages.yaml\n    with:\n      artifactName: algokit-cli\n      release_version: ${{ needs.release.outputs.release_version }}\n    secrets: inherit\n"
  },
  {
    "path": ".github/workflows/check-python.yaml",
    "content": "name: Check Python Code\n\non:\n  workflow_call:\n\njobs:\n  check-python:\n    runs-on: \"ubuntu-latest\"\n    steps:\n      - name: Checkout source code\n        uses: actions/checkout@v4\n\n      - name: Set up Python 3.12\n        uses: actions/setup-python@v5\n        with:\n          python-version: \"3.12\"\n\n      - name: Set up Poetry\n        uses: ./.github/actions/setup-poetry\n\n      - name: Install dependencies\n        run: poetry install --no-interaction --with docs\n\n      - name: Audit with pip-audit\n        run: |\n          poetry export --without=dev --without=docs -o requirements.txt\n          poetry run pip-audit -r requirements.txt\n          # If a vulnerability is found in a dependency without an available fix,\n          # it can be temporarily ignored by adding --ignore-vuln e.g. poetry run pip-audit -r requirements.txt --ignore-vuln 'GHSA-xxxxx'\n\n      - name: Check formatting with Ruff\n        run: |\n          # stop the build if there are files that don't meet formatting requirements\n          poetry run ruff format --check .\n\n      - name: Check linting with Ruff\n        run: |\n          # stop the build if there are Python syntax errors or undefined names\n          poetry run ruff check .\n\n      - name: Check types with mypy\n        run: poetry run mypy\n\n      - name: Check docs are up to date\n        run: |\n          poetry run poe docs\n          [ $(git status --porcelain docs/ | wc -l) -eq \"0\" ]\n"
  },
  {
    "path": ".github/workflows/clear-caches.yaml",
    "content": "name: Clear Repository Caches\n\non:\n  schedule:\n    # Run every 5 days at 2 AM UTC\n    - cron: \"0 2 */5 * *\"\n  workflow_dispatch: # Allow manual trigger\n\npermissions:\n  actions: write\n\njobs:\n  clear-caches:\n    name: Clear Repository Caches\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v4\n\n      - name: Clear all repository caches\n        env:\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          echo \"🧹 Starting cache cleanup process...\"\n          if gh cache delete --all --succeed-on-no-caches; then\n            echo \"🎉 Cache cleanup completed!\"\n          else\n            echo \"❌ Failed to clear caches\"\n            exit 1\n          fi\n"
  },
  {
    "path": ".github/workflows/pr.yaml",
    "content": "name: Codebase validation\n\non:\n  pull_request:\n    paths-ignore:\n      - \"README.md\"\n\n  schedule:\n    - cron: \"0 8 * * 1\" # Each monday 8 AM UTC\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}\n  cancel-in-progress: true\n\njobs:\n  pr-check:\n    name: Check Python\n    uses: ./.github/workflows/check-python.yaml\n\n  pr-build:\n    name: Build & Test Python\n    needs: pr-check\n    uses: ./.github/workflows/build-python.yaml\n\n  pr-binaries-build:\n    name: Build & Test Binaries\n    needs: pr-check\n    uses: ./.github/workflows/build-binaries.yaml\n    with:\n      production_release: \"false\"\n      python_version: \"3.12\"\n    secrets: inherit\n"
  },
  {
    "path": ".github/workflows/publish-release-packages.yaml",
    "content": "name: Publish packages to public repositories\n\non:\n  workflow_call:\n    inputs:\n      artifactName:\n        required: true\n        type: string\n        description: \"The github artifact holding the wheel file which will be published\"\n      release_version:\n        required: true\n        type: string\n        description: \"The release version that will be published (e.g. 0.1.0)\"\n      do_brew:\n        required: false\n        default: true\n        type: boolean\n        description: \"Publish to brew repository\"\n      do_snap:\n        required: false\n        default: true\n        type: boolean\n        description: \"Publish to snap repository\"\n      do_winget:\n        required: false\n        default: true\n        type: boolean\n        description: \"Publish to Winget repository\"\n  workflow_dispatch:\n    inputs:\n      artifactName:\n        required: true\n        type: string\n        description: \"The github artifact holding the wheel file which will be published\"\n      release_version:\n        required: true\n        type: string\n        description: \"The release version that will be published (e.g. 
0.1.0)\"\n      do_brew:\n        required: false\n        default: true\n        type: boolean\n        description: \"Publish to brew repository\"\n      do_snap:\n        required: false\n        default: true\n        type: boolean\n        description: \"Publish to snap repository\"\n      do_winget:\n        required: false\n        default: true\n        type: boolean\n        description: \"Publish to Winget repository\"\n\njobs:\n  publish-brew:\n    runs-on: ubuntu-latest\n    if: ${{ inputs.do_brew }}\n    steps:\n      - uses: actions/create-github-app-token@v1\n        id: app-token\n        with:\n          app-id: ${{ secrets.BOT_ID }}\n          private-key: ${{ secrets.BOT_SK }}\n          repositories: homebrew-tap\n          owner: algorandfoundation\n\n      - name: Checkout source code\n        uses: actions/checkout@v4\n\n      - name: Download wheel from release\n        run: gh release download v${{ inputs.release_version }} --pattern \"*.whl\" --dir dist\n        env:\n          GH_TOKEN: ${{ github.token }}\n\n      - name: Download binary artifact from release\n        run: gh release download v${{ inputs.release_version }} --pattern \"*-brew.tar.gz\" --dir dist\n        env:\n          GH_TOKEN: ${{ github.token }}\n\n      - name: Set Git user as GitHub actions\n        run: git config --global user.email \"179917785+engineering-ci[bot]@users.noreply.github.com\" && git config --global user.name \"engineering-ci[bot]\"\n\n      - name: Update homebrew cask\n        run: scripts/update-brew-cask.sh \"dist/algokit*-py3-none-any.whl\" \"dist/algokit*-macos_arm64-brew.tar.gz\" \"dist/algokit*-macos_x64-brew.tar.gz\" \"algorandfoundation/homebrew-tap\"\n        env:\n          TAP_GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}\n\n  publish-winget:\n    runs-on: windows-latest\n    if: ${{ inputs.do_winget }}\n    steps:\n      - name: Checkout source code\n        uses: actions/checkout@v4\n\n      - name: Publish to winget\n        
shell: pwsh\n        env:\n          WINGET_GITHUB_TOKEN: ${{ secrets.WINGET_GITHUB_TOKEN }}\n        run: |\n          echo 'Publishing to winget'\n          & .\\scripts\\winget\\update-package.ps1 `\n            -releaseVersion '${{ inputs.release_version }}'\n\n  publish-snap:\n    runs-on: ubuntu-latest\n    if: ${{ inputs.do_snap }}\n\n    steps:\n      - name: Checkout source code\n        uses: actions/checkout@v4\n\n      - name: Download binary artifact from release\n        run: |\n          gh release download v${{ inputs.release_version }} --pattern \"*-snap.tar.gz\" --dir dist\n          BINARY_PATH=$(ls dist/*-snap.tar.gz)\n          echo \"BINARY_PATH=$BINARY_PATH\" >> $GITHUB_ENV\n        env:\n          GH_TOKEN: ${{ github.token }}\n\n      - name: Generate snapcraft.yaml\n        run: |\n          ./scripts/snap/create-snapcraft-yaml.sh ${{ github.workspace }} ${{ inputs.release_version }} ${{ env.BINARY_PATH }} \"stable\"\n\n      - name: Upload snapcraft.yaml as reference artifact\n        uses: actions/upload-artifact@v4\n        with:\n          name: snapcraft-yaml\n          path: ${{ github.workspace }}/snap/snapcraft.yaml\n\n      - name: Build snap\n        uses: snapcore/action-build@v1\n        with:\n          snapcraft-args: --target-arch amd64\n\n      - name: Set path to snap binary\n        shell: bash\n        run: |\n          echo \"SNAP_BINARY_PATH=$(find ${{ github.workspace }} -name '*.snap')\" >> $GITHUB_ENV\n\n      - name: Publish snap\n        uses: snapcore/action-publish@v1\n        env:\n          SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT_RELEASE_TOKEN }}\n        with:\n          snap: ${{ env.SNAP_BINARY_PATH }}\n          release: stable\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\npytest-coverage.txt\npytest-junit.xml\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# poetry\n#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\n#   commonly ignored for libraries.\n#   
https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\n#poetry.lock\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/#use-with-ide\n.pdm.toml\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Ruff (linter)\n.ruff_cache/\n\n# Cython debug symbols\ncython_debug/\n\n# PyCharm\n.idea/\n!.idea/runConfigurations\n\n# macOS\n.DS_Store\n\n# we use this file for quickly editing run/debug args, it shouldn't be committed\n/args.in\n\n# Received approval test files\n*.received.*\n\n#Sphinx\n.doctrees/\ndocs/cli/temp.md\n\n# Miscellaneous\nci_token.txt\n\n.algokit/\n\ntemp/\n"
  },
  {
    "path": ".idea/runConfigurations/Run_AlgoKit_CLI.xml",
    "content": "<component name=\"ProjectRunConfigurationManager\">\n  <configuration default=\"false\" name=\"Run AlgoKit CLI\" type=\"PythonConfigurationType\" factoryName=\"Python\">\n    <module name=\"algokit-cli\" />\n    <option name=\"INTERPRETER_OPTIONS\" value=\"\" />\n    <option name=\"PARENT_ENVS\" value=\"true\" />\n    <envs>\n      <env name=\"PYTHONUNBUFFERED\" value=\"1\" />\n    </envs>\n    <option name=\"SDK_HOME\" value=\"\" />\n    <option name=\"WORKING_DIRECTORY\" value=\"\" />\n    <option name=\"IS_MODULE_SDK\" value=\"true\" />\n    <option name=\"ADD_CONTENT_ROOTS\" value=\"true\" />\n    <option name=\"ADD_SOURCE_ROOTS\" value=\"true\" />\n    <option name=\"SCRIPT_NAME\" value=\"debug\" />\n    <option name=\"PARAMETERS\" value=\"\" />\n    <option name=\"SHOW_COMMAND_LINE\" value=\"false\" />\n    <option name=\"EMULATE_TERMINAL\" value=\"false\" />\n    <option name=\"MODULE_MODE\" value=\"true\" />\n    <option name=\"REDIRECT_INPUT\" value=\"false\" />\n    <option name=\"INPUT_FILE\" value=\"\" />\n    <method v=\"2\" />\n  </configuration>\n</component>\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "repos:\n  - repo: local\n    hooks:\n      - id: ruff-format\n        name: ruff-format\n        description: \"Run 'ruff format' for extremely fast Python formatting\"\n        entry: poetry run ruff format\n        language: system\n        types: [python]\n        args: [--no-cache]\n        require_serial: true\n        exclude: ^src/.*core/_vendor/\n      - id: ruff\n        name: ruff\n        description: \"Run 'ruff' for extremely fast Python linting\"\n        entry: poetry run ruff check\n        language: system\n        types: [python]\n        args: [--fix, --no-cache]\n        require_serial: false\n        files: \"^(src|tests)/\"\n        exclude: \"^src/algokit/core/_vendor/\"\n      - id: mypy\n        name: mypy\n        description: \"`mypy` will check Python types for correctness\"\n        entry: poetry run mypy\n        language: system\n        types_or: [python, pyi]\n        require_serial: true\n        files: \"^(src|tests)/\"\n        exclude: \"^src/algokit/core/_vendor/\"\n"
  },
  {
    "path": ".vscode/extensions.json",
    "content": "{\n  \"recommendations\": [\n    \"esbenp.prettier-vscode\",\n    \"ms-python.python\",\n    \"ms-python.vscode-pylance\",\n    \"charliermarsh.ruff\",\n    \"tamasfe.even-better-toml\",\n    \"editorconfig.editorconfig\",\n    \"matangover.mypy\"\n  ]\n}\n"
  },
  {
    "path": ".vscode/launch.json",
    "content": "{\n  // Use IntelliSense to learn about possible attributes.\n  // Hover to view descriptions of existing attributes.\n  // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n  \"version\": \"0.2.0\",\n  \"configurations\": [\n    {\n      \"name\": \"Python: Module\",\n      \"type\": \"debugpy\",\n      \"cwd\": \"${workspaceFolder}\",\n      \"request\": \"launch\",\n      \"module\": \"debug\",\n      \"justMyCode\": false,\n      \"console\": \"integratedTerminal\"\n    },\n    {\n      \"name\": \"Python: Debug Pytest\",\n      \"type\": \"debugpy\",\n      \"request\": \"launch\",\n      \"program\": \"${file}\",\n      \"purpose\": [\"debug-test\"],\n      \"console\": \"integratedTerminal\",\n      \"justMyCode\": false,\n      \"env\": {\n        \"PYTHONPATH\": \"${workspaceFolder}/src\",\n        \"PYTEST_ADDOPTS\": \"--no-cov\"\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": ".vscode/settings.json",
    "content": "{\n  // General - see also /.editorconfig\n  \"editor.formatOnSave\": true,\n  \"editor.codeActionsOnSave\": {\n    \"source.fixAll\": \"explicit\"\n  },\n  \"editor.defaultFormatter\": \"esbenp.prettier-vscode\",\n  \"files.exclude\": {\n    \"**/.git\": true,\n    \"**/.DS_Store\": true,\n    \"**/Thumbs.db\": true,\n    \".mypy_cache\": true,\n    \".pytest_cache\": true,\n    \".ruff_cache\": true,\n    \"**/__pycache__\": true,\n    \".idea\": true\n  },\n\n  // Python\n  \"python.defaultInterpreterPath\": \"${workspaceFolder}/.venv\",\n  \"python.analysis.extraPaths\": [\"${workspaceFolder}/src\"],\n  \"[python]\": {\n    \"editor.defaultFormatter\": \"charliermarsh.ruff\"\n  },\n  \"python.analysis.typeCheckingMode\": \"basic\",\n  \"ruff.enable\": true,\n  \"ruff.lint.run\": \"onSave\",\n  \"ruff.lint.args\": [\"--config=pyproject.toml\"],\n  \"ruff.importStrategy\": \"fromEnvironment\",\n  \"ruff.fixAll\": true, //lint and fix all files in workspace\n  \"ruff.organizeImports\": true, //organize imports on save\n  \"ruff.codeAction.disableRuleComment\": {\n    \"enable\": true\n  },\n  \"ruff.codeAction.fixViolation\": {\n    \"enable\": true\n  },\n\n  \"mypy.configFile\": \"pyproject.toml\",\n  // set to empty array to use config from project\n  \"mypy.targets\": [],\n  \"mypy.runUsingActiveInterpreter\": true,\n  \"python.testing.unittestEnabled\": false,\n  \"python.testing.pytestEnabled\": true,\n\n  // PowerShell\n  \"[powershell]\": {\n    \"editor.defaultFormatter\": \"ms-vscode.powershell\"\n  },\n  \"powershell.codeFormatting.preset\": \"Stroustrup\",\n  \"python.testing.pytestArgs\": [\".\"]\n}\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\n<!--next-version-placeholder-->\n\n## v2.10.2 (2026-01-30)\n\n\n\n## v2.10.1 (2026-01-29)\n\n### Fix\n\n* Update versioning constraint to support 3.14 ([#706](https://github.com/algorandfoundation/algokit-cli/issues/706)) ([`b6be55a`](https://github.com/algorandfoundation/algokit-cli/commit/b6be55ac219fcc6f46bc938644a272586efa4ff8))\n\n## v2.10.0 (2025-12-12)\n\n### Feature\n\n* Update to use the localnet specific conduit algod importer ([#700](https://github.com/algorandfoundation/algokit-cli/issues/700)) ([`c9b423a`](https://github.com/algorandfoundation/algokit-cli/commit/c9b423afa96e6ae91d29064fea09a4b5293c4c55))\n* Add check flag to control when image update checks are run ([`dc39e49`](https://github.com/algorandfoundation/algokit-cli/commit/dc39e495b489bb110261cc5199d12a684fae2870))\n* Reduce frequency of localnet image update checks ([`3de015e`](https://github.com/algorandfoundation/algokit-cli/commit/3de015e60a32f810891b7f8d6d475f9595ff9bda))\n\n### Documentation\n\n* Updated invalid links and fixed some typos in the docs ([#693](https://github.com/algorandfoundation/algokit-cli/issues/693)) ([`35518c3`](https://github.com/algorandfoundation/algokit-cli/commit/35518c332b789ca7b3a70ed3321e8d295d5c3ab9))\n* Removed obsolete info and fixed typos ([#687](https://github.com/algorandfoundation/algokit-cli/issues/687)) ([`330cc05`](https://github.com/algorandfoundation/algokit-cli/commit/330cc059bf7bf1966a093edc333d3d4d87047276))\n\n## v2.9.1 (2025-09-11)\n\n### Fix\n\n* Dynamically detect Windows SDK tools for GitHub Actions compatibility ([#686](https://github.com/algorandfoundation/algokit-cli/issues/686)) ([`db88fb9`](https://github.com/algorandfoundation/algokit-cli/commit/db88fb9129061f7fcb8282c7745a9bd3178b872d))\n* Enables package manager command translation ([#684](https://github.com/algorandfoundation/algokit-cli/issues/684)) 
([`a08eae5`](https://github.com/algorandfoundation/algokit-cli/commit/a08eae534cd9898687ba80400ba132221fcea69d))\n\n## v2.9.0 (2025-09-03)\n\n### Feature\n\n* Pnpm and uv support ([#672](https://github.com/algorandfoundation/algokit-cli/issues/672)) ([`52293f4`](https://github.com/algorandfoundation/algokit-cli/commit/52293f43e7d0c441fb2863f5871a3ae7461119cd))\n\n### Fix\n\n* Pin prompt_toolkit as the latest version breaks algokit init ([`d1dc658`](https://github.com/algorandfoundation/algokit-cli/commit/d1dc658f4fada935d8e8ccc31067ce695bd487fb))\n\n## v2.8.0 (2025-07-29)\n\n### Feature\n\n* Replace GITHUB_TOKEN with engineering-ci bot token in release workflows ([`4070cb0`](https://github.com/algorandfoundation/algokit-cli/commit/4070cb013b9af77fc4ae66cbb6edb8de7c431ce2))\n\n### Fix\n\n* Update the brew cask to not require sudo on install ([`a82f74b`](https://github.com/algorandfoundation/algokit-cli/commit/a82f74ba90d3ab61ddba98356ed512579d158e38))\n* Bust cache by tweaking the poetry lock ([#662](https://github.com/algorandfoundation/algokit-cli/issues/662)) ([`a137989`](https://github.com/algorandfoundation/algokit-cli/commit/a137989f1d679ed47e6219998f488bbf5571a74c))\n\n## v2.7.1 (2025-06-19)\n\n### Fix\n\n* Adds health check for indexer ([#653](https://github.com/algorandfoundation/algokit-cli/issues/653)) ([`ceed902`](https://github.com/algorandfoundation/algokit-cli/commit/ceed902315770a8af68219e1eca52a37d4af7cf2))\n\n## v2.7.0 (2025-05-15)\n\n### Feature\n\n* Add algokit init example command ([`70ed744`](https://github.com/algorandfoundation/algokit-cli/commit/70ed744574a8689374b3396c224c636966848e47))\n* Added the choose example selector ([`da5cc9e`](https://github.com/algorandfoundation/algokit-cli/commit/da5cc9e538dae458230a0ff78ff3ab7671777ddb))\n* Adding new init command structure with example subcommand ([`3fb110a`](https://github.com/algorandfoundation/algokit-cli/commit/3fb110afa867a65d67aeb605a3a5c23f651d278f))\n\n### Fix\n\n* Add fallback for 
browser opening in explore command ([`71088cf`](https://github.com/algorandfoundation/algokit-cli/commit/71088cf031c6575c7368f60ad2d2ea20311fe1cd))\n* Add fallback for browser opening in explore command ([`d1be606`](https://github.com/algorandfoundation/algokit-cli/commit/d1be606cc252affb5de0516c9c973ab176b574b8))\n* Compile ts hangs when puya-ts is detected but not installed in the project ([#628](https://github.com/algorandfoundation/algokit-cli/issues/628)) ([`f3cd199`](https://github.com/algorandfoundation/algokit-cli/commit/f3cd19967068e4310eaae5acbf85d8ec445e5181))\n\n## v2.6.2 (2025-03-28)\n\n### Fix\n\n* Raise min required copier version to 3.6.0 ([#623](https://github.com/algorandfoundation/algokit-cli/issues/623)) ([`9ef7b51`](https://github.com/algorandfoundation/algokit-cli/commit/9ef7b51112cdf4e27a4ef89d1df6dedeecf9448e))\n\n## v2.6.1 (2025-03-27)\n\n### Fix\n\n* Handle non zero exit code when running npm ls ([#622](https://github.com/algorandfoundation/algokit-cli/issues/622)) ([`3d6f3c9`](https://github.com/algorandfoundation/algokit-cli/commit/3d6f3c967302398dbbfab41ba96216e412d068a8))\n* Resolve project run issue by forcing utf-8 encoding on windows ([#620](https://github.com/algorandfoundation/algokit-cli/issues/620)) ([`b837417`](https://github.com/algorandfoundation/algokit-cli/commit/b837417e7f6b01c6ea5abda3dc664cd7c4531e85))\n\n### Documentation\n\n* AK-194: Updated dev portal links ([#615](https://github.com/algorandfoundation/algokit-cli/issues/615)) ([`52bb519`](https://github.com/algorandfoundation/algokit-cli/commit/52bb5191fd7153a7f2ec32b6646a33d3deabdec3))\n* Clarify when python is needed ([#617](https://github.com/algorandfoundation/algokit-cli/issues/617)) ([`4661f71`](https://github.com/algorandfoundation/algokit-cli/commit/4661f71a97293f3697476a90fea4054b1f516dec))\n\n## v2.6.0 (2025-03-19)\n\n### Feature\n\n* Add typescript template integration ([#614](https://github.com/algorandfoundation/algokit-cli/issues/614)) 
([`7fc345c`](https://github.com/algorandfoundation/algokit-cli/commit/7fc345c783258325de3231de9488191e4b2932aa))\n* Puya-ts support under `compile` command group ([#612](https://github.com/algorandfoundation/algokit-cli/issues/612)) ([`9bd9056`](https://github.com/algorandfoundation/algokit-cli/commit/9bd90560da69cf511b0452d771d53a0c588214c2))\n* Npm runs `ci` instead of `install` when CI flag is present or passed explicitly ([#605](https://github.com/algorandfoundation/algokit-cli/issues/605)) ([`cc8a95d`](https://github.com/algorandfoundation/algokit-cli/commit/cc8a95d529452467d3e2849f9d998ae55178a2b5))\n\n### Fix\n\n* Fix configured localnet ([#610](https://github.com/algorandfoundation/algokit-cli/issues/610)) ([`ba91e82`](https://github.com/algorandfoundation/algokit-cli/commit/ba91e82a226867cb764bf36fc43654924ea5ba4c))\n\n### Documentation\n\n* Removed mentions to dappflow from ADR document. ([#601](https://github.com/algorandfoundation/algokit-cli/issues/601)) ([`6691cc7`](https://github.com/algorandfoundation/algokit-cli/commit/6691cc70e1cbf812e95cbab6b3ce3143a3b6fd77))\n* Update map image ([#602](https://github.com/algorandfoundation/algokit-cli/issues/602)) ([`eb03bf7`](https://github.com/algorandfoundation/algokit-cli/commit/eb03bf7f7541286097627fe424c64070bd59f108))\n\n## v2.5.2 (2025-01-07)\n\n### Fix\n\n* Handle client generation in a dir containing multiple app spec types ([#599](https://github.com/algorandfoundation/algokit-cli/issues/599)) ([`1a479a1`](https://github.com/algorandfoundation/algokit-cli/commit/1a479a1e33ae4ba0e03a12c12210372cb8f59be1))\n\n### Documentation\n\n* Add info about keyring in wsl2 ([#600](https://github.com/algorandfoundation/algokit-cli/issues/600)) ([`5e5df31`](https://github.com/algorandfoundation/algokit-cli/commit/5e5df31c79b99f375726a57780c0732f2bafcf73))\n\n## v2.5.1 (2024-11-28)\n\n### Fix\n\n* Duplicate code-workspace declarations ([#596](https://github.com/algorandfoundation/algokit-cli/issues/596)) 
([`505c6c6`](https://github.com/algorandfoundation/algokit-cli/commit/505c6c624d869d4bef9adaa973dad7d735b7bae0))\n\n## v2.5.0 (2024-11-25)\n\n### Feature\n\n* Add support for ARC56 typed client generation ([#595](https://github.com/algorandfoundation/algokit-cli/issues/595)) ([`72807c3`](https://github.com/algorandfoundation/algokit-cli/commit/72807c3b7e8114528e026a0fa34b8ba07a49fc8a))\n\n### Documentation\n\n* Adding extra troubleshooting case; extra prereq for winget installation ([#591](https://github.com/algorandfoundation/algokit-cli/issues/591)) ([`84d7690`](https://github.com/algorandfoundation/algokit-cli/commit/84d769014bf71ddd798cace122bad141b1429416))\n\n## v2.4.3 (2024-10-29)\n\n### Fix\n\n* Edge cases for running interactive goal commands with --tty flag ([#589](https://github.com/algorandfoundation/algokit-cli/issues/589)) ([`e280a7f`](https://github.com/algorandfoundation/algokit-cli/commit/e280a7f9e6208b4a15f384809a2f0c8607ed9fb7))\n\n## v2.4.2 (2024-10-10)\n\n### Documentation\n\n* More details on snap install in README.md ([#577](https://github.com/algorandfoundation/algokit-cli/issues/577)) ([`44f900f`](https://github.com/algorandfoundation/algokit-cli/commit/44f900ffdf7a9021aa2ddcf97a45a384852ce6f8))\n\n## v2.4.1 (2024-09-19)\n\n### Fix\n\n* Pinning requests dependency ([#575](https://github.com/algorandfoundation/algokit-cli/issues/575)) ([`2fc2dfe`](https://github.com/algorandfoundation/algokit-cli/commit/2fc2dfe71d195aaadd3c0d20017ce3ceb2f0d414))\n\n## v2.4.0 (2024-09-19)\n\n### Feature\n\n* Adding flags to enable dev mode; flags for custom path to localnet config ([#569](https://github.com/algorandfoundation/algokit-cli/issues/569)) ([`09c2c10`](https://github.com/algorandfoundation/algokit-cli/commit/09c2c107d037c40a85c545bce9fc97de79c2c157))\n\n## v2.3.0 (2024-08-23)\n\n### Feature\n\n* Explicit sequential execution; extra args param for run/deploy commands ([#557](https://github.com/algorandfoundation/algokit-cli/issues/557)) 
([`41f5c7b`](https://github.com/algorandfoundation/algokit-cli/commit/41f5c7b631a90a06d51676efd1f437416c8cb750))\n\n## v2.2.3 (2024-08-02)\n\n### Fix\n\n* Add warning for cases when `algokit explore` is used within wsl ([#549](https://github.com/algorandfoundation/algokit-cli/issues/549)) ([`3e2d017`](https://github.com/algorandfoundation/algokit-cli/commit/3e2d0170bcc8870dda3d62141b93866fe8bcad1d))\n* Build binaries with the correct version information ([#550](https://github.com/algorandfoundation/algokit-cli/issues/550)) ([`0725d8f`](https://github.com/algorandfoundation/algokit-cli/commit/0725d8f16c5999f706de486852c96a2f0d16603e))\n\n## v2.2.2 (2024-07-29)\n\n\n\n## v2.2.1 (2024-07-23)\n\n### Documentation\n\n* Extra notes on edge case with python installation on debian ([#539](https://github.com/algorandfoundation/algokit-cli/issues/539)) ([`f358e24`](https://github.com/algorandfoundation/algokit-cli/commit/f358e24122df759585034c7e3541f5a8f4c446f1))\n\n## v2.2.0 (2024-07-08)\n\n### Feature\n\n* Adding default algorand network configs to use when no .env.{network} found ([#533](https://github.com/algorandfoundation/algokit-cli/issues/533)) ([`a726756`](https://github.com/algorandfoundation/algokit-cli/commit/a726756b2f96557a8e54e1f89c4926dc206265d3))\n\n### Fix\n\n* Given that .copier-answers.yml is now expected at .algokit folder, improve defaults lookup ([#535](https://github.com/algorandfoundation/algokit-cli/issues/535)) ([`5d319d3`](https://github.com/algorandfoundation/algokit-cli/commit/5d319d323a5a38e77d65895048e6a2a30e8b64d8))\n\n## v2.1.4 (2024-06-27)\n\n### Fix\n\n* Filter null values from asset metadata ([#529](https://github.com/algorandfoundation/algokit-cli/issues/529)) ([`05411d6`](https://github.com/algorandfoundation/algokit-cli/commit/05411d638c0ef7600df3428a5da74f8666d226a7))\n\n## v2.1.3 (2024-06-25)\n\n### Fix\n\n* Some localnet proxy tweaks ([#526](https://github.com/algorandfoundation/algokit-cli/issues/526)) 
([`2c7999d`](https://github.com/algorandfoundation/algokit-cli/commit/2c7999daefc5071dd89a6dcdc2f0a3f7f3ef819b))\n\n## v2.1.2 (2024-06-20)\n\n### Fix\n\n* Localnet status and proxy dns issue ([#525](https://github.com/algorandfoundation/algokit-cli/issues/525)) ([`a0c5bc6`](https://github.com/algorandfoundation/algokit-cli/commit/a0c5bc69e71fc64864f9f10ddd92f8c967f03416))\n* Add localnet proxy to add Access-Control-Allow-Private-Network header ([#523](https://github.com/algorandfoundation/algokit-cli/issues/523)) ([`2267e9e`](https://github.com/algorandfoundation/algokit-cli/commit/2267e9e2ff2b707dc246021578b8dbc6bd43a021))\n\n### Documentation\n\n* Moving descriptions of workspace vs standalone to project.md ([#522](https://github.com/algorandfoundation/algokit-cli/issues/522)) ([`946c53a`](https://github.com/algorandfoundation/algokit-cli/commit/946c53a3d90fad983a21856c0cad969f37d90e6f))\n* Minor revamp in project/config docs ([#521](https://github.com/algorandfoundation/algokit-cli/issues/521)) ([`872f6b1`](https://github.com/algorandfoundation/algokit-cli/commit/872f6b1dd01f843255fa2b76cb7e857c23aee4aa))\n\n## v2.1.1 (2024-06-17)\n\n### Fix\n\n* Ensure utf-8 is used as part of cli animate method invocation (windows compatibility) ([#518](https://github.com/algorandfoundation/algokit-cli/issues/518)) ([`ba9e090`](https://github.com/algorandfoundation/algokit-cli/commit/ba9e0902880298beb6a883096d26b25e77d31422))\n\n## v2.1.0 (2024-06-12)\n\n### Feature\n\n* GitHub Codespaces support in LocalNet command group ([#456](https://github.com/algorandfoundation/algokit-cli/issues/456)) ([`7eeaead`](https://github.com/algorandfoundation/algokit-cli/commit/7eeaeadc577dd12fa93c87cae18da497770d6f35))\n\n### Documentation\n\n* Updated docs to include updated links for project features ([`7c56b18`](https://github.com/algorandfoundation/algokit-cli/commit/7c56b181d984f3301e2abc59ea91128f2845ec66))\n\n## v2.0.6 (2024-05-22)\n\n### Fix\n\n* Remove ConsensusProtocol = future; 
unpin algod ([#505](https://github.com/algorandfoundation/algokit-cli/issues/505)) ([`55fbda5`](https://github.com/algorandfoundation/algokit-cli/commit/55fbda511310c094675dca7ff45131006083df66))\n\n## v2.0.5 (2024-05-21)\n\n### Fix\n\n* Pin localnet algod container to fix conduit issue in latest algod ([#502](https://github.com/algorandfoundation/algokit-cli/issues/502)) ([`6e760e9`](https://github.com/algorandfoundation/algokit-cli/commit/6e760e9c887187f053ea6a11b969ddc8cda3fb6a))\n\n## v2.0.4 (2024-05-20)\n\n### Fix\n\n* Task transfer on rekeyed account and update dependencies ([#498](https://github.com/algorandfoundation/algokit-cli/issues/498)) ([`8592cbf`](https://github.com/algorandfoundation/algokit-cli/commit/8592cbff4b9c45b4394eb853761255a8d11b7510))\n\n### Documentation\n\n* Add troubleshooting section ([#496](https://github.com/algorandfoundation/algokit-cli/issues/496)) ([`ef1a504`](https://github.com/algorandfoundation/algokit-cli/commit/ef1a5046a7b3ba6ddf22e65a4b1fe7868196e625))\n* Refine quick start ([#487](https://github.com/algorandfoundation/algokit-cli/issues/487)) ([`1964dec`](https://github.com/algorandfoundation/algokit-cli/commit/1964decd340a7847005cab4be408f6273fd5cb53))\n* Remove spelling mistake in intro.md instalation docs ([#491](https://github.com/algorandfoundation/algokit-cli/issues/491)) ([`70c55e0`](https://github.com/algorandfoundation/algokit-cli/commit/70c55e050caf4309c76ae5aae1dd7a284db93382))\n\n## v2.0.3 (2024-04-16)\n\n### Fix\n\n* Remove deprecated version from localnet compose file ([#476](https://github.com/algorandfoundation/algokit-cli/issues/476)) ([`4a3b1f0`](https://github.com/algorandfoundation/algokit-cli/commit/4a3b1f0c2dec5d03d12b42617ff546bd94d1da57))\n\n### Documentation\n\n* Minor refinements in npm min version spec ([#474](https://github.com/algorandfoundation/algokit-cli/issues/474)) ([`a887430`](https://github.com/algorandfoundation/algokit-cli/commit/a8874304319efc3449336a4bbb817471613d6743))\n\n## v2.0.2 
(2024-04-02)\n\n### Fix\n\n* Pin pyyaml-include transitive dep ([#472](https://github.com/algorandfoundation/algokit-cli/issues/472)) ([`970536c`](https://github.com/algorandfoundation/algokit-cli/commit/970536cb7246fcaf6b84aacda95a74f8ff9bc285))\n\n### Documentation\n\n* Adding node.js to prerequisites for installation as FYI ([`8147173`](https://github.com/algorandfoundation/algokit-cli/commit/8147173ba827ac3be4c4dec28a97abc16abbd2cf))\n\n## v2.0.1 (2024-03-29)\n\n### Documentation\n\n* Few tweaks post release ([#465](https://github.com/algorandfoundation/algokit-cli/issues/465)) ([`a4a5645`](https://github.com/algorandfoundation/algokit-cli/commit/a4a5645c11790eba1162f7d80ade26fe40f83944))\n\n## v2.0.0 (2024-03-27)\n\n### Feature\n\n* Algokit-cli v2 ([#462](https://github.com/algorandfoundation/algokit-cli/issues/462)) ([`182c449`](https://github.com/algorandfoundation/algokit-cli/commit/182c449544e4a23e17919e9629dfdc5ddbf399a5))\n* LocalNet should run as an archival node so that you can access all blocks (useful for testing) ([#461](https://github.com/algorandfoundation/algokit-cli/issues/461)) ([`794cccc`](https://github.com/algorandfoundation/algokit-cli/commit/794cccce2bb4aeccfe56813af754406b87ba5112))\n\n### Breaking\n\n* 2.0 release ([`182c449`](https://github.com/algorandfoundation/algokit-cli/commit/182c449544e4a23e17919e9629dfdc5ddbf399a5))\n\n## v1.13.1 (2024-03-20)\n\n### Fix\n\n* Create the npm dir in the app data directory on windows, as npx needs it ([#458](https://github.com/algorandfoundation/algokit-cli/issues/458)) ([`3195a1c`](https://github.com/algorandfoundation/algokit-cli/commit/3195a1c8cd21835d04a472d0d156ca08ef9030ec))\n\n## v1.13.0 (2024-03-13)\n\n### Feature\n\n* Add command to compile python to TEAL with Puyapy ([`1030799`](https://github.com/algorandfoundation/algokit-cli/commit/10307990a07fd3fa8ba60f6886f5b4be722dc065))\n\n### Fix\n\n* Adjust how we run npx, so it supports all windows versions 
([#454](https://github.com/algorandfoundation/algokit-cli/issues/454)) ([`a997953`](https://github.com/algorandfoundation/algokit-cli/commit/a997953871251b0f1dfed3ad6e2cb8901c2c5cd3))\n\n### Documentation\n\n* Ref commit for snapcraft ([#452](https://github.com/algorandfoundation/algokit-cli/issues/452)) ([`0ab21bc`](https://github.com/algorandfoundation/algokit-cli/commit/0ab21bcd2b7a6d188791a3480eab7fe1b885667d))\n* Update playground init docs ([#451](https://github.com/algorandfoundation/algokit-cli/issues/451)) ([`1a15d5d`](https://github.com/algorandfoundation/algokit-cli/commit/1a15d5def4e610f0b10a41918f5d4055dea19efc))\n* Change last name in 2023-07-19_advanced_generate_command.md ([#448](https://github.com/algorandfoundation/algokit-cli/issues/448)) ([`8df02df`](https://github.com/algorandfoundation/algokit-cli/commit/8df02df981c0b68cfea2a4dc4698759d5e393974))\n\n## v1.12.3 (2024-03-06)\n\n### Fix\n\n* Path resolution to ensure git is initialized at workspace level ([#447](https://github.com/algorandfoundation/algokit-cli/issues/447)) ([`4fa1eaf`](https://github.com/algorandfoundation/algokit-cli/commit/4fa1eafe604129bb0595d0774ee4eb1484d3c13b))\n\n### Documentation\n\n* Updating dockerhub links on localnet docs ([#445](https://github.com/algorandfoundation/algokit-cli/issues/445)) ([`9d4df31`](https://github.com/algorandfoundation/algokit-cli/commit/9d4df31abe1de07909c7d03de1dc2dcc4334d7dc))\n\n## v1.12.2 (2024-03-01)\n\n### Fix\n\n* Algod container proper SIGTERM handling ([#438](https://github.com/algorandfoundation/algokit-cli/issues/438)) ([`1a654ca`](https://github.com/algorandfoundation/algokit-cli/commit/1a654ca6b1519beda0a1d23fefb9673591cd5eea))\n\n### Documentation\n\n* Update named localnet documents on config file locations ([#444](https://github.com/algorandfoundation/algokit-cli/issues/444)) ([`643ab01`](https://github.com/algorandfoundation/algokit-cli/commit/643ab011bae488d24c18e8351e29de439a31c24e))\n* Minor patch in the badge 
([#440](https://github.com/algorandfoundation/algokit-cli/issues/440)) ([`7d82db0`](https://github.com/algorandfoundation/algokit-cli/commit/7d82db08a127ef486f31687affca184f1229039b))\n\n## v1.12.1 (2024-02-26)\n\n\n\n## v1.12.0 (2024-02-26)\n\n### Feature\n\n* Init wizard v2 ([#415](https://github.com/algorandfoundation/algokit-cli/issues/415)) ([`55d6922`](https://github.com/algorandfoundation/algokit-cli/commit/55d6922e5ae1c8b1f6e42a910f387739344f53a5))\n\n### Fix\n\n* Upload windows artifact to release ([#429](https://github.com/algorandfoundation/algokit-cli/issues/429)) ([`d922a49`](https://github.com/algorandfoundation/algokit-cli/commit/d922a493f8f92c61ae56df0043a356f8fd523f4d))\n\n## v1.11.4 (2024-02-19)\n\n### Fix\n\n* Fix issue of goal command interacting with filename containing dot ([#424](https://github.com/algorandfoundation/algokit-cli/issues/424)) ([`22ece81`](https://github.com/algorandfoundation/algokit-cli/commit/22ece811af63c69734169029db1a477730d1e0ad))\n\n### Documentation\n\n* Adr init wizard v2 and related improvements ([#411](https://github.com/algorandfoundation/algokit-cli/issues/411)) ([`8c5445a`](https://github.com/algorandfoundation/algokit-cli/commit/8c5445a558e503590fee636a9e5826026a5aacaf))\n\n## v1.11.3 (2024-02-08)\n\n### Fix\n\n* Binary execution mode compatibility ([#406](https://github.com/algorandfoundation/algokit-cli/issues/406)) ([`5cb9b1f`](https://github.com/algorandfoundation/algokit-cli/commit/5cb9b1f8e1f7fc3cc7114cba5cef78c9fcc7df95))\n\n### Documentation\n\n* ADR - native binaries distribution via snap/winget/brew ([#404](https://github.com/algorandfoundation/algokit-cli/issues/404)) ([`b7301bf`](https://github.com/algorandfoundation/algokit-cli/commit/b7301bf7ef4d776aa0b0b16e061f2a546780cabb))\n* Improve onboarding experience  ([`a4d6bb5`](https://github.com/algorandfoundation/algokit-cli/commit/a4d6bb502ed5bbfe87682ea863590c47393aed6a))\n\n## v1.11.2 (2024-02-01)\n\n### Fix\n\n* Bump algokit-client-generator to 
1.1.1 ([#403](https://github.com/algorandfoundation/algokit-cli/issues/403)) ([`28dd709`](https://github.com/algorandfoundation/algokit-cli/commit/28dd709314b5557b1351a6db6f2305e168438d28))\n\n## v1.11.1 (2024-01-30)\n\n### Fix\n\n* Patching tealer 3.12 compatibility ([#401](https://github.com/algorandfoundation/algokit-cli/issues/401)) ([`05ea554`](https://github.com/algorandfoundation/algokit-cli/commit/05ea554fcad9fdf3eb5b038231eaf1155f9e5ce7))\n* Patch cd pipeline merge conflict ([#399](https://github.com/algorandfoundation/algokit-cli/issues/399)) ([`806d0e2`](https://github.com/algorandfoundation/algokit-cli/commit/806d0e269f9e621c3ed8f47a8fd3d4e24a5a366c))\n\n## v1.11.0 (2024-01-28)\n\n### Feature\n\n* Upgrading to latest version of algokit-client-generator-ts ([#398](https://github.com/algorandfoundation/algokit-cli/issues/398)) ([`1b6773b`](https://github.com/algorandfoundation/algokit-cli/commit/1b6773b1bbfcc5191e0da86af4e445de29ae3058))\n\n### Documentation\n\n* Adr on native binaries ([#395](https://github.com/algorandfoundation/algokit-cli/issues/395)) ([`42f61d1`](https://github.com/algorandfoundation/algokit-cli/commit/42f61d1a5ffdb411947a7581a36df1ed53786a25))\n\n## v1.10.0 (2024-01-24)\n\n### Feature\n\n* Adding algokit analyze - perform static analysis with tealer integration ([#370](https://github.com/algorandfoundation/algokit-cli/issues/370)) ([`3e56a4b`](https://github.com/algorandfoundation/algokit-cli/commit/3e56a4b4e1f59d747cd7eb4e2cfea8b8d9c7c670))\n\n### Fix\n\n* Installation process for tealer (windows compatibility) ([#396](https://github.com/algorandfoundation/algokit-cli/issues/396)) ([`971aff4`](https://github.com/algorandfoundation/algokit-cli/commit/971aff46a6b5502135122bff951ee2d9c15fa80f))\n\n## v1.9.3 (2024-01-11)\n\n\n\n## v1.9.2 (2024-01-09)\n\n### Fix\n\n* Run localnet on goal command ([#380](https://github.com/algorandfoundation/algokit-cli/issues/380)) 
([`5a06ddc`](https://github.com/algorandfoundation/algokit-cli/commit/5a06ddce965716ea4fb47a1d8a19b8cb65d17b77))\n\n### Documentation\n\n* Update the list of AlgoKit CLI high-level features in the docs ([`8e3b827`](https://github.com/algorandfoundation/algokit-cli/commit/8e3b8273d65a1fbcd7e3bf600b96c82500eef538))\n\n## v1.9.1 (2023-12-29)\n\n\n\n## v1.9.0 (2023-12-29)\n\n### Feature\n\n* Add support for a customisable named localnet ([#373](https://github.com/algorandfoundation/algokit-cli/issues/373)) ([`41c4946`](https://github.com/algorandfoundation/algokit-cli/commit/41c4946fce6894a9f6548bf4a2cbdd499dec4cb4))\n\n## v1.8.2 (2023-12-20)\n\n\n\n## v1.8.1 (2023-12-19)\n\n### Fix\n\n* Update multiformats version as it needs to be in sync with multiformats-config ([#372](https://github.com/algorandfoundation/algokit-cli/issues/372)) ([`67a5966`](https://github.com/algorandfoundation/algokit-cli/commit/67a59662c67d7f1d2e5eedeb1e8d62289e0ad5ac))\n\n## v1.8.0 (2023-12-14)\n\n### Feature\n\n* Update generators to support generating typed clients with simulate functionality ([#368](https://github.com/algorandfoundation/algokit-cli/issues/368)) ([`90c876b`](https://github.com/algorandfoundation/algokit-cli/commit/90c876b819f4f9bba040e8630584cedc13678f5a))\n* Use Pinata ipfs instead of web3.storage ([#367](https://github.com/algorandfoundation/algokit-cli/issues/367)) ([`fc7ee5d`](https://github.com/algorandfoundation/algokit-cli/commit/fc7ee5d36c09b91251c46ab2be670015ac106164))\n\n### Fix\n\n* Replacing Yaspin with Simplified Spinners for Windows Systems ([#369](https://github.com/algorandfoundation/algokit-cli/issues/369)) ([`e12311e`](https://github.com/algorandfoundation/algokit-cli/commit/e12311e3be087e78aed092dd9b2670f8183afea3))\n\n## v1.7.3 (2023-12-08)\n\n### Fix\n\n* Adding confirmation prompt prior to execution of algokit generators ([#366](https://github.com/algorandfoundation/algokit-cli/issues/366)) 
([`eeb5bae`](https://github.com/algorandfoundation/algokit-cli/commit/eeb5bae18c4ffb2384f92627d19a4308a46bfdf0))\n\n## v1.7.2 (2023-12-04)\n\n### Fix\n\n* Removing outdated reference to `algokit sandbox` command ([#362](https://github.com/algorandfoundation/algokit-cli/issues/362)) ([`e6cd395`](https://github.com/algorandfoundation/algokit-cli/commit/e6cd395bf600485be6edbe4e68c9ba4885598000))\n* Fixing Localnet status ([#365](https://github.com/algorandfoundation/algokit-cli/issues/365)) ([`8277572`](https://github.com/algorandfoundation/algokit-cli/commit/8277572db58d14bfcbda5e8bda18673d536b84a0))\n* Update vulnerable package dependency versions ([#361](https://github.com/algorandfoundation/algokit-cli/issues/361)) ([`450e02d`](https://github.com/algorandfoundation/algokit-cli/commit/450e02ddba02c98d9c8fe8a6baedaf84ef7e9460))\n\n## v1.7.1 (2023-11-22)\n\n### Fix\n\n* Hotfixing conduit path for localnet windows compatibility ([#360](https://github.com/algorandfoundation/algokit-cli/issues/360)) ([`897e335`](https://github.com/algorandfoundation/algokit-cli/commit/897e33554252083ed2b0d8a18a49969ef82a097b))\n\n## v1.7.0 (2023-11-22)\n\n### Feature\n\n* Migrating localnet to latest indexer v3.x images ([#351](https://github.com/algorandfoundation/algokit-cli/issues/351)) ([`04ef300`](https://github.com/algorandfoundation/algokit-cli/commit/04ef3008366028118358e342c0e83e08f3c095ba))\n\n## v1.6.3 (2023-11-14)\n\n### Fix\n\n* Correctly convert list of tuple to dictionary ([#353](https://github.com/algorandfoundation/algokit-cli/issues/353)) ([`ad71719`](https://github.com/algorandfoundation/algokit-cli/commit/ad717190f5822964d726555b7d7f8e1f5453cdfa))\n\n## v1.6.2 (2023-11-10)\n\n### Fix\n\n* Support detect ~/test.txt as valid goal paths ([#347](https://github.com/algorandfoundation/algokit-cli/issues/347)) ([`8ac5ec5`](https://github.com/algorandfoundation/algokit-cli/commit/8ac5ec5843cf243fb051e504d821b719c37cbe38))\n* Support the multiple file outputs of goal clerk 
split ([#346](https://github.com/algorandfoundation/algokit-cli/issues/346)) ([`fd9cd54`](https://github.com/algorandfoundation/algokit-cli/commit/fd9cd54137ca40595220fe916799eb682971387b))\n\n## v1.6.1 (2023-11-08)\n\n### Documentation\n\n* Typo resolved ([#341](https://github.com/algorandfoundation/algokit-cli/issues/341)) ([`e71ff96`](https://github.com/algorandfoundation/algokit-cli/commit/e71ff964a9879a30689be049d3af3ec3002c3198))\n* Fixing typo in docs ([#339](https://github.com/algorandfoundation/algokit-cli/issues/339)) ([`e8eba42`](https://github.com/algorandfoundation/algokit-cli/commit/e8eba421b32767ae9d57d8bbe75f86c268f5cbf7))\n\n## v1.6.0 (2023-10-26)\n\n### Feature\n\n* Algokit tasks - 1.6.0 release ([#334](https://github.com/algorandfoundation/algokit-cli/issues/334)) ([`e35f4f8`](https://github.com/algorandfoundation/algokit-cli/commit/e35f4f836f5433449a6685d1aeca01b8fd416fe2))\n\n### Fix\n\n* Pinning aiohttp beta to hotfix 3.12 support ([#338](https://github.com/algorandfoundation/algokit-cli/issues/338)) ([`96fc7e6`](https://github.com/algorandfoundation/algokit-cli/commit/96fc7e668c3a5b4fb9f00216ee6278b8ded1cf87))\n\n## v1.5.3 (2023-10-23)\n\n\n\n## v1.5.2 (2023-10-20)\n\n### Fix\n\n* Docker compose ps parsing for version >= 2.21 ([#336](https://github.com/algorandfoundation/algokit-cli/issues/336)) ([`06ba5e9`](https://github.com/algorandfoundation/algokit-cli/commit/06ba5e908a879a45ab793ffbc6c9436eeeb5b370))\n\n### Documentation\n\n* Updating docs for the issue on python 3.12 ([#332](https://github.com/algorandfoundation/algokit-cli/issues/332)) ([`288b561`](https://github.com/algorandfoundation/algokit-cli/commit/288b5617f284b5135f272cdb4c1c160c2aa6fc33))\n\n## v1.5.1 (2023-10-17)\n\n\n\n## v1.5.0 (2023-10-04)\n\n### Feature\n\n* Algokit `dispenser` ([#309](https://github.com/algorandfoundation/algokit-cli/issues/309)) ([`6b7a514`](https://github.com/algorandfoundation/algokit-cli/commit/6b7a51421d42d90192c866ff7ce7307a4b180b9c))\n\n### 
Documentation\n\n* Explicit reference on how to obtain the dispenser address ([#321](https://github.com/algorandfoundation/algokit-cli/issues/321)) ([`d7db09c`](https://github.com/algorandfoundation/algokit-cli/commit/d7db09c50e41ec8840f908f6a3db223622562269))\n\n## v1.4.2 (2023-09-29)\n\n### Documentation\n\n* Adding tealscript template ([#318](https://github.com/algorandfoundation/algokit-cli/issues/318)) ([`a855530`](https://github.com/algorandfoundation/algokit-cli/commit/a855530923a308e3826d4203b851cfbc49420bed))\n* Fixed links to tutorials ([`8207043`](https://github.com/algorandfoundation/algokit-cli/commit/820704305d7bb66d3f5e7c6627e53594a74f9e45))\n\n## v1.4.1 (2023-08-21)\n\n### Fix\n\n* Localnet displays a warning when image is out of date ([#308](https://github.com/algorandfoundation/algokit-cli/issues/308)) ([`be5a5df`](https://github.com/algorandfoundation/algokit-cli/commit/be5a5df0883b378a0dd889b9996ff68850df5698))\n* Adding fixes to allow working with local filesystem files when interacting with algokit goal commands ([#304](https://github.com/algorandfoundation/algokit-cli/issues/304)) ([`caca2b5`](https://github.com/algorandfoundation/algokit-cli/commit/caca2b59b07817648ae7d8f208fe02f895cee92e))\n\n## v1.4.0 (2023-08-14)\n\n### Feature\n\n* Advanced algokit generate command ([#306](https://github.com/algorandfoundation/algokit-cli/issues/306)) ([`0381862`](https://github.com/algorandfoundation/algokit-cli/commit/038186239c6787b0e80d49ea6a0e5e4135ce4240))\n\n## v1.3.0 (2023-08-01)\n\n### Feature\n\n* Add new \"deploy\" command to execute user/template defined logic to deploy smart contracts to an Algorand network ([#295](https://github.com/algorandfoundation/algokit-cli/issues/295)) ([`6673f80`](https://github.com/algorandfoundation/algokit-cli/commit/6673f8062989172674471056baf1e8a7f34753b7))\n\n### Fix\n\n* Pip-audit dependencies ([#307](https://github.com/algorandfoundation/algokit-cli/issues/307)) 
([`142dba3`](https://github.com/algorandfoundation/algokit-cli/commit/142dba3651731003936c32ff9a6144c58289c829))\n* Handle deploy commands on windows that are actually `.cmd` files or similar ([#303](https://github.com/algorandfoundation/algokit-cli/issues/303)) ([`17791c7`](https://github.com/algorandfoundation/algokit-cli/commit/17791c7ca7f5aabe510b1dcaa1d09b9ed403233b))\n\n### Documentation\n\n* Advanced algokit generate command ADR ([#305](https://github.com/algorandfoundation/algokit-cli/issues/305)) ([`cb0ac17`](https://github.com/algorandfoundation/algokit-cli/commit/cb0ac17e9afda66e74ae2c63d0729c3b34f2a4b7))\n* Adding algokit template documentation ([#300](https://github.com/algorandfoundation/algokit-cli/issues/300)) ([`6e19743`](https://github.com/algorandfoundation/algokit-cli/commit/6e19743bacf3856f91e2610cff58676a17e99deb))\n\n## v1.2.0 (2023-07-04)\n\n### Feature\n\n* Detecting whether opening folder contains *.code-workspace file ([#294](https://github.com/algorandfoundation/algokit-cli/issues/294)) ([`e902d55`](https://github.com/algorandfoundation/algokit-cli/commit/e902d55a6077d60eb3c5b3fa809e1ba80b61b37e))\n* Adding react and fullstack templates ([#291](https://github.com/algorandfoundation/algokit-cli/issues/291)) ([`5af81f1`](https://github.com/algorandfoundation/algokit-cli/commit/5af81f100b16ce1281980ff3067df648fb5c9b4f))\n\n### Fix\n\n* Hotfixing a bug that is caused by pydantic v2 being installed as a copier dependency ([#297](https://github.com/algorandfoundation/algokit-cli/issues/297)) ([`31b580b`](https://github.com/algorandfoundation/algokit-cli/commit/31b580b2364e123fd81fdab14da93b704ea4bdda))\n* Update algokit-client-generators ([#293](https://github.com/algorandfoundation/algokit-cli/issues/293)) ([`cf0f46f`](https://github.com/algorandfoundation/algokit-cli/commit/cf0f46ffba9c3a33e75bad56195589bed3c5dc3a))\n\n### Documentation\n\n* Switching to more reliable visitors badge provider 
([`51dce8b`](https://github.com/algorandfoundation/algokit-cli/commit/51dce8be83f0b72765ddffc9ea67886235d83fbb))\n* Fixing underline caused by whitespace in html tags on readme ([`f824596`](https://github.com/algorandfoundation/algokit-cli/commit/f824596d845fce31fe6d32fd9dc14fa0086746ca))\n\n## v1.1.6 (2023-06-21)\n\n### Fix\n\n* Increase timeout when doing algod health check ([#290](https://github.com/algorandfoundation/algokit-cli/issues/290)) ([`2b39970`](https://github.com/algorandfoundation/algokit-cli/commit/2b39970c53d358050639fbcb02ab6e99c1808d98))\n\n### Documentation\n\n* Adding readme assets ([`e300fa9`](https://github.com/algorandfoundation/algokit-cli/commit/e300fa929850b1f3677cb3545b8f284e8f3a7ef9))\n\n## v1.1.5 (2023-06-15)\n\n### Fix\n\n* Update typescript algokit-client-generator to 2.2.1 ([#286](https://github.com/algorandfoundation/algokit-cli/issues/286)) ([`bbb86b4`](https://github.com/algorandfoundation/algokit-cli/commit/bbb86b4049ed6d81f3bd73a0e207deae8f200459))\n\n## v1.1.4 (2023-06-14)\n\n### Fix\n\n* Update typescript algokit-client-generator ([#284](https://github.com/algorandfoundation/algokit-cli/issues/284)) ([`37d5082`](https://github.com/algorandfoundation/algokit-cli/commit/37d5082f86fd3db43ebb7c96558a484267883067))\n\n## v1.1.3 (2023-06-13)\n\n### Fix\n\n* Update python algokit-client-generator ([#283](https://github.com/algorandfoundation/algokit-cli/issues/283)) ([`5330baa`](https://github.com/algorandfoundation/algokit-cli/commit/5330baaf8617a1b5b640d5efecdcbe05fd2ea2a3))\n\n## v1.1.2 (2023-06-13)\n\n### Fix\n\n* Use /v2/status for algod health check ([#282](https://github.com/algorandfoundation/algokit-cli/issues/282)) ([`91e5e36`](https://github.com/algorandfoundation/algokit-cli/commit/91e5e36886edfa5d90f6aaf77f9db6a666bbdc43))\n\n## v1.1.1 (2023-06-13)\n\n### Fix\n\n* Add check for localnet start to wait for algod to be ready ([#281](https://github.com/algorandfoundation/algokit-cli/issues/281)) 
([`dff0a5d`](https://github.com/algorandfoundation/algokit-cli/commit/dff0a5d45fb79317509f6a44fa3fc37b93a2d8af))\n\n### Documentation\n\n* Updating adr header to reflect status and deciders ([`b84a07d`](https://github.com/algorandfoundation/algokit-cli/commit/b84a07dee0c1b5fc9060b806feeb07c41b4cd4fd))\n\n## v1.1.0 (2023-06-07)\n\n### Feature\n\n* Adding minimum required version for algokit. ([#273](https://github.com/algorandfoundation/algokit-cli/issues/273)) ([`10aacc2`](https://github.com/algorandfoundation/algokit-cli/commit/10aacc2c17acc55c47d69674e9ace780313aee46))\n* Use official Algorand Docker images for LocalNet ([#268](https://github.com/algorandfoundation/algokit-cli/issues/268)) ([`fc5106c`](https://github.com/algorandfoundation/algokit-cli/commit/fc5106cc773a4672eb1ec8614bb60ed2dc61be42))\n* Add generate client command ([#266](https://github.com/algorandfoundation/algokit-cli/issues/266)) ([`b885fb1`](https://github.com/algorandfoundation/algokit-cli/commit/b885fb16b3b9a49231a6b786d1156bd7b202fb12))\n\n### Fix\n\n* Don't reset localnet if only algod_config.json is missing ([#269](https://github.com/algorandfoundation/algokit-cli/issues/269)) ([`ff3ef56`](https://github.com/algorandfoundation/algokit-cli/commit/ff3ef560565bdb92951fa7d6e3bbf4437db873a0))\n* Bootstrap failure during init now shows the error to avoid confusion ([`8a36e82`](https://github.com/algorandfoundation/algokit-cli/commit/8a36e82497cc342082d71df5c327837ddad221a4))\n* Workaround ValueError raised when using --defaults flag with copier 7.1 ([#256](https://github.com/algorandfoundation/algokit-cli/issues/256)) ([`e224070`](https://github.com/algorandfoundation/algokit-cli/commit/e22407074bf8c8ce2b1576379c90df76a70f6df9))\n\n### Documentation\n\n* Document typed client dependency ([#275](https://github.com/algorandfoundation/algokit-cli/issues/275)) ([`87d7233`](https://github.com/algorandfoundation/algokit-cli/commit/87d7233bd35a1b896d15e0aea3f63e738738254f))\n* Add example usage for 
typed clients ([`16d91f5`](https://github.com/algorandfoundation/algokit-cli/commit/16d91f5d3e5ef0115826780bc1daa918fbf031a8))\n* Add generate docs ([#270](https://github.com/algorandfoundation/algokit-cli/issues/270)) ([`da8e46d`](https://github.com/algorandfoundation/algokit-cli/commit/da8e46dfda97e514be17955ad39986f42b93b5e2))\n* Update localnet docs to include links to AlgoKit Utils ([`6e937a8`](https://github.com/algorandfoundation/algokit-cli/commit/6e937a8ff487955063ac1023c51ac3c83f5cbb01))\n* README update ([`8702e45`](https://github.com/algorandfoundation/algokit-cli/commit/8702e45c25eb2ec422587e307151037fbf6c1914))\n* Changes to wording of output stability snippet ([`f378013`](https://github.com/algorandfoundation/algokit-cli/commit/f378013db7b52c5d69f71ba606fbe3f8f50fa843))\n* Added output stability article content ([`c3a89f1`](https://github.com/algorandfoundation/algokit-cli/commit/c3a89f14b461ec2a22b23f3e423d107bff72fbb9))\n* Include note about pipx ensurepath ([`847013d`](https://github.com/algorandfoundation/algokit-cli/commit/847013d1733f3b11ed6d3c64b223e84dd7bc1124))\n* Link to repo search, fixes #240 ([`2550f6f`](https://github.com/algorandfoundation/algokit-cli/commit/2550f6ff147fd3de9f5da6dee470e9f214500c20))\n\n## v1.0.1 (2023-03-29)\n### Documentation\n* Reference overview image with absolute url ([`c987f84`](https://github.com/algorandfoundation/algokit-cli/commit/c987f84d3079cf88c262e21a542e60c74a71829a))\n* Added overview image ([`4c387ee`](https://github.com/algorandfoundation/algokit-cli/commit/4c387ee7fbbae2c01498dec83fa76ffd4d4990fa))\n* Make README.md links absolute ([`8435bc7`](https://github.com/algorandfoundation/algokit-cli/commit/8435bc7faaddd33e4bb204f8c965365aee097b42))\n\n## v1.0.0 (2023-03-29)\n### Feature\n* **localnet:** Changing the default reset behaviour to not pull images ([`6d3f10e`](https://github.com/algorandfoundation/algokit-cli/commit/6d3f10e5690f15d04d03bc38290f2c670905ba24))\n\n### Breaking\n* 1.0 release 
([`68b02ad`](https://github.com/algorandfoundation/algokit-cli/commit/68b02ad49de0c8da083fcce542a76f6342ac0020))\n\n### Documentation\n* Remove mention of virtualenv when describing bootstrap command ([`302498f`](https://github.com/algorandfoundation/algokit-cli/commit/302498f21cb59eea4e01b482d3f07eecada34909))\n* Explain what running bootstrap during init will do ([`4bd58bd`](https://github.com/algorandfoundation/algokit-cli/commit/4bd58bd981eef9a382c2a16f9d0bd06ebe397ba3))\n* Added file copy advice ([`2b4882c`](https://github.com/algorandfoundation/algokit-cli/commit/2b4882c1878c07da7933d9a266f80560395fbb5f))\n* Added note about algokit explore after a localnet start ([`7a068b1`](https://github.com/algorandfoundation/algokit-cli/commit/7a068b1d90592e5eebd3f7af5a5f6d216c45ec1f))\n* Provide feedback when calling algokit explore ([`def4ef5`](https://github.com/algorandfoundation/algokit-cli/commit/def4ef5ce5bb2eb3e7b7c0dbcdbf0d4c4f8f6b1d))\n* Update --bootstrap help text on algokit init ([`ce9ebea`](https://github.com/algorandfoundation/algokit-cli/commit/ce9ebeaf9bb636f6af4cd2b98d6a32a4b4e10f15))\n* Updating auto-generated docs ([`637d534`](https://github.com/algorandfoundation/algokit-cli/commit/637d534b14c92fa3b4be27f58a0e3a950ad5d75e))\n* Command help text improvements for bootstrap and init ([`67eb3f6`](https://github.com/algorandfoundation/algokit-cli/commit/67eb3f6b79ac47abe4f4d1497db66acda5d0d4fd))\n* Update config and completions help text ([`7e0c087`](https://github.com/algorandfoundation/algokit-cli/commit/7e0c0872c1ca9ae531c69ae37dce43f44bc74634))\n* Update doctor help text ([`111bf2e`](https://github.com/algorandfoundation/algokit-cli/commit/111bf2e874fff0960ed7faf5c073dd44d8b487cd))\n* Removing incorrect references to sandbox ([`7179eb8`](https://github.com/algorandfoundation/algokit-cli/commit/7179eb8c67160671d549bd8f7114f54fb03b3fad))\n* Improving doctor command feature description 
([`a8a2862`](https://github.com/algorandfoundation/algokit-cli/commit/a8a2862efc03448022df9fd2f9fa42e3ad883cde))\n* Updating bootstrap command descriptions ([`07a5fdb`](https://github.com/algorandfoundation/algokit-cli/commit/07a5fdbaa2653ab6505fd67432fac3c36ee5d711))\n* Fixing heading order in intro tutorial ([`46b5023`](https://github.com/algorandfoundation/algokit-cli/commit/46b50235b965f1e9736d3e8c67d1a95c9a13923c))\n* Getting README.md ready for 1.0 ([`815712f`](https://github.com/algorandfoundation/algokit-cli/commit/815712f83a2b56172c3cc51c7615bb33cae32b2f))\n\n## v0.6.0 (2023-03-28)\n### Feature\n* Prompt for template first ([`91326f3`](https://github.com/algorandfoundation/algokit-cli/commit/91326f339650f9ea33ff4746f6707f507364b81a))\n\n### Documentation\n* Autogen docs ([`9803ff7`](https://github.com/algorandfoundation/algokit-cli/commit/9803ff7764090b4d5b1d661f8e7ead3b74a49d0f))\n* Added the intro tutorial ([`087852b`](https://github.com/algorandfoundation/algokit-cli/commit/087852b9be034daddca3a609bb7fd9ff0b476b6e))\n\n## v0.5.0 (2023-03-24)\n### Feature\n* Change playground template to point to new repo ([`805d63c`](https://github.com/algorandfoundation/algokit-cli/commit/805d63c10be3c62a3ef8d78bd2d40d1e6d1c8c5c))\n* **init:** Added --no-ide flag to allow user to prevent IDE opening ([#211](https://github.com/algorandfoundation/algokit-cli/issues/211)) ([`cd9f015`](https://github.com/algorandfoundation/algokit-cli/commit/cd9f01549fc460641bd7a64f606963efb7f4082a))\n\n## v0.4.1 (2023-03-22)\n### Fix\n* **init:** Resolving issue with opening VS Code automatically on windows ([`691543d`](https://github.com/algorandfoundation/algokit-cli/commit/691543dfb7748dcb0495ceb0593dfe14e500d8fc))\n\n## v0.4.0 (2023-03-22)\n### Feature\n* Increase max content width to 120 for easier reading in wider terminals ([`cadc615`](https://github.com/algorandfoundation/algokit-cli/commit/cadc6150fb0a6c7d5f1ae60ccd7bccee89fb38fb))\n* Include \"extra version info\" in all 
commands not just docker compose ([`f1c1d69`](https://github.com/algorandfoundation/algokit-cli/commit/f1c1d6992faefa26c8656121e30b894dfce32c03))\n* Detect if code is on path && .vscode exists and try to launch ([`78f8b3f`](https://github.com/algorandfoundation/algokit-cli/commit/78f8b3f7f655d7286b4c090002e09edc52f3009a))\n* Add a command to see logs from localnet docker containers ([`31c9cc4`](https://github.com/algorandfoundation/algokit-cli/commit/31c9cc4d44f2df2abcb8014dad6633232781f6e0))\n\n### Fix\n* Make failure to run npm install during bootstrap error message more explicit ([`468a186`](https://github.com/algorandfoundation/algokit-cli/commit/468a18683ebe80f671438df6a2f4bcf1d0c7c4a5))\n* When executing goal/bash inside algod container only show localnet hint if it looks like the container doesn't exist or isn't running ([`b9dc57f`](https://github.com/algorandfoundation/algokit-cli/commit/b9dc57fe24a0f28df0b4399f0f579c39ab5336d8))\n* Allow going back to template selection from custom url ([`929eefb`](https://github.com/algorandfoundation/algokit-cli/commit/929eefbd9bb8b4e38b24423980d18c0bbc09e9a3))\n\n### Documentation\n* **localnet:** Removing known LocalNet issue that is fixed ([`6747642`](https://github.com/algorandfoundation/algokit-cli/commit/6747642cc2a6b79e5cfa7b71418dfbaadbbf6659))\n\n## v0.3.3 (2023-03-09)\n### Fix\n* Use /v2/status when querying localnet algod container ([#198](https://github.com/algorandfoundation/algokit-cli/issues/198)) ([`0fb0488`](https://github.com/algorandfoundation/algokit-cli/commit/0fb0488e7a5ebd7da22f764e9047df9c6ef7ac31))\n\n### Documentation\n* Fix references to renamed sandbox command ([#194](https://github.com/algorandfoundation/algokit-cli/issues/194)) ([`8b2910b`](https://github.com/algorandfoundation/algokit-cli/commit/8b2910b465e67c0e428cc4dde65e7a502f2fc7c0))\n* Added step in install instructions to restart terminal 
([`f8e47a5`](https://github.com/algorandfoundation/algokit-cli/commit/f8e47a5ea47e6f78a39dee436381b615c794d5d5))\n* Update windows install instructions ([`e9d0a9d`](https://github.com/algorandfoundation/algokit-cli/commit/e9d0a9dc2ffc7f0998978e1fa5eceb6c94a9ce52))\n\n## v0.3.2 (2023-03-03)\n### Fix\n* Resolve config paths in case of folder redirection e.g. UWP python ([#191](https://github.com/algorandfoundation/algokit-cli/issues/191)) ([`0c2b291`](https://github.com/algorandfoundation/algokit-cli/commit/0c2b29179d003b17909a8dc22f655dc2b11bcdb8))\n\n## v0.3.1 (2023-02-24)\n### Fix\n* Git versions prior to 2.28 no longer fail on algokit init ([#184](https://github.com/algorandfoundation/algokit-cli/issues/184)) ([`0559582`](https://github.com/algorandfoundation/algokit-cli/commit/0559582ba9fb27df9ab98b2a66606ddaeeaf6da0))\n* Fix version comparison when checking for new versions ([#183](https://github.com/algorandfoundation/algokit-cli/issues/183)) ([`c272658`](https://github.com/algorandfoundation/algokit-cli/commit/c2726589b2699832844d2c67c452c01ecf742824))\n\n## v0.3.0 (2023-02-23)\n### Feature\n* Add init --template-url-ref option to allow using a specific commit, tag or branch ([`5bf19a3`](https://github.com/algorandfoundation/algokit-cli/commit/5bf19a38eee8b621010956a64e2d2f9e318af9e8))\n* Rename sandbox command to localnet ([`7ee55bd`](https://github.com/algorandfoundation/algokit-cli/commit/7ee55bdd5d0a87cd3aa7af1281c0867798be79ed))\n\n### Fix\n* **doctor:** Ensuring full docker version information is visible in Doctor output to improve debugging, fixes comment in #164 ([#173](https://github.com/algorandfoundation/algokit-cli/issues/173)) ([`a2c51e8`](https://github.com/algorandfoundation/algokit-cli/commit/a2c51e8018ba7b8049dc1230f7f9c1e02c24cd15))\n* Handle git not being installed when running algokit ([`ccc5eb0`](https://github.com/algorandfoundation/algokit-cli/commit/ccc5eb0369892bb640914a5cf370072d28502d7f))\n* **doctor:** Docker compose version 
parsing, fixes #164 ([`c3f4ef8`](https://github.com/algorandfoundation/algokit-cli/commit/c3f4ef80f7ca0e3da0d4841c06d81d8abe0c078d))\n* Updating gitpython to resolve pip-audit vulnerability warning ([#169](https://github.com/algorandfoundation/algokit-cli/issues/169)) ([`2a10d67`](https://github.com/algorandfoundation/algokit-cli/commit/2a10d676e20f4f7d3b7d28ea24d6f5ded099d3ae))\n\n### Documentation\n* Gave context to Sandbox and PyTEAL ([`0a96e13`](https://github.com/algorandfoundation/algokit-cli/commit/0a96e13c16284dfdc8d9525310119a1351ff862a))\n* Updated use cases  -> capabilities ([`ef0527a`](https://github.com/algorandfoundation/algokit-cli/commit/ef0527a87f11651efb3061711749049d36ed6d04))\n* Added missing recommendation for type-safe client ([`26a6717`](https://github.com/algorandfoundation/algokit-cli/commit/26a6717e763811e0f9c23818d842ab0ea2fb2a99))\n* Completed draft for architecture decision for smart contract deployment ([`40faf83`](https://github.com/algorandfoundation/algokit-cli/commit/40faf83cc97f75005f54bc344999848055f70b84))\n* First draft of architecture decision for smart contract deployment ([`9e77817`](https://github.com/algorandfoundation/algokit-cli/commit/9e778170e9b4924d10b73a2fbea08accf38e3b33))\n* Rename sandbox to localnet ([`aa35da7`](https://github.com/algorandfoundation/algokit-cli/commit/aa35da72f67f3dcd35adb0866a8b1ddad17fc4fe))\n* Update example output for Verify installation section ([`14f6f90`](https://github.com/algorandfoundation/algokit-cli/commit/14f6f9058c3ffd69d8c3ce9b4b1160bdeb017a0b))\n* Fixing incorrect description for Sandbox command ([`4bea5fa`](https://github.com/algorandfoundation/algokit-cli/commit/4bea5fa06be10a1282c1627b7979380aebc6e297))\n* Fix init description in algokit.md ([#156](https://github.com/algorandfoundation/algokit-cli/issues/156)) ([`39eee95`](https://github.com/algorandfoundation/algokit-cli/commit/39eee9508f28799856692b0729bebf8b923af687))\n\n## v0.2.0 (2023-01-16)\n### Feature\n* Update windows 
install instructions and bump version so PyPi will accept new release ([#154](https://github.com/algorandfoundation/algokit-cli/issues/154)) ([`5ff5223`](https://github.com/algorandfoundation/algokit-cli/commit/5ff52237172bddf06d3ff845b18e77c31dce9b11))\n\n## v0.1.3 (2023-01-16)\n### Documentation\n* Update pipx install instructions ([`e91a06a`](https://github.com/algorandfoundation/algokit-cli/commit/e91a06a38f5b278e9ac26dfef5d7c4833633e750))\n\n## v0.1.2 (2023-01-11)\n### Documentation\n* Approved ([`19eb063`](https://github.com/algorandfoundation/algokit-cli/commit/19eb063a2d8327884a5856939db4e0ea157ac26f))\n* Remove --cask from Option 3 ([`cfa3b73`](https://github.com/algorandfoundation/algokit-cli/commit/cfa3b73099f6da94420bdfc9541bbce4d521993d))\n\n## v0.1.1 (2023-01-10)\n### Fix\n* Adding installation documentation update re: pipx ([`75d3590`](https://github.com/algorandfoundation/algokit-cli/commit/75d359022f9fc3a3cf3ac8d21b16a449e42b1857))\n* Temporarily turning off PyPi publishing while we decide on the final package name ([`6c1a2e2`](https://github.com/algorandfoundation/algokit-cli/commit/6c1a2e25d2de00a9052b7db700d8681d75b09e6a))\n\n## v0.1.0 (2023-01-09)\n### Feature\n* Windows Chocolatey package ([#80](https://github.com/algorandfoundation/algokit-cli/issues/80)) ([`3f4bb04`](https://github.com/algorandfoundation/algokit-cli/commit/3f4bb04ee3ce09e7ca9ab843453f50f6a3eab98c))\n* **bootstrap:** Prompt for env tokens ([#114](https://github.com/algorandfoundation/algokit-cli/issues/114)) ([`a6fe18f`](https://github.com/algorandfoundation/algokit-cli/commit/a6fe18fded0bddec91959998916fc96ac6af5008))\n* **explore:** Add explore command for launching Dappflow Explorer ([#112](https://github.com/algorandfoundation/algokit-cli/issues/112)) ([`4db26b0`](https://github.com/algorandfoundation/algokit-cli/commit/4db26b08c6919fb980afa6afbb233d8793feeec1))\n* **version-check:** Added check to periodically check for new releases on GitHub and inform when found 
([#111](https://github.com/algorandfoundation/algokit-cli/issues/111)) ([`1772439`](https://github.com/algorandfoundation/algokit-cli/commit/1772439b02190dce159e75da110c76b200774c00))\n* **doctor:** Use sys.version for fuller output (vs version_info tuple) ([#101](https://github.com/algorandfoundation/algokit-cli/issues/101)) ([`55fe4fc`](https://github.com/algorandfoundation/algokit-cli/commit/55fe4fc6984b7fecfe3e417a87cc3a1c0c0ee070))\n* **completions:** Add completions support for bash and zsh ([`e7c50e5`](https://github.com/algorandfoundation/algokit-cli/commit/e7c50e58c6b371475fb6d7d2f45e85790289eadb))\n* **doctor:** Tweak commands for windows ([`8f79629`](https://github.com/algorandfoundation/algokit-cli/commit/8f79629a358eb71cb28b4e739bbe445c2ec74646))\n* **doctor:** Fix timezone in tests ([`d9fe303`](https://github.com/algorandfoundation/algokit-cli/commit/d9fe303934f3c093d7548228fce74c83286fa1f2))\n* **doctor:** Adding tests ([`58d5708`](https://github.com/algorandfoundation/algokit-cli/commit/58d57080e53b24f60e6a4315e09e920032fdf0b0))\n* **doctor:** Refactor ([`b0fe39a`](https://github.com/algorandfoundation/algokit-cli/commit/b0fe39aafe34b74bf8aebba1e9ec5f4e8048923e))\n* **doctor:** Colouring output ([`6bfb300`](https://github.com/algorandfoundation/algokit-cli/commit/6bfb30093e005cb59e92aefe4714ff492ec2582b))\n* **doctor:** Address pr comments and add more logic ([`e7a3090`](https://github.com/algorandfoundation/algokit-cli/commit/e7a309024d2fc5704950e1138eb44b0f242f8bdb))\n* **.env file:** Add tests for default and custom values ([`58511dc`](https://github.com/algorandfoundation/algokit-cli/commit/58511dc598ac1ec4b388c45d680d9075a5650b98))\n* **init:** Add `.env` file to template and passing custom values (if required) ([`e77eca8`](https://github.com/algorandfoundation/algokit-cli/commit/e77eca8bde7e5c6faa80375d6b11f44c0579be6e))\n* **init:** Implemented ability to specify a commit hash so you can anchor templates from semi-trusted sources to a known 
good version ([#77](https://github.com/algorandfoundation/algokit-cli/issues/77)) ([`772d420`](https://github.com/algorandfoundation/algokit-cli/commit/772d420ea73a7878ef5ac9d446c79b9bfd1fbbf2))\n* **sandbox:** Added `algokit sandbox console` ([`95565df`](https://github.com/algorandfoundation/algokit-cli/commit/95565dff45a6751f960ebfba64fb9ed001a67260))\n* **goal:** Added algokit goal --console ([`8dd947b`](https://github.com/algorandfoundation/algokit-cli/commit/8dd947bdb4f7029bf043cb0cfebe494c17bf2729))\n* **algokit:** Implementing automated, semantic versioning ([`e4859d4`](https://github.com/algorandfoundation/algokit-cli/commit/e4859d496c61ea4e4c16e9a1c910dff4e896037a))\n\n### Fix\n* **docs:** Tweaks to the reference documentation ([`6d872a1`](https://github.com/algorandfoundation/algokit-cli/commit/6d872a17bf00820c78c5e8c52caf20cd9efe9c94))\n* Expression on Publish Release Packages action ([`1c08f95`](https://github.com/algorandfoundation/algokit-cli/commit/1c08f95d568b8a9264b319cf40b87b0af05a8c72))\n* Attempting to isolate main branch versioning ([`5ab7089`](https://github.com/algorandfoundation/algokit-cli/commit/5ab7089a21c431eba2965f1af895eb5d4b6c6ae6))\n* Improve documentation and argument values for version-prompt config ([`aee3a1a`](https://github.com/algorandfoundation/algokit-cli/commit/aee3a1a74a921659ec60e28a1b48c11d4a14d2da))\n* **completions:** Explicitly use utf8 for Windows compat ([`5033f8e`](https://github.com/algorandfoundation/algokit-cli/commit/5033f8e26cd27374228a983eebb3e5c88c841958))\n* **bootstrap:** Improving robustness and test coverage for bootstrap poetry command ([#89](https://github.com/algorandfoundation/algokit-cli/issues/89)) ([`a4a6823`](https://github.com/algorandfoundation/algokit-cli/commit/a4a6823b4c5015e5d0f2d361a7373820e641835d))\n* **init:** Don't prompt for project name in the template - take it from the directry name in the root init command 
([`fc84791`](https://github.com/algorandfoundation/algokit-cli/commit/fc847911e58bd969428c0dc6e3117501181f545d))\n* Windows weird error on GitHub Actions ([`0b81808`](https://github.com/algorandfoundation/algokit-cli/commit/0b8180829be53e26b998e4c723c0d8a384d95b91))\n* **git:** Update gitattributes to ensure EOL=LF ([`b13a972`](https://github.com/algorandfoundation/algokit-cli/commit/b13a97202ddeefe81c8eda5d3b058cc4a136291e))\n* **build:** Run black with --check ([`e4e3875`](https://github.com/algorandfoundation/algokit-cli/commit/e4e3875b864557f86edabe2851a3f2f8f2071fa3))\n* **logging:** Ensure log files get opened in UTF-8 encoding ([`bc666fe`](https://github.com/algorandfoundation/algokit-cli/commit/bc666fe7e3aed8d250d997946188f8f70b01b4d5))\n* Removing deleted folder from beaker template from assert ([`8b4b46a`](https://github.com/algorandfoundation/algokit-cli/commit/8b4b46a75b6a4794d9ceb41f80f80415ef44d503))\n* Temporary fix for excessive build minutes consumption and commenting out PyPi publishing code since it errors out ([`399ca0e`](https://github.com/algorandfoundation/algokit-cli/commit/399ca0eca85751c245a07abe2cbe9a73cce4172b))\n\n### Breaking\n* --ok-exit-code no longer exists on algokit bootstrap poetry, no need for copier templates to call algokit now so no need for this feature ([`a4a6823`](https://github.com/algorandfoundation/algokit-cli/commit/a4a6823b4c5015e5d0f2d361a7373820e641835d))\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# AlgoKit CLI for contributors\n\n## Commits\n\nWe are using the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) standard for commit messages. This allows us to automatically generate release notes and version numbers. We do this via [Python Semantic Release](https://python-semantic-release.readthedocs.io/en/latest/) and [GitHub actions](.github/workflows/cd.yaml).\n\n## Guiding Principles\n\nAlgoKit development is done within the [AlgoKit Guiding Principles](./docs/algokit.md#guiding-principles).\n\n## Setup (AlgoKit CLI development)\n\n### Initial setup\n\n1. Clone this repository: `git clone https://github.com/algorandfoundation/algokit-cli`\n2. Install pre-requisites:\n\n   - Manually:\n     - Install `Python` - [Link](https://www.python.org/downloads/): The minimum required version is `3.10`, but `3.11` is recommended.)\n     - Install `Poetry` - [Link](https://python-poetry.org/docs/#installation): The minimum required version is `1.2`.\n     - If you're not using PyCharm, then run `poetry install` in the repository root directory (this should set up `.venv` and install all Python dependencies - PyCharm will do this for you on startup)\n   - Via AlgoKit CLI:\n\n     - [Install AlgoKit CLI](./README.md#install) and run `algokit project bootstrap poetry` in the root directory\n     - Install `tealer` - by running `pipx install tealer==0.1.2`. This is a prerequisite to running `pytest`, tealer is a third party tool for static analysis of TEAL code, algokit uses it in `task analyse` command. AlgoKit uses `pytest-xdist` to speed up the test suite execution by running tests in parallel, this requires `tealer` to be installed globally to avoid race conditions.\n\n3. Install pre-commit hooks (optional but recommended):\n\n   [pre-commit](https://pre-commit.com/) is configured in this repository. To enable it, make sure that `poetry install` has been run and the virtual-env is activated by running `poetry shell`. 
Then execute `pre-commit install` to install the git hook scripts.\n\n   Once it is done, git will ensure formatting, linting, and static typing (via `mypy`) is correct when you perform a commit.\n\n4. Open the project and start debugging / developing via:\n\n   - VS Code\n\n     1. Open the repository root in VS Code.\n     2. Install recommended extensions.\n     3. Hit F5 (or whatever you have debug mapped to) and it should start running with breakpoint debugging.\n\n        > **Note**\n        > The first time you run, VS Code may prompt you to select the Python Interpreter, or if you are having issues running you may need to select it via the `Python: Select Interpreter` pallette command. You should select the Python Interpreter with the ./.venv path)\n\n   - IDEA (e.g. PyCharm)\n     1. Open the repository root in the IDE\n     2. Hit Shift+F9 (or whatever you have debug mapped to) and it should start running with breakpoint debugging\n   - Other\n     1. Open the repository root in your text editor of choice\n     2. In a terminal run `poetry shell`\n     3. Run `./debug.py` through your debugger of choice\n   - In each of the above cases, an `args.in` file will be created in the source root.\n     Each line will be executed in order, with the arguments passed to the cli.\n     For example, you could have:\n\n     ```\n     version\n     --help\n     version --help\n     ```\n\n     Not a terribly useful sequence of commands, but hopefully this helps illustrate the usage.\n\n### Subsequently\n\n1. If you update to the latest source code and there are new dependencies you will need to run `poetry install` again\n2. Follow step 3 above\n\n### Documentation\n\nMarkdown documentation can be found within the docs directory of the repo, there is a mixture of handwritten documentation and autogenerated documentation for the CLI tool itself. \n\nTo autogenerate the CLI documentation from the click source:\n\n1. 
Install the docs dependencies: `poetry install --with docs`\n2. Run the docs generation: `poetry run poe docs`\n\nNote: this command won't work on Windows.\n\nThe CLI docs are generated using Sphinx, and its configuration can be found in `docs\\sphinx`. The generated Markdown output is post processed to add a Table of Contents and top level title and the final Markdown is output to `docs\\cli`. The commands to achieve this are defined in `pyproject.toml` under `[tool.poe.tasks]`\n\n### Libraries and Tools\n\nAlgoKit uses Python as a main language and many Python libraries and tools. This section lists all of them with a tiny brief.\n\n- [Poetry](https://python-poetry.org/): Python packaging and dependency management.\n- [pipx](https://github.com/pypa/pipx): Install and Run Python Applications in Isolated Environments\n- [Click](https://palletsprojects.com/p/click/): A Python package for creating beautiful command line interfaces.\n- [Black](https://github.com/psf/black): A Python code formatter.\n- [Tox](https://tox.wiki/en/latest/): Automate and standardize testing in Python.\n\n## Architecture decisions\n\nAs part of developing AlgoKit we are documenting key architecture decisions using [Architecture Decision Records (ADRs)](https://adr.github.io/). The following are the key decisions that have been made thus far:\n\n- [2022-11-14: AlgoKit sandbox approach](docs/architecture-decisions/2022-11-14_sandbox-approach.md)\n- [2022-11-22: Beaker testing strategy](docs/architecture-decisions/2022-11-22_beaker-testing-strategy.md)\n- [2023-01-11: HomeBrew install strategy](docs/architecture-decisions/2023-01-11_brew_install.md)\n- [2023-01-11: Beaker productionisation review](docs/architecture-decisions/2023-01-11_beaker_productionisation_review.md)\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Algorand Foundation\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n<a href=\"https://github.com/algorandfoundation/algokit-cli\"><img src=\"https://raw.githubusercontent.com/algorandfoundation/algokit-cli/main/docs/imgs/banner.png\" width=60%></a>\n</div>\n\n<p align=\"center\">\n    <a target=\"_blank\" href=\"https://github.com/algorandfoundation/algokit-cli/blob/main/docs/algokit.md\"><img src=\"https://img.shields.io/badge/docs-repository-00dc94?logo=github&style=flat.svg\" /></a>\n    <a target=\"_blank\" href=\"https://dev.algorand.co/algokit/algokit-intro\"><img src=\"https://img.shields.io/badge/learn-AlgoKit-00dc94?logo=algorand&mac=flat.svg\" /></a>\n    <a target=\"_blank\" href=\"https://github.com/algorandfoundation/algokit-cli\"><img src=\"https://img.shields.io/github/stars/algorandfoundation/algokit-cli?color=00dc94&logo=star&style=flat\" /></a>\n    <a target=\"_blank\" href=\"https://dev.algorand.co/algokit/algokit-intro\"><img  src=\"https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fgithub.com%2Falgorandfoundation%2Falgokit-cli&countColor=%2300dc94&style=flat\" /></a>\n</p>\n\n---\n\nThe Algorand AlgoKit CLI is the one-stop shop tool for developers building on the [Algorand network](https://www.algorand.com/).\n\nAlgoKit gets developers of all levels up and running with a familiar, fun and productive development environment in minutes. 
The goal of AlgoKit is to help developers build and launch secure, automated production-ready applications rapidly.\n\n[Install AlgoKit](#install) | [Quick Start Tutorial](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/tutorials/intro.md) | [Documentation](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/algokit.md)\n\n## What is AlgoKit?\n\nAlgoKit comprises a number of components that make it the one-stop shop tool for developers building on the [Algorand network](https://www.algorand.com/).\n\n![AlgoKit components](https://raw.githubusercontent.com/algorandfoundation/algokit-cli/main/docs/imgs/algokit-map.png)\n\nAlgoKit can help you [**learn**](#learn), [**develop**](#develop) and [**operate**](#operate) Algorand solutions. It consists of [a number of repositories](https://github.com/search?q=org%3Aalgorandfoundation+algokit-&type=repositories), including this one.\n\n### Learn\n\nThere are many learning resources on the [Algorand Developer Portal](https://dev.algorand.co/) and the [AlgoKit landing page](https://dev.algorand.co/algokit/algokit-intro) has a range of links to more learning materials. In particular, check out the [quick start tutorial](https://dev.algorand.co/getting-started/algokit-quick-start/).\n\nIf you need help you can access both the [Algorand Discord](https://discord.gg/84AActu3at) (pro-tip: check out the algokit channel!) 
and the [Algorand Forum](https://forum.algorand.org/).\n\nWe have also developed an [AlgoKit video series](https://www.youtube.com/@algodevs/playlists).\n\n### Develop\n\nAlgoKit helps you develop Algorand solutions:\n\n- **Interaction**: AlgoKit exposes a number of interaction methods, namely:\n  - [**AlgoKit CLI**](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/algokit.md): A Command Line Interface (CLI) so you can quickly access AlgoKit capabilities\n  - [VS Code](https://code.visualstudio.com/): All AlgoKit project templates include VS Code configurations so you have a smooth out-of-the-box development experience using VS Code\n  - [lora](https://lora.algokit.io/): AlgoKit has integrations with lora; a web-based user interface that lets you visualise and interact with an Algorand network\n- **Getting Started**: AlgoKit helps you get started quickly when building new solutions:\n  - [**AlgoKit Templates**](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/features/init.md): Template libraries to get you started faster and quickly set up a productive dev experience\n- **Development**: AlgoKit provides SDKs, tools and libraries that help you quickly and effectively build high quality Algorand solutions:\n  - **AlgoKit Utils** ([Python](https://github.com/algorandfoundation/algokit-utils-py#readme) | [TypeScript](https://github.com/algorandfoundation/algokit-utils-ts#readme)): A set of utility libraries so you can develop, test, build and deploy Algorand solutions quickly and easily\n    - [Algorand SDKs](https://dev.algorand.co/reference/sdks/sdk-list/) - The core Algorand SDK providing Algorand protocol API calls, which AlgoKit Utils wraps, but still exposes for advanced scenarios\n  - [**Algorand Python**](https://github.com/algorandfoundation/puya): A semantically and syntactically compatible, typed Python language that works with standard Python tooling and allows you to express smart contracts (apps) and smart signatures 
(logic signatures) for deployment on the Algorand Virtual Machine (AVM).\n  - [**Algorand TypeScript (Beta)**](https://github.com/algorandfoundation/puya-ts): A semantically and syntactically compatible, typed TypeScript language that works with standard TypeScript tooling and allows you to express smart contracts (apps) and smart signatures (logic signatures) for deployment on the Algorand Virtual Machine (AVM). This language is currently in beta.\n  - [**TEALScript**](https://github.com/algorandfoundation/TEALScript): A subset of TypeScript that can be used to express smart contracts (apps) and smart signatures (logic signatures) for deployment on the Algorand Virtual Machine (AVM).\n  - [**AlgoKit LocalNet**](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/features/localnet.md): A local isolated Algorand network so you can simulate real transactions and workloads on your computer\n\n### Operate\n\nAlgoKit can help you deploy and operate Algorand solutions.\n\nAlgoKit comes with out-of-the-box [Continuous Integration / Continuous Deployment (CI/CD) templates](https://github.com/algorandfoundation/algokit-python-template) that help you rapidly set up best-practice software delivery processes that ensure you build quality in and have a solution that can evolve\n\n## What can AlgoKit help me do?\n\nThe set of capabilities supported by AlgoKit will evolve over time, but currently includes:\n\n- Quickly run, explore and interact with an isolated local Algorand network (LocalNet)\n- Building, testing, deploying and calling [Algorand Python](https://github.com/algorandfoundation/puya) / [Algorand TypeScript (Beta)](https://github.com/algorandfoundation/puya-ts) / [TEALScript](https://github.com/algorandfoundation/TEALScript) smart contracts\n\nFor a user guide and guidance on how to use AlgoKit, please refer to the [docs](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/algokit.md).\n\nFuture capabilities are likely to include:\n\n- 
Quickly deploy [standardised](https://github.com/algorandfoundation/ARCs/#arcs-algorand-requests-for-comments), audited smart contracts\n- Building and deploying Algorand dApps\n\n## Is this for me?\n\nThe target audience for this tool is software developers building applications on the Algorand network. A working knowledge of using a command line interfaces and experience using the supported programming languages is assumed.\n\n## How can I contribute?\n\nThis is an open source project managed by the Algorand Foundation. See the [contributing page](https://github.com/algorandfoundation/algokit-cli/blob/main/CONTRIBUTING.md) to learn about making improvements to the CLI tool itself, including developer setup instructions.\n\n# Install\n\n> **Note** Refer to [Troubleshooting](#troubleshooting) for more details on mitigation of known edge cases when installing AlgoKit.\n\n## Prerequisites\n\nThe installation pre-requisites change depending on the method you use to install. Please refer to [Installation Methods](#installation-methods).\n\nDepending on the features you choose to leverage from the AlgoKit CLI, additional dependencies may be required.\nThe AlgoKit CLI will tell you if you are missing one for a given command. These optional dependencies are:\n\n- **Git**: Essential for creating and updating projects from templates. Installation guide available at [Git Installation](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).\n- **Docker**: Necessary for running the AlgoKit LocalNet environment. Docker Compose version 2.5.0 or higher is required. See [Docker Installation](https://docs.docker.com/get-docker/).\n- **Python**: For those installing the AlgoKit CLI via `pipx` or building contracts using Algorand Python. **Minimum required version is Python 3.12+ when working with Algorand Python**. 
See [Python Installation](https://www.python.org/downloads/).\n- **Node.js**: For those working on frontend templates or building contracts using Algorand TypeScript or TEALScript. **Minimum required versions are Node.js `v22` and npm `v10`**. See [Node.js Installation](https://nodejs.org/en/download/).\n\n> **Note**\n> If you have previously installed AlgoKit using `pipx` and would like to switch to a different installation method, please ensure that\n> you first uninstall the existing version by running `pipx uninstall algokit`. Once uninstalled, you can follow the installation instructions for your preferred platform.\n\n## Cross-platform installation\n\nAlgoKit can be installed using OS specific package managers, or using the python tool [pipx](https://pypa.github.io/pipx/).\nSee below for specific installation instructions.\n\n### Installation Methods\n\n- [Windows](#install-algokit-on-windows)\n- [Mac](#install-algokit-on-mac)\n- [Linux](#install-algokit-on-linux)\n- [Universal via pipx](#install-algokit-with-pipx-on-any-os)\n\n## Install AlgoKit on Windows\n\n> **Note**\n> AlgoKit is supported on Windows 10 1709 (build 16299) and later.\n> We only publish an x64 binary, however it also runs on ARM devices by default using the built in x64 emulation feature.\n\n1. 
Ensure prerequisites are installed\n\n   - [WinGet](https://learn.microsoft.com/en-us/windows/package-manager/winget/) (should be installed by default on recent Windows 10 or later)\n   - [Git](https://github.com/git-guides/install-git#install-git-on-windows) (or `winget install git.git`)\n   - [Docker](https://docs.docker.com/desktop/install/windows-install/) (or `winget install docker.dockerdesktop`)\n     > **Note**\n     > See [our LocalNet documentation](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/features/localnet.md#prerequisites) for more tips on installing Docker on Windows\n   - [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/)\n\n2. Install using winget\n\n   ```shell\n   winget install algokit\n   ```\n\n3. [Verify installation](#verify-installation)\n\n### Maintenance\n\nSome useful commands for updating or removing AlgoKit in the future.\n\n- To update AlgoKit: `winget upgrade algokit`\n- To remove AlgoKit: `winget uninstall algokit`\n\n## Install AlgoKit on Mac\n\n> **Note**\n> AlgoKit is supported on macOS Big Sur (11) and later for both x64 and ARM (Apple Silicon)\n\n1. Ensure prerequisites are installed\n\n   - [Homebrew](https://docs.brew.sh/Installation)\n   - [Git](https://github.com/git-guides/install-git#install-git-on-mac) (should already be available if `brew` is installed)\n   - [Docker](https://docs.docker.com/desktop/install/mac-install/), (or `brew install --cask docker`)\n     > **Note**\n     > Docker requires MacOS 11+\n\n2. Install using Homebrew\n\n   ```shell\n   brew install algorandfoundation/tap/algokit\n   ```\n\n3. Restart the terminal to ensure AlgoKit is available on the path\n4. 
[Verify installation](#verify-installation)\n\n### Maintenance\n\nSome useful commands for updating or removing AlgoKit in the future.\n\n- To update AlgoKit: `brew upgrade algokit`\n- To remove AlgoKit: `brew uninstall algokit`\n\n## Install AlgoKit on Linux\n\n> **Note**\n> AlgoKit is compatible with Ubuntu 16.04 and later, Debian, RedHat, and any distribution that supports [Snap](https://snapcraft.io/docs/installing-snapd), but it is only supported on x64 architecture; ARM is not supported.\n\n1. Ensure prerequisites are installed\n\n   - [Snap](https://snapcraft.io/docs/installing-snapd) (should be installed by default on Ubuntu 16.04.4 LTS (Xenial Xerus) or later)\n   - [Git](https://github.com/git-guides/install-git#install-git-on-linux)\n   - [Docker](https://docs.docker.com/desktop/install/linux-install/)\n\n2. Install using snap\n\n   ```shell\n   sudo snap install algokit --classic\n   ```\n\n   > For detailed guidelines per each supported linux distro, refer to [Snap Store](https://snapcraft.io/algokit).\n\n3. [Verify installation](#verify-installation)\n\n### Maintenance\n\nSome useful commands for updating or removing AlgoKit in the future.\n\n- To update AlgoKit: `snap refresh algokit`\n- To remove AlgoKit: `snap remove --purge algokit`\n\n## Install AlgoKit with pipx on any OS\n\n1. Ensure desired prerequisites are installed\n\n   - [Python 3.10 - 3.14](https://www.python.org/downloads/)\n   - [pipx](https://pypa.github.io/pipx/installation/)\n   - [Git](https://github.com/git-guides/install-git)\n   - [Docker](https://docs.docker.com/get-docker/)\n\n2. Install using pipx\n\n   ```shell\n   pipx install algokit\n   ```\n\n3. Restart the terminal to ensure AlgoKit is available on the path\n4. 
[Verify installation](#verify-installation)\n\n### Maintenance\n\nSome useful commands for updating or removing AlgoKit in the future.\n\n- To update AlgoKit: `pipx upgrade algokit`\n- To remove AlgoKit: `pipx uninstall algokit`\n\n## Verify installation\n\nVerify AlgoKit is installed correctly by running `algokit --version` and you should see output similar to:\n\n```\nalgokit, version 1.0.1\n```\n\n> **Note**\n> If you receive one of the following errors:\n>\n> - `command not found: algokit` (bash/zsh)\n> - `The term 'algokit' is not recognized as the name of a cmdlet, function, script file, or operable program.` (PowerShell)\n>\n> Then ensure that `algokit` is available on the PATH by running `pipx ensurepath` and restarting the terminal.\n\nIt is also recommended that you run `algokit doctor` to verify there are no issues in your local environment and to diagnose any problems if you do have difficulties running AlgoKit. The output of this command will look similar to:\n\n```\ntimestamp: 2023-03-27T01:23:45+00:00\nAlgoKit: 1.0.1\nAlgoKit Python: 3.11.1 (main, Dec 23 2022, 09:28:24) [Clang 14.0.0 (clang-1400.0.29.202)] (location: /Users/algokit/.local/pipx/venvs/algokit)\nOS: macOS-13.1-arm64-arm-64bit\ndocker: 20.10.21\ndocker compose: 2.13.0\ngit: 2.37.1\npython: 3.10.9 (location:  /opt/homebrew/bin/python)\npython3: 3.10.9 (location:  /opt/homebrew/bin/python3)\npipx: 1.1.0\npoetry: 1.3.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.18\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n```\n\nPer the above output, the doctor command output is a helpful tool if you need to ask for support or [raise an issue](https://github.com/algorandfoundation/algokit-cli/issues/new).\n\n## Troubleshooting\n\nThis section addresses specific edge cases and issues that some 
users might encounter when interacting with the CLI. The following table provides solutions to known edge cases:\n\n| Issue Description                                                                                                                                   | OS(s) with observed behaviour                             | Steps to mitigate                                                                                                                                                                                                                                                                                                                      | References                                          |\n| --------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------- |\n| This scenario may arise if installed `python` was build without `--with-ssl` flag enabled, causing pip to fail when trying to install dependencies. | Debian 12                                                 | Run `sudo apt-get install -y libssl-dev` to install the required openssl dependency. Afterwards, ensure to reinstall python with `--with-ssl` flag enabled. This includes options like [building python from source code](https://medium.com/@enahwe/how-to-06bc8a042345) or using tools like [pyenv](https://github.com/pyenv/pyenv). 
| <https://github.com/actions/setup-python/issues/93> |\n| `poetry install` invoked directly or via `algokit project bootstrap all` fails on `Could NOT find PkgConfig (missing: PKG_CONFIG_EXECUTABLE)`.      | `MacOS` >=14 using `python` 3.13 installed via `homebrew` | Install dependencies deprecated in `3.13` and latest MacOS versions via `brew install pkg-config`, delete the virtual environment folder and retry the `poetry install` command invocation.                                                                                                                                            | N/A                                                 |\n"
  },
  {
    "path": "debug.py",
    "content": "\"\"\"\nThis script is for invoking algokit from your IDE with a dynamic set of args,\ndefined in args.in (which is in .gitignore)\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\ntry:\n    import click\nexcept ImportError:\n    print(  # noqa: T201\n        \"ERROR: Couldn't import click, make sure you've run 'poetry install' and activated the virtual environment.\\n\"\n        \"For tips on getting started with developing AlgoKit CLI itself see CONTRIBUTING.md.\\n\",\n        file=sys.stderr,\n    )\n    raise\n\nif sys.prefix == sys.base_prefix:\n    click.echo(\n        click.style(\n            \"WARNING: virtualenv not activated, this is unexpected and you probably want to activate it first\",\n            fg=\"red\",\n        ),\n        err=True,\n    )\n\nvcs_root = Path(__file__).parent\nargs_file = vcs_root / \"args.in\"\nif not args_file.exists():\n    click.echo(\n        click.style(\n            \"arg.in does not exist, creating an empty file.\\n\"\n            \"Edit this file to change what runs - each line should contain the command line arguments to algokit.\\n\"\n            \"\\n\",\n            fg=\"yellow\",\n        ),\n        err=True,\n    )\n    args_file.touch(exist_ok=False)\n    args_file.write_text(\"--version\")\n\ncommands_sequence = args_file.read_text().splitlines()\n\n# change to src directory so algokit is in path\nos.chdir(vcs_root / \"src\")\nfor command in commands_sequence or [\"\"]:\n    click.echo(click.style(f\"> algokit -v {command}\", bold=True), err=True)\n    run_result = subprocess.run([sys.executable, \"-m\", \"algokit\", \"-v\", *command.split()], check=False)\n    if run_result.returncode != 0:\n        click.echo(\n            click.style(\n                f\"command failed, return code was: {run_result.returncode}\",\n                bold=True,\n                fg=\"red\",\n            ),\n            err=True,\n        )\n        sys.exit(run_result.returncode)\n"
  },
  {
    "path": "docs/algokit.md",
    "content": "# AlgoKit\n\nThe Algorand AlgoKit CLI is the one-stop shop tool for developers building on the Algorand network. The goal of AlgoKit is to help developers build and launch secure, automated production-ready applications rapidly.\n\n## AlgoKit CLI commands\n\nFor details on how to use individual features see the following\n\n- [Bootstrap](./features/project/bootstrap.md) - Bootstrap AlgoKit project dependencies\n- [Compile](./features/compile.md) - Compile Algorand Python code\n- [Completions](./features/completions.md) - Install shell completions for AlgoKit\n- [Deploy](./features/project/deploy.md) - Deploy your smart contracts effortlessly to various networks\n- [Dispenser](./features/dispenser.md) - Fund your TestNet account with ALGOs from the AlgoKit TestNet Dispenser\n- [Doctor](./features/doctor.md) - Check AlgoKit installation and dependencies\n- [Explore](./features/explore.md) - Explore Algorand Blockchains using lora\n- [Generate](./features/generate.md) - Generate code for an Algorand project\n- [Goal](./features/goal.md) - Run the Algorand goal CLI against the AlgoKit Sandbox\n- [Init](./features/init.md) - Quickly initialize new projects using official Algorand Templates or community provided templates\n- [LocalNet](./features/localnet.md) - Manage a locally sandboxed private Algorand network\n- [Project](./features/project.md) - Manage an AlgoKit project workspace on your file system\n- [Tasks](./features/tasks.md) - Perform a variety of useful operations on the Algorand blockchain\n\n## Common AlgoKit CLI options\n\nAlgoKit has a number of global options that can impact all commands. Note: these global options must be appended to `algokit` and appear before a command, e.g. `algokit -v localnet start`, but not `algokit localnet start -v`. 
The exception to this is `-h`, which can be appended to any command or sub-command to see contextual help information.\n\n- `-h, --help` The help option can be used on any command to get details on any command, its sub-commands and options.\n- `-v, --verbose` Enables DEBUG logging, useful when troubleshooting or if you want to peek under the covers and learn what AlgoKit CLI is doing.\n- `--color / --no-color` Enables or disables output of console styling, we also support the [NO_COLOR](https://no-color.org) environment variable.\n- `--skip-version-check` Skips updated AlgoKit version checking and prompting for that execution, this can also be disabled [permanently on a given machine](./cli/index.md#version-prompt) with `algokit config version-prompt disable`.\n\nSee also the [AlgoKit CLI Reference](./cli/index.md), which details every command, sub-command and option.\n\n## AlgoKit Tutorials\n\nThe following tutorials guide you through various scenarios:\n\n- [AlgoKit quick start](https://dev.algorand.co/algokit/algokit-intro)\n- [Creating AlgoKit templates](https://dev.algorand.co/algokit/custom-algokit-templates)\n\n## Guiding Principles\n\nAlgoKit is guided by the following solution principles which flow through to the applications created by developers.\n\n1. **Cohesive developer tool suite**: Using AlgoKit should feel professional and cohesive, like it was designed to work together, for the developer; not against them. Developers are guided towards delivering end-to-end, high quality outcomes on MainNet so they and Algorand are more likely to be successful.\n2. **Seamless onramp**: New developers have a seamless experience to get started and they are guided into a pit of success with best practices, supported by great training collateral; you should be able to go from nothing to debugging code in 5 minutes.\n3. 
**Leverage existing ecosystem**: AlgoKit functionality gets into the hands of Algorand developers quickly by building on top of the existing ecosystem wherever possible and aligned to these principles.\n4. **Sustainable**: AlgoKit should be built in a flexible fashion with long-term maintenance in mind. Updates to latest patches in dependencies, Algorand protocol development updates, and community contributions and feedback will all feed in to the evolution of the software.\n5. **Secure by default**: Include defaults, patterns and tooling that help developers write secure code and reduce the likelihood of security incidents in the Algorand ecosystem. This solution should help Algorand be the most secure Blockchain ecosystem.\n6. **Extensible**: Be extensible for community contribution rather than stifling innovation, bottle-necking all changes through the Algorand Foundation and preventing the opportunity for other ecosystems being represented (e.g. Go, Rust, etc.). This helps make developers feel welcome and is part of the developer experience, plus it makes it easier to add features sustainably.\n7. **Meet developers where they are**: Make Blockchain development mainstream by giving all developers an idiomatic development experience in the operating system, IDE and language they are comfortable with so they can dive in quickly and have less they need to learn before being productive.\n8. **Modular components**: Solution components should be modular and loosely coupled to facilitate efficient parallel development by small, effective teams, reduced architectural complexity and allowing developers to pick and choose the specific tools and capabilities they want to use based on their needs and what they are comfortable with.\n"
  },
  {
    "path": "docs/architecture-decisions/2022-11-14_sandbox-approach.md",
    "content": "# AlgoKit sandbox approach\n\n- **Status**: Approved\n- **Owner:** Rob Moore\n- **Deciders**: Anne Kenyon (Algorand Inc.), Alessandro Cappellato (Algorand Foundation), Will Winder (Algorand Inc.)\n- **Date created**: 2022-11-14\n- **Date decided:** 2022-11-14\n- **Date updated**: 2022-11-16\n\n## Context\n\nIn order for AlgoKit to facilitate a productive development experience it needs to provide a managed Algorand sandbox experience. This allows developers to run an offline (local-only) private instance of Algorand that they can privately experiment with, run automated tests against and reset at will.\n\n## Requirements\n\n- The sandbox works cross-platform (i.e. runs natively on Windows, Mac and Linux)\n- You can spin up algod and indexer since both have useful use cases when developing\n- The sandbox is kept up to date with the latest version of algod / indexer\n- There is access to KMD so that you can programmatically fund accounts to improve the developer experience and reduce manual effort\n- There is access to the tealdbg port outside of algod so you can attach a debugger to it\n- The sandbox is isolated and (once running) works offline so the workload is private, allows development when there is no internet (e.g. when on a plane) and allows for multiple instances to be run in parallel (e.g. 
when developing multiple independent projects simultaneously)\n- Works in continuous integration and local development environments so you can facilitate automated testing\n\n## Principles\n\n- **[AlgoKit Guiding Principles](../../docs/algokit.md#Guiding-Principles)** - specifically Seamless onramp, Leverage existing ecosystem, Meet devs where they are\n- **Lightweight** - the solution should have as low an impact as possible on resources on the developers machine\n- **Fast** - the solution should start quickly, which makes for a nicer experience locally and also allows it to be used for continuous integration automation testing\n\n## Options\n\n### Option 1 - Pre-built DockerHub images\n\nPre-built application developer-optimised DockerHub images that work cross-platform; aka an evolved AlgoKit version of <https://github.com/MakerXStudio/algorand-sandbox-dev>.\n\n**Pros**\n\n- It's quick to download the images and quick to start the container since you don't need to compile Algod / indexer and the images are optimised for small size\n- The only dependency needed is Docker, which is a fairly common dependency for most developers to use these days\n- The images are reasonably lightweight\n- The images provide an optimised application developer experience with: (devmode) algod, KMD, tealdbg, indexer\n- It natively works cross-platform\n\n**Cons**\n\n- Some people have reported problems running WSL 2 on a small proportion of Windows environments (to get the latest Docker experience)\n- Docker within Docker can be a problem in some CI environments that run agents on Docker in the first place\n- Work needs to be done to create an automated CI/CD that automatically releases new versions to keep it up to date with latest algod/indexer versions\n\n### Option 2 - Lightweight algod client implementation\n\nWork with the Algorand Inc. 
team to get a lightweight algod client that can run outside of a Docker container cross-platform.\n\n**Pros**\n\n- Likely to be the most lightweight and fastest option - opening up better/easier isolated/parallelised automated testing options\n- Wouldn't need Docker as a dependency\n\n**Cons**\n\n- Indexer wouldn't be supported (Postgres would require Docker anyway)\n- Algorand Inc. does not distribute Windows binaries.\n\n### Option 3 - Sandbox\n\nUse the existing [Algorand Sandbox](https://github.com/algorand/sandbox).\n\n**Pros**\n\n- Implicitly kept up to date with Algorand - no extra thing to maintain\n- Battle-tested by the core Algorand team day-in-day-out\n- Supports all environments including unreleased feature branches (because it can target a git repo / commit hash)\n\n**Cons**\n\n- Sandbox is designed for network testing, not application development - it's much more complex than the needs of application developers\n- Slow to start because it has to download and build algod and indexer (this is particularly problematic for ephemeral CI/CD build agents)\n- It's not cross-platform (it requires bash to run sandbox.sh, although a sandbox.ps1 version could be created)\n\n## Preferred option\n\nOption 1 and Option 2.\n\nOption 1 provides a fully-featured experience that will work great in most scenarios, having option 2 as a second option would open up more advanced parallel automated testing scenarios in addition to that.\n\n## Selected option\n\nOption 1\n\nWe're aiming to release the first version of AlgoKit within a short timeframe, which won't give time for Option 2 to be developed. 
Sandbox itself has been ruled out since it's not cross-platform and is too slow for both development and continuous integration.\n\nOption 1 also results in a similar result to running Sandbox, so existing Algorand documentation, libraries and approaches should work well with this option making it a good slot-in replacement for Sandbox for application developers.\n\nAlgoKit is designed to be modular: we can add in other approaches over time such as Option 2 when/if it becomes available.\n"
  },
  {
    "path": "docs/architecture-decisions/2022-11-22_beaker-testing-strategy.md",
    "content": "# Beaker testing strategy\n\n- **Status**: Draft\n- **Owner:** Rob Moore\n- **Deciders**: Anne Kenyon (Algorand Inc.), Alessandro Cappellato (Algorand Foundation), Michael Diamant (Algorand Inc.), Benjamin Guidarelli (Algorand Foundation)\n- **Date created**: 2022-11-22\n- **Date decided:** TBD\n- **Date updated**: 2022-11-28\n\n## Context\n\nAlgoKit will be providing a smart contract development experience built on top of [PyTEAL](https://pyteal.readthedocs.io/en/stable/) and [Beaker](https://developer.algorand.org/articles/hello-beaker/). Beaker is currently in a pre-production state and needs to be productionised to provide confidence for use in generating production-ready smart contracts by AlgoKit users. One of the key things to resolve for productionisation of Beaker is to improve the automated test coverage.\n\nBeaker itself is currently split into the PyTEAL generation related code and the deployment and invocation related code (including interacting with Sandbox). This decision is solely focussed on the PyTEAL generation components of Beaker. The current automated test coverage of this part of the codebase is ~50% and is largely based on compiling and/or executing smart contracts against Algorand Sandbox. While it's generally not best practice to try and chase a specific code coverage percentage, a coverage of ~80%+ is likely indicative of good coverage in a dynamic language such as Python.\n\nThe Sandbox tests provide a great deal of confidence, but are also slow to execute, which can potentially impair Beaker development and maintenance experience, especially as the coverage % is grown and/or features are added over time.\n\nBeaker, like PyTEAL, can be considered to be a transpiler on top of TEAL. When generating smart contracts, the individual TEAL opcodes are significant, since security audits will often consider the impact at that level, and it can have impacts on (limited!) resource usage of the smart contract. 
As such, \"output stability\" is potentially an important characteristic to test for.\n\n## Requirements\n\n- We have a high degree of confidence that writing smart contracts in Beaker leads to expected results for production smart contracts\n- We have reasonable regression coverage so features are unlikely to break as new features and refactorings are added over time\n- We have a level of confidence in the \"output stability\" of the TEAL code generated from a Beaker smart contract\n\n## Principles\n\n- **Fast development feedback loops** - The feedback loop during normal development should be as fast as possible to improve the development experience of developing Beaker itself\n- **Low overhead** - The overhead of writing and maintaining tests is as low as possible; tests should be quick to read and write\n- **Implementation decoupled** - Tests aren't testing the implementation details of Beaker, but rather the user-facing experience and output of it; this reduces the likelihood of needing to rewrite tests when performing refactoring of the codebase\n\n## Options\n\n### Option 1: TEAL Approval tests\n\nWriting [approval tests](https://approvaltests.com/) of the TEAL output generated from a given Beaker smart contract.\n\n**Pros**\n\n- Ensures TEAL output stability and focussing on asserting the output of Beaker rather than testing whether Algorand Protocol is working\n- Runs in-memory/in-process so will execute in low 10s of milliseconds making it easy to provide high coverage with low developer feedback loop overhead\n- Tests are easy to write - the assertion is a single line of code (no tedious assertions)\n- The tests go from Beaker contract -> TEAL approval so don't bake implementation detail and thus allow full Beaker refactoring with regression confidence without needing to modify the tests\n- Excellent regression coverage characteristics - fast test run and quick to write allows for high coverage and anchoring assertions to TEAL output is a very clear 
regression marker\n\n**Cons**\n\n- The tests rely on the approver to understand the TEAL opcodes that are emitted and verify they match the intent of the Beaker contract - anecdotally this can be difficult at times even for experienced (Py)TEAL developers\n- Doesn't assert the correctness of the TEAL output, just that it matches the previously manually approved output\n\n### Option 2: Sandbox compile tests\n\nWriting Beaker smart contracts and checking that the TEAL output successfully compiles against algod.\n\n**Pros**\n\n- Ensures that the TEAL output compiles, giving some surety about the intactness of it and focussing on asserting the output of Beaker rather than testing whether Algorand Protocol is working\n- Faster than executing the contract\n- Tests are easy to write - the assertion is a single line of code (no tedious assertions)\n\n**Cons**\n\n- Order of magnitude slower than asserting TEAL output (out of process communication)\n- Doesn't assert the correctness of the TEAL output, just that it compiles\n\n### Option 3: Sandbox execution tests\n\nExecute the smart contracts and assert the output is as expected. 
This can be done using dry run and/or actual transactions.\n\n**Pros**\n\n- Asserts that the TEAL output _executes_ correctly giving the highest confidence\n- Doesn't require the test writer to understand the TEAL output\n- Tests don't bake implementation detail and do assert on output so give a reasonable degree of refactoring confidence without modifying tests\n\n**Cons**\n\n- Tests are more complex to write\n- Tests take an order of magnitude longer to run than just compilation (two orders of magnitude longer than checking TEAL output)\n- Harder to get high regression coverage since it's slower to write and run the tests making it impractical to get full coverage\n- Doesn't ensure output stability\n- Is testing that the Algorand Protocol itself works (TEAL `x` when executed does `y`) so the testing scope is broader than just Beaker itself\n\n## Preferred option\n\nOption 1 (combined with Option 2 to ensure the approved TEAL actually compiles, potentially only run on CI by default to ensure fast local dev loop) for the bulk of testing to provide a rapid feedback loop for developers as well as ensuring output stability and great regression coverage.\n\n## Selected option\n\nCombination of option 1, 2 and 3:\n\n- While Option 1 + 2 provides high confidence with fast feedback loop, it relies on the approver being able to determine the TEAL output does what they think it does, which isn't always the case\n- Option 3 will be used judiciously to provide that extra level of confidence that the fundamentals of the Beaker output are correct for each main feature; this will involve key scenarios being tested with execution based tests, the goal isn't to get combinatorial coverage, which would be slow and time-consuming, but to give a higher degree of confidence\n- The decision of when to use Option 3 as well as Option 1+2 will be made on a per-feature basis and reviewed via pull request, over time a set of principles may be able to be revised that outline a clear 
delineation\n- Use of PyTest markers to separate execution so by default the dev feedback loop is still fast, but the full suite is always run against pull requests and merges to main\n"
  },
  {
    "path": "docs/architecture-decisions/2023-01-11_beaker_productionisation_review.md",
    "content": "# Beaker productionisation review\n\n- **Status**: Approved\n- **Owners:** Rob Moore, Adam Chidlow\n- **Deciders**: Anne Kenyon (Algorand Inc.), Alessandro Cappellato (Algorand Foundation), Jason Weathersby (Algorand Foundation), Benjamin Guidarelli (Algorand Foundation), Bob Broderick (Algorand Inc.)\n- **Date created**: 2023-01-11\n- **Date decided:** 2023-02-04\n- **Date updated**: 2023-02-04\n\n## Context\n\nBeaker is a smart contract development framework for [Algorand](https://www.algorand.com/) that provides a wrapper over [PyTeal](https://pyteal.readthedocs.io/en/stable/) that focusses on providing a great developer experience through terse, expressive language constructs and making common tasks easier. Beaker is useful because it creates a higher level programming construct from PyTEAL that is easier to get started when learning and results in code that is terser and easier to read and write.\n\nBeaker is an important part of the [AlgoKit strategy](https://github.com/algorandfoundation/algokit-cli/#algokit-cli). It helps create a more seamless onramp to Algorand development by providing an easier starting point for developers. As part of the lead up to releasing AlgoKit, it was desired to perform a v1.0 release of Beaker and explicitly mark it as being production ready. 
In order to provide confidence a productionisation review was conducted by [MakerX](https://www.makerx.com.au/); this document summarises the recommendations from that review.\n\nAn architecture decision was made in the lead up to this review on a [testing strategy for Beaker](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/architecture-decisions/2022-11-22_beaker-testing-strategy.md).\n\n## Goal\n\nThe goals of this productionisation review are to:\n\n- Get Beaker ready for production use\n- Gain confidence in Beaker's software architecture and maintainability\n- Reduce the likelihood of need for breaking changes soon after release by getting key recommended breaking changes identified now\n\n## Findings summary\n\nThe Beaker codebase is well factored and had a decent initial test coverage (albeit some of that test coverage is via a series of examples that while they provide high code coverage, don't actually validate all of the functionality).\n\nA series of changes have been landed to improve some of the fundamentals of Beaker in preparation for production launch:\n\n- [Various improvements](https://github.com/algorand-devrel/beaker/pull/142) - Improved test coverage, improved dev experience (setup + ongoing) via Poetry, improvements to the code quality setup (linting, automatic formatting, typing), allowed Windows development on Beaker itself, significantly improved CI/CD pipeline speed, removing the examples directory and tests from being distributed with the PyPi package\n- [Typing improvements](https://github.com/algorand-devrel/beaker/pull/147)\n- [Removed inline imports](https://github.com/algorand-devrel/beaker/pull/148)\n- [Removed dead code](https://github.com/algorand-devrel/beaker/pull/149/files)\n- [Added automated release management and versioning](https://github.com/algorand-devrel/beaker/pull/161)\n\nIn addition, there are a remaining set of more major (breaking) changes that are recommended. 
The recommendations are split into 2 categories, recommendations for immediate improvement (i.e. included in v1.0) and future suggestions that can be addressed post v1.0 launch.\n\nThe recommended additional areas for immediate improvement are:\n\n- **Replace the class-based structure with an instance based one** - remove some areas of potential surprise for developers and simplify the Beaker codebase by moving to a composable instance-based structure rather than a static class-based structure\n- **Defer PyTEAL compilation** - improve flexibility and future contract output stability by deferring PyTEAL compilation (i.e. Beaker -> TEAL transpilation) to not happen when the Beaker contract is initialised\n- **Renamings** - There are some clear parameters that make sense to rename for various reasons\n- **Key decorator improvements** - Refactor some of the Beaker decorators to fix some bugs and improve user experience\n- **Beaker state refactor** - Refactor of the Beaker state interfaces to improve user extensibility and significantly simplify the Beaker codebase to improve maintainability\n\nThe recommended areas for future improvement are:\n\n- Typed client generation from app spec to improve deploy-time and run-time dev experience\n- `Tmpl` values in app spec so you can have type-safe deployment clients that substitute any template values reliably at contract deploy time\n- Refactor storage types (blob, reserved, etc.) 
to allow use of in-built Python types and operators (terser, more intuitive)\n- Box storage implementation improved to match local/global behaviour and also automatically delete itself on contract deletion\n- Composable and stackable authorization and `@authorize` as a standalone decorator\n- PyTEAL typings to be improved to support types beyond `Expr` where a more explicit type can be specified (improves typing and extensibility)\n- Support referencing an app/lsig via ID/address (deployed separately, potentially automatically as part of reading a Directed Acyclic Graph (DAG) in application.json of application dependencies) or bytes (deployed inline, what was previously called precompile, noting this would be deploy-time substitution, not smart contract run-time substitute like `TemplateVariable`), this also may allow precompile to be deprecated (it's a very complex implementation for what we believe to be an advanced edge case)\n\n## Immediate recommendations\n\n### (1) Replace the class-based structure with an instance based one\n\n#### What?\n\nBeaker is currently structured around users sub-classing the `beaker.Application` class. They then hold state variables (from `beaker.state.*`) as [class variables](https://pynative.com/python-class-variables/) and also contain methods which are forwarded to the `pyteal.abi.Router` instance created during `Application.compile(...)` based on decorators from `beaker.decorators.*`. We propose replacing this with an \"instance based structure\", drawing inspiration from highly popular Python web frameworks such as `flask` ([example](https://flask.palletsprojects.com/en/2.2.x/quickstart/#a-minimal-application)).\n\nThis change will simplify Beaker's code (improving maintainability) and, more importantly, reduce the potential for end-user error and confusion.\n\n#### Why?\n\n**User-facing benefits**\n\n1. 
The current structure, by encouraging and supporting [bound instance methods](https://www.geeksforgeeks.org/bound-methods-python/) alongside [class variables](https://pynative.com/python-class-variables/), is a potential source of confusion for users new to writing smart contracts or PyTEAL. The distinction between what runs on `beaker.Application` instantiation, evaluation by PyTeal during compile, and finally what runs on-chain, can be difficult to grasp at first. One might assume (wrongly) that Beaker is somehow maintaining the state of `self.*` between methods, but this is not the case. Contrast this with Solidity, for example, where state can be directly manipulated because it's held within the class instance.\n2. Currently, actually using `self.*` can easily lead to problems, since if they are not defined before calling `super().__init__(...)` they won't be defined when compiling. This can be fixed by not automatically compiling in `Application.__init__()` (which is also proposed in (2) below) for simple constants, however another issue is that using `self.foo = <Some beaker.state object>`, would not currently work with the introspection beaker is performing. This could potentially be fixed by itself, but developers will still need to define these values _before_ calling `super().__init__()` which is a source of confusion. Usually, idiomatic Python will call super init sooner rather than later so this is something that can trip up experienced Python developers.\n3. In order to compose applications together, say if there were two ARC standard implementations that we wanted to combine into the same contract, the user doesn't need to understand Python's multiple-inheritance idiosyncrasies like [Method Resolution Order](https://www.geeksforgeeks.org/method-resolution-order-in-python-inheritance/). Additionally, by taking a functional composition approach, we can have easy to understand entry points where you can check any pre-conditions.\n4. 
Since state variables are currently defined as class variables, this makes them \"globals\", which can lead to errors/bugs that are non-obvious.\n\n   For instance, consider:\n\n   ```python\n   class MyBaseApp(beaker.Application):\n       counter = beaker.ApplicationStateValue(stack_type=pyteal.TealType.uint64)\n\n       @beaker.create\n       def create(self) -> pyteal.Expr:\n           return self.initialize_application_state()\n\n   class MyApp(MyBaseApp):\n       pass\n   MyApp.counter.default = pyteal.Int(10)\n\n   class MyOtherApp(MyBaseApp):\n       pass\n\n   app1 = MyApp()\n   app2 = MyOtherApp()\n   assert app1.approval_program != app2.approval_program  # fails\n   ```\n\n5. Setting parameters that control the program creation is awkward with the current approach of extending `beaker.Application`, currently this impacts just the `version` parameter (which specifies the TEAL version), but there are clear examples we can see for other variables that are useful to define at this point in the future (e.g. a state allocation override if you know that the state a contract will need grows in the future).\n6. There are bugs in beaker which are directly caused by the class-based structure. 
For example, bare methods are currently evaluated as a subroutine only once:\n\n   ```python\n   class MyApp(beaker.Application):\n       price = beaker.ApplicationStateValue(stack_type=pyteal.TealType.uint64)\n\n       def __init__(self, default_price: int, version: int = pyteal.MAX_TEAL_VERSION):\n           self.price.default = pyteal.Int(default_price)\n           super().__init__(version=version)\n\n\n   class CorrectApp(MyApp):\n       @beaker.create\n       def create(self, *, output: pyteal.abi.Uint64) -> pyteal.Expr:\n           return pyteal.Seq(self.initialize_application_state(), output.set(self.price))\n\n\n   class IncorrectApp(MyApp):\n       @beaker.create\n       def create(self) -> pyteal.Expr:\n           return self.initialize_application_state()\n\n\n   correct_app1 = CorrectApp(default_price=123)\n   correct_app2 = CorrectApp(default_price=456)\n\n   incorrect_app1 = IncorrectApp(default_price=123)\n   incorrect_app2 = IncorrectApp(default_price=456)\n\n   assert correct_app1.approval_program != correct_app2.approval_program  # success\n   assert incorrect_app1.approval_program != incorrect_app2.approval_program  # failure\n\n   ```\n\n**Beaker maintainability benefit**\n\nThe main benefit to Beaker is the removal of the complex code that modifies function signatures to remove `self` before passing to PyTEAL. 
Removing the instance method implementation will significantly reduce the complexity of the code and likelihood of unknown bugs surfacing from that part of the codebase.\n\n#### Before & After (user's perspective)\n\nWhile the proposed changes are fairly substantial internally, and propose a radically different architecture conceptually for beaker Applications, the migration should actually be relatively straight forward for users with existing Beaker code:\n\nThe following examples assume the import of relevant names from `beaker` and/or `pyteal` are present to simplify the code.\n\n**Before:**\n\n```python\n\nclass CounterApp(Application):\n    counter = ApplicationStateValue(\n        stack_type=TealType.uint64,\n        descr=\"A counter for showing how to use application state\",\n    )\n\n    @create\n    def create(self):\n        return self.initialize_application_state()\n\n    @external(authorize=Authorize.only(Global.creator_address()))\n    def increment(self, *, output: abi.Uint64):\n        \"\"\"increment the counter\"\"\"\n        return Seq(\n            self.counter.set(self.counter + Int(1)),\n            output.set(self.counter),\n        )\n\n    @external(authorize=Authorize.only(Global.creator_address()))\n    def decrement(self, *, output: abi.Uint64):\n        \"\"\"decrement the counter\"\"\"\n        return Seq(\n            self.counter.set(self.counter - Int(1)),\n            output.set(self.counter),\n        )\n\n```\n\n**After:**\n\nThe changes are:\n\n- State is moved into a dedicated class `CounterState`\n- `beaker.Application` is directly instantiated (along with the state, and optionally the teal `version`)\n- Class methods are de-indented, `self` is removed and the decorator is prefixed with `app.` (which in turn reduces the number of imports needed from the `beaker` namespace and provides better exploratory intellisense for users)\n\n```python\n\nclass CounterState(beaker.State):\n    counter = ApplicationStateValue(\n        
stack_type=TealType.uint64,\n        descr=\"A counter for showing how to use application state\",\n    )\n\n\napp = beaker.Application(state=CounterState())\n\n@app.create\ndef create():\n    return app.state.initialize_application_state()\n\n@app.external(authorize=Authorize.only(Global.creator_address()))\ndef increment(*, output: abi.Uint64):\n    \"\"\"increment the counter\"\"\"\n    return Seq(\n        app.state.counter.set(app.state.counter + Int(1)),\n        output.set(app.state.counter),\n    )\n\n@app.external(authorize=Authorize.only(Global.creator_address()))\ndef decrement(*, output: abi.Uint64):\n    \"\"\"decrement the counter\"\"\"\n    return Seq(\n        app.state.counter.set(app.state.counter - Int(1)),\n        output.set(app.state.counter),\n    )\n\n```\n\n### (2) Defer PyTEAL compilation\n\n#### What?\n\nCurrently, `beaker.Application.compile()` is called as part of `__init__()`, assuming there are no `precompiles` defined. We recommend that `compile()` always be deferred to a later point, and further that `compile()` does not mutate `Application` in any way, but instead returns a new object.\n\n#### Why?\n\nThe deferment of the `compile()` call is actually a necessary part of recommendation #1 that we have skipped over thus far, but would be recommended anyway.\n\nThe immediate `compile()` has issues such as requiring implementors (i.e. subclasses) to call `super().__init__()` as a **final** step in their own `__init__` method - any code that runs after the super init call will have no effect on the application produced!\n\nImmediate compilation also reduces the control the user has over the output. Although currently the only parameter that `compile` takes is a `client`, it might be useful to add (optional) parameters here to control the compilation. 
For example, if you can pass in the list of optimisations that should be applied, that allows you to have [output stability](../articles/output_stability.md) of your smart contract code if new optimisations are added in the future.\n\nThe separation of compiled state outside of `Application` simplifies the design, and can be done mostly transparently to end-users.\n\nThe separation of compiled state will also benefit future interoperability. It allows for more explicit decoupling of PyTEAL compilation (Beaker / PyTEAL transpilation -> TEAL) and deployment (TEAL -> compiled byte code -> Algorand network). Once `beaker.client` is split into a separate package, if the compiled state can be both generated from a beaker Application object _or_ loaded from disk (or similar), this means Beaker's ApplicationClient could be used in more situations, such as for a (say) tealish smart contract, or a C# smart contract, or a raw PyTEAL or TEAL smart contract, etc. This conforms better to the modularity principle in AlgoKit and also vice versa allows for a Beaker smart contract to be deployed by a TypeScript deployer, or C# deployer, etc.\n\n#### Before & After - user's perspective\n\nFor most use cases, this should be a relatively small and probably imperceptible change.\n\nWe believe there are two common usage scenarios that use the output of PyTEAL compilation currently:\n\n1. Output the `Application` via `Application.dump(...)`\n2. Interact with the `Application` by passing it to `ApplicationClient(app=..., ...)`.\n\nWe propose maintaining those two scenarios without any immediate external changes, but internally:\n\n1. `Application.dump(...)` will call `Application.compile().dump()`, and potentially trigger a `DeprecationWarning` if we decide that we want users to always explicitly call compile.\n2. 
`ApplicationClient(app=..., ...)` will call `Application.compile()` and not retain any reference to `app`.\n\nTo make use of scenarios 1 _and_ 2, or to control compilation parameters, a user should also be able to (for instance):\n\n```python\napp = Application(...)\ncompiled_app: CompiledApplication = app.compile(...)\ncompiled_app.dump(...)\nclient = ApplicationClient(app=compiled_app, ...)\n```\n\nWe suggest also potentially renaming `CompiledApplication.dump()`, perhaps to something along the lines of `serialize()`.\n\nThe `compile()` call is actually a transpilation call (Beaker / PyTEAL transpilation -> TEAL), although it's called compile in PyTEAL so consideration should be made to either keep consistency with PyTEAL or use the more accurate `transpile()` (which also reduces confusion around the fact that you then have to call `compile` on algod to compile the TEAL to byte code before deployment).\n\nThe exact details of what `CompiledApplication` will look like are TBD, but should be driven by the principles outlined in the \"Why?\" section above. Broadly, it stands to reason it would contain the approval and clear TEAL, the ABI spec and the app spec though at least.\n\nFinally, there is likely need to use metadata from transpilation such as the mapping of source code to line numbers, but we are confident these use cases will be able to be implemented on top of the proposed change.\n\n### (3) Renamings\n\nRenaming `version` parameter in `Application.__init__(version: int = pyteal.MAX_VERSION)` to (e.g.) `avm_version`, to be more explicit. Otherwise developers may be confused that it's the version of the specific smart contract. It may be desirable to allow `version` to continue to be specified for some time, but to raise a `DeprecationWarning`.\n\nRename methods in `beaker.lib.*` to start with an uppercase. 
Although going against PEP-8, this prevents collisions with `builtins` such as `min` and `max`, and also follows the useful convention from PyTeal where methods that produce TEAL code (vs just running Python code at transpilation time) start with uppercase such as `Add`, `Or`, `Concat`, etc.\n\n### (4) Key decorator improvements\n\nRefactor some of the Beaker decorators to fix some bugs and improve user experience.\n\nEnd state:\n\n```python\n# for user convenience, rather than having to import + use MethodConfig\nOnCompleteActionName: TypeAlias = Literal[\n    \"no_op\",\n    \"opt_in\",\n    \"close_out\",\n    \"clear_state\",\n    \"update_application\",\n    \"delete_application\",\n]\n\nHandlerFunc: TypeAlias = Callable[..., Expr]\nDecoratorFunc: TypeAlias = Callable[[HandlerFunc], HandlerFunc]\n\nclass Application:\n    # the main decorator, capable of handling both ABI and Bare method registration\n    def external(\n        self,\n        fn: HandlerFunc | None = None,\n        /,\n        *,\n        # note: retain existing behaviour of if method_config is None, default to no_op with CallConfig.CALL\n        method_config: MethodConfig | dict[OnCompleteActionName, CallConfig] | None = None,\n        name: str | None = None,\n        authorize: SubroutineFnWrapper | None = None,\n        bare: bool = False,\n        read_only: bool = False,\n        override: bool | None = False,\n    ) -> HandlerFunc | DecoratorFunc:\n        ...\n\n    # the below are just \"shortcuts\" to @external for simple/common use cases\n    def create(\n        self,\n        fn: HandlerFunc | None = None,\n        /,\n        *,\n        allow_call: bool = False,\n        name: str | None = None,\n        authorize: SubroutineFnWrapper | None = None,\n        bare: bool = False,\n        read_only: bool = False,\n        override: bool | None = False,\n    ) -> HandlerFunc | DecoratorFunc:\n        ...\n\n\n    def <delete|update|opt_in|clear_state|close_out|no_op>(\n        
self,\n        fn: HandlerFunc | None = None,\n        /,\n        *,\n        allow_call: bool = True,\n        allow_create: bool = False,\n        name: str | None = None,\n        authorize: SubroutineFnWrapper | None = None,\n        bare: bool = False,\n        read_only: bool = False,\n        override: bool | None = False,\n    ) -> HandlerFunc | DecoratorFunc:\n        ...\n```\n\nFor reference, the current state:\n\n```python\ndef internal(\n    return_type_or_handler: TealType | HandlerFunc,\n) -> HandlerFunc | DecoratorFunc:\n    ...\n\ndef external(\n    func: HandlerFunc | None = None,\n    /,\n    *,\n    name: str | None = None,\n    authorize: SubroutineFnWrapper | None = None,\n    method_config: MethodConfig | None = None,\n    read_only: bool = False,\n) -> HandlerFunc | DecoratorFunc:\n    ...\n\ndef bare_external(\n    no_op: CallConfig | None = None,\n    opt_in: CallConfig | None = None,\n    clear_state: CallConfig | None = None,\n    delete_application: CallConfig | None = None,\n    update_application: CallConfig | None = None,\n    close_out: CallConfig | None = None,\n) -> Callable[..., HandlerFunc]:\n    ...\n\n\ndef create(\n    fn: HandlerFunc | None = None,\n    /,\n    *,\n    authorize: SubroutineFnWrapper | None = None,\n    method_config: Optional[MethodConfig] | None = None,\n) -> HandlerFunc | DecoratorFunc:\n    ...\n\n\ndef <delete|update|opt_in|clear_state|close_out|no_op>(\n    fn: HandlerFunc | None = None, /, *, authorize: SubroutineFnWrapper | None = None\n) -> HandlerFunc | DecoratorFunc:\n    ...\n```\n\nChanges:\n\n- Remove `@internal`:\n  - if you don't pass a TealType parameter to it, i.e. intend to create an ABI internal routine, it actually just inlines the code currently due to a bug\n  - when passing in a TealType parameter to it, i.e. 
intent to create a normal subroutine, then in combination with (1) it will be unneeded since you can use `Subroutine` from PyTEAL (since the methods don't need to be artificially modified to remove `self` anymore)\n- Add `bare: bool` option:\n  - Currently, this is not able to be controlled by the user - for `<create|delete|update|opt_in|clear_state|close_out|no_op>` decorators, they will create a bare method if the function takes no parameters other than maybe a `self` parameter. This has some down-sides:\n    1. The user might want an ABI method rather than a bare method. In this case, currently they could use `@external(method_config=...)`, but for simple cases this is not as easy to read/type and is not intuitive to discover in the first place.\n    2. The user might have more than one method that takes no parameters that is able to be called with a given `OnCompletionAction`, currently this would produce a `BareOverwriteError` in Beaker. Again, the work-around exists of calling `@external` instead, but it would be nicer and more intuitive to add a `bare` option to control this explicitly.\n  - The above Python methods have `bare: bool = False`. An alternative option would be to make this `bare: bool | None = None`, where `None` would retain the current behaviour of inspecting the method signature to see if it takes parameters or not.\n- Remove `@bare_external`:\n  - Mostly unused, and doesn't provide the same options as the other decorators (e.g. 
`authorize`)\n  - Instead, we can replace the case of a single option being passed to it, with the equivalent named method: for example `@bare_external(opt_in=CallConfig.CALL)` becomes `@opt_in(bare=True)`\n  - For the multi-argument case: `@bare_external(no_op=CallConfig.CREATE, opt_in=CallConfig.CALL)` becomes `@external(method_config={\"no_op\": CallConfig.CREATE, \"opt_in\": CallConfig.CALL}, bare=True)`\n\n* Add optional `name` option to all decorators, not just `@external`.\n* Add `allow_call` and `allow_create` options to shortcut methods (except `@create` shortcut which should always allow `CallConfig.CREATE`).\n* Remove `method_config` from `@create` shortcut - the default behaviour will remain unchanged, but any usages with `method_config` specified would be equivalent to just using `@external` directly.\n* Add `override: bool | None = False` parameter.\n  - If `False` (the suggested default), an error will be raised if an ABI or Bare method would replace one already registered in the Application. For bare methods, this would be keyed on the `OnCompleteAction`, and for ABI methods should be based on the method signature (ie `ABIReturnSubroutine.method_signature()`). This is suggested as the default to prevent unexpected cases of overriding, especially when using blueprints/templates from the future Smart Contracts Library.\n  - If `True`, then an error will be raised if it _does not_ replace an already registered ABI or Bare method. This is similar to Java's `@Override` annotation, and can allow the user to be explicit and thus prevent unexpectedly _not_ replacing an existing method.\n  - If `None`, then methods will be overwritten if present, and no error will be raised if not already present. 
This option is here for maximum flexibility, but should perhaps be discouraged.\n\n### (5) Beaker state refactor\n\nRefactor of the `beaker.state` internal interfaces to simplify Beaker code base, make it easier to add new state wrappers, and to pave the way for future enhancements. This will have a side effect of allowing users to create their own state wrappers without having to modify `beaker` itself, although we recommend marking these interfaces as internal and subject to change - at least initially.\n"
  },
  {
    "path": "docs/architecture-decisions/2023-01-11_brew_install.md",
    "content": "# HomeBrew install strategy\n\n- **Status**: Approved\n- **Owner:** Daniel McGregor\n- **Deciders**: Daniel McGregor, Rob Moore, John Woods, Alessandro Cappellato\n- **Date created**: 2023-01-11\n- **Date decided:** 2023-01-11\n\n## Context\n\nHomeBrew (brew) is a MacOS package manager, and is commonly used on MacOS systems to install applications and tools like AlgoKit. Brew offers two main installation methods.\n\n * Formula - A source based install, this is typically recommended for open source and command line based applications. Formula can also be \"bottled\" to provide pre built packages for quick installs on supported MacOS platforms.\n * Cask - A binary based install, this is typically recommended for closed source or GUI based applications.\n\nAdditionally there are also two options for how the brew install scripts could be distributed.\n\n* Homebrew repository - This is the official repository for homebrew installs, and provides bottle support for all moderns MacOS platforms (MacOS 11+, Intel and ARM)\n* Algorand hosted repository - A homebrew repository managed by Algorand Foundation.\n\nCreating a HomeBrew Formula initially seemed like the best option for AlgoKit as it meets the [criteria](https://docs.brew.sh/Acceptable-Formulae) for a Formula. However there is a much higher maintenance cost with this approach as everything is built from source. We encountered an issue where one of our newly added python dependencies (pyclip) did not build from source [correctly](https://github.com/algorandfoundation/homebrew-tap/actions/runs/3884956190/jobs/6628201057#step:8:2871). 
\n\nThe alternative install method of a cask was then considered, and while AlgoKit does not meet the [criteria](https://docs.brew.sh/Acceptable-Casks) for a cask, it does remove the need for a source build on each MacOS platform and the additional maintenance overhead of the Formula approach.\n\n## Requirements\n\n- **Low maintenance**: The ongoing maintenance for supporting brew installs of AlgoKit should be low and not require manual effort for each release.\n- **Fast and easy install experience**: The install experience for end users of AlgoKit should be easy and not require multiple complicated steps, additionally it should install in seconds, not minutes.\n\n## Options\n\n### Option 1: Formula on Official Homebrew Repo\n\nThis would be the preferred option except for the two notable issues. Firstly there is a high risk of ongoing maintenance overhead due to the need to support source building all the dependencies. Ideally this would not be an issue, but we have already hit a problem with a dependency (pyclip) early on in AlgoKit's development. 
Secondly inclusion into the official repo is subject to Homebrew's criteria, which AlgoKit won't reach until it is more mature.\n\n**Pros**\n* Most discoverable option for end users `brew install algokit`.\n* Homebrew supports automatically bottling on all modern MacOS platforms (MacOS 11+ both Intel and ARM variants) meaning fast installs for users.\n\n**Cons**\n* Inclusion is subject to Homebrew's approval process, which AlgoKit won't meet for now at least.\n* Higher maintenance cost given the source build is more fragile and is likely to break and require investigation, plus build and install approach differs significantly from Chocolatey and pipx\n* Longer build time on release\n* Not possible to fully automate release, it relies on a Brew maintainer approving the pull request, so there's extra operational overhead to keep track of the release pull requests\n\n### Option 2: Formula on Algorand Repo\n\nThis option is similar to Option 1, but allows Algorand to self publish the installer without meeting Homebrew's formula criteria. However one issue is that platform support is more limited, GitHub provides action runners for intel variants for MacOS 11 + 12, but [MacOS 13](https://github.com/github/roadmap/issues/620) and [ARM](https://github.com/github/roadmap/issues/528) support are not yet available. Additional platforms could be supported by using a combination of self-hosted runners and/or third party solutions. 
This means pre-built bottles aren't easy to build for ARM or MacOS 13 and installation on those environments will take 5+ minutes.\n\n**Pros**\n* Algorand Foundation has control over this process and it can be fully automated\n* It's what we have already implemented and working today\n* Easier to move to the official Brew core repository once AlgoKit is stable and demonstrably popular (thus meeting the constraints Brew place)\n\n**Cons**\n* Supporting all modern MacOS platforms may require use of a 3rd party service and more effort, in the meantime the installation experience on ARM and MacOS 13 is slow (5+ min install)\n* Less discoverable install for end users `brew install algorandfoundation/algokit` (relies on them following documentation)\n* Higher maintenance cost given the source build is more fragile and is likely to break and require investigation, plus build and install approach differs significantly from Chocolatey and pipx\n* Longer build time on release\n\n### Option 3: Cask on Algorand Repo\n\nThis option uses a cask which does not have the maintenance overhead of a formula, and can be hosted in an Algorand Foundation repo to get around the fact AlgoKit does not meet the normal cask criteria.\n\n**Pros**\n* Algorand Foundation has control over this process and it can be fully automated\n* Lower maintenance cost as we do not need to support source builds of dependencies and it's consistent with how algokit cli is installed via Chocolatey and pipx\n* Fast install for all MacOS platforms\n* Fast build time on release\n\n**Cons**\n* Less discoverable install for end users `brew install algorandfoundation/algokit`\n* AlgoKit does not meet the stated criteria for a cask and as such it would be unlikely to be approved as a cask in the official Homebrew Repo if that was a desired future state\n* More effort to implement a new way of installing via brew\n\n### Option 4: Cask on Official Homebrew Repo\n\nThis is not a viable option as AlgoKit does not meet the 
criteria for an official cask.\n\n## Preferred option\n\nOption 1 because it would be the best end user experience.\n\n## Selected option\n\nOption 3 because Option 1 isn't possible right now and it's also a higher overhead to maintain. The install experience for end users is similar with option 3 (just with a bit more typing).\n"
  },
  {
    "path": "docs/architecture-decisions/2023-01-12_smart-contract-deployment.md",
    "content": "# Smart Contract Deployment\n\n- **Status**: Approved\n- **Owner:** Rob Moore\n- **Deciders**: Anne Kenyon (Algorand Inc.), Alessandro Cappellato (Algorand Foundation), Fabrice Benhamouda (Algorand Foundation)\n- **Date created**: 2023-01-12\n- **Date decided:** 2023-02-04\n- **Date updated**: 2023-02-04\n\n## Context\n\nAlgoKit will provide an end-to-end development and deployment experience that includes support for the end-to-end smart contract development lifecycle:\n\n1. Development\n   1. **Write** smart contracts\n   2. **Transpile** smart contracts with development-time parameters to TEAL Templates\n   3. **Verify** the TEAL Templates maintain [output stability](../articles/output_stability.md) and any other static code quality checks\n2. Deployment\n   1. **Substitute** deploy-time parameters into TEAL Templates to create final TEAL code\n   2. **Compile** the TEAL to create byte code using algod\n   3. **Deploy** the byte code to one or more Algorand networks (e.g. LocalNet, TestNet, MainNet) to create Deployed Application(s)\n3. Runtime\n   1. **Validate** the deployed app via automated testing of the smart contracts to provide confidence in their correctness\n   2. **Call** deployed smart contract with runtime parameters to utilise it\n\n![Smart Contract Development Lifecycle](./lifecycle.jpg)\n\nThe default Development experience that AlgoKit exposes will be via Beaker, however AlgoKit is modular and extensible so other tooling can also be used.\n\nThis decision record covers the different options and high level design for how AlgoKit aims to cover Deployment and Runtime.\n\n## Requirements\n\n- We support the different activities defined above under Deployment and Runtime: Substitute, Compile, Deploy, Validate and Call\n- We support the ability to provide dev-time (e.g. static values that are passed into instances of a contract that get output), deploy-time (e.g. network specific addresses or IDs, etc.) and run-time (e.g. 
call arguments) values to smart contracts\n- We support deploying smart contracts that have been output by any means (Beaker or otherwise) that creates TEAL templates (logic signature or approval & clear) and (for an app) an [ABI](https://github.com/algorandfoundation/ARCs/blob/main/ARCs/arc-0004.md) and an [app spec](https://github.com/algorandfoundation/ARCs/pull/150)\n- We support calling smart contracts with multiple languages / programming ecosystems (with AlgoKit providing Python and TypeScript implementations)\n- We support generating type-safe smart contract clients based on the smart contract definition\n- We support deploying smart contracts to AlgoKit LocalNet, TestNet and Mainnet\n- We support deploying manually and via continuous deployment pipeline\n\n## Principles\n\n- [AlgoKit Guiding Principles](../../docs/algokit.md#Guiding-Principles) - specifically:\n  - **Cohesive developer tool suite**\n  - **Seamless onramp**\n  - **Secure by default**\n  - **Modular components**\n- **Continuous Delivery** - support the ability for software developers to adopt a [Continuous Delivery](https://continuousdelivery.com/) approach to reduce risk, namely by supporting:\n  - [Deployment pipelines](https://continuousdelivery.com/implementing/patterns/#the-deployment-pipeline) that build once and deploy to similar environments (that bit is nicely facilitated by the blockchain!) 
consistently\n  - [Automated testing](https://continuousdelivery.com/implementing/architecture/)\n- **Facilitate correctness** - smart contract development is higher risk than many other types of development, standard practice involves deploying an immutable contract that must be right from the beginning; AlgoKit should help developers fall into the pit of success and produce higher quality output that is more likely to be correct while having flexibility to opt-in to other behaviours as needed\n\n## Decisions and design\n\nThe following design decisions need to be considered, and are discussed below:\n\n- TEAL Templates and deploy-time parameter substitution\n- Generated / Type-safe clients\n- Deployment and development decoupling\n- Upgradeable and deletable contracts\n- Mnemonic storage and retrieval\n- Contract identification\n- Automated vs manual deployments\n- Output stability testing\n- Validation testing\n\n### TEAL Templates and deploy-time parameter substitution\n\nThe above diagram includes a TEAL Templates step separate from the final TEAL that gets deployed. A fair question may be to ask if this extra step is really needed?\n\nThere are two key considerations to help answer this question:\n\n1. Should development and deployment be decoupled from each other (i.e. happen at a separate time)?\n   - If we couple development and deployment together then it necessitates that at deploy time you have the same programming environment running that's needed for the smart contract development. So, if you (for instance) were building a smart contract in Python using PyTEAL or Beaker, but deploying the smart contract using TypeScript that means you need a deployment environment that supports both Node.js _and_ Python. 
This makes it harder to follow the Modular components principle.\n   - If development and deployment are coupled together it rules out using Continuous Delivery since it forces you to build the deployment artifact at the same time as you are deploying it. This means you miss out on the confidence and risk benefit of knowing that when you are deploying to (say) MainNet you are deploying the same artifact that was successfully deployed and tested on (say) TestNet and AlgoKit LocalNet (let alone passes any other checks you decide to run as part of Continuous Integration like automated tests, static code analysis, etc.).\n   - If development and deployment are coupled together it means we aren't able to perform an [output stability](../articles/output_stability.md) test so we don't get notified if we make a change that results in a different smart contract (which may then affect things like hashes for smart contract auditing review comparison, unintended introduction of security vulnerabilities, etc.).\n   - Based on all of this, decoupling development and deployment is a very helpful thing for a smart contract and aligns with all of the above-stated principles more closely.\n2. 
Do we need to provide deploy-time parameters?\n   - When deploying a smart contract to a network (say MainNet), there are likely to be certain parameters that will be different from deploying to a different network (say TestNet), e.g.:\n     - If you are calling another smart contract, say an Oracle, then the application ID will change between networks.\n     - If you have standard prices you may decide to make them smaller on the TestNet contract given it's much harder to get access to a reasonable number of ALGOs on TestNet (without knowing the right people, or painfully clicking repeatedly on a CAPTCHA on one of the dispensers repeatedly to get 10 ALGOs at a time).\n     - If you are providing admin permissions for a statically defined account (hardcoded for security reasons) then it's likely you would use a different account address for MainNet vs TestNet so you don't expose a production mnemonic in test infrastructure.\n     - etc.\n   - Based on all of this, being able to provide deploy-time parameters is an important feature.\n\nBecause it makes sense to decouple development and deployment, but also important to be able to provide deploy-time parameters, that means it's necessary to support deploy-time parameter substitution and thus: TEAL that is output from the development stage should be considered a template that may have deploy-time substitutions performed on it.\n\nThankfully, this is supported as a first-class concept in PyTEAL via the [`Tmpl` feature](https://pyteal.readthedocs.io/en/stable/api.html?highlight=TMPL#pyteal.Tmpl) and could be similarly mimicked in any other TEAL transpilation language.\n\n### Generated / Type-safe clients\n\nSmart contract development results in an on-chain program that can be invoked from a \"client\". The development of the client itself has two broad options:\n\n1. 
Hand-code the client for a given smart contract using basic primitives (such as being able to issue a smart contract call of a certain type with an array of arguments)\n2. Generate the client based on the smart contract definition and then call methods on the client that correspond to methods in the smart contract\n\nThe second option, while more complex, results in an easier, faster, and safer developer experience:\n\n- You don't need to understand as much about the underlying blockchain calls since they will be constructed for you so the frontends (e.g. dApps) don't have to be constructed by smart contract / web3 experts\n- You can have type-safe / intellisensed client SDKs, in multiple programming languages with no extra effort beyond writing the smart contract - making the developer experience much easier and meeting devs where they are\n- Using a typed client means that smart contract calls (against the same version of the smart contract the client was generated from) will always be correct and should succeed so the client code is more likely to be correct and can be statically checked for correctness\n\nBecause of this, the desired experience for AlgoKit is to encourage and directly support a generated / type-safe client experience. 
The intention is to drive this from a combination of [ARC-0004](https://github.com/algorandfoundation/ARCs/blob/main/ARCs/arc-0004.md) and [ARC-0032](https://github.com/algorandfoundation/ARCs/pull/150).\n\nTo illustrate what the end result looks like consider the following Beaker smart contract:\n\n```python\nfrom beaker.application import Application\nfrom beaker.decorators import Authorize, delete, external\nfrom pyteal import Approve, Bytes, Concat, Expr, Global\nfrom pyteal.ast import abi\n\n\nclass HelloWorld(Application):\n    @external(read_only=True)\n    def hello(self, name: abi.String, *, output: abi.String) -> Expr:\n        return output.set(Concat(Bytes(\"Hello, \"), name.get()))\n\n    @delete(authorize=Authorize.only(Global.creator_address()))\n    def delete(self) -> Expr:\n        return Approve()\n```\n\nLet's say you wanted to deploy and interact with that smart contract using TypeScript; if you didn't have a client generated from that code then you would need to construct the method call:\n\n```typescript\n// Assume `appId`, `algod` and `senderAccount` are already in scope\nconst composer = new AtomicTransactionComposer();\ncomposer.addMethodCall({\n  appID: appId,\n  method: new ABIMethod({\n    name: \"hello\",\n    args: [{ name: \"name\", type: \"string\" }],\n    returns: { type: \"string\" },\n  }), // Not type-safe, no intellisense\n  sender: senderAccount.addr,\n  signer: makeBasicAccountTransactionSigner(senderAccount),\n  suggestedParams: await algod.getTransactionParams().do(),\n  methodArgs: [\"World!\"], // Not type-safe, no intellisense\n});\nconst result = await composer.execute(algod, 5);\nconsole.log(result.methodResults[0].returnValue); // Hello, World!\n```\n\nIf instead you generated a client you could have something like this, which gives you intellisense and is type-safe:\n\n```typescript\n// Assume `appId`, `algod` and `senderAccount` are already in scope\n// HelloWorldAppClient is generated from the smart contract 
definition (ABI json and app spec json)\nconst app = new HelloWorldAppClient(appId, algod, senderAccount);\nconst result = app.hello({ name: \"World!\" }); // Type-safe and intellisense\nconsole.log(result); // Hello, World!\n```\n\nTo be fair, you could have a middle-ground and load the ABI json to populate the `method` parameter of the `addMethodCall` call, but the `methodArgs` are still problematic and there is still no intellisense.\n\nThe suggested implementation for AlgoKit v1 is to provide a basic type-safe TypeScript client (leveraging either the MakerX TypeScript generator or [beaker-ts](https://github.com/algorand-devrel/beaker-ts)) and leave Python with the semi-typed implementation that Beaker currently exposes (with implementing a fully typed Python client as a future implementation effort).\n\n### Deployment and development decoupling\n\nAs discussed above, decoupling deployment and development of smart contracts is a useful technique.\n\nOne of the advantages is it allows you to use separate programming languages for the writing of a smart contract and the deployment and automated testing of it. Separate, but related, it also makes it easier to generate type-safe clients (per the previous point) because there is an intermediate output from the development stage that can then be used to generate a client, in a separate (or the same) programming language, before using that client to then deploy and interact with the smart contract (for an end user experience, a programmatic interaction or an automated test).\n\nThis helps us follow the \"Meet devs where they are\" principle and provide optionality for developers to select the programming environment they are most comfortable with for writing clients and automated tests.\n\n### Upgradeable and deletable contracts\n\nSmart contracts are a powerful capability that can be used for anything from locking billions of dollars of value to implementing the mechanics of a game's state storage. 
This means there are different risk profiles, rigour and functionality evolution characteristics in different circumstances.\n\nThese different risk profiles have an impact on the functionality that is exposed within a smart contract. Two key examples of this are:\n\n- **Upgradeable smart contracts** - Whether or not a smart contract can be updated inline (and keep existing state and app ID / address characteristics) or they are immutable\n- **Deletable smart contracts** - Whether or not a smart contract can be deleted or is permanent\n\nImmutability and permanence are useful architectural properties in certain circumstances (similarly mutability and impermanence in others). For example:\n\n- If you have a smart contract that locks billions of dollars of value, then allowing that smart contract to be upgradeable allows for the manager of the smart contract to make a change that lets them steal the value.\n- If you have a smart contract that provides an Oracle service (say for betting odds), then allowing that smart contract to be deletable could break many other applications that are hard-coded to call that Oracle contract's app ID.\n- If you have a smart contract that runs the mechanics of a game engine the players of said game may have a reasonable expectation that the game engine is evolved and enhanced over time, but they don't want to lose their state (so upgrading the smart contract is useful).\n- If you have a smart contract that handles a one-off trade of value across a cross-chain bridge then it makes sense to delete it (only once trade has been concluded) to remove noise, potential confusion and operational overhead for the operators of said bridge.\n\nAll 4 scenarios above provide different situations where immutability and permanence are desired or undesired. 
If you choose incorrectly for your circumstance, particularly for high-risk scenarios, the consequences could be major.\n\nIf a contract is immutable, it does limit the ability to evolve the functionality over time based on user feedback and also discourages use of best practice software delivery techniques that encourage evolution and rapid deployment like Continuous Delivery.\n\nAnother consideration in favour of immutability is smart contract auditing. If a smart contract is audited for security, but then the smart contract is upgradeable or is changed between when the audit occurred and when the contract was deployed to MainNet then the smart contract audit is somewhat invalidated and certainly any hashes that are provided of the smart contract code in an audit report will no longer match.\n\nThere are techniques that can be used to allow for immutable smart contracts, but let them be evolved somewhat:\n\n- You could release a new version of a smart contract and include an ability for that smart contract to communicate to/from the existing smart contract to migrate state.\n- You could ensure that clients can dynamically find the latest smart contract so the application ID / address doesn't need to be hardcoded and the smart contract remains addressable.\n  - This could be done via some sort of on-chain or off-chain lookup, and/or by encoding information into the creation transaction note of the smart contract app.\n- You could limit MainNet releases to major upgrades that happen infrequently and let users opt-in to whether or not they use the new version or not, and having a set of calls the user can sign to migrate their state/value from one contract to the next.\n\nLastly, it's worth noting that having fast development feedback loops during development and testing of smart contracts is likely a very useful feature to improve the development experience and speed. 
For this reason, allowing contracts to be upgraded or at the very least deleted (and then recreated) is likely very useful, but potentially when deployed to MainNet a switch could be made to disallow upgrading / deleting (as relevant).\n\nThe goal of AlgoKit is to create a development experience that is productive and easy, but also one that is secure by default and helps developers fall into the pit of success. For that reason, and given the consequences of getting this wrong the suggested approach AlgoKit takes is:\n\n- All provided smart contract templates are by default immutable\n- An immutability automated test is included by default to ensure that smart contracts can't be upgraded by the contract creator (this would have to be deleted by a developer, who is then opting in to the consequences of that)\n- All provided smart contract templates are by default permanent when deployed to MainNet, but deletable elsewhere to facilitate an iterative development experience\n- Client code will include mechanisms to dynamically find deployed applications in LocalNet and TestNet environments to support delete/recreate flows and improve the developer experience\n- MainNet deployments will immediately (i.e. before any usage occurs) check that smart contracts are not upgradeable by the creator account by default (with an explicit opt-out option available for smart contracts that are meant to be upgradeable, which in turn will issue a warning to the developer to explain the implications)\n- MainNet deployments will immediately (i.e. before any usage occurs) check that smart contracts are not deletable by the creator account by default (with an explicit opt-out option available for smart contracts that are meant to be deletable, which in turn will issue a warning to the developer to explain the implications)\n\n### Mnemonic storage and retrieval\n\nWhen deploying and interacting with a smart contract, you need to have access to the private key of an account. 
This is a secret and must be handled with care, as exposing a private key can be disastrous, and while [rekeying](https://developer.algorand.org/docs/get-details/accounts/rekey/) is possible if it's not done fast enough you can still lose assets, be victim to malicious calls and experience a painful user experience going forward (wallet support for rekeyed accounts is limited).\n\nAnother consideration is the network being deployed to / called. If you are interacting with the LocalNet network then mnemonics are all but meaningless since you can simply reset the LocalNet and regenerate new accounts on the fly (and fund them with essentially unlimited ALGOs). If you are interacting with TestNet then mnemonics may hold TestNet ALGOs, which while difficult to get in large numbers, are more an inconvenience than a serious commercial problem to lose.\n\nFinally, when interacting with LocalNet to create a smooth developer experience it's ideal to automatically generate and fund any accounts that are being used so the developer doesn't have to manually do this every time the LocalNet is reset. 
Even better, it's ideal if this can be done in a way that idempotently gets a consistent private key for a given \"named account\" so that subsequent calls use the same account (mimicking what happens in TestNet or MainNet when using a particular private key for a given \"named account\").\n\nGiven all of this, the suggested approach that AlgoKit takes is:\n\n- LocalNet accounts are by default automatically and idempotently generated against a named account by using a named wallet via [Kmd](https://developer.algorand.org/docs/clis/kmd/) and are automatically funded using the LocalNet faucet account (the private key for which is automatically retrieved using Kmd).\n- Where they are needed mnemonics will be provided using environment variables to follow [twelve factor app conventions](https://12factor.net/config), this is an industry standard approach to handling secrets and is easy to support cross-platform and cross-programming language as well as using encrypted secrets on CI/CD pipelines.\n- An option will be provided for deployments that allows for deployments using ephemeral accounts that then get rekeyed to a separate, known [break-glass](https://www.beyondtrust.com/blog/entry/provide-security-privileged-accounts-with-break-glass-process) account (the private key of which is not available to the deploying process) to allow for developers to deploy using a break-glass setup.\n- A `DISPENSER_MNEMONIC` environment variable will be expected when deploying to non-LocalNet environments, and will be encouraged to be a separate account just used for that purpose to limit [blast radius](https://www.lepide.com/blog/what-is-a-blast-radius-in-data-security-terms/), so that funds needed for deployments or calls can be automatically provided by convention, including for ephemeral accounts.\n- The Algokit CLI will allow for mnemonics to be provided to it for a given project, which will get stored in an encrypted form within the .gitignore'd `.env` file with 
a project-specific random encryption key stored on that machine. This prevents cursory exploitation and accidental exposure through screen-sharing, but won't protect users with an exploited machine from having them exposed. For this reason, developers will be discouraged from storing MainNet mnemonics in that way and will need to acknowledge that risk.\n- AlgoKit will provide example CI/CD templates that illustrate how to construct a deployment pipeline that includes MainNet deployments using secret storage for mnemonics so developers won't need to handle MainNet mnemonics on their local machine.\n\n### Contract identification\n\nBeing able to identify an existing deployed instance of a given smart contract is very useful:\n\n- It avoids the need to hardcode application IDs\n- It makes things easier to automate, including automated deployments and testing of smart contracts and the apps that call them\n- It allows the deployer of a smart contract to detect if that smart contract is already deployed and if so handle it appropriately (e.g. do nothing vs upgrade vs delete and create vs leave alone and create) depending on whether that smart contract is immutable and/or permanent and the network being deployed to (e.g. LocalNet vs TestNet vs MainNet)\n\nAs soon as a contract is not immutable, or is immutable and not permanent then the application ID of the smart contract for a given network will change over time. And, if a smart contract is immutable and permanent then net new versions may still be deployed, or at the very least the contract will change across networks (e.g. LocalNet vs TestNet vs MainNet). Because of this it's important to support dynamic resolution of application IDs.\n\nIt's important to consider whether the smart contract needs to be resolved on-chain or off-chain.\n\nResolving on-chain is harder to achieve dynamically, but there are some patterns that can be used, e.g.:\n\n- Storing the application ID in (e.g. 
Global) state and providing a creator-only ABI method that can be called as part of deployment of the dependent contract to update the stored application ID.\n- [Lookup/registry contract](https://research.csiro.au/blockchainpatterns/general-patterns/contract-structural-patterns/contract-registry/) that returns the ID of a named contract and that lookup contract allows said ID to be updated.\n- [Proxy contract](https://blog.openzeppelin.com/proxy-patterns/) that mirrors the interface of the parent contract but delegates calls to an underlying contract whose ID can be updated.\n- [Other patterns](https://ethereum.org/en/developers/docs/smart-contracts/upgrading/#what-is-a-smart-contract-upgrade).\n\nResolving off-chain can be done through a variety of ways, e.g.:\n\n- Create a contract discovery service API that allows new contracts to be registered by API call after being deployed.\n- If the contract creator account is always known (i.e. ephemeral creator accounts aren't being used) then it's possible to identify a contract by name by encoding a payload into the application creation transaction note and then using the indexer to find the application creation transactions for a given account and working backwards to find the relevant application ID.\n- If the contract creator account is always known (i.e. ephemeral creator accounts aren't being used) then it's possible to identify a contract by name by encoding the name into one of the [application params](https://developer.algorand.org/docs/rest-apis/algod/v2/#applicationparams) (e.g. 
approval program or using a global state variable) and looking up the `created-apps` property when [retrieving an account via algod](https://developer.algorand.org/docs/rest-apis/algod/v2/#account).\n- Create a contract discovery service API that scans the blockchain transactions ([example](https://developer.algorand.org/articles/developer-preview-of-conduit-a-new-way-to-access-algorand-chain-data/)) to automatically maintain a record of smart contract IDs.\n- Use CI/CD variables ([example](https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter)) to propagate the deployed app ID to the configuration on the deployment of the client (assuming the same pipeline is used for deployment of smart contract and client(s)).\n- Manually set the application ID via the CI/CD pipeline of the clients with environment-specific values (that are passed in via environment variables) and change them to reflect any changes to the smart contracts (note: this isn't sustainable for frequently changing contracts, but is a simple method for infrequently changing contracts).\n\nIn order to provide the simplest implementation possible to handle this complexity, AlgoKit v1 will:\n\n- Add payloads to application creation transactions to allow them to be resolved by name for a known creator account using the indexer.\n- Allow application IDs to be resolved by name using environment variables for situations where a developer is using ephemeral creator accounts.\n- On-chain resolution will be left for developers to implement (likely by hardcoding or Global-state-based configurable application IDs).\n\nMore sophisticated options could potentially be implemented in the future.\n\n### Automated vs manual deployments\n\nIn order to support Continuous Delivery for smart contracts there is a need to support automated deployments of smart contracts via deployment pipeline.\n\nThis presents a number of challenges to solve for though:\n\n- The 
deployment pipeline needs to determine if the smart contract already exists or not in the network being deployed to\n- Depending on whether the contract is immutable and/or permanent and if it already exists or not in the network being deployed to, the pipeline needs to handle upgrade, deletion and/or creation of the contract (and this behaviour may need to be switched depending on the network being deployed to)\n- In order to protect integrity of MainNet contracts the deployment process should probably have some safeguards for MainNet that by default prevent destructive operations and require some kind of human opt-in to perform those operations\n- Exposing private key mnemonics for MainNet necessitates they are \"hot wallets\" which may be undesirable for privileged accounts responsible for high value smart contracts (e.g. smart contracts locking $m's or $b's), alternatively low privilege scenarios may benefit from (encrypted secret) mnemonic storage to improve developer experience and operational overhead\n- Supporting cold storage wallets for high privilege accounts, without resorting to manual deployments (which present their own risks) is tricky\n- There are many deployment pipeline technologies available on the market and providing support for many of them is an impractical amount of effort.\n- Should manual deployments be supported as well as automated deployments to make it easier for developers to perform ad hoc deployments (e.g. 
when quickly testing concepts, or doing a one-off deployment) and improve developer flexibility\n\nThe proposed AlgoKit implementation for v1 that provides a balance of flexibility vs implementation effort, while aligning to the principles is as follows:\n\n- A GitHub Actions implementation is provided in the default template, since GitHub is a highly capable and prevalent option and is free for Open Source and basic private usage (the community can contribute other CI/CD implementations if desired)\n- The \"Contract identification\" v1 implementation suggested above is followed to determine if a contract already exists for the network being deployed to\n- The \"Mnemonic storage and retrieval\" v1 implementation suggested above is followed to facilitate a secure, but flexible implementation for how to handle private keys for ad hoc manual deployments and CI/CD pipelines\n- Cold wallets won't be supported for now, but is recommended as a future exploration, in the meantime that scenario can be implemented by using the rekey technique mentioned in \"Mnemonic storage and retrieval\"\n\n### Output stability testing\n\nEnsuring [output stability](../articles/output_stability.md) is useful for smart contract development. It helps ensure that you can refactor code without making an inadvertent change to the smart contract, ensure that a smart contract output isn't changed from any output that is audited, and ensure that there is a clear mechanism to manually review the smart contract output before it's (re-)deployed.\n\nThere are three broad approaches that could be taken by AlgoKit to help facilitate output stability testing:\n\n1. Include documentation that recommends this kind of testing and provides examples for how to implement it\n2. 
Include a Git-based approach in default AlgoKit template(s), that requires you to stage changes to the existing smart contract output (per decoupling development and deployment) using normal Git workflows otherwise the test will fail (meaning it will fail locally and on continuous integration pipeline)\n3. Include an [approval-testing](https://approvaltests.com/) based approach in default AlgoKit template(s), that results in an approved TEAL file that is committed\n\nA documentation-only approach strays away from the \"Facilitate correctness\" and \"Secure by default\" principles where we want to help developers fall into the pit of success by default.\n\nApproval testing is a handy technique that is established in the industry, but in this case results in the TEAL output being committed twice, which is a confusing duplication. Furthermore, by ensuring the automated test is against the output that will get deployed it ensures there is a coherence between the TEAL being tested and the TEAL being deployed.\n\nWith this in mind, the proposal is for AlgoKit to include a Git-based output stability test by default, but per the \"Modular components\" principle there is a template option to exclude those tests.\n\n### Validation testing\n\nIn order to provide confidence in the correctness of a smart contract it's important to exercise testing to validate the smart contract operates as expected.\n\nThere are a few possible approaches that could be taken by AlgoKit to help facilitate this:\n\n1. **Documentation** - Include documentation that recommends this kind of testing and provides examples for how to implement it\n2. **Manual testing** - Encourage a manual testing approach using (for example) the App Lab in Lora, by providing an AlgoKit CLI command that sends the user there along with the ABI definition and contract ID resulting in a manual testing experience for the deployed contract with low friction\n3. 
**Automated integration tests** - Facilitate automated testing by issuing real transactions against a LocalNet and/or TestNet network\n4. **Automated dry run tests** - Facilitate automated testing using the [Dry Run endpoint](https://developer.algorand.org/docs/rest-apis/algod/v2/#post-v2tealdryrun) to simulate what would happen when executing the contract under certain scenarios (e.g. [Graviton](https://github.com/algorand/graviton/blob/main/graviton/README.md))\n5. **TEAL emulator** - Facilitate automated testing against a TEAL emulator (e.g. [Algo Builder Runtime](https://algobuilder.dev/api/runtime/index.html))\n\n#### Documentation\n\n**Pros**\n\n- Least effort to implement\n\n**Cons**\n\n- Doesn't follow the principles of \"Seamless onramp\", \"Continuous Delivery\" or \"Facilitate correctness\"\n- Easy for users to miss\n\n#### Manual testing\n\n**Pros**\n\n- Low effort to implement\n- Facilitates a great manual testing experience for exploratory testing or situations where you want/need to manually test a contract in addition to automated testing\n\n**Cons**\n\n- Doesn't follow the principle of \"Continuous Delivery\" or \"Facilitate correctness\"\n- Doesn't provide regression coverage as the smart contract evolves during development\n\n#### Automated integration tests\n\n**Pros**\n\n- Prior art can be leveraged from MakerX (TypeScript) and [algopytest](https://github.com/DamianB-BitFlipper/algopytest) (Python), both of which provide abstractions to make it easier to produce tests that avoid intermittent failures\n- High degree of confidence - exercising the smart contract in a similar way to real users means we have a high degree of confidence in the validation\n- Good regression coverage\n\n**Cons**\n\n- This type of testing is naturally slower making it impractical to provide combinatorial coverage (relevant: [first 5 minutes of the Microtesting presentation Rob and Matt delivered in 2016](https://www.youtube.com/watch?v=pls1Vk_bw_Y))\n- This type of 
testing can be hard to make reliable (it's easy to get intermittent timing errors if you aren't careful)\n\n#### Automated dry run tests\n\n**Pros**\n\n- Faster test runs allow for testing a larger proportion of combinatorial coverage and adopting approaches like property-based testing to provide higher degree of confidence\n- Allows for validation of additional properties like opcode usage etc.\n\n**Cons**\n\n- Verbose / unfamiliar test setup to specify the desired state of the blockchain before the dry run (which won't be shareable code with any clients like dApps since it will be test specific)\n- Highly likely there will be a coherence gap between test setup and real-life calls\n- High effort to implement, particularly cross-platform since there isn't existing TypeScript-based prior art\n- Dry run endpoint is being replaced with a new simulate endpoint, but there isn't much available about what that endpoint will look like yet, so there is a need to decide between implementing against an endpoint that is being deprecated or waiting for its replacement\n\n#### TEAL emulator\n\n**Pros**\n\n- Likely to be the best speed properties allowing for full combinatorial coverage\n\n**Cons**\n\n- Requires implementation and/or maintenance of a TEAL emulator, which would duplicate effort being put in by Algorand Inc. 
on the simulate endpoint\n- Algo Builder implementation, while being an existing solution and OpenSource requires TypeScript (so is harder to use for Python testing) and also requires a highly bespoke syntax to interact with it that won't allow for easy interoperability with AlgoKit or other things, thus not conforming to the \"Modular components\" principle\n\n#### Selected option\n\nBased on all of this the suggested option for AlgoKit v1 is **Automated integration tests** since it conforms to the principles well, has prior art across TypeScript and Python that can be utilised and provides developers with a lot of confidence.\n\nPost v1, it's recommended that Lora integration for exploratory testing and Graviton (or similar) support should be explored to provide a range of options to empower developers with a full suite of techniques they can use.\n"
  },
  {
    "path": "docs/architecture-decisions/2023-06-06_frontend-templates.md",
    "content": "# Frontend Templates\n\n- **Status**: Approved\n- **Owner:** Altynbek Orumbayev\n- **Deciders**: Rob Moore, Daniel McGregor, Adam Chidlow\n- **Date created**: 2023-06-06\n- **Date decided:** 2023-06-09\n- **Date updated**: 2023-06-08\n\n## Context\n\nAlgoKit v2 aims provide an end-to-end development and deployment experience that includes support for the end-to-end smart contract and dApp development lifecycle. With the release of the typed clients feature - developers are now able to reduce the time it takes to fully integrate the interactions between the contract and the frontend components powering the end-to-end dapp user experience. Hence, as a logical continuation, the following Architecture Decision Record aims to expand on the current capabilities of AlgoKit to support the `frontend` templates, an [AlgoKit Principles](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/algokit.md#guiding-principles) compliant approach towards simplifying integrations of smart contracts with the dApp frontend components.\n\n## Requirements\n\n### Independent Frontend Templates\n\n1. Create official frontend template that complies with AlgoKit's principles of modularity, maintainability, and flexibility while also serving as a reference for template builders.\n\n2. Expand algokit functionality to provide a clear process for developers to link frontend templates with the typed clients generated by backend templates.\n\n3. Design frontend templates such that backend and frontend have no dependencies on each other, ensuring high modularity.\n\n### End-to-End Starter Repositories\n\n1. Establish a starter repository that allows developers to bootstrap end-to-end dApp projects within a single command, while also serving as an example for template builders on how to efficiently couple their backend and frontend templates.\n2. 
Implement a bundling process for backend and frontend templates in the starter repositories that reduces code duplication and maintenance overhead.\n3. Develop deployment pipelines for frontend components inside end-to-end starter repository that supports TestNet and MainNet deployments to hosting providers of choice (Netlify, Vercel, etc. to be decided).\n4. Ensure that the end-to-end starter repository supports both manual deployment and deployment via a continuous deployment pipeline.\n\n## Principles\n\n- **Modularity**: The dApp templating feature should follow guiding AlgoKit principles and expand on approaches already utilized in existing smart contract templates feature. This implies that giving developers flexibility to mix and match different `smart contract` templates with `frontend` templates should serve as key consideration.\n- **Maintainability**: The dApp templating feature should be easy to maintain and extend. This implies that the feature should be implemented in a way that allows for easy addition of new templates and/or modification of existing ones as the complexity and variety of templates scale.\n- **Seamless onramp**: The dApp templating feature should provide a seamless onramp for developers to get started with dApp development. This implies that the feature should provide a simple and intuitive way to get started with dApp development and deployment. Providing developers a choice on whether they want more flexibility or rely on default recommended practices.\n\nAll of the aforementioned requirements should be met in a way that is consistent with the guiding principles of AlgoKit or attempt to find a balanced trade-off between the principles that satisfies the requirements. 
Refer to [AlgoKit Guiding Principles](../../docs/algokit.md#Guiding-Principles) for detailed reference on the principles.\n\n## Explored options\n\nEnhancing AlgoKit's templating capabilities involves a notable shift from managing individual templates to co-locating multiple templates or projects together. This is a marked increase in complexity that requires careful consideration of the varying trade-offs between different approaches. The subsequent sections explore these options and their associated trade-offs, providing a concise overview of this new challenge and our proposed solutions.\n\n### Option 1: Monolithic template\n\nThis option suggests that the frontend and backend templates should be bundled together in a single repository. This approach aims to simplify the maintenance of the templates by reducing the number of repositories that need to be maintained. Both official and community templates should be built on top of this approach where frontend is always tailored for the specific needs of the smart contracts that are being used.\n\nDrawbacks:\n\n- **Lack of flexibility**: The monorepo approach does not allow developers to choose their preferred frontend technology stack. This is because the frontend is tightly coupled with the backend and the two cannot be separated. This is a major drawback as it limits the flexibility of the templates and does not allow developers to choose their preferred frontend technology stack.\n- **Increased maintenance overhead**: Despite monorepos generally being easier to maintain and collaborate on, this approach has risks of increasing the maintenance overhead as it forces code duplication when creating new template variations.\n\n### Option 2: Separate individual frontend templates and official full-stack starter templates as an end-to-end reference\n\nThis option suggests that the frontend and backend templates should be maintained in separate repositories. 
This approach aims to provide developers with the flexibility to choose their preferred frontend technology stack. It also allows the templates to be maintained independently, thereby reducing the maintenance overhead.\n\nDrawbacks:\n\n- **Compatibility overhead**: The separate repositories approach increases the maintenance overhead as it requires the templates to be updated independently. This can lead to issues with compatibility between the templates and the typed clients.\n- **Lack of guidance**: The separate repositories approach does not provide developers with a clear process for linking the frontend templates with the typed clients generated by the backend templates. This can lead to confusion and issues with the integration of the templates.\n\nThe outlined drawbacks can be mitigated by providing a clear process or reference for template builders on how to link various combinations of frontend and backend templates. The compatibility overhead can be mitigated by ensuring that backend and frontend modules are self sufficient and completely decoupled from each other. This will ensure that the templates can be updated independently. The implementation proposal below is a detailed exploration of this approach and ways to mitigate the drawbacks.\n\n### Implementation proposal for Option 2\n\nThe proposal consists of 2 main parts. On a high level, the ideas revolve around giving developers a choice on how they want to couple AlgoKit backend and frontend templates if they like to use custom templates. 
The other is focused on providing an official full-stack template that bundles both backend and frontend templates together, while also serving as an example for template builders on how to efficiently couple any combination of templates.\n\nThe addendums to the proposal also explore orthogonal ideas that can further improve the CLI tooling itself by providing a way for any existing non-web3 frontend project to be converted into a full-stack dApp with minimal efforts. Lastly, it expands on improving incentives for developers to build and maintain their own templates.\n\n---\n\n### Part 1. Independent frontend templates\n\n> TLDR: Independent frontend templates will be created to provide developers with a highly customizable dApp starter project, built on AlgoKit's principles of modularity, maintainability, and flexibility. The templates will also serve as a reference for template builders.\n\nThe following aims to provide a seamless onramp for developers to get started with highly customizable dApp starter projects. The idea is to create a set of separate official frontend template repositories to serve as:\na) A reference for template builders on how to create standalone frontend templates that can be then further coupled with any backend template.\nb) Expand on AlgoKit principles of modularity, maintainability and flexibility by giving developers a choice of preferred technological stack.\n\nThe official standalone frontend templates can be built by reusing already established best practices and templates from official backend repositories and by continuing reliance on `copier` for template automation. 
Another important consideration to keep in mind is that with the introduction of frontend templates we need to establish a clear separation of concerns between the backend and frontend templates to ensure modularity.\n\n![Diagram 1](assets/2023-06-06_frontend-templates/modular_templates.jpg)\n\nAs demonstrated on the diagram above, the only glue connecting the backend and frontend is the generated typed client. Neither backend nor frontend templates should be concerned with the other but instead provide modular interfaces that clearly indicate to developers on how to integrate the two. From a perspective of a backend template, the typed client shall be seen as a static asset that can be reused by any frontend template. Frontend templates on the other hand are mostly standard web projects with an additional layer of utilities that optionally allow them to be integrated with typed clients produced by backend templates.\n\n#### Higher level overview\n\nThe main scenario to support for this part is to allow developers to use official starter templates to bootstrap end to end dApp projects.\n\n![Diagram 2](assets/2023-06-06_frontend-templates/scenario_1.jpg)\n\nAs demonstrated above the dev experience will consist of executing an `algokit init` command for the preferred backends and frontends.\n\nIt gives users a choice and responsibility to then decide how to integrate the two components depending on their project needs. To improve this however, we should **additionally introduce a new utility** that will serve as a tool to automate linking with the typed client that backend templates will be generating. Implementation specific details can be discussed separately as it goes out of scope of this Architecture Decision Record.\n\n---\n\n### Part 2. End-to-end starter repositories\n\n> TLDR: End-to-end starter repositories are designed to offer developers an official starter template for bootstrapping dApp projects. 
This is achieved by efficiently bundling backend and frontend templates to facilitate easy maintenance and smooth onboarding.\n\n#### Higher level overview\n\nThe main scenario to support for this part is to allow developers to use official starter templates to bootstrap end to end dApp projects.\n\n![Diagram 2](assets/2023-06-06_frontend-templates/scenario_2.jpg)\n\nAs demonstrated above the user experience will consist of a single execution of `algokit init` command pointed at official full-stack template repository. The full stack templates are responsible for bundling both backend and frontend templates together and providing a seamless onramp for developers to get started with dApp development. The way repositories are bundled should be easy to maintain and should not duplicate individual backend and frontend repositories to avoid redundant maintenance, instead it should expand on metatemplating capabilities of `copier` to allow for efficient reuse of existing standalone backend/frontend templates.\n\n### Addendum 1. Converting _ANY_ frontend projects into dApps.\n\n> TLDR: The approach aims to enhance the algokit-cli codebase's adaptability, enabling easy transformation of existing frontend projects into web3 dApps.\n\nThis orthogonal approach proposes to improve capabilities of the algokit-cli codebase by making it adaptable to various frontend stacks to allow anyone to easily convert their existing frontend projects into web3 dApps.\n\nThe implementation specific details will consist of deriving a set of bare minimum requirements for such feature to scan and understand the structure of frontend where algokit is being embedded and performing necessary modifications to project's files. A detailed discussion can be held in a scope separate from this Architecture Decision Record.\n\n> This approach can be explored and maintained without overlapping with main proposal on frontend templates.\n\n### Addendum 2. 
Website for choosing preferred frontend and backend repositories.\n\n> TLDR: The approach proposes a website to enhance the discoverability of official and community-based algokit templates, thereby incentivizing template builders to create and maintain their own templates.\n\nThis orthogonal approach proposes to improve discoverability of official and community based algokit templates by providing a simple static website. The website can consist of minimalistic UI components for picking preferred backend, frontend and then a `Generate` button that will output copyable algokit CLI commands to spin up a project with the selected templates.\n\nAs a specific example, the website can be hosted on [AwesomeAlgo](https://awesomealgo.com) website, thus ensuring that this is an open-source community maintained entry-point for discovering and using algokit templates. Removing the need and maintenance overhead on our teams to maintain it as official resource.\n\nLastly, community template builders will get a platform to increase discoverability of their templates and further incentivize them to build and maintain them. While developers using the templates can support creators of templates by donating to their projects (a simple tipping mechanism for Algo and ASAs can be embedded into the website) or by contributing to the templates themselves.\n\n> This approach can be explored and maintained without overlapping with main proposal on frontend templates.\n\n---\n\n## Final decision\n\nAfter several review rounds team reached a conclusion to expand on [Option 2](#option-2-separate-individual-frontend-templates-and-official-full-stack-starter-templates-as-an-end-to-end-reference) and respective [implementation proposal](#implementation-proposal-for-option-2) given better alignment with AlgoKit design principles such as flexibility and modularity. 
Hence, the following proposal is further exploration of ideas based on this approach and propose solution to mitigate the potential drawbacks and risks outlined.\n\n## Open questions\n\n- Python typed clients are not going to be too relevant for scenarios where they need to be integrated with frontends given that in majority of cases a developer would prefer a TS typed client. Hence, how do we ensure that user gets a clear information and indication on when to use Python typed clients vs TS typed clients?\n  - `Answer: This is a fair comment, but to my mind it's a clear dilineation in that it's dependant on what language you are using. While there are Python website libraries, we aren't using them we will use JavaScript (TypeScript).`\n- Would we want to introduce a notion of non smart contract based backend templates that can be used to plug in the python typed clients and spin up servers that can be used to build APIs that interact with the smart contracts?\n  - `Answer: This is definitely out of scope of dApp stuff.`\n\n## Next steps\n\nAfter the final decision is made, the action items necessary to implement the described proposal can be outlined as follows:\n\n1. **Design and Development of Independent Frontend Template**: This involves selecting appropriate technology stacks and building out the templates. The template should be able to work with the generated typed client from the backend and will be used as a dependency for end-to-end starter repository.\n\n2. **Development of End-to-End Starter Repository**: This involves building comprehensive template that bundles both frontend and backend components. The template should be designed to be easy to maintain and should leverage the metatemplating capabilities of `copier` for efficient reuse of existing standalone templates.\n\n3. **Integration and Testing**: Ensure proper integration between frontend and backend templates. 
Also, extensive testing should be done to ensure smooth functioning and a seamless onramp experience for the developers.\n\n4. **Documentation**: Write comprehensive documentation covering the use of the new templates, how to integrate them, and the utility for linking them with the typed client. This documentation should also include how-to guides and sample applications to help developers get started.\n\n5. **Addendum 1 - Converting ANY Frontend Projects into dApps**: Start a separate discussion and potentially a project to research the feasibility of this idea. If feasible, design and implement a method that allows developers to convert their existing non-web3 frontend projects into dApps using AlgoKit.\n\n6. **Addendum 2 - Development of Template Selection Website**: Plan and execute the development of a minimalist UI that allows developers to easily discover and select their preferred templates. This should also allow them to easily generate the necessary AlgoKit CLI commands for their project setup.\n\n7. **Community Engagement**: Engage the developer community to drive contributions to the template repositories. This includes encouraging template builders to contribute and supporting developers using the templates with issues and suggestions.\n\n8. **Continuous Review and Maintenance**: Regularly review the templates to ensure they are up to date with changes in technology and AlgoKit principles. Continuous maintenance should also be carried out to ensure the templates remain functional and relevant.\n\nRegarding the open questions, these should be discussed in detail to clarify how to handle Python typed clients and the potential introduction of non-smart contract based backend templates. These discussions may lead to additional actions as required.\n"
  },
  {
    "path": "docs/architecture-decisions/2023-07-19_advanced_generate_command.md",
    "content": "# Advanced `algokit generate` command\n\n- **Status**: Approved\n- **Owner:** Altynbek Orumbayev, Inaie Ignacio\n- **Deciders**: Rob Moore, Daniel McGregor, Alessandro Cappellato\n- **Date created**: 2023-07-19\n- **Date decided:** 2023-07-24\n- **Date updated**: 2023-07-24\n\n## Context\n\nThe [Frontend Templates ADR](./2023-06-06_frontend-templates.md) introduced and expanded on AlgoKit's principles of Modularity and Maintainability by introducing a new set of official templates for quickly bootstrapping standalone `react` and `fullstack` projects showcasing best practices and patterns for building frontend and fullstack applications with Algorand. As a logical next step, we want to enable developers to extend existing projects instantiated from official templates with new files and features.\n\n## Requirements\n\n### 1. AlgoKit user should be able to use generate command to extend existing algokit compliant projects with new `files` of any kind\n\nThis implies scenarios like:\n\n- Adding new contracts into existing algokit compliant projects.\n  > Algokit compliant projects are projects that were instantiated from official or community templates and follow the same structure and conventions.\n- Overriding existing files with new ones.\n- Adding new files into existing projects.\n\nOverall, we want to introduce a notion of `generators` which can be viewed as a modular self-sufficient template units that are hosted within template repositories and describe how to create or update files within projects instantiated from AlgoKit templates.\n\nRuby on Rails has a similar concept of [generators](https://guides.rubyonrails.org/generators.html) which are used to create or update files within Rails projects. This can be used as a reference for inspiration.\n\n### 2. 
Template builder should be able to access a clear guideline and refer to official templates for examples on how to create `generators`\n\nThis implies extension of existing starter guidelines available for template builders on [AlgoKit Docs](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/tutorials/algokit-template.md) and using one or several official templates as a reference point.\n\n## Principles\n\n- **Modularity**: Artifacts dependant on `advanced algokit generate` command capabilities embedded into templates should follow guiding AlgoKit principles and expand on approaches already utilized in `react`, `fullstack` and `beaker` templates. This implies that giving developers flexibility to define any extra templating logic, allowing to create or update any files within projects instantiated from algokit templates.\n- **Maintainability**: The `advanced algokit generate` capabilities on `algokit-cli` and related artifacts on respective official templates should be easy to maintain and extend.\n- **Seamless onramp**: Great developer experience for template builders to create their own `generators` and user experience to use them via `advanced algokit generate` command should be a priority.\n\nAll of the aforementioned requirements should be met in a way that is consistent with the guiding principles of AlgoKit or attempt to find a balanced trade of between the principles that satisfies the requirements. Refer to [AlgoKit Guiding Principles](../../docs/algokit.md#Guiding-Principles) for detailed reference on the principles.\n\n## Considered Options\n\nBased on preliminary research, all of the options below assume that:\nA `generator` is a self contained copier/jinja template that is hosted within a template repository and describes how to create or update files within projects instantiated from algokit templates. 
Hosting it along with the template is a necessity given that community based templates can follow different conventions, patterns and structure making it hard to attempt to generalize the logic of `generators` and make them work for all templates.\n\n### Option 1: Wrapping generators into self contained copier templates hidden within algokit templates\n\nThis option implies that `generators` are self contained copier templates that are hidden within algokit templates and are not exposed to the end user. This option is inspired by [Ruby on Rails generators](https://guides.rubyonrails.org/generators.html) and [Yeoman generators](https://yeoman.io/authoring/).\n\nThe main idea is to rely on `_templates_suffix` in copier.yamls to define 2 separate types of suffixes for `templates` and for `generators`:\n\n- Existing templates under all official algokit templates are already prefixed with `.jinja` hence we just need to explicitly prefix it with `.jinja` on root copier\n- The new generators jinja templates can be prefixed (for example) with alternative file extension for jinja files such as `.j2`. Which is also a common convention for jinja templates.\n- - This only works for files though for regular folders and cases like `{% if %}folder_name{% endif %}.j2` we need to wrap them into {% raw %} to that first pass when template initially initialized unwraps the content allowing second pass via generator to then use them as jinja templates. 
The only downside here is slightly longer file names for folders, but I think it's a reasonable tradeoff considering simplicity of the solution.\n\nOverview of the proposal can be summarized via the following diagram:\n\n```mermaid\ngraph TB\n  T[\"Template Folder\"]\n  T --> C[\"copier.yaml\"]\n  T --> C1[\"...\"]\n  T --> G[\".algokit/generators\"]\n\n  G --> G1[\"Generator 1\"]\n  G1 --> E1[\"copier.yaml\"]\n  G1 --> E2[\"...\"]\n\n  G --> G2[\"...\"]\n  G2 --> E3[\"...\"]\n\n  G --> G3[\"Generator N\"]\n  G3 --> E[\"copier.yaml\"]\n  G3 --> E4[\"...\"]\n```\n\n#### Pros\n\n- Generators are hidden within algokit templates and are not exposed to the end user. When user runs `algokit generate` command, cli presents a list of available generators to choose from. This makes it easier for user to understand what generators are available and what they do.\n- Generators are self contained copier templates giving template builders flexibility to do any kind of templating logic similar to what they can already do with regular templates.\n- Majority of implementation complexity is reduced by relying on copier as a backbone for generators feature.\n\n#### Cons\n\n- Generators are somewhat tightly coupled with individual algokit templates, which implies its not necessarily a matter of copy pasting generators from one template to another. This can be a problem for community template builders who want to reuse generators from official templates. However, this can be mitigated by providing clear guidelines on how to create generators and referring to official templates as a reference point. Additionally, it seems like a reasonable tradeoff given that templates can vastly differ in type, structure, conventions and patterns and it's significantly harder to generalize the logic of generators to make them work for all templates.\n\n#### Implementation details\n\n**1. 
Adjusting templates structure**\n\nFor instance, if we assume existing `beaker` template, the new file/folder structure can look as follows:\n\n```\ntemplate_content/.algokit # alternatively could be just `.algokit-generators`\n└── generators\n    └── {generator_name} # generator name can be anything\n        ├── copier.yaml # copier config for generator\n        └── smart_contracts # logic for adding new contracts to beaker template\n            └── {% raw %}{{ contract_name }}{% endraw %}\n                ├── contract.py.j2\n                ├── {% raw %}{% if language == 'python' %}deploy_config.py{% endif %}{% endraw %}.j2\n                └── {% raw %}{% if language == 'typescript' %}deploy-config.ts{% endif %}{% endraw %}.j2\n...rest of the template is left as is\ncopier.yml\n```\n\nThe `index.ts` and `config.py` files on beaker template need to be updated to auto import all contracts from sub folders at `smart_contracts` to eliminate the need for developers to manually import them after running the smart contract generator.\n\n> Please note, above is just an example that assumes the generator for adding new contracts, but the proposal is generic enough to support any kind of jinja-based templating logic.\n\n**2. Adjusting `.algokit.toml`**\n\nThe proposal for new structure for defining generators in root algokit toml is as follows:\n\n```toml\n[generators.create_contract] # [generators.<generator_name>]\ndescription = \"Adds new smart contract to existing project\" # description of the generator, can appear in cli for extra info\npath = \".algokit/generators/create_contract\"  # path that cli should grab to forward to copier copy\n```\n\n**3. Adjusting `algokit generate` command on cli**\n\nNext step in implementation part of the proposal is adjusting the `generator` command on `algokit-cli` to make sure it knows how to look for generators. 
The available generators can be provided to user via list picker (a.k.a ruby on rails style) by letting algokit scan contents of `algokit.toml` and look for `.generators` folder.\n\na) Has no generators\nIf algokit-cli can't find any generator configured then nothing change from current implementation.\n\nb) Has generators and user runs `algokit generate` command to see the list of available generators\nA new `click` command per each generator in `.algokit.toml` is added to the list of generate commands, that will list all available generators for the user to choose from.\n\n```bash\nalgokit-cli generate\n\n---\n\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client  Create a typed ApplicationClient from an ARC-32 application.json\n  smart-contract  Adds new smart contract to existing project\n```\n\nThen, to invoke the interactive flow via copier user runs:\n\n```bash\nalgokit-cli generate smart-contract `name of the contract`\n```\n\nc) Has generators, user knows what to pick and wants to run it non interactively\nUsing the generator configuration the `generator` will automatically add new `click` commands to the existing list of generate commands allowing the user to run them in the command line.\n\n```bash\nalgokit-cli generate smart-contract -a contract_name=`name of the contract` -a language=python # passing extra arguments to generator similar to algokit init\n```\n\n**4. Testing and documentation**\n\nLastly we need to make sure that the new feature is properly tested and documented. This includes:\n\n- Testing the new feature works as expected on all official templates that will host generators. This will imply adding a separate test suite that picks some default template state and run generators on top of them confirming that created artifacts are placed in expected locations and have expected content. 
Tests should be easy to follow and expand on existing tests suite on templates without introducing extra complexity.\n- Introduce new tutorial sections on [AlgoKit Docs](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/tutorials/algokit-template.md) on how to create generators and refer to official templates as a reference point.\n\n### Option 2: Wrapping generators into self contained copier templates hosted on separate repositories\n\nThis option proposes to host `generators` on separate set of repositories and use them as a dependency for algokit templates.\n`algokit.toml` can be extended on template repositories to list generators they depend on.\n\nThe only distinction between this option and option 1 is that generators are hosted on separate repositories and are not hidden within algokit templates. Implying that they are not tightly coupled with algokit templates and can be reused/forked by community template builders to build their own generators.\n\n#### Pros\n\n- Generators are not tightly coupled with algokit templates and can be reused/forked by community template builders to build their own generators.\n- Generators can be versioned and updated independently from algokit templates.\n\n#### Cons\n\n- Developing and maintaining generators is significantly more complex due to the need to maintain separate repositories and versioning.\n- Official maintainers and community template builders need to be put extra effort at keeping generators generic enough to be reused by other templates. Given that templates can vastly differ in type, structure, conventions and patterns, this can be a challenge.\n- Given that copier is being considered as a backbone for generators feature, drawbacks outlined for hosting templates on monorepos in the [previous adr](2023-06-06_frontend-templates.md#option-1-monolithic-template) apply here as well.\n\n## Open questions for reviewers\n\n1. 
What kinds of generators other than `add new contract` do we want to support on initial release (if any)?\n2. Are there any other template repositories that we want to integrate with generators other than `beaker` (`fullstack` will contain those as well as it uses `beaker` as a dependency)?\n\n> Please note an MVP PoC is already implemented and available to play around on algokit-cli and beaker template repo under `advanced-generate-command` and `generators` branches respectively.\n> To test it out checkout the branche on cli do `pipx install . --force`, navigate to beaker template repo and checkout the branch as well, then navigate to any of the sub folders in `tests_generated`. Lastly do `algokit bootstrap all`, build the contract and execute `algokit generate` from root of that folder to play around with the feature based on the implementation proposal from Option 1.\n\n## Final Decision\n\nThe team approved the proposal for Option 1: Wrapping generators into self contained copier templates hidden within algokit templates.\n\n## Next steps\n\n1. Polishing the PoC on algokit-cli and adding tests\n2. Polishing the PoC on beaker template and adding tests\n3. Adding documentation for new capabilities of the generate command\n4. Adding documentation for template builders on how to create generators\n"
  },
  {
    "path": "docs/architecture-decisions/2024-01-13_native_binaries.md",
    "content": "# AlgoKit CLI native binaries\n\n- **Status**: Approved\n- **Owner:** Altynbek Orumbayev (MakerX), Negar Abbasi (MakerX)\n- **Deciders**: Alessandro (Algorand Foundation), MakerX\n- **Date created**: 2024-01-13\n- **Date decided:** 2024-01-25\n- **Date updated**: 2024-01-16\n\n## Context\n\nThe primary motivation for this decision is to streamline the installation process of AlgoKit CLI and reduce the friction associated with installing it on various operating systems. Currently, users often encounter minor environment-specific bugs during installation, which can be a significant deterrent. By providing native binaries, we aim to speed up the installation time and eliminate these bugs by **removing requirements to install python by the user**, thereby improving the overall user experience.\n\nThe north star for this decision is to provide a distribution model that can be described as:\n\n```mermaid\ngraph TD\n    A[GitHub Runners] -->|Windows| B[Packaging tool]\n    A -->|Mac| C[Packaging tool]\n    A -->|Linux| D[Packaging tool]\n    B --> E[Windows Binary]\n    C --> F[Mac Binary]\n    D --> G[Linux Binary]\n    E -->|Winget| H[Windows Users]\n    F -->|Brew| I[Mac Users]\n    G -->|Snap| J[Linux Users]\n    A -->|Wheel Build| K[Poetry]\n    K --> L[PyPi]\n    L -->|pipx| M[Python Users]\n```\n\n> ⚠️⚠️⚠️ Please note diagram above is a draft and is to be separately discussed in a follow up ADR that will focus on distribution of the binaries.\n\nThe scope of this ADR only concerns the packaging for the CLI. 
The distribution via `snap`, `winget`, etc. will be handled
This is a minor change and can be done in a backwards compatible way however still a con to consider.\n- Requires minor tweaks in algokit cli to introduce `multiprocessing.freeze_support()` to avoid issues with `vanity-address` task when executing via binary.\n\n#### PoC\n\nThe PoC is available [here](https://github.com/algorandfoundation/algokit-cli/pull/382). It outlines a simple github action with extra setup that compiles algokit cli as a single file executable on latest versions of Windows, Mac and Linux github runners.\n\n### Option 2 - Nuitka\n\n**Pros**\n\n- Nuitka translates Python code into C and then compiles it, which can result in performance improvements.\n- Cross-Platform: Supports multiple platforms including Windows, macOS, and Linux.\n- More cross compilations options than PyInstaller\n- Official github action simplifies the process of building executables for different platforms.\n\n**Cons**\n\n- Compilation Time: The process of converting Python to C and then compiling can be time-consuming. Up to ~30 minutes on github with 3 parallel jobs.\n- Size of Executable: The resulting executables can be larger due to the inclusion of the Python interpreter and the compiled C code (see benchmarking results below).\n- Does not support Python 3.12.\n- Requires minor tweaks in algokit cli to account for the fact that features relying on `sys.executable` will point to algokit cli executable instead of python interpreter. This is a minor change and can be done in a backwards compatible way however still a con to consider.\n- Requires minor tweaks in algokit cli to introduce `multiprocessing.freeze_support()` to avoid issues with `vanity-address` task when executing via binary.\n\n#### PoC\n\nThe PoC is available [here](https://github.com/algorandfoundation/algokit-cli/pull/393). 
It outlines a simple github action with extra setup that compiles algokit cli as a single file executable on latest versions of Windows, Mac and Linux github runners.\n\n### Benchmarking `pyinstaller` vs `nuitka` vs pipx installed `algokit`\n\n#### Methodology\n\n`hyperfine` was used to benchmark 5 different executables:\n\n- Nuitka Onefile - Nuitka compiled executable with `--onefile` flag, which produces a single file executable.\n- Nuitka Onedir - Nuitka compiled executable with `--onedir` flag, which produces a directory with the executable and other dependencies unzipped.\n- PyInstaller Onedir - PyInstaller compiled executable with `--standalone` flag, which produces a directory with the executable and other dependencies unzipped.\n- PyInstaller Onefile - PyInstaller compiled executable with `--onefile` flag, which produces a single file executable.\n- AlgoKit from `pipx` - AlgoKit CLI installed via `pipx` with all dependencies frozen (current latest stable release).\n\nThe benchmarking was performed on a MacBook M2 running macOS 14.2.1 and an ARM based Ubuntu 20.04.3 LTS running on a Parallels Desktop on the same machine.\n\n#### Results\n\n| Method              | macOS M2 | Ubuntu 20 ARM Linux VM | Windows 11 ARM |\n| ------------------- | -------- | ---------------------- | -------------- |\n| nuitka_onefile      | 3.634    | 1.465                  | 3.874          |\n| nuitka_onedir       | 0.2515   | 0.6200                 | 0.5136         |\n| pyinstaller_onedir  | 0.3228   | 0.7927                 | 0.6668         |\n| pyinstaller_onefile | 3.031    | 1.466                  | 1.875          |\n| algokit             | 0.3126   | 0.6111                 | 0.7579         |\n\n![Benchmarking Results](./assets/2024-01-13_native_binaries/image_1.png)\n_Figure: Benchmarking results comparing the performance of Nuitka (onefile, onedir modes), PyInstaller (onefile, onedir modes), and pipx installed Algokit CLI on macOS M2, Windows 11 ARM VM, Ubuntu 20 ARM 
VM._\n\n| Method              | Windows (MB) | Ubuntu (MB) | macOS (MB) |\n| ------------------- | ------------ | ----------- | ---------- |\n| nuitka_onedir       | 92.10        | 106         | 166        |\n| nuitka_onefile      | 22.48        | 23          | 41         |\n| pyinstaller_onedir  | 46.07        | 52          | 113        |\n| pyinstaller_onefile | 26.47        | 25          | 45         |\n\n![Bundle sizes](./assets/2024-01-13_native_binaries/image_2.png)\n_Figure: Bundle sizes of folders with executables build with Nuitka (onefile, onedir modes), PyInstaller (onefile, onedir modes)._\n\n#### Preliminary Observations\n\n- Nuitka's warmed up execution time is **fast**\n- Nuitka produces largest executables in `onedir` mode\n- Nuitka is the slowest to build (no charts for build benchmarks, this is observations based on CI build time from PoC, see links above)\n- PyInstaller produces smallest executables in `onedir` mode\n- PyInstaller is the fastest to build (no charts for build benchmarks, this is observations based on CI build time from PoC, see links above)\n\n### Honorable Mentions\n\n#### cx_Freeze\n\ncx_Freeze is a set of scripts and modules for freezing Python scripts into executables. It is similar to PyInstaller in many ways, but PyInstaller is preferred due to its more mature and comprehensive documentation.\n\n#### PyOxidizer\n\nPyOxidizer is a utility for producing binaries that embed Python. However, it is no longer actively maintained, which makes it a less desirable option for our needs.\n\n## Preferred option\n\nBased on observations so far we are leaning towards an Option 1. Where we would use PyInstaller to build native binaries for Windows, Mac and Linux.\n\nWhile `nuitka` in `onedir` mode is even faster than pip installed algokit, it generates larger executables, and is the slowest option in terms of build time. 
Pyinstaller is only marginally slower than `nuitka` or pip installed algokit in terms of execution time, has mature documentation, and is the fastest option to build (in `onedir` mode) and produces smaller executables than `nuitka`. Given that and the fact that `nuitka` does not support Python 3.12 yet and has a lot of `magical` optimizations hidden under the hood, we are leaning towards PyInstaller as the preferred option for building native binaries given its maturity and straightforwardness despite marginally slower execution time (which is not a big deal given that we are talking of deviations of 5-10 milliseconds).\n\n## Selected option\n\nThe team has formally pre-discussed this ADR and has agreed to proceed with Option 1 - PyInstaller.\n\n## Next Steps\n\n- [ ] Finalize the decision on the preferred option.\n- [ ] Expand PoC and polish the github action to build native binaries for Windows, Mac and Linux for x86, x86-64 and ARM architectures.\n- [ ] Implement portability snapshot tests, expanding existing algokit cli snapshot tests by running against real executable covering main functionality to test and ensure that the native binaries are portable and behave the same way as pip installed algokit cli.\n- [ ] Submit follow up ADR to discuss strategies on how to distribute the binaries in most accessible, user friendly and secure way.\n"
  },
  {
    "path": "docs/architecture-decisions/2024-01-23_init-wizard-v2.md",
    "content": "# AlgoKit Init Wizard Version 2 and Template Enhancements\n\n- **Status:** Proposed (Revision 2)\n- **Owner:** Altynbek Orumbayev\n- **Deciders:** Alessandro (Algorand Foundation), Rob Moore (MakerX), MakerX team\n- **Creation Date:** 2024-01-23\n- **Decision Date:** 2024-02-13\n- **Update Date:** 2024-02-12\n\n## Revisions\n\n- **Revision 1:** Initial discussions with Alessandro, Michael, Joe, Georgio, and Chris identified key issues with the existing wizards and explored ways to improve templating capabilities. These discussions led to the consideration of a unified 'monorepo' structure to consolidate template repositories into more focused, smaller generators under a single repository. The revision also sought to refine the wizard and enhance command orchestration capabilities, incorporating tools like `npm workspaces`.\n- **Revision 2:** Engaged in multiple brainstorming sessions with Algorand Foundation/DevRel members and MakerX engineers to develop a pragmatic, time-bound, and prerequisite-neutral strategy. This approach resulted in splitting the ADR into two segments: one focusing on the `init` wizard enhancements and the other on template improvements and command orchestration upgrades in algokit-cli.\n\n## Background\n\nThis ADR emerges from various discussions aimed at enhancing the `init` wizard version 2 user experience and aligning it with emerging smart contract languages/frameworks to make it more user-friendly and accessible to beginners. It builds upon prior decisions from the [Advanced algokit generate command](./2023-07-19_advanced_generate_command.md) and [Frontend Templates](./2023-06-06_frontend-templates.md) ADRs, integrating feedback from the Algorand Foundation and DevRel team.\n\n### Main Areas for Improvement\n\n1. 
**Enhancements to the `Init` Wizard:**\n\n   - Improve user experience by making the wizard more intuitive and less reliant on Algorand-specific jargon.\n   - Streamline the `presets` concept to minimize user inputs and simplify the process.\n\n2. **Template Refinements:**\n\n   - Address potential complexities in maintaining the `fullstack` template with new `smart-contract` template combinations, including future `.NET` integration alongside `puya` compiler-compatible stacks.\n   - Implement `codespaces` configurations for simplified project setup in GitHub Codespaces.\n   - Consider unifying `add smart contract` generators in the `puya` and `beaker` templates into a single, generic generator managed by the CLI itself (suggestion added as part of revision 2 based on DevRel feedback). This helps solving the problem of simplifying template building experience (removing duplication of generators that are essentially generic) while still giving an option for template builders to have custom generators within templates.\n\n3. **CLI Enhancements:**\n\n   - Enhance user experience by standardizing the use of `bootstrap`, `algokit.toml`, `.algokit` folder, and `.env` conventions, presenting a unified and intuitive CLI interface.\n\n## Detailed Proposals\n\n### 1. 
Improved `Init` Wizard Flow\n\nShift from a detailed, template-specific question format to a more streamlined, interactive process that focuses on the user's intent rather than specific technologies within the Algorand ecosystem.\n\n**Current Init Wizard V1:**\n\n```mermaid\ngraph TD\n    A[Start] --> Z[Pick an official template Puya, TealScript, React, Beaker]\n    Z --> B[Name for this project.]\n    B --> C[Package author name]\n    C --> D[Package author email]\n    D --> E[Do you want to add VSCode configuration?]\n    E -->|yes| F[Do you want to use ESLint and Prettier for code linting and formatting?]\n    E -->|no| G[Do you want to add JetBrains configuration - primarily optimized for WebStorm?]\n    F --> H[Do you want to use Tailwind CSS? A utility-first CSS framework for rapidly building custom designs.]\n    G --> H\n    H -->|yes| I[Do you want to use a daisyUI? Framework agnostic CSS component library for building modern websites and web applications fast.]\n    H -->|no| J[Do you want to include unit tests via Jest?]\n    I --> J\n    J --> K[Do you want to include end to end tests via Playwright?]\n    K --> L[Do you want to include Github Actions workflows for build validation?]\n    L -->|yes| M[Pick your website hosting provider for continuous delivery]\n    L -->|no| N[End]\n    M --> N\n```\n\n**Proposed Init Wizard V2:**\n\n```mermaid\ngraph TB\n    A[Would you like to build a smart contract or a dapp frontend?]\n    B[If smart contract, which language would you like to use?]\n    C[If frontend, which framework would you like to use?]\n    D[Python, implies puya]\n    E[Typescript, implies TealScript]\n    F[React]\n    G[`production`/`starter`/`custom`]\n    I[`production`/`starter`/`custom`]\n    J[Would you like to include a frontend component?]\n    A --> B\n    A --> C\n    B --> D\n    D --> J\n    E --> J\n    J --> G\n    J --> F\n    B --> E\n    C --> F\n    F --> I\n```\n\nThe proposal aims to simplify the question process and 
reduce the overall number of questions, focusing on identifying **what** the user intends to build without the additional complexity of selecting specific Algorand ecosystem tools or languages.\n\nAs illustrated in the proposed flow diagram, the initiative seeks to:\n\n- Simplify the init wizard's questions, avoiding technical jargon and aligning with popular programming languages and frameworks, leveraging direct mappings like `python`, `typescript`, and `.net`.\n- Introduce clear preset options: `starter` for a comprehensive preselection based on the Algorand Foundation's recommendations, `production` for setups aligned with production standards, and `custom` for developers seeking detailed customization through template questions.\n\nThis proposal presents no significant drawbacks, as it refines the question flow of the algokit init process without altering the command's fundamental behavior, thereby not impacting current users and enhancing the overall `init wizard` experience.\n\n### 2. Fullstack Template and CLI Command Orchestration Enhancements\n\nThis dual-step approach suggests offloading business logic related to linking smart contracts and frontends within fullstack to a self-contained generator within the `react` template. 
It also introduces the concept of command orchestration in algokit-cli to manage project lifecycles more effectively.\n\n#### 2.1 Autonomy of Frontend Template\n\n**Fullstack Template Structure**:\n\n| Before                                                                                                                                                                                                                                                     | After                                                                                                                                                                                                                                                                         |\n| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| The current fullstack is tightly coupled with the frontend template, handling the connection between frontend and backend. This coupling necessitates additional safety checks whenever there are changes in the expected structure of the React template. | The frontend now invokes the `import-contract` generator within the React template, which autonomously manages the linkage with a smart contract project. This adjustment makes the fullstack less reliant on template-specific changes that it should not be concerned with. 
|\n\nThe proposal includes changes to the fullstack template's structure, enabling the frontend to autonomously manage its linkage with smart contract projects, thereby simplifying integration and enhancing maintainability.\n\n**Benefits:**\n\n- Simplifies integration of templates by allowing the frontend template to manage smart contract linkages independently.\n- Generalizes the fullstack template, offering flexibility in choosing backend components.\n\n**Suggested Enhancements:**\n\n- Further generalize the `AppCalls.tsx` component, removing assumptions on default `hello world` contracts and using `resolveBy: 'id'` by default.\n- Incorporate suggestions from discussions during revision 2, including the potential bundling of `add smart contract` generators directly into `algokit-cli` for reduced redundancy and enhanced template support.\n\n#### 2.2 CLI Command Orchestration\n\nAims to utilize `algokit.toml` for organizing project-specific commands, enhancing project management and command execution through a more structured approach.\n\nExample workspace root `.algokit.toml`:\n\n```toml\n[algokit]\nmin_version = \"v1.8.0\"\n\n[project]\ntype = 'workspace' # informs algokit-cli that this is a workspace root that hosts multiple projects\nname = 'myworkspace' # included for reference but unique identifier for the workspace can be actually be excluded as it is not required for orchestration to work\nprojects = ['src'] # a list of relative/absolute paths directly to folders with independent components of the project that are part of the workspace or roots of folders hosting multiple independent components\n```\n\nExample workspace of a smart contract project `.algokit.toml`:\n\n```toml\n[algokit]\nmin_version = \"v1.8.0\"\n\n[generate.smart_contract]\ndescription = \"Adds new smart contract to existing project\"\npath = \".algokit/generators/create_contract\"\n\n[project]\ntype = 'backend'\nname = '{ component_name }'\n\n# The `deploy` command is categorized 
separately due to its static nature. Category names align with their root command group, hence `generate` is under [generate], while `deploy` is categorized under [project].\n[project.deploy]\ncommand = \"poetry run python -m smart_contracts deploy\"\nenvironment_secrets = [\n  \"DEPLOYER_MNEMONIC\",\n]\n\n[project.deploy.localnet]\nenvironment_secrets = []\n\n# New category for project specific commands that can be defined arbitrarily by the user\n[project.commands]\ntest = { command = \"poetry run pytest\", description = \"Run unit tests for the smart contracts\" }\nbuild = { command = \"poetry run python -m smart_contracts build\", description = \"Transpile puya smart contracts into TEAL and ABI specs\" }\n# Description is used as `help` text as content gets presented to user via `algokit cli` interface.\n```\n\n**Enhancements to `.algokit.toml` enable the cli to:**\n\n- Navigate and interpret any project structure compliant with `.algokit.toml`.\n- Simplify interactions with project-specific commands.\n- Dynamically orchestrate command execution based on `.algokit.toml` configurations.\n  - For `algokit project {command_name}` executed at the workspace root, it aggregates and executes `{command_name}` from all child components either `sequentially` or `concurrently`, allowing users to choose the execution order per matching command identifier.\n  - When executed at a component's root, it runs the component-specific command from its `.algokit.toml`, also integrating it into the `algokit project` command group for visibility.\n\n**Suggested Improvements:**\n\n- Consider introducing command dependencies to ensure execution order for any command (in addition to current proposal to allow handling execution order for commands with matching 'names/identifiers'), enhancing the orchestration capabilities of the CLI.\n\n## Reference: Alternatives Considered\n\n### Consolidating AlgoKit Templates into a Monorepo\n\n**Overview:** Explores the possibility of merging AlgoKit 
templates into a single repository, focusing on streamlined project management and orchestration within js-centric environments.\n\n**Pros:**\n\n- Simplifies monorepo project management and orchestration in npm-based projects.\n\n**Cons:**\n\n- Increases complexity and potential conflicts with existing CLI capabilities and Copier's recommendations, suggesting that a better alternative might be to consider a shift to where we have a more CLI-centric approach that handles orchestration without introducing extra dependant pre-requisites.\n\n## Decision\n\nA phased implementation of both proposals, starting with user-facing enhancements to the `init` wizard followed by template and CLI command orchestration improvements. This strategy allows for immediate impact and addresses urgent needs first, with potential for further simplification through subsequent enhancements.\n\n## Next Steps\n\n1. Break down the selected options into detailed work items.\n2. Implement the proposed changes in the AlgoKit CLI and templates.\n3. Update documentation and onboarding materials to reflect the new changes.\n4. Announce the updates to the community and DevRel, soliciting feedback for future enhancements.\n5. Tackle minor improvements that become feasible following the main proposal implementations.\n\n### Open Questions\n\n1. Should we implement a bidirectional query mechanism where selecting a smart contract prompts the option to add a frontend, and similarly, choosing a frontend first triggers a query about integrating a smart contract? This approach necessitates a minor modification in the wording of the initial query. In the current workflow, we don't prompt adding smart contracts part since this question is prominently positioned as the initial query in the wizard's sequence.\n2. Alternative suggestions for 'type' categorization for `.algokit.toml` files? 
Or we are clear to proceed with `workspace` to highlight roots of full-stack/monorepo projects, `backend` to represent anything non webapp related and `frontend` to represent anything webapp related?\n"
  },
  {
    "path": "docs/architecture-decisions/2024-01-31_binary_distribution.md",
    "content": "# AlgoKit CLI binary distribution\n\n- **Status**: Approved\n- **Owner:** Altynbek Orumbayev (MakerX)\n- **Deciders**: Alessandro (Algorand Foundation), Rob Moore (MakerX), MakerX team\n- **Date created**: 2024-01-31\n- **Date decided:** 2024-02-01\n- **Date updated**: 2024-02-05\n\n## Context\n\nThe following ADR is a continuation of [native binaries](./2024-01-13_native_binaries.md) ADR. With initial workflows implemented, the goal is to determine the best way to distribute the native binaries to the end users.\n\n## Requirements\n\n- The solution should support a wide variety of Linux distributions, macOS (both Apple Silicon and Intel architectures), and Windows.\n- The solution should allow distribution of native binaries via os specific package managers `brew`, `snap`, `winget`.\n\n## Technical Constraints\n\n- ~~Pyinstaller binaries are dependend on the architecture and the OS. Github Actions mainly support `x86-64` architectures. This means that we need a self hosted runner to build binaries for `arm64` architectures OR use an alternative CI provider which has access to `arm64` and other architectures inside runners.~~ Based on recent announcement made at the date of submission of initial draft of this ADR, GitHub now supports mac os ARM runners on all OSS plans. This means we can target arm64 macos binaries using the default macos runners and potentially reuse them to build arm64 linux binaries using QEMU and/or buildx from Docker. 
Hence this is no longer a constraint - see [post](https://github.blog/changelog/2024-01-30-github-actions-introducing-the-new-m1-macos-runner-available-to-open-source/).\n- Codesigning is a recommended practice for secure distribution and would need to be implemented on top of the initial workflows introduced as part of the implementation of the [native binaries](./2024-01-13_native_binaries.md) ADR.\n\n## Options\n\n### Option 1 - Binaries are only available via dedicated package managers using OSS solutions for multi architecture support\n\nThis approach assumes that native binaries are never available for direct consumption but instead are tailored per each selected package manager. Additionally this approach assumes using OSS solutions for multi architecture support. ~~Primarily using a QEMU/buildx based actions to build the binaries for different linux architectures, using paid m1 github runner for arm macos binaries, using default macos runner for x86 macos binaries and using default windows runner for x86-64 windows binaries~~. Based on recent announcement from [GitHub](https://github.blog/changelog/2024-01-30-github-actions-introducing-the-new-m1-macos-runner-available-to-open-source/) - ARM runners on mac are now available on all OSS plans, given that we agreed that we aren't supporting ARM linux, mac on ARM can be compiled on ARM mac runners and windows can be compiled and targeting x86-64 using default runners. 
**Hence removing all major constrains and difficulties of this approach**.\n\nDiagram:\n\n```mermaid\nflowchart\n  n1[\"Release pipeline flow\"]\n\tn1 --- n2[\"Python wheel build & test validation, runs on [3.10, 3.11, 3.12 on windows, mac, ubuntu]\"]\n\tn2 --- n3[\"Build binaries\"]\n\tn3 --- n4[\"Linux\"]\n\tn3 --- n5[\"Mac\"]\n\tn3 --- n6[\"Windows\"]\n\tn4 --- n7[\"ubuntu-latest x86-64\"]\n\tn7 --- n9[\"Upload artifacts\"]\n\tn5 --- n10[\"macos-latest x86-64\"]\n\tn5 --- n11[\"macos-large-latest ARM\"]\n\tn6 --- n12[\"windows-latest (x86-64)\"]\n\tn10 --- n9\n\tn11 --- n9\n\tn12 --- n9\n\tn9 --- n13[\"Distribution\"]\n\tn13 --- n14[\"pypi pure python wheel\"]\n\tn13 --- n15[\"snap\"]\n\tn13 --- n16[\"brew\"]\n\tn13 --- n17[\"winget\"]\n\tn17 --- n18[\"refresh manifest\"]\n\tn18 --- n19[\"submit release PR to winget community repo\"]\n\tn16 --- n20[\"refresh cask definition\"]\n\tn20 --- n21[\"submit pr to algorandfoundation/tap\"]\n\tn15 --- n22[\"x86-64\"]\n\tn22 --- n23[\"build and submit x86-64 snap on ubuntu-latest runner\"]\n\tn23 --- n24[\"publish snap\"]\n```\n\n#### General pros and cons\n\n**Pros**\n\n- Ability to rely on package manager to handle installation, updates and removal of the binaries\n- OSS solutions for multi architecture support are available and can be used to build the binaries for different architectures on Linux using QEMU\n\n**Cons**\n\n- Assuming we are targeting x86-64/ARM64 on mac, x86-64 on Windows and x86-64 on Linux, no major cons are identified with this approach other than certain non deteministic factos around initial setup of Apple dev account, approval of PR on winget repo and setup of the Snapstore profile.\n\n#### Snap\n\n**Pros**\n\n- Snap is available on all major Linux distributions\n- Ubuntu provides a Launchpad platform that simplifies compiling snaps on different architectures remotely\n- Snap supports distribution of pyinstaller binaries (verified in a PoC on a fork of algokit-cli)\n- Has flexible configuration 
options over release channels (edge, beta, release candidate, production/stable).\n\n**Cons**\n\n- ~~Snap provides a native support for pyhon applications which may be simpler to use than pyinstaller rather than distributing the binaries as it allows us to rely directly on remote Launchpad builds instead of pre-building the binaries per each architecture.~~ **No longer an issue given latest decision not to support ARM64 linux given that pipx is still gonna be a first class citizen alternative.**\n- ~~If we are to distribute pyinstaller binaries, the binaries itself need to be cross compiled on target architectures. Currently we get `x86-64` binaries with `ubuntu` runners, however we would need to introduce extra self hosted runners to get `arm64` binaries. In this case we would need to run building of binaries AND building of snaps in build matrices consiting of default `ubuntu` runners and self hosted `arm64` runners. This will increase the build time and complexity of the build process.~~ **No longer an issue given latest decision not to support ARM64 linux given that pipx is still gonna be a first class citizen alternative.**\n\n#### Brew\n\n**Pros**\n\n- A flow for distributing algokit wheel via `brew` is already established\n- Brew supports distribution of pyinstaller binaries (verified in a PoC on a fork of algokit-cli)\n- Will require minor changes in the existing brew workflow to operate with binary artifacts instead of wheel artifacts\n\n**Cons**\n\n- ~~Algokit cli relies on dependencies that are not [`fat` binaries](https://en.wikipedia.org/wiki/Fat_binary). This means we can't use pyinstaller to target `universal2` architecture and instead need to build the binaries for each architecture separately. 
Hence using a paid ARM macos runner is a simple solution to get binaries for Apple Silicon.~~ **No longer an issue given announcement from Github that ARM runners are now available on all OSS plans.**\n- Codesigning is required for distribution of binaries via `brew`. This means that we need to have a valid Apple Developer account and a valid certificate to sign the binaries. While this is a good practice regardless, listing this as a con given non deterministic nature of obtaining a valid certificate from Apple.\n- ~~Separate ARM worker for apple silicon binaries is required. Github provides beta version of such runners for with paid billing plans.~~ **No longer paid given recent announcement from Github.**\n\n#### Winget\n\n**Pros**\n\n- Winget is available on all major Windows distributions\n- Winget supports distribution of pyinstaller binaries and in fact it does not support distribution of python wheels, making binaries a good candidate for winget.\n- Will require minor changes in the existing brew workflow to convert pyinstaller .exe binaries to winget .msi binaries\n\n**Cons**\n\n- Winget requires contributing the manifest file to an open source repository which may cause potential delays in the distribution of the binaries as each PR needs to be reviewed and approved by the maintainers of the repository.\n\n#### Conclusion\n\nAll of the above package managers are viable and can be used to distribute the pyinstaller build binaries. ~~Requirement on supporting additional architectures like `arm64` introduce unique challenges that ideally should be addressed by introducing custom self hosted runners to the build matrix. 
This will increase the complexity of the build process and will require additional maintenance of the runners.~~ **Given recent decisions around ARM linux support, we can safely assume that this is the most balanced approach that will allow us to distribute the binaries for all supported architectures without introducing additional complexity of maintaining self hosted runners or implementing an in-house self-update mechanism for binaries.**\n\n### Option 2 - Binaries are only available via dedicated package managers using self hosted runners for multi architecture support\n\nThis is identical to the option 1 with the exception that we are using self hosted runners to build the binaries for different architectures.\n\nDiagram:\n\n```mermaid\nflowchart\n    n1[\"Release pipeline flow\"]\n\tn1 --- n2[\"Python wheel build &amp; test validation (runs on [3.10, 3.11, 3.12 on windows, mac, ubuntu]\"]\n\tn2 --- n3[\"Build binaries\"]\n\tn3 --- n4[\"Linux\"]\n\tn3 --- n5[\"Mac\"]\n\tn3 --- n6[\"Windows\"]\n\tn4 --- n7[\"self-hosted aarch64-ubuntu22.04\"]\n\tn4 --- n8[\"self-hosted armv7-ubuntu22.04\"]\n\tn7 --- n9[\"Upload artifacts\"]\n\tn5 --- n10[\"macos-latest x86-64\"]\n\tn5 --- n11[\"macos-large-latest ARM (paid worker)\"]\n\tn6 --- n12[\"windows-latest (x86-64)\"]\n\tn8 --- n9\n\tn10 --- n9\n\tn11 --- n9\n\tn12 --- n9\n\tn9 --- n13[\"Distribution\"]\n\tn13 --- n14[\"pypi pure python wheel\"]\n\tn13 --- n15[\"snap\"]\n\tn13 --- n16[\"brew\"]\n\tn13 --- n17[\"winget\"]\n\tn17 --- n18[\"refresh manifest\"]\n\tn18 --- n19[\"submit release PR to winget community repo \"]\n\tn16 --- n20[\"refresh cask definition (can use conditional that auto picks either ARM or Intel based artifacts)\"]\n\tn20 --- n21[\"submit pr to algorandfoundation/tap\"]\n\tn15 --- n22[\"aarch64\"]\n\tn15 --- n23[\"armv7\"]\n\tn22 --- n24[\"build snap on self hosted runner for aarch64 architecture\"]\n\tn23 --- n25[\"build snap on self hosted runner for armv7 architecture\"]\n\tn24 --- n26[\"publish 
snap\"]\n\tn25 --- n26\n```\n\n#### General pros and cons\n\n**Pros**\n\n- Simplified build matrix as we can simply define additional `runs-on` for each architecture we want to support to target our custom self hosted runners\n- Same pros as option 1\n\n**Cons**\n\n- The main drawback for self hosted runners is requirements on additional maintenance, very careful configuration and security considerations. This is a non trivial task and will require additional resources to implement and maintain. Github itself generally does not recommend using them for public repositories as forked repositories can potentially gain access to the self hosted runners. There are a lot of workarounds to this issue, but it is still a non trivial task to implement and maintain.\n\n### Option 3 - Binaries are available for direct consumption as self contained executables\n\nThis approach assumes that native binaries are available for direct consumption as self contained executables. This means that the binaries are not distributed via package managers but instead are available for direct download from the Algorand Foundation website/dedicated installer script that needs to be introduced. 
The script can figure out the operating system, architecture and pull the correct binary from public github releases page.\n\nDiagram:\n\n```mermaid\nflowchart\n    n1[\"Release pipeline flow\"]\n\tn1 --- n2[\"Python wheel build & test validation (runs on [3.10, 3.11, 3.12 on windows, mac, ubuntu]\"]\n\tn2 --- n3[\"Build binaries\"]\n\tn3 --- n4[\"Linux\"]\n\tn3 --- n5[\"Mac\"]\n\tn3 --- n6[\"Windows\"]\n\tn4 --- n7[\"ubuntu-latest\"]\n\tn7 --- n9[\"Upload artifacts\"]\n\tn5 --- n10[\"macos-latest x86-64\"]\n\tn5 --- n11[\"macos-large-latest ARM\"]\n\tn6 --- n12[\"windows-latest (x86-64)\"]\n\tn10 --- n9\n\tn11 --- n9\n\tn12 --- n9\n\tn9 --- n13[\"Distribution\"]\n\tn13 --- n14[\"pypi pure python wheel\"]\n\tn13 --- n15[\"binaries\"]\n\tn15 --- n16[\"Windows\"]\n\tn15 --- n17[\"Linux\"]\n\tn15 --- n18[\"Mac\"]\n\tn16 --- n19[\"Transform to msi installer\"]\n\tn17 --- n20[\"codesign\"]\n\tn19 --- n20\n\tn18 --- n21[\"transform to .pkg installer\"]\n\tn21 --- n20\n\tn20 --- n22[\"append to github release\"]\n```\n\n#### General pros and cons\n\n**Pros**\n\n- Ability to distribute binaries for all supported architectures without extra complexity of maintaining distributions via package managers `brew`, `snap`, `winget`\n- Self update mechanism can be implemented within the algokit cli to check for updates and pull newer versions of the binaries. This will allow users to always have the latest version of the binaries without the need to wait for the package manager to update the binaries.\n\n**Cons**\n\n- Users who prefer to use package managers will need to manually install the binaries and keep track of the updates\n- Self update mechanism will require additional maintenance and testing to ensure correct handling of the updates\n\n### Option 4 - Binaries are available for direct consumption as self contained executables, dedicated package managers distribute the wheels\n\nThis is potentially the most complex approach and it combines option 1 and option 3. 
This means that we are distributing the binaries for direct consumption as self contained executables and additionally we are distributing the wheels via package managers.\n\n**Pros**\n\n- Ability to distribute binaries for all supported architectures without extra complexity of maintaining distributions via package managers `brew`, `snap`, `winget`\n- Ability to rely on python optimizations in dedicated package managers like `snap` and `brew` to distribute the wheel artifacts\n\n**Cons**\n\n- **The major drawback with the approach** is that it does not eliminate dependency on python to run the algokit cli when using package managers which goes against initial goals of this outcome (removing requirement on having python installed on user's machine)\n- Self update mechanism will require additional maintenance and testing to ensure correct handling of the updates and is at risk of not being used that often if users will still primarily rely on package managers or `pipx` to install the algokit cli\n- Diminishes the value of having self contained binaries as users will still need to have python installed on their machines to run the algokit cli if they prefer to use package managers\n\n## Preferred option\n\n### Notes from discussion on 2024-02-01\n\n- We are not supporting Windows ARM and Ubuntu ARM\n- We are not using self-hosted runners approach\n- Pending on key decision from Algorand Foundation on whether we want to have brew, snap, winget distribution vs self contained binary executables.\n\nBased on the above, the most balanced option in terms of user experience and maintenance complexity is **Option 1**. This option allows us to distribute the binaries for all supported architectures without introducing additional complexity of maintaining self hosted runners or implementing an in-house self-update mechanism for binaries. 
Additionally each individual package manager has unique capabilities simplifying aggregation of metrics and controlling the release process.\n\n## Selected option\n\nOption 1\n\n## Next Steps\n\n1. Splitting ADR into self contained work items to parallelize the implementation of the selected option\n2. Productizing PoCs implemented as part of this ADR to finalize integrations with `winget`, `snap` and `brew`\n3. Aggregating requirements for obtaining accesses to the package managers and developer certificates for codesigning\n4. Implementing codesigning mechanisms and finalizing implementation with detailed documentation of new installation options for users\n\n## Open questions\n\n~~Do we want to build the binaries for ARM based windows machines? If so, this implies that we need to introduce self hosted runners for windows as well given that there seems to be no OSS options to build the binaries for ARM based windows machines in Github Actions.~~ Clarified by decision to not support ARM versions of linux and windows given alternative options like pipx still being available as a first class citizen.\n"
  },
  {
    "path": "docs/architecture-decisions/2024-03-06_local_dev_ui_packaging.md",
    "content": "# Local dev UI packaging\n\n- **Status**: Draft\n- **Owner:** Patrick Dinh (MakerX), Negar Abbasi (MakerX)\n- **Deciders**: Alessandro (Algorand Foundation), Rob Moore (MakerX), MakerX team\n- **Date created**: 2024-03-06\n\n## Context\n\nWe are building a web-based local development interface to support:\n\n- Exploring transactions, assets and accounts\n- Visualising transactions\n- Launching a VS Code debug session from an application call transaction\n- Integrating and using a dev wallet via KMD\n- Network switching between LocalNet, TestNet and MainNet\n- Calling deployed ABI apps\n- Integration with the AlgoKit CLI to perform any relevant actions\n- Launching the interface from the AlgoKit CLI\n\n## Requirements\n\nThe local development interface should have:\n\n- Support for a wide variety of Linux distributions, macOS (both Apple Silicon and Intel architectures), and Windows 10+.\n- Local system access to:\n  - The user home directory on the file system.\n  - Launch child processes.\n  - Launch the UI from the AlgoKit CLI.\n  - Launch shell commands.\n- The ability for the explorer portion to be deployed to a static web host.\n- The ability to be installed via the following channels:\n  - Winget for Windows.\n  - Homebrew for macOS.\n  - Snapcraft for Linux.\n- The ability for users to see a notification when a new version is available and can update.\n\n## Out of scope\n\n- Support for ARM processors on Linux or Windows.\n\n## Options\n\n### Option 1 - Electron\n\n[Electron](https://www.electronjs.org/) is a framework for creating native applications with web technologies like JavaScript, HTML, and CSS. 
It allows developers to build cross-platform desktop apps using their existing web development skills.\n\nLink to PoC is here: [Electron PoC](https://github.com/negar-abbasi/electron-poc).\n\n**Pros**\n\n- Electron is a mature framework with a large community and a lot of resources available.\n- Uses standard JavaScript and Node APIs, which most developers are very familiar with.\n- It supports all the local system access requirements via [ipcMain](https://www.electronjs.org/docs/latest/api/ipc-main), allowing asynchronous communication from the main process to renderer processes.\n  - File system access is enabled using the Node.js `fs` module. See [Node.js File System (fs) module docs](https://nodejs.org/api/fs.html).\n  - Launching processes is enabled using the Node.js `child_process` module to spawn new processes. [Node.js Child Processes](https://nodejs.org/api/child_process.html). Specifically, we'll use the `spawn` or `exec` functions.\n  - Running shell commands is enabled via the Node.js `child_process` module's `exec` function.\n- Electron supports an [auto update](https://www.electronjs.org/docs/latest/api/auto-updater) for Windows and macOS only. For Linux, if the explorer is distributed via Snapcraft, it should get auto updated.\n- Electron does not have any built-in tooling for packaging and distribution. There are however several third-party tools available for packaging and distribution, such as [electron-builder](https://www.electron.build/), [electron-packager](https://www.npmjs.com/package/electron-packager), and [electron-forge](https://www.electronforge.io/).\n- Electron Forge is an all-in-one tool that handles the packaging and distribution of Electron apps. Under the hood, it combines a lot of existing Electron tools (e.g. @electron/packager, @electron/osx-sign, electron-winstaller, etc.) into a single interface so we do not have to worry about wiring them all together. 
[docs](https://www.electronjs.org/docs/latest/tutorial/tutorial-packaging#using-electron-forge)\n  - It can package the app into format that we are interested in:\n    - `.deb`, `.snap` for Linux\n    - `.msi` for Windows\n    - `.dmg` for macOS\n\n**Cons**\n\n- Electron is resource hungry, for a small test app (Hello World) it uses 146.0 MB of memory and 0.47% of CPU (running on macOS M2 CPU with 12 cores and 16GB RAM)\n- When built on a local dev machine, the package size for macOS is ~250MB.\n\n### Option 2 - Tauri\n\n[Tauri](https://tauri.app/about/intro) is a toolkit that helps developers make applications for the major desktop platforms - using virtually any frontend framework in existence. The core is built with Rust, and the CLI leverages Node.js making Tauri a genuinely polyglot approach to creating and maintaining great apps.\n\n**Pros**\n\n- Tauri supports all requirements for the local development interface via their JavaScript API without the need to write any Rust.\n- It can manage the [file systems](https://tauri.app/v1/api/js/fs), launch another [process](https://tauri.app/v1/api/js/process), run a [shell command](https://tauri.app/v1/api/js/shell).\n- Tauri integrates well with major web frameworks. [`create-tauri-app`](https://github.com/tauri-apps/create-tauri-app) and a good template project can be bootstrapped and working within minutes.\n\n  - Once bootstrapped, the web app can be bundled individually and deployed as a website. 
Below is the `npm script` Tauri generates for a Vite project, we can see that it supports `vite build`\n\n  ```json\n  \"scripts\": {\n    \"dev\": \"vite\",\n    \"build\": \"tsc && vite build\",\n    \"preview\": \"vite preview\",\n    \"tauri\": \"tauri\"\n  },\n  ```\n\n- For [compiling binaries](https://tauri.app/v1/guides/building/), the Tauri Bundler supports\n  - Windows: setup.exe, .msi\n  - macOS: .app, .dmg\n  - Linux: .deb, .appimage\n- The Tauri Bundler supports code signing for:\n  - [Windows](https://tauri.app/v1/guides/distribution/sign-windows)\n  - [Linux](https://tauri.app/v1/guides/distribution/sign-linux)\n  - [macOS](https://tauri.app/v1/guides/distribution/sign-macos)\n- Tauri offers a [built-in updater](https://tauri.app/v1/guides/distribution/updater) for the NSIS (Windows), MSI (Windows), AppImage (Linux) and App bundle (macOS) distribution formats.\n- Tauri is reasonably efficient, for a small test app (Hello World) it uses 30 MB of RAM and 0.1% CPU (running on macOS M1 CPU and 32GB RAM).\n- When built on a local dev machine, the package size for macOS is ~5MB.\n\n**Cons**\n\n- If we need to extend the functionality beyond the support of Tauri's JavaScript API, we will need to write the code in Rust, which would be a new language in the AlgoKit ecosystem and a less common skill in the team.\n- At the point of writing, building with `snap` (for Linux) isn't officially supported by Tauri. There is a open [PR](https://github.com/tauri-apps/tauri/pull/6532). We can however support snap by packaging the Linux build output ourselves.\n- Tauri relies on [Webview](https://tauri.app/v1/references/webview-versions/) which are not the same across platforms. 
This means that we'll need to perform more testing on the styling and rendering, to ensure a consistent experience across the different platform Webviews and the supported versions.\n  - For reference, [here](https://github.com/tauri-apps/tauri/issues?q=is%3Aissue+webview+css) are Tauri's issues related to CSS.\n- For some versions of Windows 10, WebView2 needs to be installed. This process requires an internet connection whilst installing.\n\n### Option 3 - Wails\n\n[Wails](https://wails.io/) is similar to Tauri but the core is written in Go.\n\n**Pros**\n\n- Wails has init templates for major web frameworks. React + TypeScript + Vite is supported.\n- Wails has an auto codegen to generate the contract between the main process and the renderer process.\n- Wails doesn't have built-in code signing for Windows and Mac. However, the document on how to do code signing with GitHub Actions is very detailed.\n- Wails is reasonably efficient, for a small test app (Hello World) it uses 30 MB of RAM and 0.1% CPU (running on macOS M1 CPU and 32GB RAM).\n- When built on a local dev machine, the package size for macOS is ~5MB.\n\n**Cons**\n\n- Documentation isn't as comprehensive as Electron and Tauri. Because of this, I didn't investigate much further into Wails. Tauri seems to be a more supported project, Wails doesn't give us anything additional.\n- The code to interact with file systems, shell and child processes will be written in Go, which would be a new language in the AlgoKit ecosystem and a less common skill in the team.\n- No built-in updater. It is tracked in this [issue](https://github.com/wailsapp/wails/issues/1178).\n- Wails is based on WebView, therefore, it has the same cross-platform issues as Tauri.\n- Wails supports building for Windows, Mac and Linux. 
The documentation however isn't super clear:\n  - I could build Windows binaries from Mac.\n  - I couldn't build Linux binaries from Mac.\n  - The document doesn't mention options to build installers.\n\n## Preferred option\n\n- **Option 2** Tauri is the preferred option because it is well documented and has a big community behind it. Tauri supports all of our use cases and is less resource hungry than Electron.\n\n## Selected option\n\nOption 2\n\nGiven the good community support, great docs, low resource consumption and not needing to write much (if any) Rust, Tauri (Option 2) appears to fit our needs very well.\n"
  },
  {
    "path": "docs/articles/output_stability.md",
    "content": "# Smart Contract Output Stability\n\nSmart contracts development is analogous to low level firmware software development; it's a highly constrained environment in terms of both compute power and memory storage, with a high risk of vulnerabilities due to lower level access to memory and less developer-oriented security tooling. \nBecause of this, the assembly language code that is output for a smart contract is important - a seemingly innocuous minor change could inadvertently add a security vulnerability, or could significantly change the execution and memory profile. \nAs such it is important to ensure that, even if higher level code is refactored, there are no unintended changes to the generated smart contract assembly language output. \nWe refer to this property as **output stability**.\n\nWe recommend having \"output stability tests\" that require a developer to explicitly opt-in to accepting a change in the output of a smart contract's assembly code. This can be implemented as part of an automated build process which fails if the output changes aren't committed to source control, thus preventing deployment of the smart contract without a human review taking place (assuming automated deployment).\n"
  },
  {
    "path": "docs/cli/index.md",
    "content": "# AlgoKit CLI Reference Documentation\n\n\n- [algokit](#algokit)\n    - [Options](#options)\n    - [--version](#--version)\n    - [-v, --verbose](#-v---verbose)\n    - [--color, --no-color](#--color---no-color)\n    - [--skip-version-check](#--skip-version-check)\n  - [compile](#compile)\n    - [Options](#options-1)\n    - [-v, --version ](#-v---version-)\n    - [py](#py)\n    - [Arguments](#arguments)\n    - [PUYAPY_ARGS](#puyapy_args)\n    - [python](#python)\n    - [Arguments](#arguments-1)\n    - [PUYAPY_ARGS](#puyapy_args-1)\n    - [ts](#ts)\n    - [Arguments](#arguments-2)\n    - [PUYATS_ARGS](#puyats_args)\n    - [typescript](#typescript)\n    - [Arguments](#arguments-3)\n    - [PUYATS_ARGS](#puyats_args-1)\n  - [completions](#completions)\n    - [install](#install)\n    - [Options](#options-2)\n    - [--shell ](#--shell-)\n    - [uninstall](#uninstall)\n    - [Options](#options-3)\n    - [--shell ](#--shell--1)\n  - [config](#config)\n    - [container-engine](#container-engine)\n    - [Options](#options-4)\n    - [-f, --force](#-f---force)\n    - [Arguments](#arguments-4)\n    - [ENGINE](#engine)\n    - [js-package-manager](#js-package-manager)\n    - [Arguments](#arguments-5)\n    - [PACKAGE_MANAGER](#package_manager)\n    - [py-package-manager](#py-package-manager)\n    - [Arguments](#arguments-6)\n    - [PACKAGE_MANAGER](#package_manager-1)\n    - [version-prompt](#version-prompt)\n    - [Arguments](#arguments-7)\n    - [ENABLE](#enable)\n  - [dispenser](#dispenser)\n    - [fund](#fund)\n    - [Options](#options-5)\n    - [-r, --receiver ](#-r---receiver-)\n    - [-a, --amount ](#-a---amount-)\n    - [--whole-units](#--whole-units)\n    - [limit](#limit)\n    - [Options](#options-6)\n    - [--whole-units](#--whole-units-1)\n    - [login](#login)\n    - [Options](#options-7)\n    - [--ci](#--ci)\n    - [-o, --output ](#-o---output-)\n    - [-f, --file ](#-f---file-)\n    - [logout](#logout)\n    - [refund](#refund)\n    - 
[Options](#options-8)\n    - [-t, --txID ](#-t---txid-)\n  - [doctor](#doctor)\n    - [Options](#options-9)\n    - [-c, --copy-to-clipboard](#-c---copy-to-clipboard)\n  - [explore](#explore)\n    - [Arguments](#arguments-8)\n    - [NETWORK](#network)\n  - [generate](#generate)\n    - [client](#client)\n    - [Options](#options-10)\n    - [-o, --output ](#-o---output--1)\n    - [-l, --language ](#-l---language-)\n    - [-v, --version ](#-v---version--1)\n    - [Arguments](#arguments-9)\n    - [APP_SPEC_PATH_OR_DIR](#app_spec_path_or_dir)\n    - [ARGS](#args)\n  - [goal](#goal)\n    - [Options](#options-11)\n    - [--console](#--console)\n    - [--interactive](#--interactive)\n    - [Arguments](#arguments-10)\n    - [GOAL_ARGS](#goal_args)\n  - [init](#init)\n    - [Options](#options-12)\n    - [-n, --name ](#-n---name-)\n    - [-t, --template ](#-t---template-)\n    - [--template-url ](#--template-url-)\n    - [--template-url-ref ](#--template-url-ref-)\n    - [--UNSAFE-SECURITY-accept-template-url](#--unsafe-security-accept-template-url)\n    - [--git, --no-git](#--git---no-git)\n    - [--defaults](#--defaults)\n    - [--bootstrap, --no-bootstrap](#--bootstrap---no-bootstrap)\n    - [--ide, --no-ide](#--ide---no-ide)\n    - [--workspace, --no-workspace](#--workspace---no-workspace)\n    - [-a, --answer  ](#-a---answer--)\n    - [example](#example)\n    - [Options](#options-13)\n    - [-l, --list](#-l---list)\n    - [Arguments](#arguments-11)\n    - [EXAMPLE_ID](#example_id)\n  - [localnet](#localnet)\n    - [codespace](#codespace)\n    - [Options](#options-14)\n    - [-m, --machine ](#-m---machine-)\n    - [-a, --algod-port ](#-a---algod-port-)\n    - [-i, --indexer-port ](#-i---indexer-port-)\n    - [-k, --kmd-port ](#-k---kmd-port-)\n    - [-n, --codespace-name ](#-n---codespace-name-)\n    - [-r, --repo-url ](#-r---repo-url-)\n    - [-t, --timeout ](#-t---timeout-)\n    - [-f, --force](#-f---force-1)\n    - [config](#config-1)\n    - [Options](#options-15)\n    
- [-f, --force](#-f---force-2)\n    - [Arguments](#arguments-12)\n    - [ENGINE](#engine-1)\n    - [console](#console)\n    - [explore](#explore-1)\n    - [logs](#logs)\n    - [Options](#options-16)\n    - [--follow, -f](#--follow--f)\n    - [--tail ](#--tail-)\n    - [reset](#reset)\n    - [Options](#options-17)\n    - [--update, --no-update](#--update---no-update)\n    - [-P, --config-dir ](#-p---config-dir-)\n    - [--check](#--check)\n    - [start](#start)\n    - [Options](#options-18)\n    - [-n, --name ](#-n---name--1)\n    - [-P, --config-dir ](#-p---config-dir--1)\n    - [-d, --dev, --no-dev](#-d---dev---no-dev)\n    - [--force](#--force)\n    - [--check](#--check-1)\n    - [status](#status)\n    - [Options](#options-19)\n    - [--check](#--check-2)\n    - [stop](#stop)\n  - [project](#project)\n    - [bootstrap](#bootstrap)\n    - [Options](#options-20)\n    - [--force](#--force-1)\n    - [Options](#options-21)\n    - [--interactive, --no-ci, --non-interactive, --ci](#--interactive---no-ci---non-interactive---ci)\n    - [-p, --project-name ](#-p---project-name-)\n    - [-t, --type ](#-t---type-)\n    - [Options](#options-22)\n    - [--interactive, --non-interactive, --ci](#--interactive---non-interactive---ci)\n    - [Options](#options-23)\n    - [--ci, --no-ci](#--ci---no-ci)\n    - [Options](#options-24)\n    - [--ci, --no-ci](#--ci---no-ci-1)\n    - [deploy](#deploy)\n    - [Options](#options-25)\n    - [-C, -c, --command ](#-c--c---command-)\n    - [--interactive, --non-interactive, --ci](#--interactive---non-interactive---ci-1)\n    - [-P, --path ](#-p---path-)\n    - [--deployer ](#--deployer-)\n    - [--dispenser ](#--dispenser-)\n    - [-p, --project-name ](#-p---project-name--1)\n    - [Arguments](#arguments-13)\n    - [ENVIRONMENT_NAME](#environment_name)\n    - [EXTRA_ARGS](#extra_args)\n    - [link](#link)\n    - [Options](#options-26)\n    - [-p, --project-name ](#-p---project-name--2)\n    - [-l, --language ](#-l---language--1)\n    - [-a, 
--all](#-a---all)\n    - [-f, --fail-fast](#-f---fail-fast)\n    - [-v, --version ](#-v---version--2)\n    - [list](#list)\n    - [Arguments](#arguments-14)\n    - [WORKSPACE_PATH](#workspace_path)\n    - [run](#run)\n  - [task](#task)\n    - [analyze](#analyze)\n    - [Options](#options-27)\n    - [-r, --recursive](#-r---recursive)\n    - [--force](#--force-2)\n    - [--diff](#--diff)\n    - [-o, --output ](#-o---output--2)\n    - [-e, --exclude ](#-e---exclude-)\n    - [Arguments](#arguments-15)\n    - [INPUT_PATHS](#input_paths)\n    - [ipfs](#ipfs)\n    - [Options](#options-28)\n    - [-f, --file ](#-f---file--1)\n    - [-n, --name ](#-n---name--2)\n    - [mint](#mint)\n    - [Options](#options-29)\n    - [--creator ](#--creator-)\n    - [--name ](#--name-)\n    - [-u, --unit ](#-u---unit-)\n    - [-t, --total ](#-t---total-)\n    - [-d, --decimals ](#-d---decimals-)\n    - [--nft, --ft](#--nft---ft)\n    - [-i, --image ](#-i---image-)\n    - [-m, --metadata ](#-m---metadata-)\n    - [--mutable, --immutable](#--mutable---immutable)\n    - [-n, --network ](#-n---network-)\n    - [nfd-lookup](#nfd-lookup)\n    - [Options](#options-30)\n    - [-o, --output ](#-o---output--3)\n    - [Arguments](#arguments-16)\n    - [VALUE](#value)\n    - [opt-in](#opt-in)\n    - [Options](#options-31)\n    - [-a, --account ](#-a---account-)\n    - [-n, --network ](#-n---network--1)\n    - [Arguments](#arguments-17)\n    - [ASSET_IDS](#asset_ids)\n    - [opt-out](#opt-out)\n    - [Options](#options-32)\n    - [-a, --account ](#-a---account--1)\n    - [--all](#--all)\n    - [-n, --network ](#-n---network--2)\n    - [Arguments](#arguments-18)\n    - [ASSET_IDS](#asset_ids-1)\n    - [send](#send)\n    - [Options](#options-33)\n    - [-f, --file ](#-f---file--2)\n    - [-t, --transaction ](#-t---transaction-)\n    - [-n, --network ](#-n---network--3)\n    - [sign](#sign)\n    - [Options](#options-34)\n    - [-a, --account ](#-a---account--2)\n    - [-f, --file ](#-f---file--3)\n    - 
[-t, --transaction ](#-t---transaction--1)\n    - [-o, --output ](#-o---output--4)\n    - [--force](#--force-3)\n    - [transfer](#transfer)\n    - [Options](#options-35)\n    - [-s, --sender ](#-s---sender-)\n    - [-r, --receiver ](#-r---receiver--1)\n    - [--asset, --id ](#--asset---id-)\n    - [-a, --amount ](#-a---amount--1)\n    - [--whole-units](#--whole-units-2)\n    - [-n, --network ](#-n---network--4)\n    - [vanity-address](#vanity-address)\n    - [Options](#options-36)\n    - [-m, --match ](#-m---match-)\n    - [-o, --output ](#-o---output--5)\n    - [-a, --alias ](#-a---alias-)\n    - [--file-path ](#--file-path-)\n    - [-f, --force](#-f---force-3)\n    - [Arguments](#arguments-19)\n    - [KEYWORD](#keyword)\n    - [wallet](#wallet)\n    - [Options](#options-37)\n    - [-a, --address ](#-a---address-)\n    - [-m, --mnemonic](#-m---mnemonic)\n    - [-f, --force](#-f---force-4)\n    - [Arguments](#arguments-20)\n    - [ALIAS_NAME](#alias_name)\n    - [Arguments](#arguments-21)\n    - [ALIAS](#alias)\n    - [Options](#options-38)\n    - [-f, --force](#-f---force-5)\n    - [Arguments](#arguments-22)\n    - [ALIAS](#alias-1)\n    - [Options](#options-39)\n    - [-f, --force](#-f---force-6)\n\n# algokit\n\nAlgoKit is your one-stop shop to develop applications on the Algorand blockchain.\n\nIf you are getting started, please see the quick start tutorial: [https://dev.algorand.co/getting-started/algokit-quick-start/](https://dev.algorand.co/getting-started/algokit-quick-start/).\n\n```shell\nalgokit [OPTIONS] COMMAND [ARGS]...\n```\n\n### Options\n\n\n### --version\nShow the version and exit.\n\n\n### -v, --verbose\nEnable logging of DEBUG messages to the console.\n\n\n### --color, --no-color\nForce enable or disable of console output styling.\n\n\n### --skip-version-check\nSkip version checking and prompting.\n\n## compile\n\nCompile smart contracts and smart signatures written in a supported high-level language\nto a format deployable on the Algorand 
Virtual Machine (AVM).\n\n```shell\nalgokit compile [OPTIONS] COMMAND [ARGS]...\n```\n\n### Options\n\n\n### -v, --version <version>\nThe compiler version to pin to, for example, 1.0.0. If no version is specified, AlgoKit checks if the compiler is installed and runs the installed version. If the compiler is not installed, AlgoKit runs the latest version. If a version is specified, AlgoKit checks if an installed version matches and runs the installed version. Otherwise, AlgoKit runs the specified version.\n\n### py\n\nCompile Algorand Python contract(s) using the PuyaPy compiler.\n\n```shell\nalgokit compile py [OPTIONS] [PUYAPY_ARGS]...\n```\n\n### Arguments\n\n\n### PUYAPY_ARGS\nOptional argument(s)\n\n### python\n\nCompile Algorand Python contract(s) using the PuyaPy compiler.\n\n```shell\nalgokit compile python [OPTIONS] [PUYAPY_ARGS]...\n```\n\n### Arguments\n\n\n### PUYAPY_ARGS\nOptional argument(s)\n\n### ts\n\nCompile Algorand TypeScript contract(s) using the PuyaTs compiler.\n\n```shell\nalgokit compile ts [OPTIONS] [PUYATS_ARGS]...\n```\n\n### Arguments\n\n\n### PUYATS_ARGS\nOptional argument(s)\n\n### typescript\n\nCompile Algorand TypeScript contract(s) using the PuyaTs compiler.\n\n```shell\nalgokit compile typescript [OPTIONS] [PUYATS_ARGS]...\n```\n\n### Arguments\n\n\n### PUYATS_ARGS\nOptional argument(s)\n\n## completions\n\nInstall and Uninstall AlgoKit shell integrations.\n\n```shell\nalgokit completions [OPTIONS] COMMAND [ARGS]...\n```\n\n### install\n\nInstall shell completions, this command will attempt to update the interactive profile script\nfor the current shell to support algokit completions. 
To specify a specific shell use --shell.\n\n```shell\nalgokit completions install [OPTIONS]\n```\n\n### Options\n\n\n### --shell <shell>\nSpecify shell to install algokit completions for.\n\n\n* **Options**\n\n    bash | zsh\n\n\n### uninstall\n\nUninstall shell completions, this command will attempt to update the interactive profile script\nfor the current shell to remove any algokit completions that have been added.\nTo specify a specific shell use --shell.\n\n```shell\nalgokit completions uninstall [OPTIONS]\n```\n\n### Options\n\n\n### --shell <shell>\nSpecify shell to install algokit completions for.\n\n\n* **Options**\n\n    bash | zsh\n\n\n## config\n\nConfigure settings used by AlgoKit\n\n```shell\nalgokit config [OPTIONS] COMMAND [ARGS]...\n```\n\n### container-engine\n\nSet the default container engine for use by AlgoKit CLI to run LocalNet images.\n\n```shell\nalgokit config container-engine [OPTIONS] [[docker|podman]]\n```\n\n### Options\n\n\n### -f, --force\nSkip confirmation prompts. 
Defaults to 'yes' to all prompts.\n\n### Arguments\n\n\n### ENGINE\nOptional argument\n\n### js-package-manager\n\nSet the default JavaScript package manager for use by AlgoKit CLI.\n\n```shell\nalgokit config js-package-manager [OPTIONS] [[npm|pnpm]]\n```\n\n### Arguments\n\n\n### PACKAGE_MANAGER\nOptional argument\n\n### py-package-manager\n\nSet the default Python package manager for use by AlgoKit CLI.\n\n```shell\nalgokit config py-package-manager [OPTIONS] [[poetry|uv]]\n```\n\n### Arguments\n\n\n### PACKAGE_MANAGER\nOptional argument\n\n### version-prompt\n\nControls whether AlgoKit checks and prompts for new versions.\nSet to [disable] to prevent AlgoKit performing this check permanently, or [enable] to resume checking.\nIf no argument is provided then outputs current setting.\n\nAlso see --skip-version-check which can be used to disable check for a single command.\n\n```shell\nalgokit config version-prompt [OPTIONS] [[enable|disable]]\n```\n\n### Arguments\n\n\n### ENABLE\nOptional argument\n\n## dispenser\n\nInteract with the AlgoKit TestNet Dispenser.\n\n```shell\nalgokit dispenser [OPTIONS] COMMAND [ARGS]...\n```\n\n### fund\n\nFund your wallet address with TestNet ALGOs.\n\n```shell\nalgokit dispenser fund [OPTIONS]\n```\n\n### Options\n\n\n### -r, --receiver <receiver>\n**Required** Address or alias of the receiver to fund with TestNet ALGOs.\n\n\n### -a, --amount <amount>\n**Required** Amount to fund. Defaults to microAlgos.\n\n\n### --whole-units\nUse whole units (Algos) instead of smallest divisible units (microAlgos). Disabled by default.\n\n### limit\n\nGet information about current fund limit on your account. Resets daily.\n\n```shell\nalgokit dispenser limit [OPTIONS]\n```\n\n### Options\n\n\n### --whole-units\nUse whole units (Algos) instead of smallest divisible units (microAlgos). 
Disabled by default.\n\n### login\n\nLogin to your Dispenser API account.\n\n```shell\nalgokit dispenser login [OPTIONS]\n```\n\n### Options\n\n\n### --ci\nGenerate an access token for CI. Issued for 30 days.\n\n\n### -o, --output <output_mode>\nChoose the output method for the access token. Defaults to stdout. Only applicable when --ci flag is set.\n\n\n* **Options**\n\n    stdout | file\n\n\n\n### -f, --file <output_filename>\nOutput filename where you want to store the generated access token.Defaults to algokit_ci_token.txt. Only applicable when --ci flag is set and --output mode is file.\n\n### logout\n\nLogout of your Dispenser API account.\n\n```shell\nalgokit dispenser logout [OPTIONS]\n```\n\n### refund\n\nRefund ALGOs back to the dispenser wallet address.\n\n```shell\nalgokit dispenser refund [OPTIONS]\n```\n\n### Options\n\n\n### -t, --txID <tx_id>\n**Required** Transaction ID of your refund operation.\n\n## doctor\n\nDiagnose potential environment issues that may affect AlgoKit.\n\nWill search the system for AlgoKit dependencies and show their versions, as well as identifying any\npotential issues.\n\n```shell\nalgokit doctor [OPTIONS]\n```\n\n### Options\n\n\n### -c, --copy-to-clipboard\nCopy the contents of the doctor message (in Markdown format) in your clipboard.\n\n## explore\n\nExplore the specified network using lora.\n\n```shell\nalgokit explore [OPTIONS] [[localnet|testnet|mainnet]]\n```\n\n### Arguments\n\n\n### NETWORK\nOptional argument\n\n## generate\n\nGenerate code for an Algorand project.\n\n```shell\nalgokit generate [OPTIONS] COMMAND [ARGS]...\n```\n\n### client\n\nCreate a typed ApplicationClient from an ARC-32/56 application.json\n\nSupply the path to an application specification file or a directory to recursively search\nfor \"application.json\" files\n\n```shell\nalgokit generate client [OPTIONS] [APP_SPEC_PATH_OR_DIR] [ARGS]...\n```\n\n### Options\n\n\n### -o, --output <output_path_pattern>\nPath to the output file. 
The following tokens can be used to substitute into the output path: {contract_name}, {app_spec_dir}\n\n\n### -l, --language <language>\nProgramming language of the generated client code\n\n\n* **Options**\n\n    python | typescript\n\n\n\n### -v, --version <version>\nThe client generator version to pin to, for example, 1.0.0. If no version is specified, AlgoKit checks if the client generator is installed and runs the installed version. If the client generator is not installed, AlgoKit runs the latest version. If a version is specified, AlgoKit checks if an installed version matches and runs the installed version. Otherwise, AlgoKit runs the specified version.\n\n### Arguments\n\n\n### APP_SPEC_PATH_OR_DIR\nOptional argument\n\n\n### ARGS\nOptional argument(s)\n\n## goal\n\nRun the Algorand goal CLI against the AlgoKit LocalNet.\n\nLook at [https://dev.algorand.co/algokit/algokit-cli/goal](https://dev.algorand.co/algokit/algokit-cli/goal) for more information.\n\n```shell\nalgokit goal [OPTIONS] [GOAL_ARGS]...\n```\n\n### Options\n\n\n### --console\nOpen a Bash console so you can execute multiple goal commands and/or interact with a filesystem.\n\n\n### --interactive\nForce running the goal command in interactive mode.\n\n### Arguments\n\n\n### GOAL_ARGS\nOptional argument(s)\n\n## init\n\nInitializes a new project from a template, including prompting\nfor template specific questions to be used in template rendering.\n\nTemplates can be default templates shipped with AlgoKit, or custom\ntemplates in public Git repositories.\n\nIncludes ability to initialise Git repository, run algokit project bootstrap and\nautomatically open Visual Studio Code.\n\nThis should be run in the parent directory that you want the project folder\ncreated in.\n\nBy default, the --workspace flag creates projects within a workspace structure or integrates them into an existing\none, promoting organized management of multiple projects. 
Alternatively,\nto disable this behavior use the --no-workspace flag, which ensures\nthe new project is created in a standalone target directory. This is\nsuitable for isolated projects or when workspace integration is unnecessary.\n\n```shell\nalgokit init [OPTIONS] COMMAND [ARGS]...\n```\n\n### Options\n\n\n### -n, --name <directory_name>\nName of the project / directory / repository to create.\n\n\n### -t, --template <template_name>\nName of an official template to use. To choose interactively, run this command with no arguments.\n\n\n* **Options**\n\n    tealscript | typescript | python | react | fullstack | base\n\n\n\n### --template-url <URL>\nURL to a git repo with a custom project template.\n\n\n### --template-url-ref <URL>\nSpecific tag, branch or commit to use on git repo specified with --template-url. Defaults to latest.\n\n\n### --UNSAFE-SECURITY-accept-template-url\nAccept the specified template URL, acknowledging the security implications of arbitrary code execution trusting an unofficial template.\n\n\n### --git, --no-git\nInitialise git repository in directory after creation.\n\n\n### --defaults\nAutomatically choose default answers without asking when creating this template.\n\n\n### --bootstrap, --no-bootstrap\nWhether to run algokit project bootstrap to install and configure the new project's dependencies locally.\n\n\n### --ide, --no-ide\nWhether to open an IDE for you if the IDE and IDE config are detected. Supported IDEs: VS Code.\n\n\n### --workspace, --no-workspace\nWhether to prefer structuring standalone projects as part of a workspace. 
An AlgoKit workspace is a conventional project structure that allows managing multiple standalone projects in a monorepo.\n\n\n### -a, --answer <key> <value>\nAnswers key/value pairs to pass to the template.\n\n### example\n\nInitialize a new project from an example template.\n\nAllows you to quickly create a new project by copying one of the official AlgoKit example templates.\nIf no example ID is provided, launches an interactive selector to choose from available examples.\nThe example will be copied to a new directory in your current location.\n\n```shell\nalgokit init example [OPTIONS] [EXAMPLE_ID]\n```\n\n### Options\n\n\n### -l, --list\nList all available examples\n\n### Arguments\n\n\n### EXAMPLE_ID\nOptional argument\n\n## localnet\n\nManage the AlgoKit LocalNet.\n\n```shell\nalgokit localnet [OPTIONS] COMMAND [ARGS]...\n```\n\n### codespace\n\nManage the AlgoKit LocalNet in GitHub Codespaces.\n\n```shell\nalgokit localnet codespace [OPTIONS]\n```\n\n### Options\n\n\n### -m, --machine <machine>\nThe GitHub Codespace machine type to use. Defaults to base tier.\n\n\n* **Options**\n\n    basicLinux32gb | standardLinux32gb | premiumLinux | largePremiumLinux\n\n\n\n### -a, --algod-port <algod_port>\nThe port for the Algorand daemon. Defaults to 4001.\n\n\n### -i, --indexer-port <indexer_port>\nThe port for the Algorand indexer. Defaults to 8980.\n\n\n### -k, --kmd-port <kmd_port>\nThe port for the Algorand kmd. Defaults to 4002.\n\n\n### -n, --codespace-name <codespace_name>\nThe name of the codespace. Defaults to 'algokit-localnet_timestamp'.\n\n\n### -r, --repo-url <repo_url>\nThe URL of the repository. Defaults to algokit base template repo.\n\n\n### -t, --timeout <timeout_minutes>\nDefault max runtime timeout in minutes. Upon hitting the timeout a codespace will be shutdown to prevent accidental spending over GitHub Codespaces quota. 
Defaults to 4 hours.\n\n\n### -f, --force\nForce delete previously used codespaces with {CODESPACE_NAME_PREFIX}\\* name prefix and skip prompts. Defaults to explicitly prompting for confirmation.\n\n### config\n\nSet the default container engine for use by AlgoKit CLI to run LocalNet images.\n\n```shell\nalgokit localnet config [OPTIONS] [[docker|podman]]\n```\n\n### Options\n\n\n### -f, --force\nSkip confirmation prompts. Defaults to 'yes' to all prompts.\n\n### Arguments\n\n\n### ENGINE\nOptional argument\n\n### console\n\nRun the Algorand goal CLI against the AlgoKit LocalNet via a Bash console so you can execute multiple goal commands and/or interact with a filesystem.\n\n```shell\nalgokit localnet console [OPTIONS]\n```\n\n### explore\n\nExplore the AlgoKit LocalNet using lora.\n\n```shell\nalgokit localnet explore [OPTIONS]\n```\n\n### logs\n\nSee the output of the Docker containers.\n\n```shell\nalgokit localnet logs [OPTIONS]\n```\n\n### Options\n\n\n### --follow, -f\nFollow log output.\n\n\n### --tail <tail>\nNumber of lines to show from the end of the logs for each container.\n\n\n* **Default**\n\n    `all`\n\n\n### reset\n\nReset the AlgoKit LocalNet.\n\n```shell\nalgokit localnet reset [OPTIONS]\n```\n\n### Options\n\n\n### --update, --no-update\nEnable or disable updating to the latest available LocalNet version, default: don't update\n\n\n### -P, --config-dir <config_path>\nSpecify the custom localnet configuration directory.\n\n\n### --check\nForce check the Docker registry for new LocalNet image versions, ignoring the version check cache.\n\n### start\n\nStart the AlgoKit LocalNet.\n\n```shell\nalgokit localnet start [OPTIONS]\n```\n\n### Options\n\n\n### -n, --name <name>\nSpecify a name for a custom LocalNet instance. AlgoKit will not manage the configuration of named LocalNet instances, allowing developers to configure it in any way they need. 
Defaults to 'sandbox'.\n\n\n### -P, --config-dir <config_path>\nSpecify the custom localnet configuration directory. Defaults to '~/.config' on UNIX and 'C:\\\\Users\\\\USERNAME\\\\AppData\\\\Roaming' on Windows.\n\n\n### -d, --dev, --no-dev\nControl whether to launch 'algod' in developer mode or not. Defaults to 'yes'.\n\n\n### --force\nIgnore the prompt to stop the LocalNet if it's already running.\n\n\n### --check\nForce check the Docker registry for new LocalNet image versions, ignoring the version check cache.\n\n### status\n\nCheck the status of the AlgoKit LocalNet.\n\n```shell\nalgokit localnet status [OPTIONS]\n```\n\n### Options\n\n\n### --check\nForce check the Docker registry for new LocalNet image versions, ignoring the version check cache.\n\n### stop\n\nStop the AlgoKit LocalNet.\n\n```shell\nalgokit localnet stop [OPTIONS]\n```\n\n## project\n\nProvides a suite of commands for managing your AlgoKit project.\nThis includes initializing project dependencies, deploying smart contracts,\nand executing predefined or custom commands within your project environment.\n\n```shell\nalgokit project [OPTIONS] COMMAND [ARGS]...\n```\n\n### bootstrap\n\nExpedited initial setup for any developer by installing and configuring dependencies and other\nkey development environment setup activities.\n\n```shell\nalgokit project bootstrap [OPTIONS] COMMAND [ARGS]...\n```\n\n### Options\n\n\n### --force\nContinue even if minimum AlgoKit version is not met\n\n#### all\n\nRuns all bootstrap sub-commands in the current directory and immediate sub directories.\n\n```shell\nalgokit project bootstrap all [OPTIONS]\n```\n\n### Options\n\n\n### --interactive, --no-ci, --non-interactive, --ci\nEnable/disable interactive prompts. If the CI environment variable is set, defaults to non-interactive\n\n\n### -p, --project-name <value>\n(Optional) Projects to execute the command on. 
Defaults to all projects found in the current directory.\n\n\n### -t, --type <project_type>\n(Optional) Limit execution to specific project types if executing from workspace.\n\n\n* **Options**\n\n    ProjectType.FRONTEND | ProjectType.CONTRACT | ProjectType.BACKEND\n\n\n#### env\n\nCopies .env.template file to .env in the current working directory and prompts for any unspecified values.\n\n```shell\nalgokit project bootstrap env [OPTIONS]\n```\n\n### Options\n\n\n### --interactive, --non-interactive, --ci\nEnable/disable interactive prompts. If the CI environment variable is set, defaults to non-interactive\n\n#### npm\n\nRuns npm install in the current working directory to install Node.js dependencies.\n\n```shell\nalgokit project bootstrap npm [OPTIONS]\n```\n\n### Options\n\n\n### --ci, --no-ci\nRun 'npm ci' instead of 'npm install' in CI mode (clean install).\n\n#### pnpm\n\nRuns pnpm install in the current working directory to install Node.js dependencies.\n\n```shell\nalgokit project bootstrap pnpm [OPTIONS]\n```\n\n### Options\n\n\n### --ci, --no-ci\nRun 'pnpm install --frozen-lockfile' instead of 'pnpm install' in     CI mode (clean install with frozen lockfile).\n\n#### poetry\n\nInstalls Python Poetry (if not present) and runs poetry install in the current working directory to install Python dependencies.\n\n```shell\nalgokit project bootstrap poetry [OPTIONS]\n```\n\n#### uv\n\nInstalls UV (if not present) and runs uv sync in the current working directory to install Python dependencies.\n\n```shell\nalgokit project bootstrap uv [OPTIONS]\n```\n\n### deploy\n\nDeploy smart contracts from AlgoKit compliant repository.\n\n```shell\nalgokit project deploy [OPTIONS] [ENVIRONMENT_NAME] [EXTRA_ARGS]...\n```\n\n### Options\n\n\n### -C, -c, --command <command>\nCustom deploy command. If not provided, will load the deploy command from .algokit.toml file.\n\n\n### --interactive, --non-interactive, --ci\nEnable/disable interactive prompts. 
Defaults to non-interactive if the CI environment variable is set. Interactive MainNet deployments prompt for confirmation.\n\n\n### -P, --path <path>\nSpecify the project directory. If not provided, current working directory will be used.\n\n\n### --deployer <deployer_alias>\n(Optional) Alias of the deployer account. Otherwise, will prompt the deployer mnemonic if specified in .algokit.toml file.\n\n\n### --dispenser <dispenser_alias>\n(Optional) Alias of the dispenser account. Otherwise, will prompt the dispenser mnemonic if specified in .algokit.toml file.\n\n\n### -p, --project-name <value>\n(Optional) Projects to execute the command on. Defaults to all projects found in the current directory. Option is mutually exclusive with command.\n\n### Arguments\n\n\n### ENVIRONMENT_NAME\nOptional argument\n\n\n### EXTRA_ARGS\nOptional argument(s)\n\n### link\n\nAutomatically invoke 'algokit generate client' on contract projects available in the workspace.\nMust be invoked from the root of a standalone 'frontend' typed project.\n\n```shell\nalgokit project link [OPTIONS]\n```\n\n### Options\n\n\n### -p, --project-name <value>\nSpecify contract projects for the command. Defaults to all in the current workspace.\n\n\n### -l, --language <language>\nProgramming language of the generated client code\n\n\n* **Options**\n\n    python | typescript\n\n\n\n### -a, --all\nLink all contract projects with the frontend project Option is mutually exclusive with project_name.\n\n\n### -f, --fail-fast\nExit immediately if at least one client generation process fails\n\n\n### -v, --version <version>\nThe client generator version to pin to, for example, 1.0.0. If no version is specified, AlgoKit checks if the client generator is installed and runs the installed version. If the client generator is not installed, AlgoKit runs the latest version. If a version is specified, AlgoKit checks if an installed version matches and runs the installed version. 
Otherwise, AlgoKit runs the specified version.\n\n### list\n\nList all projects in the workspace\n\n```shell\nalgokit project list [OPTIONS] [WORKSPACE_PATH]\n```\n\n### Arguments\n\n\n### WORKSPACE_PATH\nOptional argument\n\n### run\n\nDefine custom commands and manage their execution in your projects.\n\n```shell\nalgokit project run [OPTIONS] COMMAND [ARGS]...\n```\n\n## task\n\nCollection of useful tasks to help you develop on Algorand.\n\n```shell\nalgokit task [OPTIONS] COMMAND [ARGS]...\n```\n\n### analyze\n\nAnalyze TEAL programs for common vulnerabilities using Tealer. This task uses a third party tool to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. For full list of available detectors, please refer to [https://github.com/crytic/tealer?tab=readme-ov-file#detectors](https://github.com/crytic/tealer?tab=readme-ov-file#detectors)\n\n```shell\nalgokit task analyze [OPTIONS] INPUT_PATHS...\n```\n\n### Options\n\n\n### -r, --recursive\nRecursively search for all TEAL files within the provided directory.\n\n\n### --force\nForce verification without the disclaimer confirmation prompt.\n\n\n### --diff\nExit with a non-zero code if differences are found between current and last reports. Reports are generated each run, but with this flag execution fails if the current report doesn't match the last report. Reports are stored in the .algokit/static-analysis/snapshots folder by default. Use --output for a custom path.\n\n\n### -o, --output <output_path>\nDirectory path where to store the results of the static analysis. Defaults to .algokit/static-analysis/snapshots.\n\n\n### -e, --exclude <detectors_to_exclude>\nExclude specific vulnerabilities from the analysis. 
Supports multiple exclusions in a single run.\n\n### Arguments\n\n\n### INPUT_PATHS\nRequired argument(s)\n\n### ipfs\n\nUpload files to IPFS using Pinata provider.\n\n```shell\nalgokit task ipfs [OPTIONS] COMMAND [ARGS]...\n```\n\n#### login\n\nLogin to Pinata ipfs provider. You will be prompted for your JWT.\n\n```shell\nalgokit task ipfs login [OPTIONS]\n```\n\n#### logout\n\nLogout of Pinata ipfs provider.\n\n```shell\nalgokit task ipfs logout [OPTIONS]\n```\n\n#### upload\n\nUpload a file to Pinata ipfs provider. Please note, max file size is 100MB.\n\n```shell\nalgokit task ipfs upload [OPTIONS]\n```\n\n### Options\n\n\n### -f, --file <file_path>\n**Required** Path to the file to upload.\n\n\n### -n, --name <name>\nHuman readable name for this upload, for use in file listings.\n\n### mint\n\nMint new fungible or non-fungible assets on Algorand.\n\n```shell\nalgokit task mint [OPTIONS]\n```\n\n### Options\n\n\n### --creator <creator>\n**Required** Address or alias of the asset creator.\n\n\n### --name <asset_name>\nAsset name.\n\n\n### -u, --unit <unit_name>\n**Required** Unit name of the asset.\n\n\n### -t, --total <total>\nTotal supply of the asset. Defaults to 1.\n\n\n### -d, --decimals <decimals>\nNumber of decimals. Defaults to 0.\n\n\n### --nft, --ft\nWhether the asset should be validated as NFT or FT. Refers to NFT by default and validates canonical\ndefinitions of pure or fractional NFTs as per ARC3 standard.\n\n\n### -i, --image <image_path>\n**Required** Path to the asset image file to be uploaded to IPFS.\n\n\n### -m, --metadata <token_metadata_path>\nPath to the ARC19 compliant asset metadata file to be uploaded to IPFS. 
If not provided,\na default metadata object will be generated automatically based on asset-name, decimals and image.\nFor more details refer to [https://arc.algorand.foundation/ARCs/arc-0003#json-metadata-file-schema](https://arc.algorand.foundation/ARCs/arc-0003#json-metadata-file-schema).\n\n\n### --mutable, --immutable\nWhether the asset should be mutable or immutable. Refers to ARC19 by default.\n\n\n### -n, --network <network>\nNetwork to use. Refers to localnet by default.\n\n\n* **Options**\n\n    localnet | testnet | mainnet\n\n\n### nfd-lookup\n\nPerform a lookup via NFD domain or address, returning the associated address or domain respectively.\n\n```shell\nalgokit task nfd-lookup [OPTIONS] VALUE\n```\n\n### Options\n\n\n### -o, --output <output>\nOutput format for NFD API response. Defaults to address|domain resolved.\n\n\n* **Options**\n\n    full | tiny | address\n\n\n### Arguments\n\n\n### VALUE\nRequired argument\n\n### opt-in\n\nOpt-in to an asset(s). This is required before you can receive an asset. Use -n to specify localnet, testnet, or mainnet. To supply multiple asset IDs, separate them with a whitespace.\n\n```shell\nalgokit task opt-in [OPTIONS] ASSET_IDS...\n```\n\n### Options\n\n\n### -a, --account <account>\n**Required** Address or alias of the signer account.\n\n\n### -n, --network <network>\nNetwork to use. Refers to localnet by default.\n\n\n* **Options**\n\n    localnet | testnet | mainnet\n\n\n### Arguments\n\n\n### ASSET_IDS\nRequired argument(s)\n\n### opt-out\n\nopt-out of an asset(s). You can only opt out of an asset with a zero balance. Use -n to specify localnet, testnet, or mainnet. To supply multiple asset IDs, separate them with a whitespace.\n\n```shell\nalgokit task opt-out [OPTIONS] [ASSET_IDS]...\n```\n\n### Options\n\n\n### -a, --account <account>\n**Required** Address or alias of the signer account.\n\n\n### --all\nOpt-out of all assets with zero balance.\n\n\n### -n, --network <network>\nNetwork to use. 
Refers to localnet by default.\n\n\n* **Options**\n\n    localnet | testnet | mainnet\n\n\n### Arguments\n\n\n### ASSET_IDS\nOptional argument(s)\n\n### send\n\nSend a signed transaction to the given network.\n\n```shell\nalgokit task send [OPTIONS]\n```\n\n### Options\n\n\n### -f, --file <file>\nSingle or multiple message pack encoded signed transactions from binary file to send. Option is mutually exclusive with transaction.\n\n\n### -t, --transaction <transaction>\nBase64 encoded signed transaction to send. Option is mutually exclusive with file.\n\n\n### -n, --network <network>\nNetwork to use. Refers to localnet by default.\n\n\n* **Options**\n\n    localnet | testnet | mainnet\n\n\n### sign\n\nSign goal clerk compatible Algorand transaction(s).\n\n```shell\nalgokit task sign [OPTIONS]\n```\n\n### Options\n\n\n### -a, --account <account>\n**Required** Address or alias of the signer account.\n\n\n### -f, --file <file>\nSingle or multiple message pack encoded transactions from binary file to sign. Option is mutually exclusive with transaction.\n\n\n### -t, --transaction <transaction>\nSingle base64 encoded transaction object to sign. Option is mutually exclusive with file.\n\n\n### -o, --output <output>\nThe output file path to store signed transaction(s).\n\n\n### --force\nForce signing without confirmation.\n\n### transfer\n\nTransfer algos or assets from one account to another.\n\n```shell\nalgokit task transfer [OPTIONS]\n```\n\n### Options\n\n\n### -s, --sender <sender>\n**Required** Address or alias of the sender account.\n\n\n### -r, --receiver <receiver>\n**Required** Address or alias to an account that will receive the asset(s).\n\n\n### --asset, --id <asset_id>\nAsset ID to transfer. Defaults to 0 (Algo).\n\n\n### -a, --amount <amount>\n**Required** Amount to transfer.\n\n\n### --whole-units\nUse whole units (Algos | ASAs) instead of smallest divisible units (for example, microAlgos). 
Disabled by default.\n\n\n### -n, --network <network>\nNetwork to use. Refers to localnet by default.\n\n\n* **Options**\n\n    localnet | testnet | mainnet\n\n\n### vanity-address\n\nGenerate a vanity Algorand address. Your KEYWORD can only include letters A - Z and numbers 2 - 7.\nKeeping your KEYWORD under 5 characters will usually result in faster generation.\nNote: The longer the KEYWORD, the longer it may take to generate a matching address.\nPlease be patient if you choose a long keyword.\n\n```shell\nalgokit task vanity-address [OPTIONS] KEYWORD\n```\n\n### Options\n\n\n### -m, --match <match>\nLocation where the keyword will be included. Default is start.\n\n\n* **Options**\n\n    start | anywhere | end\n\n\n\n### -o, --output <output>\nHow the output will be presented.\n\n\n* **Options**\n\n    stdout | alias | file\n\n\n\n### -a, --alias <alias>\nAlias for the address. Required if output is \"alias\".\n\n\n### --file-path <output_file_path>\nFile path where to dump the output. 
Required if output is \"file\".\n\n\n### -f, --force\nAllow overwriting an alias without confirmation, if output option is 'alias'.\n\n### Arguments\n\n\n### KEYWORD\nRequired argument\n\n### wallet\n\nCreate short aliases for your addresses and accounts on AlgoKit CLI.\n\n```shell\nalgokit task wallet [OPTIONS] COMMAND [ARGS]...\n```\n\n#### add\n\nAdd an address or account to be stored against a named alias (at most 50 aliases).\n\n```shell\nalgokit task wallet add [OPTIONS] ALIAS_NAME\n```\n\n### Options\n\n\n### -a, --address <address>\n**Required** The address of the account.\n\n\n### -m, --mnemonic\nIf specified then prompt the user for a mnemonic phrase interactively using masked input.\n\n\n### -f, --force\nAllow overwriting an existing alias.\n\n### Arguments\n\n\n### ALIAS_NAME\nRequired argument\n\n#### get\n\nGet an address or account stored against a named alias.\n\n```shell\nalgokit task wallet get [OPTIONS] ALIAS\n```\n\n### Arguments\n\n\n### ALIAS\nRequired argument\n\n#### list\n\nList all addresses and accounts stored against a named alias.\n\n```shell\nalgokit task wallet list [OPTIONS]\n```\n\n#### remove\n\nRemove an address or account stored against a named alias.\n\n```shell\nalgokit task wallet remove [OPTIONS] ALIAS\n```\n\n### Options\n\n\n### -f, --force\nAllow removing an alias without confirmation.\n\n### Arguments\n\n\n### ALIAS\nRequired argument\n\n#### reset\n\nRemove all aliases.\n\n```shell\nalgokit task wallet reset [OPTIONS]\n```\n\n### Options\n\n\n### -f, --force\nAllow removing all aliases without confirmation.\n"
  },
  {
    "path": "docs/features/compile.md",
    "content": "# AlgoKit Compile\n\nThe AlgoKit Compile feature enables you to compile smart contracts (apps) and smart signatures (logic signatures) written in a supported high-level language to a format deployable on the Algorand Virtual Machine (AVM).\n\nWhen running the compile command, AlgoKit will take care of working out which compiler you need and dynamically resolve it. Additionally, AlgoKit will detect if a matching compiler version is already installed globally on your machine or is included in your project and use that.\n\n## Prerequisites\n\nSee [Compile Python - Prerequisites](#prerequisites-1) and [Compile TypeScript - Prerequisites](#prerequisites-2) for details.\n\n## What is Algorand Python & PuyaPy?\n\nAlgorand Python is a semantically and syntactically compatible, typed Python language that works with standard Python tooling and allows you to express smart contracts (apps) and smart signatures (logic signatures) for deployment on the Algorand Virtual Machine (AVM).\n\nAlgorand Python can be deployed to Algorand by using the PuyaPy optimising compiler, which takes Algorand Python and outputs [ARC-32](https://github.com/algorandfoundation/ARCs/blob/main/ARCs/arc-0032.md) application spec files (among other formats) which, [when deployed](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/features/generate.md#1-typed-clients), will result in AVM bytecode execution semantics that match the given Python code.\n\nIf you want to learn more, check out the [PuyaPy docs](https://github.com/algorandfoundation/puya/blob/main/docs/index.md).\n\nBelow is an example Algorand Python smart contract.\n\n```py\nfrom algopy import ARC4Contract, arc4\n\nclass HelloWorldContract(ARC4Contract):\n    @arc4.abimethod\n    def hello(self, name: arc4.String) -> arc4.String:\n        return \"Hello, \" + name\n```\n\nFor more complex examples, see the [examples](https://github.com/algorandfoundation/puya/tree/main/examples) in the [PuyaPy 
repo](https://github.com/algorandfoundation/puya).\n\n## What is Algorand TypeScript & PuyaTs?\n\nAlgorand TypeScript is a typed TypeScript language that allows you to express smart contracts (apps) and smart signatures (logic signatures) for deployment on the Algorand Virtual Machine (AVM). Algorand TypeScript is currently in beta.\n\nAlgorand TypeScript can be deployed to Algorand by using the PuyaTs optimising compiler, which takes Algorand TypeScript and outputs [ARC-32](https://github.com/algorandfoundation/ARCs/blob/main/ARCs/arc-0032.md) application spec files (among other formats) which, [when deployed](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/features/generate.md#1-typed-clients), will result in AVM bytecode execution semantics that match the given TypeScript code.\n\nBelow is an example Algorand TypeScript smart contract.\n\n```typescript\nimport { Contract } from \"@algorandfoundation/puya-sdk\";\n\nclass HelloWorldContract extends Contract {\n  hello(name: string): string {\n    return \"Hello, \" + name;\n  }\n}\n```\n\n## Usage\n\nAvailable commands and possible usage are as follows:\n\n```\nUsage: algokit compile [OPTIONS] COMMAND [ARGS]...\n\n  Compile smart contracts and smart signatures written in a supported high-level language to a format deployable on\n  the Algorand Virtual Machine (AVM).\n\nOptions:\n  -v, --version TEXT  The compiler version to pin to, for example, 1.0.0. If no version is specified, AlgoKit checks\n                      if the compiler is installed and runs the installed version. If the compiler is not installed,\n                      AlgoKit runs the latest version. If a version is specified, AlgoKit checks if an installed\n                      version matches and runs the installed version. 
Otherwise, AlgoKit runs the specified version.\n  -h, --help          Show this message and exit.\n\nCommands:\n  py         Compile Algorand Python contract(s) using the PuyaPy compiler.\n  python     Compile Algorand Python contract(s) using the PuyaPy compiler.\n  ts         Compile Algorand TypeScript contract(s) using the PuyaTs compiler.\n  typescript Compile Algorand TypeScript contract(s) using the PuyaTs compiler.\n```\n\n### Compile Python\n\nThe command `algokit compile python` or `algokit compile py` will run the [PuyaPy](https://github.com/algorandfoundation/puya) compiler against the supplied Algorand Python smart contract.\n\nAll arguments supplied to the command are passed directly to PuyaPy, therefore this command supports all options supported by the PuyaPy compiler.\n\nAny errors detected by PuyaPy during the compilation process will be printed to the output.\n\n#### Prerequisites\n\nPuyaPy requires Python 3.12+, so please ensure your Python version satisfies this requirement.\n\nThis command will attempt to resolve a matching installed PuyaPy compiler, either globally installed in the system or locally installed in your project (via [Poetry](https://python-poetry.org/)). If no appropriate match is found, the PuyaPy compiler will be dynamically run using [pipx](https://pipx.pypa.io/stable/). 
In this case pipx is also required.\n\n#### Examples\n\nTo see a list of the supported PuyaPy options, run the following:\n\n```shell\nalgokit compile python -h\n```\n\nTo determine the version of the PuyaPy compiler in use, execute the following command:\n\n```shell\nalgokit compile python --version\n```\n\nTo compile a single Algorand Python smart contract and write the output to a specific location, run the following:\n\n```shell\nalgokit compile python hello_world/contract.py --out-dir hello_world/out\n```\n\nTo compile multiple Algorand Python smart contracts and write the output to a specific location, run the following:\n\n```shell\nalgokit compile python hello_world/contract.py calculator/contract.py --out-dir my_contracts\n```\n\nTo compile a directory of Algorand Python smart contracts and write the output to the default location, run the following:\n\n```shell\nalgokit compile python my_contracts\n```\n\n### Compile TypeScript\n\nThe command `algokit compile typescript` or `algokit compile ts` will run the PuyaTs compiler against the supplied Algorand TypeScript smart contract.\n\nAll arguments supplied to the command are passed directly to PuyaTs, therefore this command supports all options supported by the PuyaTs compiler.\n\nAny errors detected by PuyaTs during the compilation process will be printed to the output.\n\n#### Prerequisites\n\nPuyaTs requires Node 22+ and NPM 10+, so please ensure your versions satisfy this requirement.\n\nThe command will attempt to find a correctly installed PuyaTs compiler in the following order:\n\n1. First, it checks if a matching version is installed at the project level (using `npm ls`).\n2. Next, it checks if a matching version is installed globally (using `npm --global ls`).\n3. 
If no appropriate match is found, it will run the compiler using npx with the `-y` flag.\n\n#### Examples\n\nTo see a list of the supported PuyaTs options, run the following:\n\n```shell\nalgokit compile typescript -h\n```\n\nTo determine the version of the PuyaTs compiler in use, execute the following command:\n\n```shell\nalgokit compile typescript --version\n```\n\nTo compile a single Algorand TypeScript smart contract and write the output to a specific location, run the following:\n\n```shell\nalgokit compile typescript hello_world/contract.algo.ts --out-dir hello_world/out\n```\n\nTo build multiple Algorand TypeScript smart contracts and write the output to a specific location, run the following:\n\n```shell\nalgokit compile typescript hello_world/contract.algo.ts calculator/contract.algo.ts --out-dir my_contracts\n```\n\nTo compile a directory of Algorand TypeScript smart contracts and write the output to the default location, run the following:\n\n```shell\nalgokit compile typescript my_contracts\n```\n"
  },
  {
    "path": "docs/features/completions.md",
    "content": "# AlgoKit Completions\n\nAlgoKit supports shell completions for zsh and bash shells, e.g.\n\n**bash**\n\n```\n$ algokit <Press Tab>\nbootstrap    completions  config       doctor       explore      goal         init         sandbox\n```\n\n**zsh**\n\n```\n$ ~ algokit <Press Tab>\nbootstrap    -- Bootstrap AlgoKit project dependencies.\ncompletions  -- Install and Uninstall AlgoKit shell integration.\nconfig       -- Configure AlgoKit options.\ndoctor       -- Run the Algorand doctor CLI.\nexplore      -- Explore the specified network in the...\ngoal         -- Run the Algorand goal CLI against the AlgoKit Sandbox.\ninit         -- Initializes a new project.\nsandbox      -- Manage the AlgoKit sandbox.\n```\n\n## Installing\n\nTo setup the completions, AlgoKit provides commands that will modify the current users interactive shell script (`.bashrc`/`.zshrc`).\n\n> **Note**\n> If you would prefer AlgoKit to not modify your interactive shell scripts you can install the completions yourself by following the instructions [here](https://click.palletsprojects.com/en/8.1.x/shell-completion/).\n\nTo [install](../cli/index.md#install) completions for the current shell execute `algokit completions install`. You should see output similar to below:\n\n```\n$ ~ algokit completions install\nAlgoKit completions installed for zsh 🎉\nRestart shell or run `. ~/.zshrc` to enable completions\n```\n\nAfter installing the completions don't forget to restart the shell to begin using them!\n\n## Uninstalling\n\nTo [uninstall](../cli/index.md#uninstall) completions for the current shell run `algokit completions uninstall`:\n\n```\n$ ~ algokit completions uninstall\nAlgoKit completions uninstalled for zsh 🎉\n```\n\n## Shell Option\n\nTo install/uninstall the completions for a specific [shell](../cli/index.md#--shell-) the `--shell` option can be used e.g. 
`algokit completions install --shell bash`.\n\nTo learn more about the `algokit completions` command, please refer to [completions](../cli/index.md#completions) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/config.md",
    "content": "# AlgoKit Config\n\nThe `algokit config` command allows you to manage various global settings used by AlgoKit CLI. This feature is essential for customizing your AlgoKit environment to suit your needs.\n\n## Usage\n\nThis command group provides a set of subcommands to configure AlgoKit settings.\nSubcommands\n\n- `version-prompt`: Configure the version prompt settings.\n- `container-engine`: Configure the container engine settings.\n- `js-package-manager`: Configure the default JavaScript package manager.\n- `py-package-manager`: Configure the default Python package manager.\n\n### Version Prompt Configuration\n\n```zsh\n$ algokit config version-prompt [OPTIONS]\n```\n\nThis command configures the version prompt settings for AlgoKit.\n\n- `--enable`: Enable the version prompt.\n- `--disable`: Disable the version prompt.\n\n### Container Engine Configuration\n\n```zsh\n$ algokit config container-engine [OPTIONS] [ENGINE]\n```\n\nThis command configures the container engine settings for AlgoKit.\n\n- `--force`, `-f`: Skip confirmation prompts. 
Defaults to 'yes' to all prompts.\n- `ENGINE`: Optional argument to specify the container engine (docker or podman).\n\n### JavaScript Package Manager Configuration\n\n```zsh\n$ algokit config js-package-manager [OPTIONS] [PACKAGE_MANAGER]\n```\n\nThis command configures the default JavaScript package manager used by AlgoKit's bootstrap command.\n\n- `PACKAGE_MANAGER`: Optional argument to specify the package manager (npm or pnpm).\n\nIf no package manager is specified, AlgoKit will prompt you to select one interactively.\n\n### Python Package Manager Configuration\n\n```zsh\n$ algokit config py-package-manager [OPTIONS] [PACKAGE_MANAGER]\n```\n\nThis command configures the default Python package manager used by AlgoKit's bootstrap command.\n\n- `PACKAGE_MANAGER`: Optional argument to specify the package manager (poetry or uv).\n\nIf no package manager is specified, AlgoKit will prompt you to select one interactively.\n\n## Further Reading\n\nFor in-depth details, visit the [configuration section](../cli/index.md#config) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/dispenser.md",
    "content": "# AlgoKit TestNet Dispenser\n\nThe AlgoKit Dispenser feature allows you to interact with the AlgoKit TestNet Dispenser. This feature is essential for funding your wallet with TestNet ALGOs, refunding ALGOs back to the dispenser wallet, and getting information about current fund limits on your account.\n\n## Usage\n\n```zsh\n$ algokit dispenser [OPTIONS] COMMAND [ARGS]...\n```\n\nThis command provides a set of subcommands to interact with the AlgoKit TestNet Dispenser.\nSubcommands\n\n- `login`: Login to your Dispenser API account.\n- `logout`: Logout of your Dispenser API account.\n- `fund`: Fund your wallet address with TestNet ALGOs.\n- `refund`: Refund ALGOs back to the dispenser wallet address.\n- `limit`: Get information about current fund limits on your account.\n\n### API Documentation\n\nFor detailed API documentation, visit the [AlgoKit Dispenser API](https://github.com/algorandfoundation/algokit/blob/main/docs/testnet_api.md) documentation.\n\n### CI Access Token\n\nAll dispenser commands can work in CI mode by using a CI access token that can be generated by passing `--ci` flag to `login` command. Once a token is obtained, setting the value to the following environment variable `ALGOKIT_DISPENSER_ACCESS_TOKEN` will enable CI mode for all dispenser commands. If both a user mode and CI mode access token is available, the CI mode will take precedence.\n\n## Login\n\n```zsh\n$ algokit dispenser login [OPTIONS]\n```\n\nThis command logs you into your Dispenser API account if you are not already logged in.\nOptions\n\n- `--ci`: Generate an access token for CI. Issued for 30 days.\n- `--output`, -o: Output mode where you want to store the generated access token. Defaults to stdout. Only applicable when --ci flag is set.\n- `--file`, -f: Output filename where you want to store the generated access token. Defaults to `ci_token.txt`. 
Only applicable when --ci flag is set and --output mode is `file`.\n\n> Please note, algokit relies on [keyring](https://pypi.org/project/keyring/) for storing your API credentials. This implies that your credentials are stored in your system's keychain. By default it will prompt for entering your system password unless you have set it up to always allow access for `algokit-cli` to obtain API credentials.\n\n## Logout\n\n```zsh\n$ algokit dispenser logout\n```\n\nThis command logs you out of your Dispenser API account if you are logged in.\n\n## Fund\n\n```zsh\n$ algokit dispenser fund [OPTIONS]\n```\n\nThis command funds your wallet address with TestNet ALGOs.\nOptions\n\n- `--receiver`, -r: Receiver [alias](./tasks/wallet.md#add) or address to fund with TestNet ALGOs. This option is required.\n- `--amount`, -a: Amount to fund. Defaults to microAlgos. This option is required.\n- `--whole-units`: Use whole units (Algos) instead of smallest divisible units (microAlgos). Disabled by default.\n\n## Refund\n\n```zsh\n$ algokit dispenser refund [OPTIONS]\n```\n\nThis command refunds ALGOs back to the dispenser wallet address.\nOptions\n\n- `--txID`, -t: Transaction ID of your refund operation. This option is required. The receiver address of the transaction must be the same as the dispenser wallet address that you can obtain by observing a `sender` field of [`fund`](#fund) transaction.\n\n> Please note, performing a refund operation will not immediately change your daily fund limit. Your daily fund limit is reset daily at midnight UTC. If you have reached your daily fund limit, you will not be able to perform a refund operation until your daily fund limit is reset.\n\n## Limit\n\n```zsh\n$ algokit dispenser limit [OPTIONS]\n```\n\nThis command gets information about current fund limits on your account. The limits reset daily.\nOptions\n\n- `--whole-units`: Use whole units (Algos) instead of smallest divisible units (microAlgos). 
Disabled by default.\n\n## Further Reading\n\nFor in-depth details, visit the [dispenser section](../cli/index.md#dispenser) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/doctor.md",
    "content": "# AlgoKit Doctor\n\nThe AlgoKit Doctor feature allows you to check your AlgoKit installation along with its dependencies. This is useful for diagnosing potential issues with using AlgoKit.\n\n## Functionality\n\nThe AlgoKit Doctor allows you to make sure that your system has the correct dependencies installed and that they satisfy the minimum required versions. All passed checks will appear in your command line natural color while warnings will be in yellow (warning) and errors or missing critical services will be in red (error). The critical services that AlgoKit will check for (since they are [directly used by certain commands](../../README.md#prerequisites)): Docker, docker compose and git.\n\nPlease run this command to if you are facing an issue running AlgoKit. It is recommended to run it before [submitting an issue to AlgoKit](https://github.com/algorandfoundation/algokit-cli/issues/new). You can copy the contents of the Doctor command message (in Markdown format) to your clipboard by providing the `-c` flag to the command as follows `algokit doctor -c`.\n\n> NOTE: You can also use the `--verbose` or `-v` flag to show additional information including package dependencies of the AlgoKit CLI: `algokit -v doctor`. 
This only works when `algokit` is installed as a Python package (e.g., via `pipx install algokit`).\n\n# Examples\n\nFor example, running `algokit doctor` with all prerequisites installed will result in output similar to the following:\n\n```\n$ ~ algokit doctor\ntimestamp: 2023-03-29T03:58:05+00:00\nAlgoKit: 0.6.0\nAlgoKit Python: 3.11.2 (main, Mar 24 2023, 00:16:47) [Clang 14.0.0 (clang-1400.0.29.202)] (location: /Users/algokit/.local/pipx/venvs/algokit)\nOS: macOS-13.2.1-arm64-arm-64bit\ndocker: 20.10.22\ndocker compose: 2.15.1\ngit: 2.39.1\npython: 3.10.9 (location: /Users/algokit/.asdf/shims/python)\npython3: 3.10.9 (location: /Users/algokit/.asdf/shims/python3)\npipx: 1.2.0\npoetry: 1.3.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 4.0.10-34-gb753315\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n```\n\nThe doctor command will indicate if there are any issues to address, for example:\n\nIf AlgoKit detects a newer version, this will be indicated next to the AlgoKit version\n\n```\nAlgoKit: 1.2.3 (latest: 4.5.6)\n```\n\nIf the detected version of docker compose is unsupported, this will be shown:\n\n```\ndocker compose: 2.1.3\n  Docker Compose 2.5.0 required to run `algokit localnet command`;\n  install via https://docs.docker.com/compose/install/\n```\n\nFor more details about the `AlgoKit doctor` command, please refer to the [AlgoKit CLI reference documentation](../cli/index.md#doctor).\n"
  },
  {
    "path": "docs/features/explore.md",
    "content": "# AlgoKit explore\n\nAlgoKit provides a quick shortcut to [explore](../cli/index.md#explore) various Algorand networks using [lora](https://lora.algokit.io/) including [AlgoKit LocalNet](./localnet.md)!\n\n## LocalNet\n\nThe following three commands are all equivalent and will open lora pointing to the local [AlgoKit LocalNet](./localnet.md) instance:\n\n- `algokit explore`\n- `algokit explore localnet`\n- `algokit localnet explore`\n\n## Testnet\n\n`algokit explore testnet` will open lora pointing to TestNet via the <https://testnet-api.algonode.cloud> [node](https://algonode.io/api/).\n\n## Mainnet\n\n`algokit explore mainnet` will open lora pointing to MainNet via the <https://mainnet-api.algonode.cloud> [node](https://algonode.io/api/).\n\nTo learn more about the `algokit explore` command, please refer to [explore](../cli/index.md#explore) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/generate.md",
    "content": "# AlgoKit Generate\n\nThe `algokit generate` [command](../cli/index.md#generate) is used to generate components used in an AlgoKit project. It also allows for custom generate commands which are loaded from the .algokit.toml file in your project directory.\n\n## 1. Typed clients\n\nThe `algokit generate client` [command](../cli/index.md#client) can be used to generate a typed client from an [ARC-0032](https://arc.algorand.foundation/ARCs/arc-0032) or [ARC-0056](https://github.com/algorandfoundation/ARCs/pull/258) application specification with both Python and TypeScript available as target languages.\n\n### Prerequisites\n\nTo generate Python clients an installation of pip and pipx is required.\nTo generate TypeScript clients an installation of Node.js and npx is also required.\n\nEach generated client will also have a dependency on `algokit-utils` libraries for the target language.\n\n### Input file / directory\n\nYou can either specify a path to an ARC-0032 JSON file, an ARC-0056 JSON file or to a directory that is recursively scanned for `application.json`, `*.arc32.json`, `*.arc56.json` file(s).\n\n### Output tokens\n\nThe output path is interpreted as relative to the current working directory, however an absolute path may also be specified e.g.\n`algokit generate client application.json --output /absolute/path/to/client.py`\n\nThere are two tokens available for use with the `-o`, `--output` [option](../cli/index.md#-o---output-output_path_pattern):\n\n- `{contract_name}`: This will resolve to a name based on the ARC-0032/ARC-0056 contract name, formatted appropriately for the target language.\n- `{app_spec_dir}`: This will resolve to the parent directory of the `application.json`, `*.arc32.json`, `*.arc56.json` file which can be useful to output a client relative to its source file.\n\n### Version Pinning\n\nIf you want to ensure typed client output stability across different environments and additionally protect yourself from any potential 
breaking changes introduced in the client generator packages, you can specify a version you'd like to pin to.\n\nTo make use of this feature, pass `-v`, `--version`, for example `algokit generate client --version 1.2.3 path/to/application.json`.\n\nAlternatively, you can achieve output stability by installing the underlying [Python](https://github.com/algorandfoundation/algokit-client-generator-py) or [TypeScript](https://github.com/algorandfoundation/algokit-client-generator-ts) client generator package either locally in your project (via `poetry` or `npm` respectively) or globally on your system (via `pipx` or `npm` respectively). AlgoKit will search for a matching installed version before dynamically resolving.\n\n### Usage\n\nUsage examples of using a generated client are below, typed clients allow your favourite IDE to provide better intellisense to provide better discoverability\nof available operations and parameters.\n\n#### Python\n\n```python\n# A similar working example can be seen in the algokit python template, when using Python deployment\nfrom smart_contracts.artifacts.HelloWorldApp.client import (\n    HelloWorldAppClient,\n)\n\napp_client = HelloWorldAppClient(\n    algod_client,\n    creator=deployer,\n    indexer_client=indexer_client,\n)\ndeploy_response = app_client.deploy(\n    on_schema_break=OnSchemaBreak.ReplaceApp,\n    on_update=OnUpdate.UpdateApp,\n    allow_delete=True,\n    allow_update=True,\n)\n\nresponse = app_client.hello(name=\"World\")\n```\n\n#### TypeScript\n\n```typescript\n// A similar working example can be seen in the algokit python template with typescript deployer, when using TypeScript deployment\nimport { HelloWorldAppClient } from \"./artifacts/HelloWorldApp/client\";\n\nconst appClient = new HelloWorldAppClient(\n  {\n    resolveBy: \"creatorAndName\",\n    findExistingUsing: indexer,\n    sender: deployer,\n    creatorAddress: deployer.addr,\n  },\n  algod\n);\nconst app = await appClient.deploy({\n  allowDelete: 
isLocal,\n  allowUpdate: isLocal,\n  onSchemaBreak: isLocal ? \"replace\" : \"fail\",\n  onUpdate: isLocal ? \"update\" : \"fail\",\n});\nconst response = await appClient.hello({ name: \"world\" });\n```\n\n### Examples\n\nTo output a single application.json to a python typed client:\n`algokit generate client path/to/application.json --output client.py`\n\nTo process multiple application.json in a directory structure and output to a typescript client for each in the current directory:\n`algokit generate client smart_contracts/artifacts --output {contract_name}.ts`\n\nTo process multiple application.json in a directory structure and output to a python client alongside each application.json:\n`algokit generate client smart_contracts/artifacts --output {app_spec_dir}/client.py`\n\n## 2. Using Custom Generate Commands\n\nCustom generate commands are defined in the `.algokit.toml` file within the project directory, typically supplied by community template builders or official AlgoKit templates. These commands are specified under the `generate` key and serve to execute a generator at a designated path with provided answer key/value pairs.\n\n### Understanding `Generators`\n\nA `generator` is essentially a compact, self-sufficient `copier` template. This template can optionally be defined within the primary `algokit templates` to offer supplementary functionality after a project is initialized from the template. For instance, the official [`algokit-python-template`](https://github.com/algorandfoundation/algokit-python-template/tree/main/template_content) provides a generator within the `.algokit/generators` directory. This generator can be employed for executing extra tasks on AlgoKit projects that have been initiated from this template, such as adding new smart contracts to an existing project. 
For a comprehensive explanation, please refer to the [`architecture decision record`](../architecture-decisions/2023-07-19_advanced_generate_command.md).\n\n### Requirements\n\nTo utilize custom generate commands, you must have `copier` installed. This installation is included by default in the AlgoKit CLI. Therefore, no additional installation is necessary if you have already installed the `algokit cli`.\n\n### How to Use\n\nA custom command can be defined in the `.algokit.toml` as shown:\n\n```toml\n[generate.my_generator]\npath = \"path/to/my_generator\"\ndescription = \"A brief description of the function of my_generator\"\n```\n\nFollowing this, you can execute the command as follows:\n\n`algokit generate my_generator --answer key value --path path/to/my_generator`\n\nIf no `path` is given, the command will use the path specified in the `.algokit.toml`. If no `answer` is provided, the command will initiate an interactive `copier` prompt to request answers (similar to `algokit init`).\n\nThe custom command employs the `copier` library to duplicate the files from the generator's path to the current working directory, substituting any values from the `answers` dictionary.\n\n### Examples\n\nAs an example, let's use the `smart-contract` generator from the `algokit-python-template` to add new contract to an existing project based on that template. The `smart-contract` generator is defined as follows:\n\n```toml\n[algokit]\nmin_version = \"v1.3.1\"\n\n... 
# other keys\n\n[generate.smart_contract]\ndescription = \"Adds a new smart contract to the existing project\"\npath = \".algokit/generators/create_contract\"\n```\n\nTo execute this generator, ensure that you are operating from the same directory as the `.algokit.toml` file, and then run:\n\n```bash\n$ algokit generate\n\n# The output will be as follows:\n# Note how algokit dynamically injects a new `smart-contract` command based\n# on the `.algokit.toml` file\n\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client          Create a typed ApplicationClient from an ARC-32 application.json\n  smart-contract  Adds a new smart contract to the existing project\n```\n\nTo execute the `smart-contract` generator, run:\n\n```bash\n$ algokit generate smart-contract\n\n# or\n\n$ algokit generate smart-contract -a contract_name \"MyCoolContract\"\n```\n\n#### Third Party Generators\n\nIt is important to understand that by default, AlgoKit will always prompt you before executing a generator to ensure it's from a trusted source. If you are confident about the source of the generator, you can use the `--force` or `-f` option to execute the generator without this confirmation prompt. Be cautious while using this option and ensure the generator is from a trusted source. At the moment, a trusted source for a generator is defined as _a generator that is included in the official AlgoKit templates (e.g. `smart-contract` generator in `algokit-python-template`)_\n"
  },
  {
    "path": "docs/features/goal.md",
    "content": "# AlgoKit goal\n\nAlgoKit goal command provides the user with a mechanism to run [goal cli](https://dev.algorand.co/algokit/algokit-cli/goal/) commands against the current [AlgoKit LocalNet](./localnet.md).\n\nYou can explore all possible goal commands by running `algokit goal` e.g.:\n\n```\n$ ~ algokit goal\n GOAL is the CLI for interacting Algorand software instance. The binary 'goal' is installed alongside the algod binary and is considered an integral part of the complete installation. The binaries should be used in tandem - you should not try to use a version of goal with a different version of algod.\n\n Usage:\n goal [flags]\n goal [command]\n\n Available Commands:\n account     Control and manage Algorand accounts\n app         Manage applications\n asset       Manage assets\n clerk       Provides the tools to control transactions\n completion  Shell completion helper\n help        Help about any command\n kmd         Interact with kmd, the key management daemon\n ledger      Access ledger-related details\n license     Display license information\n logging     Control and manage Algorand logging\n network     Create and manage private, multi-node, locally-hosted networks\n node        Manage a specified algorand node\n protocols\n report\n version     The current version of the Algorand daemon (algod)\n wallet      Manage wallets: encrypted collections of Algorand account keys\n\n Flags:\n -d, --datadir stringArray   Data directory for the node\n -h, --help                  help for goal\n -k, --kmddir string         Data directory for kmd\n -v, --version               Display and write current build version and exit\n\n Use \"goal [command] --help\" for more information about a command.\n```\n\nFor instance, running `algokit goal report` would result in output like:\n\n```\n$ ~ algokit goal report\n 12885688322\n 3.12.2.dev [rel/stable] (commit #181490e3)\n go-algorand is licensed with AGPLv3.0\n source code available at 
https://github.com/algorand/go-algorand\n\n Linux ff7828f2da17 5.15.49-linuxkit #1 SMP PREEMPT Tue Sep 13 07:51:32 UTC 2022 aarch64 GNU/Linux\n\n Genesis ID from genesis.json: sandnet-v1\n\n Last committed block: 0\n Time since last block: 0.0s\n Sync Time: 0.0s\n Last consensus protocol: future\n Next consensus protocol: future\n Round for next consensus protocol: 1\n Next consensus protocol supported: true\n Last Catchpoint:\n Genesis ID: sandnet-v1\n Genesis hash: vEg1NCh6SSXwS6O5HAfjYCCNAs4ug328s3RYMr9syBg=\n```\n\nIf the AlgoKit Sandbox `algod` docker container is not present or not running, the command will fail with a clear error, e.g.:\n\n```\n$ ~ algokit goal\n Error: No such container: algokit_algod\n Error: Error executing goal; ensure the Sandbox is started by executing `algokit sandbox status`\n```\n\n```\n$ ~ algokit goal\n Error response from daemon: Container 5a73961536e2c98e371465739053d174066c40d00647c8742f2bb39eb793ed7e is not running\n Error: Error executing goal; ensure the Sandbox is started by executing `algokit sandbox status`\n```\n\n## Working with Files in the Container\n\nWhen interacting with the container, especially if you're using tools like goal, you might need to reference files or directories. Here's how to efficiently deal with files and directories:\n\n### Automatic File Mounting\n\nWhen you specify a file or directory path in your `goal` command, the system will automatically mount that path from your local filesystem into the container. 
This way, you don't need to copy files manually each time.\n\nFor instance, if you want to compile a `teal` file:\n\n```\nalgokit goal clerk compile /Path/to/inputfile/approval.teal -o /Path/to/outputfile/approval.compiled\n```\n\nHere, `/Path/to/inputfile/approval.teal` and `/Path/to/outputfile/approval.compiled` are paths on your local file system, and they will be automatically accessible to the `goal` command inside the container.\n\n### Manual Copying of Files\n\nIn case you want to manually copy files into the container, you can do so using `docker cp`:\n\n```\ndocker cp foo.txt algokit_algod:/root\n```\n\nThis command copies the `foo.txt` from your local system into the root directory of the `algokit_algod` container.\n\nNote: Manual copying is optional and generally only necessary if you have specific reasons for doing so since the system will auto-mount paths specified in commands.\n\n## Running multiple commands\n\nIf you want to run multiple commands or interact with the filesystem you can execute `algokit goal --console`. This will open a [Bash](https://www.gnu.org/software/bash/) shell session on the `algod` Docker container and from there you can execute goal directly, e.g.:\n\n```bash\n$ algokit goal --console\nOpening Bash console on the algod node; execute `exit` to return to original console\nroot@82d41336608a:~# goal account list\n[online]        C62QEFC7MJBPHAUDMGVXGZ7WRWFAF3XYPBU3KZKOFHYVUYDGU5GNWS4NWU      C62QEFC7MJBPHAUDMGVXGZ7WRWFAF3XYPBU3KZKOFHYVUYDGU5GNWS4NWU      4000000000000000 microAlgos\n[online]        DVPJVKODAVEKWQHB4G7N6QA3EP7HKAHTLTZNWMV4IVERJQPNGKADGURU7Y      DVPJVKODAVEKWQHB4G7N6QA3EP7HKAHTLTZNWMV4IVERJQPNGKADGURU7Y      4000000000000000 microAlgos\n[online]        4BH5IKMDDHEJEOZ7T5LLT4I7EVIH5XCOTX3TPVQB3HY5TUBVT4MYXJOZVA      4BH5IKMDDHEJEOZ7T5LLT4I7EVIH5XCOTX3TPVQB3HY5TUBVT4MYXJOZVA      2000000000000000 microAlgos\n```\n\n## Interactive Mode\n\nSome `goal` commands require interactive input from the user. 
By default, AlgoKit will attempt to run commands in non-interactive mode first, and automatically switch to interactive mode if needed. You can force a command to run in interactive mode by using the `--interactive` flag:\n\n```bash\n$ algokit goal --interactive wallet new algodev\nPlease choose a password for wallet 'algodev':\nPlease confirm the password:\nCreating wallet...\nCreated wallet 'algodev'\nYour new wallet has a backup phrase that can be used for recovery.\nKeeping this backup phrase safe is extremely important.\nWould you like to see it now? (Y/n): n\n```\n\nThis is particularly useful when you know a command will require user input, such as creating new accounts, importing keys, or signing transactions.\n\nFor more details about the `AlgoKit goal` command, please refer to the [AlgoKit CLI reference documentation](../cli/index.md#goal).\n"
  },
  {
    "path": "docs/features/init.md",
    "content": "# AlgoKit Init\n\nThe `algokit init` [command](../cli/index.md#init) is used to quickly initialize new projects using official Algorand Templates or community provided templates. It supports a fully guided command line wizard experience, as well as fully scriptable / non-interactive functionality via command options.\n\n## Quick start\n\nFor a quick start template with all of the defaults you can run: `algokit init` which will interactively guide you through picking the right stack to build your AlgoKit project. Afterwards, you should immediately be able to hit F5 to compile the hello world smart contract to the `smart_contracts/artifacts` folder (with breakpoint debugging - try setting a breakpoint in `smart_contracts/helloworld.py`) and open the `smart_contracts/helloworld.py` file and get linting, automatic formatting and syntax highlighting.\n\n## Prerequisites\n\nGit is a prerequisite for the init command as it is used to clone templates and initialize git repos. Please consult the [README](../../README.md#prerequisites) for installation instructions.\n\n## Functionality\n\nAs outlined in [quick start](#quick-start), the simplest use of the command is to just run `algokit init` and you will then be guided through selecting a template and configuring options for that template. e.g.\n\n```\n$ ~ algokit init\n? Which of these options best describes the project you want to start? `Smart Contract` | `Dapp Frontend` | `Smart Contract & Dapp Frontend` | `Custom`\n? Name of project / directory to create the project in:  my-cool-app\n```\n\nOnce above 2 questions are answered, the `cli` will start instantiating the project and will start asking questions specific to the template you are instantiating. By default official templates such as `python`, `typescript`, `fullstack`, `react`, `python` include a notion of a `preset`. 
If you want to skip all questions and let the tool preset the answers tailored for a starter project you can pick `Starter`, for a more advanced project that includes unit tests, CI automation and other advanced features, pick `Production`. Lastly, if you prefer to modify the experience and tailor the template to your needs, pick the `Custom` preset.\n\nIf you want to accept the default for each option simply hit [enter] or alternatively to speed things up you can run `algokit init --defaults` and they will be auto-accepted.\n\n### Workspaces vs Standalone Projects\n\nAlgoKit supports two distinct project structures: Workspaces and Standalone Projects. This flexibility allows developers to choose the most suitable approach for their project's needs.\n\nTo initialize a project within a workspace, use the `--workspace` flag. If a workspace does not already exist, AlgoKit will create one for you by default (unless you disable it via `--no-workspace` flag). Once established, new projects can be added to this workspace, allowing for centralized management.\n\nTo create a standalone project, use the `--no-workspace` flag during initialization. This instructs AlgoKit to bypass the workspace structure and set up the project as an isolated entity.\n\nFor more details on workspaces and standalone projects, refer to the [AlgoKit Project documentation](./project.md#workspaces-vs-standalone-projects).\n\n## Bootstrapping\n\nYou will also be prompted if you wish to run the [bootstrap](../cli/index.md#bootstrap) command, this is useful if you plan to immediately begin developing in the new project. If you passed in `--defaults` or `--bootstrap` then it will automatically run bootstrapping unless you passed in `--no-bootstrap`.\n\n```\n\n? Do you want to run `algokit bootstrap` to bootstrap dependencies for this new project so it can be run immediately? 
Yes\nInstalling Python dependencies and setting up Python virtual environment via Poetry\npoetry: Creating virtualenv my-smart-contract in /Users/algokit/algokit-init/my-smart-contract/.venv\npoetry: Updating dependencies\npoetry: Resolving dependencies...\npoetry:\npoetry: Writing lock file\npoetry:\npoetry: Package operations: 53 installs, 0 updates, 0 removals\npoetry:\npoetry: • Installing pycparser (2.21)\n\n---- other output omitted for brevity ----\n\npoetry: • Installing ruff (0.0.171)\nCopying /Users/algokit/algokit-init/my-smart-contract/smart_contracts/.env.template to /Users/algokit/algokit-init/my-smart-contract/smart_contracts/.env and prompting for empty values\n? Would you like to initialise a git repository and perform an initial commit? Yes\n🎉 Performed initial git commit successfully! 🎉\n🙌 Project initialized at `my-smart-contract`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️ https://github.com/algorandfoundation/algokit-python-template\nAs a suggestion, if you wanted to open the project in VS Code you could execute:\n\n> cd my-smart-contract && code .\n\n```\n\nAfter bootstrapping you are also given the opportunity to initialize a git repo, upon successful completion of the init command the project is ready to be used. If you pass in `--git` it will automatically initialise the git repository and if you pass in `--no-git` it won't.\n\n> Please note, when using `--no-workspace`, algokit init will assume a max lookup depth of 1 for a fresh template based project. Otherwise it will assume a max depth of 2, since default algokit workspace structure is at most 2 levels deep.\n\n## Options\n\nThere are a number of options that can be used to provide answers to the template prompts. 
Some of the options requiring further explanation are detailed below, but consult the CLI reference for all available [options](../cli/index.md#init).\n\n## Community Templates\n\nAs well as the official Algorand templates shown when running the init command, community templates can also be provided by providing a URL via the prompt or the `--template-url` option.\n\ne.g. `algokit init --template-url https://github.com/algorandfoundation/algokit-python-template` (that being the url of the official python template, the same as `algokit init -t python`).\n\nThe `--template-url` option can be combined with `--template-url-ref` to specify a specific commit, branch or tag\n\ne.g. `algokit init --template-url https://github.com/algorandfoundation/algokit-python-template --template-url-ref 0232bb68a2f5628e910ee52f62bf13ded93fe672`\n\nIf the URL is not an official template there is a potential security risk and so to continue you must either acknowledge this prompt, or if you are in a non-interactive environment you can pass the `--UNSAFE-SECURITY-accept-template-url` option (but we generally don't recommend this option so users can review the warning message first) e.g.\n\n```\n\nCommunity templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of \\_tasks, \\_migrations and \\_jinja_extensions in copier.yml\n? Continue anyway? Yes\n\n```\n\nIf you want to create a community template, you can use the [AlgoKit guidelines on template building](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/tutorials/algokit-template.md#creating-algokit-templates) and [Copier documentation](https://copier.readthedocs.io/en/stable/) as a starting point.\n\n## Template Answers\n\nAnswers to specific template prompts can be provided with the `--answer {key} {value}` option, which can be used multiple times for each prompt. Quotes can be used for values with spaces e.g. 
`--answer author_name \"Algorand Foundation\"`.\n\nTo find out the key for a specific answer you can either look at `.algokit/.copier-answers.yml` in the root folder of a project created via `algokit init` or in the `copier.yaml` file of a template repo e.g. for the [python template](https://github.com/algorandfoundation/algokit-python-template/blob/main/copier.yaml).\n\n## Non-interactive project initialization\n\nBy combining a number of options, it is possible to initialize a new project without any interaction. For example, to create a project named `my-smart-contract` using the `python` template with no git, no bootstrapping, the author name of `Algorand Foundation`, and defaults for all other values, you could execute the following:\n\n```\n\n$ ~ algokit init -n my-smart-contract -t python --no-git --no-bootstrap --answer author_name \"Algorand Foundation\" --defaults\n🙌 Project initialized at `my-smart-contract`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️ https://github.com/algorandfoundation/algokit-python-template\nAs a suggestion, if you wanted to open the project in VS Code you could execute:\n\n> cd my-smart-contract && code .\n\n```\n\n## Initializing Examples\n\nAlgoKit provides a collection of pre-built example projects that you can use to quickly start development. These examples demonstrate various use cases and best practices for Algorand development.\n\n### Using the Example Command\n\nYou can initialize a new project from an example using the `algokit init example` command:\n\n```bash\n# List and select from available examples interactively\nalgokit init example\n\n#List the available examples\nalgokit init example -l/--list\n\n# Initialize a specific example directly\nalgokit init example <example_id>\n```\n\nWhen run without an example ID, the command launches an interactive selector that displays available examples with their descriptions and categories. 
Examples are copied to a new directory in your current location, named after the example ID.\n\n### Available Examples\n\nExamples are organized in the AlgoKit templates repository and cover various use cases including:\n\n- Smart contract examples\n- dApp frontend examples\n- Full-stack applications\n- Integration samples\n\nEach example comes with all necessary files and configurations to get started immediately. After initialization, you can navigate to the example directory and begin development.\n\n### Exploring Example Code\n\nTo explore what examples are available before initializing, you can run the interactive selector and browse through the options. Examples include a name and type to help you select the most appropriate one for your needs.\n\nAfter initializing an example, AlgoKit automatically attempts to open the project in your default IDE to help you quickly start exploring and modifying the code.\n\nFor more details about the `AlgoKit init` command, please refer to the [AlgoKit CLI reference documentation](../cli/index.md#init).\n"
  },
  {
    "path": "docs/features/localnet.md",
    "content": "# AlgoKit LocalNet\n\nThe AlgoKit LocalNet feature allows you to manage (start, stop, reset, manage) a locally sandboxed private Algorand network. This allows you to interact and deploy changes against your own Algorand network without needing to worry about funding TestNet accounts, information you submit being publicly visible or being connected to an active Internet connection (once the network has been started).\n\nAlgoKit LocalNet uses Docker images that are optimised for a great dev experience. This means the Docker images are small and start fast. It also means that features suited to developers are enabled such as KMD (so you can programmatically get faucet private keys).\n\nThe philosophy we take with AlgoKit LocalNet is that you should treat it as an ephemeral network. This means assume it could be reset at any time - don't store data on there that you can't recover / recreate. We have optimised the AlgoKit LocalNet experience to minimise situations where the network will get reset to improve the experience, but it can and will still happen in a number of situations.\n\n> For details on executing `algokit localnet` without `docker` or `podman` refer to the [codespaces](#github-codespaces-based-localnet) section.\n\n## Prerequisites\n\nAlgoKit LocalNet relies on Docker and Docker Compose being present and running on your system. Alternatively, you can use Podman as a replacement for Docker see [Podman support](#podman-support).\n\nYou can install Docker by following the [official installation instructions](https://docs.docker.com/get-docker/). Most of the time this will also install Docker Compose, but if not you can [follow the instructions](https://docs.docker.com/compose/install/) for that too.\n\nIf you are on Windows then you will need WSL 2 installed first, for which you can find the [official installation instructions](https://learn.microsoft.com/en-us/windows/wsl/install). 
If you are using Windows 10 then ensure you are on the latest version to reduce likelihood of installation problems.\n\nAlternatively, the Windows 10/11 Pro+ supported [Hyper-V backend](https://docs.docker.com/desktop/install/windows-install/) for Docker can be used instead of the WSL 2 backend.\n\n### Podman support\n\nIf you prefer to use [Podman](https://podman.io/) as your container engine, make sure to install and configure Podman first. Then you can set the default container engine that AlgoKit will use, by running: `algokit config container-engine podman`. See [Container-based LocalNet](#container-based-localnet) for more details.\n\n## Known issues\n\nThe AlgoKit LocalNet is built with 30,000 participation keys generated and after 30,000 rounds is reached it will no longer be able to add rounds. At this point you can simply reset the LocalNet to continue development. Participation keys are slow to generate hence why they are pre-generated to improve experience.\n\n## Supported operating environments\n\nWe rely on the official Algorand docker images for Indexer, Conduit and Algod, which means that AlgoKit LocalNet is supported on Windows, Linux and Mac on Intel and AMD chipsets (including Apple Silicon).\n\n## Container-based LocalNet\n\nAlgoKit cli supports both [Docker](https://www.docker.com/) and [Podman](https://podman.io/) as container engines. While `docker` is used by default, executing the below:\n\n```\nalgokit config container-engine\n# or\nalgokit config container-engine podman|docker\n```\n\nWill set the default container engine to use when executing `localnet` related commands via `subprocess`.\n\n### Creating / Starting the LocalNet\n\nTo create / start your AlgoKit LocalNet instance you can run `algokit localnet start`. 
This will:\n\n- Detect if you have Docker and Docker Compose installed\n- Detect if you have the Docker engine running\n- Create a new Docker Compose deployment for AlgoKit LocalNet if it doesn't already exist\n- (Re-)Start the containers\n\nYou can also specify additional options:\n\n- `--name`: Specify a name for a custom LocalNet instance. This allows you to have multiple LocalNet configurations. Refer to [Named LocalNet Configuration Directory](#named-localnet-configuration-directory) for more details.\n- `--config-dir`: Specify a custom configuration directory for the LocalNet.\n- `--dev/--no-dev`: Control whether to launch 'algod' in developer mode or not. Defaults to 'yes' (developer mode enabled).\n\nIf it's the first time running it on your machine then it will download the following images from DockerHub:\n\n- [`algorand/algod`](https://hub.docker.com/r/algorand/algod) (~500 MB)\n- [`algorand/indexer`](https://hub.docker.com/r/algorand/indexer) (~96 MB)\n- [`algorand/conduit`](https://hub.docker.com/r/algorand/conduit) (~98 MB)\n- [`postgres:13-alpine`](https://hub.docker.com/_/postgres) (~80 MB)\n\nOnce they have downloaded, it won't try and re-download images unless you perform a `algokit localnet reset`.\n\nOnce the LocalNet has started, the following endpoints will be available:\n\n- [algod](https://dev.algorand.co/reference/rest-apis/algod/):\n  - address: <http://localhost:4001>\n  - token: `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`\n- [kmd](https://dev.algorand.co/reference/rest-apis/kmd/):\n  - address: <http://localhost:4002>\n  - token: `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`\n- [indexer](https://dev.algorand.co/reference/rest-apis/indexer/):\n  - address: <http://localhost:8980>\n- tealdbg port:\n  - address: <http://localhost:9392>\n\n### Creating / Starting a Named LocalNet\n\nAlgoKit manages the default LocalNet environment and automatically keeps the configuration updated with any upstream 
changes. As a result, configuration changes are reset automatically by AlgoKit, so that developers always have access to a known good LocalNet configuration. This works well for the majority of scenarios, however sometimes developers need the control to make specific configuration changes for specific scenarios.\n\nWhen you want more control, named LocalNet instances can be used by running `algokit localnet start --name {name}`. This command will set up and run a named LocalNet environment (based off the default), however AlgoKit will not update the environment or configuration automatically. From here developers are able to modify their named environment in any way they like, for example setting `DevMode: false` in `algod_network_template.json`.\n\nOnce you have a named LocalNet running, the AlgoKit LocalNet commands will target this instance.\nIf at any point you'd like to switch back to the default LocalNet, simply run `algokit localnet start`.\n\n### Specifying a custom LocalNet configuration directory\n\nYou can specify a custom LocalNet configuration directory by using the `--config-dir` option or by setting the `ALGOKIT_LOCALNET_CONFIG_DIR` environment variable. This allows you to have multiple LocalNet instances with different configurations in different directories, which is useful in 'CI/CD' scenarios where you can save your custom localnet in your version control and then run `algokit localnet start --config-dir /path/to/custom/config` to use it within your pipeline.\n\nFor example, to create a LocalNet instance with a custom configuration directory, you can run:\n\n```\nalgokit localnet start --config-dir /path/to/custom/config\n```\n\n### Named LocalNet Configuration Directory\n\nWhen running `algokit localnet start --name {name}`, AlgoKit stores configuration files in a specific directory on your system. 
The location of this directory depends on your operating system:\n\n- **Windows**: We use the value of the `APPDATA` environment variable to determine the directory to store the configuration files. This is usually `C:\\Users\\USERNAME\\AppData\\Roaming`.\n- **Linux or Mac**: We use the value of the `XDG_CONFIG_HOME` environment variable to determine the directory to store the configuration files. If `XDG_CONFIG_HOME` is not set, the default location is `~/.config`.\n\nAssuming you have previously used a default LocalNet, the path `./algokit/sandbox/` will exist inside the configuration directory, containing the configuration settings for the default LocalNet instance. Additionally, for each named LocalNet instance you have created, the path `./algokit/sandbox_{name}/` will exist, containing the configuration settings for the respective named LocalNet instances.\n\nIt is important to note that only the configuration files for a named LocalNet instance should be changed. Any changes made to the default LocalNet instance will be reverted by AlgoKit.\n\nYou can use `--name` flag along with `--config-dir` option to specify a custom path for the LocalNet configuration directory. This allows you to manage multiple LocalNet instances with different configurations in different directories on your system.\n\n### Controlling Algod Developer Mode\n\nBy default, AlgoKit LocalNet starts algod in developer mode. This mode enables certain features that are useful for development but may not reflect the behavior of a production network. 
You can control this setting using the `--dev/--no-dev` flag when starting the LocalNet:\n\n```bash\nalgokit localnet start --no-dev  # Starts algod without developer mode\nalgokit localnet start --dev     # Starts algod with developer mode (default)\n```\n\nIf you change this setting for an existing LocalNet instance, AlgoKit will prompt you to restart the LocalNet to apply the changes.\n\n### Stopping and Resetting the LocalNet\n\nTo stop the LocalNet you can execute `algokit localnet stop`. This will turn off the containers, but keep them ready to be started again in the same state by executing `algokit localnet start`.\n\nTo reset the LocalNet you can execute `algokit localnet reset`, which will tear down the existing containers, refresh the container definition from the latest stored within AlgoKit and update to the latest Docker images. If you want to keep the same container spec and versions as you currently have, but quickly tear down and start a new instance then run `algokit localnet reset --no-update`.\n\n### Viewing transactions in the LocalNet\n\nYou can see a web-based user interface of the current state of your LocalNet including all transactions by using the [AlgoKit Explore](./explore.md) feature, e.g. by executing `algokit localnet explore`.\n\n### Executing goal commands against AlgoKit LocalNet\n\nSee the [AlgoKit Goal](./goal.md) feature. 
You can also execute `algokit localnet console` to open a [Bash shell which allows you to run the goal commandline](./goal.md#running-multiple-commands).\n\nNote: if you want to copy files into the container so you can access them via goal then you can use the following:\n\n```\ndocker cp foo.txt algokit_algod:/root\n```\n\n### Getting access to the private key of the faucet account\n\nIf you want to use the LocalNet then you need to get the private key of the initial wallet so you can transfer ALGOs out of it to other accounts you create.\n\nThere are two ways to do this:\n\n**Option 1: Manually via goal**\n\n```\nalgokit goal account list\nalgokit goal account export -a {address_from_an_online_account_from_above_command_output}\n```\n\n**Option 2: Automatically via kmd API**\n\nNeeding to do this manual step every time you spin up a new development environment or reset your LocalNet is frustrating. Instead, it's useful to have code that uses the Sandbox APIs to automatically retrieve the private key of the default account.\n\nAlgoKit Utils provides methods to help you do this:\n\n- TypeScript - [`ensureFunded`](https://github.com/algorandfoundation/algokit-utils-ts/blob/main/docs/capabilities/transfer.md#ensurefunded) and [`getDispenserAccount`](https://github.com/algorandfoundation/algokit-utils-ts/blob/main/docs/capabilities/transfer.md#dispenser)\n- Python - [`ensure_funded`](https://algorandfoundation.github.io/algokit-utils-py/html/apidocs/algokit_utils/algokit_utils.html#algokit_utils.ensure_funded) and [`get_dispenser_account`](https://algorandfoundation.github.io/algokit-utils-py/html/apidocs/algokit_utils/algokit_utils.html#algokit_utils.get_dispenser_account)\n\nFor more details about the `AlgoKit localnet` command, please refer to the [AlgoKit CLI reference documentation](../cli/index.md#localnet).\n\n## GitHub Codespaces-based LocalNet\n\nThe AlgoKit LocalNet feature also supports running the LocalNet in a GitHub Codespace with port forwarding by 
utilizing the [GitHub CLI](https://github.com/cli/gh). This allows you to run the LocalNet without the need to use Docker. This is especially useful for scenarios where certain hardware or software limitations may prevent you from being able to run Docker.\n\nTo run the LocalNet in a GitHub Codespace, you can use the `algokit localnet codespace` command.\nBy default without `--force` flag it will prompt you to delete stale codespaces created earlier (if any). Upon termination it will also prompt to delete the codespace that was used prior to termination.\n\nRunning an interactive session ensures that you have control over the lifecycle of your Codespace, preventing unnecessary usage and potential costs. GitHub Codespaces offers a free tier with certain limits, which you can review in the [GitHub Codespaces documentation](https://docs.github.com/en/codespaces/overview#pricing).\n\n### Options\n\n- `-m`, `--machine`: Specifies the GitHub Codespace machine type to use. Defaults to `basicLinux32gb`. Available options are `basicLinux32gb`, `standardLinux32gb`, `premiumLinux`, and `largePremiumLinux`. Refer to [GitHub Codespaces documentation](https://docs.github.com/en/codespaces/overview/machine-types) for more details.\n- `-a`, `--algod-port`: Sets the port for the Algorand daemon. Defaults to `4001`.\n- `-i`, `--indexer-port`: Sets the port for the Algorand indexer. Defaults to `8980`.\n- `-k`, `--kmd-port`: Sets the port for the Algorand kmd. Defaults to `4002`.\n- `-n`, `--codespace-name`: Specifies the name of the codespace. Defaults to a random name with a timestamp.\n- `-t`, `--timeout`: Max duration for running the port forwarding process. Defaults to 1 hour. This timeout ensures the codespace **will automatically shut down** after the specified duration to prevent accidental overspending of free quota on GitHub Codespaces. 
[More details](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces).\n- `-r`, `--repo-url`: The URL of the repository to use. Defaults to the AlgoKit base template repository (`algorandfoundation/algokit-base-template`). The reason why algokit-base-template is used by default is due to [.devcontainer.json](https://github.com/algorandfoundation/algokit-base-template/blob/main/template_content/.devcontainer.json) which defines the scripts that take care of setting up AlgoKit CLI during container start. You can use any custom repo as a base, however it's important to ensure the reference [.devcontainer.json](https://github.com/algorandfoundation/algokit-base-template/blob/main/template_content/.devcontainer.json) file exists in your repository **otherwise there will be no ports to forward from the codespace**.\n- `--force`, `-f`: Force deletes stale codespaces and skips confirmation prompts. Defaults to explicitly prompting for confirmation.\n\nFor more details about managing LocalNet in GitHub Codespaces, please refer to the [AlgoKit CLI reference documentation](../cli/index.md#codespace).\n\n> Tip: By specifying alternative port values it is possible to have several LocalNet instances running where one is using default ports via `algokit localnet start` with Docker | Podman and the other relies on port forwarding via `algokit localnet codespace`.\n"
  },
  {
    "path": "docs/features/project/bootstrap.md",
    "content": "# AlgoKit Project Bootstrap\n\nThe AlgoKit Project Bootstrap feature allows you to bootstrap different project dependencies by looking up specific files in your current directory and immediate sub directories by convention.\n\nThis is useful to allow for expedited initial setup for each developer e.g. when they clone a repository for the first time. It's also useful to provide a quick getting started experience when initialising a new project via [AlgoKit Init](./init.md) and meeting our goal of \"nothing to debugging code in 5 minutes\".\n\nIt can bootstrap one or all of the following (with other options potentially being added in the future):\n\n- Python projects - Supports Poetry and uv package managers. Installs the configured package manager if not present and runs the appropriate install command.\n- JavaScript/Node.js projects - Supports npm and pnpm package managers. Runs the appropriate install command for the configured package manager.\n- dotenv (.env) file - Checks for `.env.template` files, copies them to `.env` (which should be in `.gitignore` so developers can safely make local specific changes) and prompts for any blank values (so the developer has an easy chance to fill in their initial values where there isn't a clear default).\n\n> **Note**: Invoking bootstrap from `algokit bootstrap` is not recommended. 
Please prefer using `algokit project bootstrap` instead.\n\nYou can configure which package managers are used by default via:\n\n- `algokit config py-package-manager` - Configure Python package manager (poetry or uv)\n- `algokit config js-package-manager` - Configure JavaScript package manager (npm or pnpm)\n\nFor more details, see the [configuration documentation](../config.md).\n\n## Package Manager Override\n\nYou can override the default package manager settings on a per-project basis by adding configuration to your project's `.algokit.toml` file:\n\n```toml\n[package_manager]\npython = \"uv\"        # Override Python package manager (poetry or uv)\njavascript = \"pnpm\"  # Override JavaScript package manager (npm or pnpm)\n```\n\nThis project-specific configuration takes precedence over your global settings, allowing different projects to use different package managers as needed.\n\n### Configuration Precedence\n\nThe bootstrap command follows this precedence order when determining which package manager to use:\n\n1. **Project override** - Configuration in `.algokit.toml` (highest priority)\n2. **User preference** - Global configuration set via `algokit config` (respects your explicit choice)\n3. **Smart defaults** - Based on project structure (e.g., `poetry.toml` → Poetry, `pnpm-lock.yaml` → PNPM)\n4. **Interactive prompt** - Asked on first use if no preference is set\n\nThis means if you set a global preference (e.g., `algokit config py-package-manager uv`), it will be used across all projects unless explicitly overridden at the project level. Smart defaults only apply when you haven't set a preference yet.\n\n## Package Manager Command Translation\n\nDuring the bootstrap process, AlgoKit automatically translates package manager commands in your project's `.algokit.toml` file to match your configured package manager preferences. 
This ensures that project run commands work correctly regardless of which package manager the template was originally created with.\n\n### How It Works\n\nWhen you run `algokit project bootstrap`, if your project contains run commands in `.algokit.toml`, they will be automatically updated:\n\n- **JavaScript**: `npm` ↔ `pnpm` - Only semantically equivalent commands are translated\n- **Python**: `poetry` ↔ `uv` - Only semantically equivalent commands are translated\n\n### JavaScript Translation (npm ↔ pnpm)\n\n**Commands that translate:**\n\n- `npm install` → `pnpm install`\n- `npm run <script>` → `pnpm run <script>`\n- `npm test` → `pnpm test`\n- `npm start` → `pnpm start`\n- `npm build` → `pnpm build`\n\n**Commands that DON'T translate** (will show a warning):\n\n- `npm exec` / `npx` ↔ `pnpm exec` / `pnpm dlx` - Different behavior:\n  - `npx` searches locally in `node_modules/.bin`, then in global installs, then downloads remotely if not found\n  - `pnpm exec` only searches locally in project dependencies (fails if not found locally)\n  - `pnpm dlx` always fetches from remote registry (never checks local dependencies)\n- `npm fund` - No pnpm equivalent (pnpm does not provide funding information display)\n- `npm audit` ↔ `pnpm audit` - May report different vulnerabilities due to differences in auditing algorithms and vulnerability databases (command translates with warning)\n\n### Python Translation (poetry ↔ uv)\n\nOnly commands with equivalent semantics are translated:\n\n**Commands that translate:**\n\n- `poetry install` → `uv sync` (special case: different command name)\n- `poetry run` → `uv run`\n- `poetry add` → `uv add`\n- `poetry remove` → `uv remove`\n- `poetry lock` → `uv lock`\n- `poetry init` → `uv init`\n\n**Commands that DON'T translate** (will show a warning):\n\n- `poetry show`, `poetry config`, `poetry export`, `poetry search`, `poetry check`, `poetry publish` - No uv equivalent\n- `uv pip`, `uv venv`, `uv tool`, `uv python` - No poetry 
equivalent\n\nWhen AlgoKit encounters a command that cannot be translated, it will:\n\n1. Leave the command unchanged in `.algokit.toml`\n2. Display a warning message explaining that the command has no equivalent\n3. The command may not work when executed with `algokit project run`\n\n### Example\n\nGiven a `.algokit.toml` with:\n\n```toml\n[project.run]\nbuild = { commands = [\"npm run build\"] }\ncreate = { commands = [\"npx create-next-app\"] }  # Different behavior in pnpm\ntest = { commands = [\"poetry run pytest\"] }\ndeps = { commands = [\"poetry show --tree\"] }  # No uv equivalent\n```\n\nIf you've configured:\n\n- JavaScript package manager: `pnpm`\n- Python package manager: `uv`\n\nAfter bootstrap:\n\n```toml\n[project.run]\nbuild = { commands = [\"pnpm run build\"] }        # ✅ Translated\ncreate = { commands = [\"npx create-next-app\"] }   # ⚠️ Not translated (warning shown)\ntest = { commands = [\"uv run pytest\"] }          # ✅ Translated\ndeps = { commands = [\"poetry show --tree\"] }     # ⚠️ Not translated (warning shown)\n```\n\nYou'll see warnings:\n\n```\n⚠️ Command 'npx create-next-app' behaves differently in pnpm. Consider using 'pnpm exec' for local binaries or 'pnpm dlx' for remote packages. The command will remain unchanged.\n⚠️ Command 'poetry show --tree' has no direct equivalent in uv. 
The command will remain unchanged and may not work as expected.\n```\n\nThis approach ensures your project commands work correctly while being transparent about limitations.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```\n$ ~ algokit project bootstrap\nUsage: algokit project bootstrap [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  all     Bootstrap all aspects of the current directory and immediate sub directories by convention.\n  env     Bootstrap .env file in the current working directory.\n  npm     Bootstrap Node.js project in the current working directory.\n  poetry  Bootstrap Python Poetry and install in the current working directory.\n```\n\n## Functionality\n\n### Bootstrap .env file\n\nThe command `algokit project bootstrap env` runs two main tasks in the current directory:\n\n- Searching for `.env.template` file in the current directory and use it as template to create a new `.env` file in the same directory.\n- Prompting the user to enter a value for any empty token values in the `env.` including printing the comments above that empty token\n\nFor instance, a sample `.env.template` file as follows:\n\n```\nSERVER_URL=https://myserver.com\n# This is a mandatory field to run the server, please enter a value\n# For example: 5000\nSERVER_PORT=\n```\n\nRunning the `algokit project bootstrap env` command while the above `.env.template` file in the current directory will result in the following:\n\n```\n$ ~ algokit project bootstrap env\nCopying /Users/me/my-project/.env.template to /Users/me/my-project/.env and prompting for empty values\n# This is a mandatory field to run the server, please enter a value value\n# For example: 5000\n\n? Please provide a value for SERVER_PORT:\n```\n\nAnd when the user enters a value for `SERVER_PORT`, a new `.env` file will be created as follows (e.g. 
if they entered `4000` as the value):\n\n```\nSERVER_URL=https://myserver.com\n# This is a mandatory field to run the server, please enter a value\n# For example: 5000\nSERVER_PORT=4000\n```\n\n### Bootstrap Node.js project\n\nThe command `algokit project bootstrap npm` installs Node.js project dependencies if there is a `package.json` file in the current directory by running `npm install` command to install all node modules specified in that file. However, when running in CI mode **with** present `package-lock.json` file (either by setting the `CI` environment variable or using the `--ci` flag), it will run `npm ci` instead, which provides a cleaner and more deterministic installation. If `package-lock.json` is missing, it will show a clear error message and resolution instructions. If you don't have `npm` available it will show a clear error message and resolution instructions.\n\nHere is an example outcome of running `algokit project bootstrap npm` command:\n\n```\n$ ~ algokit project bootstrap npm\nInstalling npm dependencies\nnpm:\nnpm: added 17 packages, and audited 18 packages in 3s\nnpm:\nnpm: 2 packages are looking for funding\nnpm: run `npm fund` for details\nnpm:\nnpm: found 0 vulnerabilities\n```\n\n### Bootstrap Python poetry project\n\nThe command `algokit project bootstrap poetry` does two main actions:\n\n- Checking for Poetry version by running `poetry --version` and upgrades it if required\n- Installing Python dependencies and setting up Python virtual environment via Poetry in the current directory by running `poetry install`.\n\nHere is an example of running `algokit project bootstrap poetry` command:\n\n```\n$ ~ algokit project bootstrap poetry\nInstalling Python dependencies and setting up Python virtual environment via Poetry\npoetry:\npoetry: Installing dependencies from lock file\npoetry:\npoetry: Package operations: 1 installs, 1 update, 0 removals\npoetry:\npoetry: • Installing pytz (2022.7)\npoetry: • Updating copier (7.0.1 -> 
7.1.0a0)\npoetry:\npoetry: Installing the current project: algokit (0.1.0)\n```\n\n### Bootstrap all\n\nExecute `algokit project bootstrap all` to initiate `algokit project bootstrap env`, `algokit project bootstrap npm`, and `algokit project bootstrap poetry` commands within the current directory and all its immediate sub-directories. This comprehensive command is automatically triggered following the initialization of a new project through the [AlgoKit Init](./init.md) command.\n\n#### Filtering Options\n\nThe `algokit project bootstrap all` command includes flags for more granular control over the bootstrapping process within [AlgoKit workspaces](../init.md#workspaces):\n\n- `--project-name`: This flag allows you to specify one or more project names to bootstrap. Only projects matching the provided names will be bootstrapped. This is particularly useful in monorepos or when working with multiple projects in the same directory structure.\n\n- `--type`: Use this flag to limit the bootstrapping process to projects of a specific type (e.g., `frontend`, `backend`, `contract`). This option streamlines the setup process by focusing on relevant project types, reducing the overall bootstrapping time.\n\nThese new flags enhance the flexibility and efficiency of the bootstrapping process, enabling developers to tailor the setup according to project-specific needs.\n\n## Further Reading\n\nTo learn more about the `algokit project bootstrap` command, please refer to [bootstrap](../../cli/index.md#bootstrap) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/project/deploy.md",
    "content": "# AlgoKit Project Deploy\n\nDeploy your smart contracts effortlessly to various networks with the algokit project deploy feature. This feature is essential for automation in CI/CD pipelines and for seamless deployment to various Algorand network environments.\n\n> **Note**: Invoking deploy from `algokit deploy` is not recommended. Please prefer using `algokit project deploy` instead.\n\n## Usage\n\n```sh\n$ algokit project deploy [OPTIONS] [ENVIRONMENT_NAME] [EXTRA_ARGS]\n```\n\nThis command deploys smart contracts from an AlgoKit compliant repository to the specified network.\n\n### Options\n\n- `--command, -C TEXT`: Specifies a custom deploy command. If this option is not provided, the deploy command will be loaded from the `.algokit.toml` file.\n- `--interactive / --non-interactive, --ci`: Enables or disables the interactive prompt for mnemonics. When the CI environment variable is set, it defaults to non-interactive.\n- `--path, -P DIRECTORY`: Specifies the project directory. If not provided, the current working directory will be used.\n- `--deployer`: Specifies the deployer alias. If not provided and if the deployer is specified in `.algokit.toml` file its mnemonic will be prompted.\n- `--dispenser`: Specifies the dispenser alias. If not provided and if the dispenser is specified in `.algokit.toml` file its mnemonic will be prompted.\n- `-p, --project-name`: (Optional) Projects to execute the command on. Defaults to all projects found in\n  the current directory. Option is mutually exclusive with `--command`.\n- `-h, --help`: Show this message and exit.\n- `[EXTRA_ARGS]...`: Additional arguments to pass to the deploy command. For instance, `algokit project deploy -- {custom args}`. This will ensure that the extra arguments are passed to the deploy command specified in the `.algokit.toml` file or directly via `--command` option.\n\n## Environment files\n\nAlgoKit `deploy` employs both a general and network-specific environment file strategy. 
This allows you to set environment variables that are applicable across all networks and others that are specific to a given network.\n\nThe general environment file (`.env`) should be placed at the root of your project. This file will be used to load environment variables that are common across deployments to all networks.\n\nFor each network you're deploying to, you can optionally have a corresponding `.env.[network_name]` file. This file should contain environment variables specific to that network. Network-specific environment variables take precedence over general environment variables.\n\nThe directory layout would look like this:\n\n```md\n.\n├── ... (your project files and directories)\n├── .algokit.toml # Configuration file for AlgoKit\n├── .env # (OPTIONAL) General environment variables common across all deployments\n└── .env.[{mainnet|testnet|localnet|betanet|custom}] # (OPTIONAL) Environment variables specific to deployments to a network\n```\n\n> ⚠️ Please note that creating `.env` and `.env.[network_name]` files is only necessary if you're deploying to a custom network or if you want to override the default network configurations provided by AlgoKit. AlgoKit comes with predefined configurations for popular networks like `TestNet`, `MainNet`, `BetaNet`, or AlgoKit's `LocalNet`.\n\nThe logic for loading environment variables is as follows:\n\n- If a `.env` file exists, the environment variables contained in it are loaded first.\n- If a `.env.[network_name]` file exists, the environment variables in it are loaded, overriding any previously loaded values from the `.env` file for the same variables.\n\n### Default Network Configurations\n\nThe `deploy` command assumes default configurations for `mainnet`, `localnet`, and `testnet` environments. 
If you're deploying to one of these networks and haven't provided specific environment variables, AlgoKit will use these default values:\n\n- **Localnet**:\n\n  - `ALGOD_TOKEN`: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n  - `ALGOD_SERVER`: \"http://localhost\"\n  - `ALGOD_PORT`: \"4001\"\n  - `INDEXER_TOKEN`: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n  - `INDEXER_SERVER`: \"http://localhost\"\n  - `INDEXER_PORT`: \"8980\"\n\n- **Mainnet**:\n\n  - `ALGOD_SERVER`: \"https://mainnet-api.algonode.cloud\"\n  - `INDEXER_SERVER`: \"https://mainnet-idx.algonode.cloud\"\n\n- **Testnet**:\n  - `ALGOD_SERVER`: \"https://testnet-api.algonode.cloud\"\n  - `INDEXER_SERVER`: \"https://testnet-idx.algonode.cloud\"\n\nThese default values are used when no specific `.env.[network_name]` file is present and the corresponding environment variables are not set. This feature simplifies the deployment process for these common networks, reducing the need for manual configuration in many cases.\n\nIf you need to override these defaults or add additional configuration for these networks, you can still do so by creating the appropriate `.env.[network_name]` file or setting the environment variables explicitly or via generic `.env` file.\n\n## AlgoKit Configuration File\n\nAlgoKit uses a configuration file called `.algokit.toml` in the root of your project. The configuration file can be created using the `algokit init` command. This file will define the deployment commands for the various network environments that you want to target.\n\nHere's an example of what the `.algokit.toml` file might look like. When deploying it will prompt for the `DEPLOYER_MNEMONIC` secret unless it is already defined as an environment variable or is deploying to localnet.\n\n```toml\n[algokit]\nmin_version = \"v{latest_version}\"\n\n[project]\n\n... 
# project configuration and custom commands\n\n[project.deploy]\ncommand = \"poetry run python -m smart_contracts deploy\"\nenvironment_secrets = [\n  \"DEPLOYER_MNEMONIC\",\n]\n\n[project.deploy.localnet]\nenvironment_secrets = []\n```\n\nThe `command` key under each `[project.deploy.{network_name}]` section should contain a string that represents the deployment command for that particular network. If a `command` key is not provided in a network-specific section, the command from the general `[project.deploy]` section will be used.\n\nThe `environment_secrets` key should contain a list of names of environment variables that should be treated as secrets. This can be defined in the general `[project.deploy]` section, as well as in the network-specific sections. The environment-specific secrets will be added to the general secrets during deployment.\n\nThe `[algokit]` section with the `min_version` key allows you to specify the minimum version of AlgoKit that the project requires.\n\nThis way, you can define common deployment logic and environment secrets in the `[project.deploy]` section, and provide overrides or additions for specific environments in the `[project.deploy.{environment_name}]` sections.\n\n## Deploying to a Specific Network\n\nThe command requires a `ENVIRONMENT` argument, which specifies the network environment to which the smart contracts will be deployed. Please note, the `environment` argument is case-sensitive.\n\nExample:\n\n```sh\n$ algokit project deploy testnet\n```\n\nThis command deploys the smart contracts to the testnet.\n\n## Deploying to a Specific Network from a workspace with project name filter\n\nThe command requires a `ENVIRONMENT` argument, which specifies the network environment to which the smart contracts will be deployed. 
Please note, the `environment` argument is case-sensitive.\n\nExample:\n\nRoot `.algokit.toml`:\n\n```toml\n[project]\ntype = \"workspace\"\nprojects_root_dir = 'projects'\n```\n\nContract project `.algokit.toml`:\n\n```toml\n[project]\ntype = \"contract\"\nname = \"myproject\"\n\n[project.deploy]\ncommand = \"{custom_deploy_command}\"\n```\n\n```bash\n$ algokit project deploy testnet --project-name myproject\n```\n\nThis command deploys the smart contracts to TestNet from a sub project named 'myproject', which is available within the current workspace. All `.env` loading logic described in [Environment files](#environment-files) is applicable, execution from the workspace root orchestrates invoking the deploy command from the working directory of each applicable sub project.\n\n## Custom Project Directory\n\nBy default, the deploy command looks for the `.algokit.toml` file in the current working directory. You can specify a custom project directory using the `--project-dir` option.\n\nExample:\n\n```sh\n$ algokit project deploy testnet --project-dir=\"path/to/project\"\n```\n\n## Custom Deploy Command\n\nYou can provide a custom deploy command using the `--custom-deploy-command` option. If this option is not provided, the deploy command will be loaded from the `.algokit.toml` file.\n\nExample:\n\n```sh\n$ algokit project deploy testnet --custom-deploy-command=\"your-custom-command\"\n```\n\n> ⚠️ Please note, chaining multiple commands with `&&` is **not** currently supported. If you need to run multiple commands, you can defer to a custom script. Refer to [run](../project/run.md#custom-command-injection) for scenarios where multiple sub-command invocations are required.\n\n## CI Mode\n\nBy using the `--ci` or `--non-interactive` flag, you can skip the interactive prompt for mnemonics.\n\nThis is useful in CI/CD environments where user interaction is not possible. 
When using this flag, you need to make sure that the mnemonics are set as environment variables.\n\nExample:\n\n```sh\n$ algokit project deploy testnet --ci\n```\n\n## Passing Extra Arguments\n\nYou can pass additional arguments to the deploy command. These extra arguments will be appended to the end of the deploy command specified in your `.algokit.toml` file or to the command specified directly via `--command` option.\n\nTo pass extra arguments, use `--` after the AlgoKit command and options to mark the distinction between arguments used by the CLI and arguments to be passed as extras to the deploy command/script.\n\nExample:\n\n```sh\n$ algokit project deploy testnet -- my_contract_name --some_contract_related_param\n```\n\nIn this example, `my_contract_name` and `--some_contract_related_param` are extra arguments that can be utilized by the custom deploy command invocation, for instance, to filter the deployment to a specific contract or modify deployment behavior.\n\n## Example of a Full Deployment\n\n```sh\n$ algokit project deploy testnet --custom-deploy-command=\"your-custom-command\"\n```\n\nThis example shows how to deploy smart contracts to the testnet using a custom deploy command. This also assumes that the `.algokit.toml` file is present in the current working directory, and the `.env.testnet` file is present in the current working directory and contains the required environment variables for deploying to the TestNet environment.\n\n## Further Reading\n\nFor in-depth details, visit the [deploy](../../cli/index.md#deploy) section in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/project/link.md",
    "content": "# AlgoKit Project Link Command\n\nThe `algokit project link` command is a powerful feature designed to streamline the integration between `frontend` and `contract` typed projects within the AlgoKit ecosystem. This command facilitates the automatic path resolution and invocation of [`algokit generate client`](../generate.md#1-typed-clients) on `contract` projects available in the workspace, making it easier to integrate smart contracts with frontend applications.\n\n## Usage\n\nTo use the `link` command, navigate to the root directory of your standalone frontend project and execute:\n\n```sh\n$ algokit project link [OPTIONS]\n```\n\nThis command must be invoked from the root of a standalone 'frontend' typed project.\n\n## Options\n\n- `--project-name`, `-p`: Specify one or more contract projects for the command. If not provided, the command defaults to all contract projects in the current workspace. This option can be repeated to specify multiple projects.\n\n- `--language`, `-l`: Set the programming language of the generated client code. The default is `typescript`, but you can specify other supported languages as well.\n\n- `--all`, `-a`: Link all contract projects with the frontend project. This option is mutually exclusive with `--project-name`.\n\n- `--fail-fast`, `-f`: Exit immediately if at least one client generation process fails. This is useful for CI/CD pipelines where you want to ensure all clients are correctly generated before proceeding.\n\n- `--version`, `-v`: Allows specifying the version of the client generator to use when generating client code for contract projects. 
This can be particularly useful for ensuring consistency across different environments or when a specific version of the client generator includes features or fixes that are necessary for your project.\n\n## How It Works\n\nBelow is a visual representation of the `algokit project link` command in action:\n\n```mermaid\ngraph LR\n    F[Frontend Project] -->|algokit generate client| C1[Contract Project 1]\n    F -->|algokit generate client| C2[Contract Project 2]\n    F -->|algokit generate client| CN[Contract Project N]\n\n    C1 -->|algokit generate client| F\n    C2 -->|algokit generate client| F\n    CN -->|algokit generate client| F\n\n    classDef frontend fill:#f9f,stroke:#333,stroke-width:4px;\n    classDef contract fill:#bbf,stroke:#333,stroke-width:2px;\n    class F frontend;\n    class C1,C2,CN contract;\n```\n\n1. **Project Type Verification**: The command first verifies that it is being executed within a standalone frontend project by checking the project's type in the `.algokit.toml` configuration file.\n\n2. **Contract Project Selection**: Based on the provided options, it selects the contract projects to link. This can be all contract projects within the workspace, a subset specified by name, or a single project selected interactively.\n\n3. **Client Code Generation**: For each selected contract project, it generates typed client code using the specified language. The generated code is placed in the frontend project's directory specified for contract clients.\n\n4. 
**Feedback**: The command provides feedback for each contract project it processes, indicating success or failure in generating the client code.\n\n## Example\n\nLinking all contract projects with a frontend project and generating TypeScript clients:\n\n```sh\n$ algokit project link --all -l typescript\n```\n\nThis command will generate TypeScript clients for all contract projects and place them in the specified directory within the frontend project.\n\n## Further Reading\n\nTo learn more about the `algokit project link` command, please refer to [link](../../cli/index.md#link) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/project/list.md",
    "content": "# AlgoKit Project List Command\n\nThe `algokit project list` command is designed to enumerate all projects within an AlgoKit workspace. This command is particularly useful in workspace environments where multiple projects are managed under a single root directory. It provides a straightforward way to view all the projects that are part of the workspace.\n\n## Usage\n\nTo use the `list` command, execute the following **anywhere** within an AlgoKit workspace:\n\n```sh\n$ algokit project list [OPTIONS] [WORKSPACE_PATH]\n```\n\n- `WORKSPACE_PATH` is an optional argument that specifies the path to the workspace. If not provided, the current directory (`.`) is used as the default workspace path.\n\n## How It Works\n\n1. **Workspace Verification**: Initially, the command checks if the specified directory (or the current directory by default) is an AlgoKit workspace. This is determined by looking for a `.algokit.toml` configuration file and verifying if the `project.type` is set to `workspace`.\n\n2. **Project Enumeration**: If the directory is confirmed as a workspace, the command proceeds to enumerate all projects within the workspace. This is achieved by scanning the workspace's subdirectories for `.algokit.toml` files and extracting project names.\n\n3. **Output**: The names of all discovered projects are printed to the console. If the `-v` or `--verbose` option is used, additional details about each project are displayed.\n\n## Example Output\n\n```sh\nworkspace: {path_to_workspace} 📁\n  - myapp ({path_to_myapp}) 📜\n  - myproject-app ({path_to_myproject_app}) 🖥️\n```\n\n## Error Handling\n\nIf the command is executed in a directory that is not recognized as an AlgoKit workspace, it will issue a warning:\n\n```sh\nWARNING: No AlgoKit workspace found. 
Check [project.type] definition at .algokit.toml\n```\n\nThis message indicates that either the current directory does not contain a `.algokit.toml` file or the `project.type` within the file is not set to `workspace`.\n\n## Further Reading\n\nTo learn more about the `algokit project list` command, please refer to [list](../../cli/index.md#list) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/project/run.md",
    "content": "# AlgoKit Project Run\n\nThe `algokit project run` command allows defining custom commands to execute at standalone project level or being orchestrated from a workspace containing multiple standalone projects.\n\n## Usage\n\n```sh\n$ algokit project run [OPTIONS] COMMAND [ARGS]\n```\n\nThis command executes a custom command defined in the `.algokit.toml` file of the current project or workspace.\n\n### Options\n\n- `-l, --list`: List all projects associated with the workspace command. (Optional)\n- `-p, --project-name`: Execute the command on specified projects. Defaults to all projects in the current directory. (Optional)\n- `-t, --type`: Limit execution to specific project types if executing from workspace. (Optional)\n- `-s, --sequential`: Execute workspace commands sequentially, for cases where you do not have a preference on the execution order, but want to disable concurrency. (Optional, defaults to concurrent)\n- `[ARGS]...`: Additional arguments to pass to the custom command. These will be appended to the end of the command specified in the `.algokit.toml` file.\n\nTo get detailed help on the above options, execute:\n\n```bash\nalgokit project run {name_of_your_command} --help\n```\n\n### Workspace vs Standalone Projects\n\nAlgoKit supports two main types of project structures: Workspaces and Standalone Projects. This flexibility caters to the diverse needs of developers, whether managing multiple related projects or focusing on a single application.\n\n- **Workspaces**: Ideal for complex applications comprising multiple sub-projects. Workspaces facilitate organized management of these sub-projects under a single root directory, streamlining dependency management and shared configurations.\n\n- **Standalone Projects**: Suited for simpler applications or when working on a single component. 
This structure offers straightforward project management, with each project residing in its own directory, independent of others.\n\n> Please note, instantiating a workspace inside a workspace (aka 'workspace nesting') is not supported and not recommended. When you want to add a new project into existing workspace make sure to run `algokit init` **from the root of the workspace**\n\n### Custom Command Injection\n\nAlgoKit enhances project automation by allowing the injection of custom commands into the `.algokit.toml` configuration file. This feature enables developers to tailor the project setup to their specific needs, automating tasks such as deploying to different network environments or integrating with CI/CD pipelines.\n\n## How It Works\n\nThe orchestration between workspaces, standalone projects, and custom commands is designed to provide a seamless development experience. Below is a high-level overview of how these components interact within the AlgoKit ecosystem.\n\n```mermaid\ngraph TD;\nA[AlgoKit Project] --> B[\"Workspace (.algokit.toml)\"];\nA --> C[\"Standalone Project (.algokit.toml)\"];\nB --> D[\"Sub-Project 1 (.algokit.toml)\"];\nB --> E[\"Sub-Project 2 (.algokit.toml)\"];\nC --> F[\"Custom Commands defined in .algokit.toml\"];\nD --> F;\nE --> F;\n```\n\n- **AlgoKit Project**: The root command that encompasses all project-related functionalities.\n- **Workspace**: A root folder that is managing multiple related sub-projects.\n- **Standalone Project**: An isolated project structure for simpler applications.\n- **Custom Commands**: Commands defined by the user in the `.algokit.toml` and automatically injected into the `algokit project run` command group.\n\n### Workspace cli options\n\nBelow is only visible and available when running from a workspace root.\n\n- `-l, --list`: List all projects associated with the workspace command. (Optional)\n- `-p, --project-name`: Execute the command on specified projects. 
Defaults to all projects in the current directory. (Optional)\n- `-t, --type`: Limit execution to specific project types if executing from workspace. (Optional)\n  To get detailed help on the above options, execute:\n\n```bash\nalgokit project run {name_of_your_command} --help\n```\n\n## Examples\n\nAssume you have a default workspace with the following structure:\n\n```bash\nmy_workspace\n├── .algokit.toml\n├── projects\n│   ├── project1\n│   │   └── .algokit.toml\n│   └── project2\n│       └── .algokit.toml\n```\n\nThe workspace configuration file is defined as follows:\n\n```toml\n# ... other non [project.run] related metadata\n[project]\ntype = 'workspace'\nprojects_root_path = 'projects'\n# ... other non [project.run] related metadata\n```\n\nStandalone configuration files are defined as follows:\n\n```toml\n# ... other non [project.run] related metadata\n\n[project]\ntype = 'contract'\nname = 'project_a'\n\n[project.run]\nhello = { commands = ['echo hello'], description = 'Prints hello' }\n\n# ... other non [project.run] related metadata\n```\n\n```toml\n# ... other non [project.run] related metadata\n\n[project]\ntype = 'frontend'\nname = 'project_b'\n\n[project.run]\nhello = { commands = ['echo hello'], description = 'Prints hello' }\n\n# ... other non [project.run] related metadata\n```\n\nExecuting `algokit project run hello` from the root of the workspace will concurrently execute `echo hello` in both `project_a` and `project_b` directories.\n\nExecuting `algokit project run hello` from the root of `project_(a|b)` will execute `echo hello` in the `project_(a|b)` directory.\n\n### Controlling Execution Order\n\nCustomize the execution order of commands in workspaces for precise control:\n\n1. Define order in `.algokit.toml`:\n\n   ```toml\n   [project]\n   type = 'workspace'\n   projects_root_path = 'projects'\n\n   [project.run]\n   hello = ['project_a', 'project_b']\n   ```\n\n2. 
Execution behavior:\n   - Projects are executed in the specified order\n   - Invalid project names are skipped\n   - Partial project lists: Specified projects run first, others follow\n\n> Note: Explicit order always triggers sequential execution.\n\n### Controlling Concurrency\n\nYou can control whether commands are executed concurrently or sequentially:\n\n1. Use command-line options:\n\n   ```sh\n   $ algokit project run hello -s  # or --sequential\n   $ algokit project run hello -c  # or --concurrent\n   ```\n\n2. Behavior:\n   - Default: Concurrent execution\n   - Sequential: Use `-s` or `--sequential` flag\n   - Concurrent: Use `-c` or `--concurrent` flag or omit the flag (defaults to concurrent)\n\n> Note: When an explicit order is specified in `.algokit.toml`, execution is always sequential regardless of these flags.\n\n### Passing Extra Arguments\n\nYou can pass additional arguments to the custom command. These extra arguments will be appended to the end of the command specified in your `.algokit.toml` file.\n\nExample:\n\n```sh\n$ algokit project run hello -- world\n```\n\nIn this example, if the `hello` command in `.algokit.toml` is defined as `echo \"Hello\"`, the actual command executed will be `echo \"Hello\" world`.\n\n## Further Reading\n\nTo learn more about the `algokit project run` command, please refer to [run](../../cli/index.md#run) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/project.md",
    "content": "# AlgoKit Project\n\n`algokit project` is a collection of commands and command groups useful for managing AlgoKit-compliant [project workspaces](./init.md#workspaces-vs-standalone-projects).\n\n## Overview\n\nThe `algokit project` command group is designed to simplify the management of AlgoKit projects. It provides a suite of tools to initialize, deploy, link, list, and run various components within a project workspace. This command group ensures that developers can efficiently handle the lifecycle of their projects, from bootstrapping to deployment and beyond.\n\n### What is a Project?\n\nIn the context of AlgoKit, a \"project\" refers to a structured standalone or monorepo workspace that includes all the necessary components for developing, testing, and deploying Algorand applications. This may include smart contracts, frontend applications, and any associated configurations. In the context of the CLI, the `algokit project` commands help manage these components cohesively.\n\nThe orchestration between workspaces, standalone projects, and custom commands is designed to provide a seamless development experience. 
Below is a high-level overview of how these components interact within the AlgoKit ecosystem.\n\n```mermaid\ngraph TD;\nA[`algokit project` command group] --> B[\"Workspace (.algokit.toml)\"];\nA --> C[\"Standalone Project (.algokit.toml)\"];\nB --> D[\"Sub-Project 1 (.algokit.toml)\"];\nB --> E[\"Sub-Project 2 (.algokit.toml)\"];\nC --> F[\"Custom Commands defined in .algokit.toml\"];\nD --> F;\nE --> F;\n```\n\n- **AlgoKit Project**: The root command that encompasses all project-related functionalities.\n- **Workspace**: A root folder that is managing multiple related sub-projects.\n- **Standalone Project**: An isolated project structure for simpler applications.\n- **Custom Commands**: Commands defined by the user in the `.algokit.toml` and automatically injected into the `algokit project run` command group.\n\n### Workspaces vs Standalone Projects\n\nAs mentioned, AlgoKit supports two distinct project structures: Workspaces and Standalone Projects. This flexibility allows developers to choose the most suitable approach for their project's needs.\n\n### Workspaces\n\nWorkspaces are designed for managing multiple related projects under a single root directory. This approach is beneficial for complex applications that consist of multiple sub-projects, such as a smart contract and a corresponding frontend application. Workspaces help in organizing these sub-projects in a structured manner, making it easier to manage dependencies and shared configurations.\n\nTo initialize a project within a workspace, use the `--workspace` flag. If a workspace does not already exist, AlgoKit will create one for you by default (unless you disable it via `--no-workspace` flag). 
Once established, new projects can be added to this workspace, allowing for centralized management.\n\nTo mark your project as `workspace` fill in the following in your `.algokit.toml` file:\n\n```toml\n[project]\ntype = 'workspace' # type specifying if the project is a workspace or standalone\nprojects_root_path = 'projects' # path to the root folder containing all sub-projects in the workspace\n```\n\n#### VSCode optimizations\n\nAlgoKit has a set of minor optimizations for VSCode users that are useful to be aware of:\n\n- Templates created with the `--workspace` flag automatically include a VSCode code-workspace file. New projects added to an AlgoKit workspace are also integrated into an existing VSCode workspace.\n- Using the `--ide` flag with `init` triggers automatic prompts to open the project and, if available, the code workspace in VSCode.\n\n#### Handling of the `.github` Folder\n\nA key aspect of using the `--workspace` flag is how the `.github` folder is managed. This folder, which contains GitHub-specific configurations such as workflows and issue templates, is moved from the project directory to the root of the workspace. This move is necessary because GitHub does not recognize workflows located in subdirectories.\n\nHere's a simplified overview of what happens:\n\n1. If a `.github` folder is found in your project, its contents are transferred to the workspace's root `.github` folder.\n2. Files with matching names in the destination are not overwritten; they're skipped.\n3. The original `.github` folder is removed if it's left empty after the move.\n4. 
A notification is displayed, advising you to review the moved `.github` contents to ensure everything is in order.\n\nThis process ensures that your GitHub configurations are properly recognized at the workspace level, allowing you to utilize GitHub Actions and other features seamlessly across your projects.\n\n### Standalone Projects\n\nStandalone projects are suitable for simpler applications or when working on a single component. This structure is straightforward, with each project residing in its own directory, independent of others. Standalone projects are ideal for developers who prefer simplicity or are focusing on a single aspect of their application and are sure that they will not need to add more sub-projects in the future.\n\nTo create a standalone project, use the `--no-workspace` flag during initialization. This instructs AlgoKit to bypass the workspace structure and set up the project as an isolated entity.\n\nBoth workspaces and standalone projects are fully supported by AlgoKit's suite of tools, ensuring developers can choose the structure that best fits their workflow without compromising on functionality.\n\nTo mark your project as a standalone project fill in the following in your `.algokit.toml` file:\n\n```toml\n[project]\ntype = {'backend' | 'contract' | 'frontend'} # currently support 3 generic categories for standalone projects\nname = 'my-project' # unique name for the project inside workspace\n```\n\n> We recommend using workspaces for most projects (hence enabled by default), as it provides a more organized and scalable approach to managing multiple sub-projects. However, standalone projects are a great choice for simple applications or when you are certain that you will not need to add more sub-projects in the future, for such cases simply append `--no-workspace` when using `algokit init` command. 
For more details on init command please refer to [init](./init.md) command docs.\n\n## Features\n\nDive into the features of the `algokit project` command group:\n\n- [bootstrap](./project/bootstrap.md) - Bootstrap your project with AlgoKit.\n- [deploy](./project/deploy.md) - Deploy your smart contracts effortlessly to various networks.\n- [link](./project/link.md) - Powerful feature designed to streamline the integration between `frontend` and `contract` projects\n- [list](./project/list.md) - Enumerate all projects within an AlgoKit workspace.\n- [run](./project/run.md) - Define custom commands and manage their execution via `algokit` cli.\n"
  },
  {
    "path": "docs/features/tasks/analyze.md",
    "content": "# AlgoKit Task Analyze\n\nThe `analyze` task is a command-line utility that analyzes TEAL programs for common vulnerabilities using [Tealer](https://github.com/crytic/tealer) integration. It allows you to detect a range of common vulnerabilities in code written in TEAL. For a full list of vulnerability detectors, refer to [Tealer documentation](https://github.com/crytic/tealer?tab=readme-ov-file#detectors).\n\n## Usage\n\n```bash\nalgokit task analyze INPUT_PATHS [OPTIONS]\n```\n\n### Arguments\n\n- `INPUT_PATHS`: Paths to the TEAL files or directories containing TEAL files to be analyzed. This argument is required.\n\n### Options\n\n- `-r, --recursive`: Recursively search for all TEAL files within any provided directories.\n- `--force`: Force verification without the disclaimer confirmation prompt.\n- `--diff`: Exit with a non-zero code if differences are found between current and last reports.\n- `-o, --output OUTPUT_PATH`: Directory path where to store the reports of the static analysis.\n- `-e, --exclude DETECTORS`: Exclude specific vulnerabilities from the analysis. Supports multiple exclusions in a single run.\n\n## Example\n\n```bash\nalgokit task analyze ./contracts -r --exclude rekey-to --exclude missing-fee-check\n```\n\nThis command will recursively analyze all TEAL files in the `contracts` directory and exclude the `rekey-to` and `missing-fee-check` vulnerabilities from the analysis.\n\n## Security considerations\n\nThis task uses [`tealer`](https://github.com/crytic/tealer), a third-party tool, to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the [guidelines for smart contract development](https://dev.algorand.co/docs/concepts/smart-contracts/overview/). This should not be used as a substitute for an actual audit.\n"
  },
  {
    "path": "docs/features/tasks/ipfs.md",
    "content": "# AlgoKit Task IPFS\n\nThe AlgoKit IPFS feature allows you to interact with the IPFS [InterPlanetary File System](https://ipfs.tech/) using the [Piñata provider](https://www.pinata.cloud/). This feature supports logging in and out of the Piñata provider, and uploading files to IPFS.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\n$ ~ algokit task ipfs\nUsage: algokit task ipfs [OPTIONS]\n\nUpload files to IPFS using Pinata provider.\n\nOptions:\n  -f, --file PATH Path to the file to upload. [required]\n  -n, --name TEXT Human readable name for this upload, for use in file listings.\n  -h, --help Show this message and exit.\n```\n\n## Options\n\n- `--file, -f PATH`: Specifies the path to the file to upload. This option is required.\n- `--name, -n TEXT`: Specifies a human readable name for this upload, for use in file listings.\n\n## Prerequisites\n\nBefore you can use this feature, you need to ensure that you have signed up for a Piñata account and have a JWT. You can sign up for a Piñata account by reading [quickstart](https://docs.pinata.cloud/docs/getting-started).\n\n## Login\n\nPlease note, you need to login to the Piñata provider before you can upload files. You can do this using the `login` command:\n\n```bash\n$ algokit task ipfs login\n```\n\nThis will prompt you to enter your Piñata JWT. Once you are logged in, you can upload files to IPFS.\n\n## Upload\n\nTo upload a file to IPFS, you can use the `ipfs` command as follows:\n\n```bash\n$ algokit task ipfs --file {PATH_TO_YOUR_FILE}\n```\n\nThis will upload the file to IPFS using the Piñata provider and return the CID (Content Identifier) of the uploaded file.\n\n## Logout\n\nIf you want to logout from the Piñata provider, you can use the `logout` command:\n\n```bash\n$ algokit task ipfs logout\n```\n\nThis will remove your Piñata JWT from the keyring.\n\n## File Size Limit\n\nPlease note, the maximum file size that can be uploaded is 100MB. 
If you try to upload a file larger than this, you will receive an error.\n\n## Further Reading\n\nFor in-depth details, visit the [ipfs section](../../cli/index.md#ipfs) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks/mint.md",
    "content": "# AlgoKit Task Mint\n\nThe AlgoKit Mint feature allows you to mint new fungible or non-fungible assets on the Algorand blockchain. This feature supports the creation of assets, validation of asset parameters, and uploading of asset metadata and image to IPFS using the Piñata provider. Immutable assets are compliant with [ARC3](https://arc.algorand.foundation/ARCs/arc-0003), while mutable are based using [ARC19](https://arc.algorand.foundation/ARCs/arc-0019) standard.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\nUsage: algokit task mint [OPTIONS]\n\n  Mint new fungible or non-fungible assets on Algorand.\n\nOptions:\n  --creator TEXT                  Address or alias of the asset creator.  [required]\n  -n, --name TEXT                 Asset name.  [required]\n  -u, --unit TEXT                 Unit name of the asset.  [required]\n  -t, --total INTEGER             Total supply of the asset. Defaults to 1.\n  -d, --decimals INTEGER          Number of decimals. Defaults to 0.\n  -i, --image FILE                Path to the asset image file to be uploaded to IPFS.  [required]\n  -m, --metadata FILE             Path to the ARC19 compliant asset metadata file to be uploaded to IPFS. If not\n                                  provided, a default metadata object will be generated automatically based on asset-\n                                  name, decimals and image. For more details refer to\n                                  https://arc.algorand.foundation/ARCs/arc-0003#json-metadata-file-schema.\n  --mutable / --immutable         Whether the asset should be mutable or immutable. Refers to `ARC19` by default.\n  --nft / --ft                    Whether the asset should be validated as NFT or FT. 
Refers to NFT by default and\n                                  validates canonical definitions of pure or fractional NFTs as per ARC3 standard.\n  -n, --network [localnet|testnet|mainnet]\n                                  Network to use. Refers to `localnet` by default.\n  -h, --help                      Show this message and exit.\n```\n\n## Options\n\n- `--creator TEXT`: Specifies the address or alias of the asset creator. This option is required.\n- `-n, --name TEXT`: Specifies the asset name. This option is required.\n- `-u, --unit TEXT`: Specifies the unit name of the asset. This option is required.\n- `-t, --total INTEGER`: Specifies the total supply of the asset. Defaults to 1.\n- `-d, --decimals INTEGER`: Specifies the number of decimals. Defaults to 0.\n- `-i, --image PATH`: Specifies the path to the asset image file to be uploaded to IPFS. This option is required.\n- `-m, --metadata PATH`: Specifies the path to the ARC19 compliant asset metadata file to be uploaded to IPFS. If not provided, a default metadata object will be generated automatically based on asset-name, decimals and image.\n- `--mutable / --immutable`: Specifies whether the asset should be mutable or immutable. Refers to `ARC19` by default.\n- `--nft / --ft`: Specifies whether the asset should be validated as NFT or FT. Refers to NFT by default and validates canonical definitions of pure or fractional NFTs as per ARC3 standard.\n- `-n, --network [localnet|testnet|mainnet]`: Specifies the network to use. Refers to `localnet` by default.\n\n## Example\n\nTo mint a new asset in interactive mode, you can use the mint command as follows:\n\n```bash\n$ algokit task mint\n```\n\nThis will interactively prompt you for the required information, upload the asset image and metadata to IPFS using the Piñata provider and mint a new asset on the Algorand blockchain. 
The [asset's metadata](https://arc.algorand.foundation/ARCs/arc-0003#json-metadata-file-schema) will be generated automatically based on the provided asset name, decimals, and image.\n\nIf you want to provide a custom metadata file, you can use the --metadata flag:\n\n```bash\n$ algokit task mint --metadata {PATH_TO_METADATA}\n```\n\nIf the minting process is successful, the asset ID and transaction ID will be output to the console.\n\nFor non-interactive mode, refer to the usage section above for available options.\n\n> Please note, the creator account must have at least 0.2 Algos available to cover minimum balance requirements.\n\n## Further Reading\n\nFor in-depth details, visit the [mint section](../../cli/index.md#mint) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks/nfd.md",
    "content": "# AlgoKit Task NFD Lookup\n\nThe AlgoKit NFD Lookup feature allows you to perform a lookup via NFD domain or address, returning the associated address or domain respectively using the AlgoKit CLI. The feature is powered by [NFDomains MainNet API](https://api-docs.nf.domains/).\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\n$ ~ algokit task nfd-lookup\nUsage: algokit task nfd-lookup [OPTIONS] VALUE\n\nPerform a lookup via NFD domain or address, returning the associated address or domain respectively.\n\nOptions:\n-o, --output [full|tiny|address] Output format for NFD API response. Defaults to address|domain resolved.\n-h, --help                       Show this message and exit.\n```\n\n## Options\n\n- `VALUE`: Specifies the NFD domain or Algorand address to lookup. This argument is required.\n- `--output, -o [full|tiny|address]`: Specifies the output format for NFD API response. Defaults to address|domain resolved.\n\n> When using the `full` and `tiny` output formats, please be aware that these match the [views in get requests of the NFD API](https://api-docs.nf.domains/quick-start#views-in-get-requests). The `address` output format, which is used by default, refers to the respective domain name or address resolved and outputs it as a string (if found).\n\n## Example\n\nTo perform a lookup, you can use the nfd-lookup command as follows:\n\n```bash\n$ algokit task nfd-lookup {NFD_DOMAIN_OR_ALGORAND_ADDRESS}\n```\n\nThis will perform a lookup and return the associated address or domain. If you want to specify the output format, you can use the --output flag:\n\n```bash\n$ algokit task nfd-lookup {NFD_DOMAIN_OR_ALGORAND_ADDRESS} --output full\n```\n\nIf the lookup is successful, the result will be output to the console in a JSON format.\n\n## Further Reading\n\nFor in-depth details, visit the [nfd-lookup section](../../cli/index.md#nfd-lookup) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks/opt.md",
    "content": "# AlgoKit Task Asset opt-(in|out)\n\nAlgoKit Task Asset opt-(in|out) allows you to opt-in or opt-out of Algorand Asset(s). This task supports single or multiple assets.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n### Opt-in\n\n```bash\nUsage: algokit task opt-in [OPTIONS] ASSET_IDS...\n\n  Opt-in to an asset(s). This is required before you can receive an asset.\n  Use -n to specify localnet, testnet, or mainnet. To supply multiple asset IDs, separate them with a whitespace.\n\nOptions:\n  --account, -a TEXT  Address or alias of the signer account.  [required]\n  -n, --network [localnet|testnet|mainnet]\n                      Network to use. Refers to `localnet` by default.\n```\n\n### Opt-out\n\n```bash\nUsage: algokit task opt-out [OPTIONS] [ASSET_IDS]...\n\n  Opt-out of an asset(s). You can only opt out of an asset with a zero balance.\n  Use -n to specify localnet, testnet, or mainnet. To supply multiple asset IDs, separate them with a whitespace.\n\nOptions:\n  --account, -a TEXT  Address or alias of the signer account.  [required]\n  --all                Opt-out of all assets with zero balance.\n  -n, --network [localnet|testnet|mainnet]\n                      Network to use. Refers to `localnet` by default.\n```\n\n## Options\n\n- `ASSET_IDS`: Specifies the asset IDs to opt-in or opt-out. To supply multiple asset IDs, separate them with a whitespace.\n- `--account`, `-a` TEXT: Specifies the address or alias of the signer account. This option is required.\n- `--all`: Specifies to opt-out of all assets with zero balance.\n- `-n`, `--network` [localnet|testnet|mainnet]: Specifies the network to use. 
Refers to localnet by default.\n\n## Example\n\nTo opt-in to an asset(s), you can use the opt-in command as follows:\n\n```bash\n$ algokit task opt-in --account {YOUR_ACCOUNT} {ASSET_ID_1} {ASSET_ID_2} {ASSET_ID_3} ...\n```\n\nTo opt-out of an asset(s), you can use the opt-out command as follows:\n\n```bash\n$ algokit task opt-out --account {YOUR_ACCOUNT} {ASSET_ID_1} {ASSET_ID_2} ...\n```\n\nTo opt-out of all assets with zero balance, you can use the opt-out command with the `--all` flag:\n\n```bash\n$ algokit task opt-out --account {YOUR_ACCOUNT} --all\n```\n\n> Please note, the account must have sufficient balance to cover the transaction fees.\n\n## Further Reading\n\nFor in-depth details, visit the [opt-in](../../cli/index.md#opt-in) and [opt-out](../../cli/index.md#opt-out) sections in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks/send.md",
    "content": "# AlgoKit Task Send\n\nThe AlgoKit Send feature allows you to send signed Algorand transaction(s) to a specified network using the AlgoKit CLI. This feature supports sending single or multiple transactions, either provided directly as a base64 encoded string or from a binary file.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\n$ ~ algokit task send\nUsage: algokit task send [OPTIONS]\n\n  Send a signed transaction to the given network.\n\nOptions:\n  -f, --file FILE                 Single or multiple message pack encoded signed transactions from binary file to\n                                  send. Option is mutually exclusive with transaction.\n  -t, --transaction TEXT          Base64 encoded signed transaction to send. Option is mutually exclusive with file.\n  -n, --network [localnet|testnet|mainnet]\n                                  Network to use. Refers to `localnet` by default.\n  -h, --help                      Show this message and exit.\n```\n\n## Options\n\n- `--file, -f PATH`: Specifies the path to a binary file containing single or multiple message pack encoded signed transactions to send. Mutually exclusive with `--transaction` option.\n- `--transaction, -t TEXT`: Specifies a single base64 encoded signed transaction to send. Mutually exclusive with `--file` option.\n- `--network, -n [localnet|testnet|mainnet]`: Specifies the network to which the transactions will be sent. Refers to `localnet` by default.\n\n> Please note, `--transaction` flag only supports sending a single transaction. If you want to send multiple transactions, you can use the `--file` flag to specify a binary file containing multiple transactions.\n\n## Example\n\nTo send a transaction, you can use the `send` command as follows:\n\n```bash\n$ algokit task send --file {PATH_TO_BINARY_FILE_CONTAINING_SIGNED_TRANSACTIONS}\n```\n\nThis will send the transactions to the default `localnet` network. 
If you want to send the transactions to a different network, you can use the `--network` flag:\n\n```bash\n$ algokit task send --transaction {YOUR_BASE64_ENCODED_SIGNED_TRANSACTION} --network testnet\n```\n\nYou can also pipe in the `stdout` of `algokit sign` command:\n\n```bash\n$ algokit task sign --account {YOUR_ACCOUNT_ALIAS OR YOUR_ADDRESS} --file {PATH_TO_BINARY_FILE_CONTAINING_TRANSACTIONS} --force | algokit task send --network {network_name}\n```\n\nIf the transaction is successfully sent, the transaction ID (txid) will be output to the console. You can check the transaction status at the provided transaction explorer URL.\n\n## Goal Compatibility\n\nPlease note, at the moment this feature only supports [`goal clerk`](https://dev.algorand.co/algokit/algokit-cli/goal/) compatible transaction objects.\n\n## Further Reading\n\nFor in-depth details, visit the [send section](../../cli/index.md#send) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks/sign.md",
    "content": "# AlgoKit Task Sign\n\nThe AlgoKit Sign feature allows you to sign Algorand transaction(s) using the AlgoKit CLI. This feature supports signing single or multiple transactions, either provided directly as a base64 encoded string or from a binary file.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\n$ ~ algokit task sign\nUsage: algokit task sign [OPTIONS]\n\nSign goal clerk compatible Algorand transaction(s).\n\nOptions:\n-a, --account TEXT Address or alias of the signer account. [required]\n-f, --file PATH Single or multiple message pack encoded transactions from binary file to sign.\n-t, --transaction TEXT Single base64 encoded transaction object to sign.\n-o, --output PATH The output file path to store signed transaction(s).\n--force Force signing without confirmation.\n-h, --help Show this message and exit.\n```\n\n## Options\n\n- `--account, -a TEXT`: Specifies the address or alias of the signer account. This option is required.\n- `--file, -f PATH`: Specifies the path to a binary file containing single or multiple message pack encoded transactions to sign. Mutually exclusive with `--transaction` option.\n- `--transaction, -t TEXT`: Specifies a single base64 encoded transaction object to sign. Mutually exclusive with `--file` option.\n- `--output, -o PATH`: Specifies the output file path to store signed transaction(s).\n- `--force`: If specified, it allows signing without interactive confirmation prompt.\n\n> Please note, `--transaction` flag only supports signing a single transaction. If you want to sign multiple transactions, you can use the `--file` flag to specify a binary file containing multiple transactions.\n\n## Example\n\nTo sign a transaction, you can use the `sign` command as follows:\n\n```bash\n$ algokit task sign --account {YOUR_ACCOUNT_ALIAS OR YOUR_ADDRESS} --file {PATH_TO_BINARY_FILE_CONTAINING_TRANSACTIONS}\n```\n\nThis will prompt you to confirm the transaction details before signing. 
### Encoding transactions for signing
javascript:\n\n```ts\nBuffer.from(algosdk.encodeObj({ txn: txn.get_obj_for_encoding() })).toString(\n  \"base64\"\n); // Resulting string can be passed directly to algokit task sign with --transaction flag\n```\n\n## Further Reading\n\nFor in-depth details, visit the [sign section](../../cli/index.md#sign) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks/transfer.md",
    "content": "# AlgoKit Task Transfer\n\nThe AlgoKit Transfer feature allows you to transfer algos and assets between two accounts.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\n$ ~ algokit task transfer\nUsage: algokit task transfer [OPTIONS]\n\nTransfer algos or assets from one account to another.\n\nOptions:\n  -s, --sender TEXT               Address or alias of the sender account  [required]\n  -r, --receiver TEXT             Address or alias to an account that will receive the asset(s)  [required]\n  --asset, --id INTEGER           ASA asset id to transfer\n  -a, --amount INTEGER            Amount to transfer  [required]\n  --whole-units                   Use whole units (Algos | ASAs) instead of smallest divisible units (for example,\n                                  microAlgos). Disabled by default.\n  -n, --network [localnet|testnet|mainnet]\n                                  Network to use. Refers to `localnet` by default.\n  -h, --help                      Show this message and exit.\n```\n\n> Note: If you use a wallet address for the `sender` argument, you'll be asked for the mnemonic phrase. To use a wallet alias instead, see the [wallet aliasing](wallet.md) task. For wallet aliases, the sender must have a stored `private key`, but the receiver doesn't need one. This is because the sender signs and sends the transfer transaction, while the receiver reference only needs a valid Algorand address.\n\n## Examples\n\n### Transfer algo between accounts on LocalNet\n\n```bash\n$ ~ algokit task transfer -s {SENDER_ALIAS OR SENDER_ADDRESS} -r {RECEIVER_ALIAS OR RECEIVER_ADDRESS} -a {AMOUNT}\n```\n\nBy default:\n\n- the `amount` is in microAlgos. 
To use whole units, use the `--whole-units` flag.\n- the `network` is `localnet`.\n\n### Transfer asset between accounts on TestNet\n\n```bash\n$ ~ algokit task transfer -s {SENDER_ALIAS OR SENDER_ADDRESS} -r {RECEIVER_ALIAS OR RECEIVER_ADDRESS} -a {AMOUNT} --id {ASSET_ID} --network testnet\n```\n\nBy default:\n\n- the `amount` is smallest divisible unit of supplied `ASSET_ID`. To use whole units, use the `--whole-units` flag.\n\n## Further Reading\n\nFor in-depth details, visit the [transfer section](../../cli/index.md#transfer) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks/vanity_address.md",
    "content": "# AlgoKit Task Vanity Address\n\nThe AlgoKit Vanity Address feature allows you to generate a vanity Algorand address. A vanity address is an address that contains a specific keyword in it. The keyword can only include uppercase letters A-Z and numbers 2-7. The longer the keyword, the longer it may take to generate a matching address.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\n$ ~ algokit task vanity-address\nUsage: algokit task vanity-address [OPTIONS] KEYWORD\n\n  Generate a vanity Algorand address. Your KEYWORD can only include letters A - Z and numbers 2 - 7. Keeping your\n  KEYWORD under 5 characters will usually result in faster generation. Note: The longer the KEYWORD, the longer it may\n  take to generate a matching address. Please be patient if you choose a long keyword.\n\nOptions:\n  -m, --match [start|anywhere|end]\n                                  Location where the keyword will be included. Default is start.\n  -o, --output [stdout|alias|file]\n                                  How the output will be presented.\n  -a, --alias TEXT                Alias for the address. Required if output is \"alias\".\n  --file-path PATH                File path where to dump the output. 
Generate a vanity address with the keyword \"ALGO\" at the start of the address with output to a file:\n\n```bash\n$ ~ algokit task vanity-address ALGO -o file --file-path vanity-address.txt\n```\n\nGenerate a vanity address with the keyword \"ALGO\" anywhere in the address with output to a file:\n\n```bash\n$ ~ algokit task vanity-address ALGO -m anywhere -o file --file-path vanity-address.txt\n```
  },
  {
    "path": "docs/features/tasks/wallet.md",
    "content": "# AlgoKit Task Wallet\n\nManage your Algorand addresses and accounts effortlessly with the AlgoKit Wallet feature. This feature allows you to create short aliases for your addresses and accounts on AlgoKit CLI.\n\n## Usage\n\nAvailable commands and possible usage as follows:\n\n```bash\n$ ~ algokit task wallet\nUsage: algokit task wallet [OPTIONS] COMMAND [ARGS]...\n\nCreate short aliases for your addresses and accounts on AlgoKit CLI.\n\nOptions:\n-h, --help Show this message and exit.\n\nCommands:\nadd Add an address or account to be stored against a named alias.\nget Get an address or account stored against a named alias.\nlist List all addresses and accounts stored against a named alias.\nremove Remove an address or account stored against a named alias.\nreset Remove all aliases.\n```\n\n## Commands\n\n### Add\n\nThis command adds an address or account to be stored against a named alias. If the `--mnemonic` flag is used, it will prompt the user for a mnemonic phrase interactively using masked input. If the `--force` flag is used, it will allow overwriting an existing alias. Maximum number of aliases that can be stored at a time is 50.\n\n```bash\nalgokit wallet add [OPTIONS] ALIAS_NAME\n```\n\n> Please note, the command is not designed to be used in CI scope, there is no option to skip interactive masked input of the mnemonic, if you want to alias an `Account` (both private and public key) entity.\n\n#### Options\n\n- `--address, -a TEXT`: Specifies the address of the account. This option is required.\n- `--mnemonic, -m`: If specified, it prompts the user for a mnemonic phrase interactively using masked input.\n- `--force, -f`: If specified, it allows overwriting an existing alias without interactive confirmation prompt.\n\n### Get\n\nThis command retrieves an address or account stored against a named alias.\n\n```bash\nalgokit wallet get ALIAS\n```\n\n### List\n\nThis command lists all addresses and accounts stored against a named alias. 
If a record contains a `private_key` it will show a boolean flag indicating whether it exists, actual private key values are never exposed. As a user you can obtain the content of the stored aliases by navigating to your dedicated password manager (see [keyring details](https://pypi.org/project/keyring/)).\n\n```bash\nalgokit wallet list\n```\n\n### Remove\n\nThis command removes an address or account stored against a named alias.\nYou must confirm the prompt interactively or pass `--force` | `-f` flag to ignore the prompt.\n\n```bash\nalgokit wallet remove ALIAS  [--force | -f]\n```\n\n### Reset\n\nThis command removes all aliases. You must confirm the prompt interactively or pass `--force` | `-f` flag to ignore the prompt.\n\n```bash\nalgokit wallet reset [--force | -f]\n```\n\n## Keyring\n\nAlgoKit relies on the [keyring](https://pypi.org/project/keyring/) library, which provides an easy way to interact with the operating system's password manager. This abstraction allows AlgoKit to securely manage sensitive information such as mnemonics and private keys.\n\nWhen you use AlgoKit to store a mnemonic, it is never printed or exposed directly in the console. Instead, the mnemonic is converted and stored as a private key in the password manager. This ensures that your sensitive information is kept secure.\n\nTo retrieve the stored mnemonic, you will need to manually navigate to your operating system's password manager. The keyring library supports a variety of password managers across different operating systems. Here are some examples:\n\n- On macOS, it uses the Keychain Access app.\n- On Windows, it uses the Credential Manager.\n- On Linux, it can use Secret Service API, KWallet, or an in-memory store depending on your setup.\n\n> Remember, AlgoKit is designed to keep your sensitive information secure however your storage is only as secure as the device on which it is stored. 
Always ensure to maintain good security practices on your device, especially when dealing with mnemonics that are to be used on MainNet.\n\n### Keyring on WSL2\n\nWSL2 environments don't have a keyring backend installed by default. If you want to leverage this feature, you'll need to install one yourself. See [this GitHub issue for info](https://github.com/jaraco/keyring/issues/566#issuecomment-1792544475).\n\n## Further Reading\n\nFor in-depth details, visit the [wallet section](../../cli/index.md#wallet) in the AlgoKit CLI reference documentation.\n"
  },
  {
    "path": "docs/features/tasks.md",
    "content": "# AlgoKit Tasks\n\nAlgoKit Tasks are a collection of handy tasks that can be used to perform various operations on Algorand blockchain.\n\n## Features\n\n- [Wallet Aliasing](./tasks/wallet.md) - Manage your Algorand addresses and accounts effortlessly with the AlgoKit Wallet feature. This feature allows you to create short aliases for your addresses and accounts on AlgoKit CLI.\n- [Vanity Address Generation](./tasks/vanity_address.md) - Generate vanity addresses for your Algorand accounts with the AlgoKit Vanity feature. This feature allows you to generate Algorand addresses which contains a specific keyword of your choice.\n- [Transfer Assets or Algos](./tasks/transfer.md) - Transfer Algos or Assets from one account to another with the AlgoKit Transfer feature. This feature allows you to transfer Algos or Assets from one account to another on Algorand blockchain.\n- [Opt-(in|out) Assets](./tasks/opt.md) - Opt-in or opt-out of Algorand Asset(s). Supports single or multiple assets.\n- [Signing transactions](./tasks/sign.md) - Sign goal clerk compatible Algorand transactions.\n- [Sending transactions](./tasks/send.md) - Send signed goal clerk compatible Algorand transactions.\n- [NFD lookups](./tasks/nfd.md) - Perform a lookup via NFD domain or address, returning the associated address or domain respectively using the AlgoKit CLI.\n- [IPFS uploads](./tasks/ipfs.md) - Upload files to IPFS.\n- [Asset minting](./tasks/mint.md) - Mint new fungible or non-fungible assets on Algorand.\n- [Analyze TEAL code](./tasks/analyze.md) - Analyze TEAL code using [`tealer`](https://github.com/crytic/tealer) integration for common vulnerabilities.\n"
  },
  {
    "path": "docs/sphinx/conf.py",
    "content": "# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"AlgoKit\"\ncopyright = \"2023, Algorand Foundation\"\nauthor = \"Algorand Foundation\"\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\"sphinx_click\"]\n\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\nsmartquotes = False\n"
  },
  {
    "path": "docs/sphinx/index.rst",
    "content": ".. click:: algokit.cli:algokit\n   :prog: algokit\n   :nested: full\n"
  },
  {
    "path": "docs/tutorials/algokit-template.md",
    "content": "# Creating AlgoKit Templates\n\nThis README serves as a guide on how to create custom templates for AlgoKit, a tool for initializing Algorand smart contract projects.\nCreating templates in AlgoKit involves the use of various configuration files and a templating engine to generate project structures that are tailored to your needs.\nThis guide will cover the key concepts and best practices for creating templates in AlgoKit.\nWe will also refer to the official [`algokit-python-template`](https://github.com/algorandfoundation/algokit-python-template) as an example.\n\n## Table of Contents\n\n- [Quick Start](#quick-start)\n- [Overview of AlgoKit Templates](#overview-of-algokit-templates)\n  - [Copier/Jinja](#copierjinja)\n  - [AlgoKit Functionality with Templates](#algokit-functionality-with-templates)\n- [Key Concepts](#key-concepts)\n  - [.algokit.toml](#algokittoml)\n  - [Python Support: pyproject.toml](#python-support-pyprojecttoml)\n  - [TypeScript Support: package.json](#typescript-support-packagejson)\n  - [Bootstrap Option](#bootstrap-option)\n  - [Predefined Copier Answers](#predefined-copier-answers)\n  - [Default Behaviors](#default-behaviors)\n  - [Generators](#generators)\n- [Recommendations](#recommendations)\n- [Conclusion](#conclusion)\n\n## Quick Start\n\nFor users who are keen on getting started with creating AlgoKit templates, you can follow these quick steps:\n\n1. Click on `Use this template`->`Create a new repository` on [algokit-python-template](https://github.com/algorandfoundation/algokit-python-template) Github page. This will create a new reference repository with clean git history, allowing you to start modifying and transforming the base python template into your own custom template.\n2. Modify the cloned template according to your specific needs. 
You can refer to the remainder of this tutorial for an understanding of expected behaviors from the AlgoKit side, Copier - the templating framework, and key concepts related to the default files you will encounter in the reference template.\n\n## Overview of AlgoKit Templates\n\nAlgoKit templates are essentially project scaffolds that can be used to initialize new smart contract projects. These templates can include code files, configuration files, and scripts. AlgoKit uses Copier along with the Jinja templating engine to create new projects based on these templates.\n\n### Copier/Jinja\n\nAlgoKit uses Copier templates. Copier is a library that allows you to create project templates that can be easily replicated and customized. It's often used along with Jinja. Jinja is a modern and designer-friendly templating engine for Python programming language. It's used in Copier templates to substitute variables in files and file names. You can find more information in the [Copier documentation](https://copier.readthedocs.io/) and [Jinja documentation](https://jinja.palletsprojects.com/).\n\n### AlgoKit Functionality with Templates\n\nAlgoKit provides the `algokit init` command to initialize a new project using a template. You can either pass the template name using the `-t` flag or select a template from a list.\n\n## Key Concepts\n\n### .algokit.toml\n\nThis file is the AlgoKit configuration file for this project which can be used to specify the minimum version of the AlgoKit. This is essential to ensure that projects created with your template are always compatible with the version of AlgoKit they are using.\n\nExample from `algokit-python-template`:\n\n```toml\n[algokit]\nmin_version = \"v1.1.0-beta.4\"\n```\n\nThis specifies that the template requires at least version `v1.1.0-beta.4` of AlgoKit.\n\n### Python Support: `pyproject.toml`\n\nPython projects in AlgoKit can leverage a wide range of tools for dependency management and project configuration. 
For TypeScript projects, the `package.json` file plays a similar role to the one `pyproject.toml` plays for Python projects.
This feature is integral for securely managing environment variables, as it prevents sensitive data from inadvertently ending up in version control.\n  By default, Algokit will scan for network-prefixed `.env` variables (e.g., `.env.localnet`), which can be particularly useful when relying on the [Algokit deploy command](https://github.com/algorandfoundation/algokit-cli/blob/deploy-command/docs/features/deploy.md). If no such prefixed files are located, Algokit will then attempt to load default `.env` files. This functionality provides greater flexibility for different network configurations.\n\n- `poetry`: If your Python project uses Poetry for dependency management, the `poetry` command installs Poetry (if not present) and runs `poetry install` in the current working directory to install Python dependencies.\n- `npm`: If you're developing a JavaScript or TypeScript project, the `npm` command runs npm install in the current working directory to install Node.js dependencies.\n- `all`: The `all` command runs all the aforementioned bootstrap sub-commands in the current directory and its subdirectories. This command is a comprehensive way to ensure all project dependencies and environment variables are properly set up.\n\n### Predefined Copier Answers\n\nWhen initializing a new project, Copier can prompt the user for input, which is then passed to the template as variables. 
- **VSCode**: If the user has Visual Studio Code (VSCode) installed and the path to VSCode is added to their system's PATH, AlgoKit CLI will automatically open the newly created project in a VSCode window, unless the user provides specific flags to the init command.
By default, if a `bootstrap` task is defined in the `copier.yaml`, AlgoKit CLI will execute it, unless the user opts out during the prompt.\n\nBy combining predefined Copier answers with these default behaviors, you can create a smooth, efficient, and intuitive initialization experience for the users of your template.\n\n### Executing Python Tasks in Templates\n\nIf you need to use Python scripts as tasks within your Copier templates, ensure that you have Python installed on the host machine.\nBy convention, AlgoKit automatically detects the Python installation on your machine and fills in the `python_path` variable accordingly.\nThis process ensures that any Python scripts included as tasks within your Copier templates will execute using the system's Python interpreter.\nIt's important to note that the use of `_copier_python` is not recommended. Here's an example of specifying a Python script execution in your `copier.yaml` without needing to explicitly use `_copier_python`:\n\n```yaml\n- \"{{ python_path }} your_python_script.py\"\n```\n\nIf you'd like your template to be backwards compatible with versions of `algokit-cli` older than `v1.11.3` when executing custom python scripts via `copier` tasks, you can use a conditional statement to determine the Python path:\n\n```yaml\n- \"{{ python_path if python_path else _copier_python }} your_python_script.py\"\n# _copier_python above is used for backwards compatibility with versions < v1.11.3 of the algokit cli\n```\n\nAnd to define `python_path` in your Copier questions:\n\n```yaml\n# Auto determined by algokit-cli from v1.11.3 to allow execution of python script\n# in binary mode.\npython_path:\n  type: str\n  help: Path to the sys.executable.\n  when: false\n```\n\n### Working with Generators\n\nAfter mastering the use of `copier` and building your templates based on the official AlgoKit template repositories, you can enhance your proficiency by learning to define `custom generators`. 
Essentially, generators are smaller-scope `copier` templates designed to provide additional functionality after a project has been initialized from the template.\n\nFor example, the official [`algokit-python-template`](https://github.com/algorandfoundation/algokit-python-template/tree/main/template_content) incorporates a generator in the `.algokit/generators` directory. This generator can be utilized to execute auxiliary tasks on AlgoKit projects that are initiated from this template, like adding new smart contracts to an existing project. For a comprehensive understanding, please consult the [`architecture decision record`](../architecture-decisions/2023-07-19_advanced_generate_command.md) and [`algokit generate documentation`](../features/generate.md).\n\n#### How to Create a Generator\n\nOutlined below are the fundamental steps to create a generator. Although `copier` provides complete autonomy in structuring your template, you may prefer to define your generator to meet your specific needs. Nevertheless, as a starting point, we suggest:\n\n1. Generate a new directory hierarchy within your template directory under the `.algokit/generators` folder (this is merely a suggestion, you can define your custom path if necessary and point to it via the algokit.toml file).\n2. Develop a `copier.yaml` file within the generator directory and outline the generator's behavior. This file bears similarities with the root `copier.yaml` file in your template directory, but it is exclusively for the generator. The `tasks` section of the `copier.yaml` file is where you can determine the generator's behavior. 
Here's an example of a generator that copies the `smart-contract` directory from the template to the current working directory:\n\n```yaml\n_tasks:\n  - \"echo '==== Successfully initialized new smart contract 🚀 ===='\"\n\ncontract_name:\n  type: str\n  help: Name of your new contract.\n  placeholder: \"my-new-contract\"\n  default: \"my-new-contract\"\n\n_templates_suffix: \".j2\"\n```\n\nNote that `_templates_suffix` must be different from the `_templates_suffix` defined in the root `copier.yaml` file. This is because the generator's `copier.yaml` file is processed separately from the root `copier.yaml` file.\n\n3. Develop your `generator` copier content and, when ready, test it by initiating a new project for your template and executing the generator command:\n\n```bash\nalgokit generate\n```\n\nThis should dynamically load and display your generator as an optional `cli` command that your template users can execute.\n\n## Recommendations\n\n- **Modularity**: Break your templates into modular components that can be combined in different ways.\n- **Documentation**: Include README files and comments in your templates to explain how they should be used.\n- **Versioning**: Use `.algokit.toml` to specify the minimum compatible version of AlgoKit.\n- **Testing**: Include test configurations and scripts in your templates to encourage testing best practices.\n- **Linting and Formatting**: Integrate linters and code formatters in your templates to ensure code quality.\n- **Algokit Principle**: for details on generic principles on designing templates refer to [algokit design principles](https://github.com/algorandfoundation/algokit-cli/blob/main/docs/algokit.md#guiding-principles).\n\n## Conclusion\n\nCreating custom templates in AlgoKit is a powerful way to streamline your development workflow for Algorand smart contracts, whether you are using Python or TypeScript. 
Leveraging Copier and Jinja for templating, and incorporating best practices for modularity, documentation, and coding standards, can result in robust, flexible, and user-friendly templates that can be a valuable asset to both your own projects and the broader Algorand community.\n\nHappy coding!\n"
  },
  {
    "path": "docs/tutorials/intro.md",
    "content": "# AlgoKit Quick Start Tutorial\n\nAlgoKit is the primary tool used by the Algorand community to develop smart contracts on the Algorand blockchain. It provides the capabilities to develop, test and deploy Algorand smart contracts within minutes! This guide is intended to help you set up AlgoKit and to start developing your application.\n\n## Quick start videos 📹\n\nIf you prefer videos, take a look at this 10 minute guide to getting started.\n\n[![Learn How to Build on Algorand in 10 Minutes](../imgs/algokit-intro-video-thumbnail.jpg)](https://www.youtube.com/embed/dow6U8DxOGc)\n\nDetailed video guides for both [Windows](https://www.youtube.com/embed/22RvINnZsRo) and [Mac](https://www.youtube.com/embed/zsurtpCGmgE) are also available.\n\n> Please note, the videos above are to be refreshed to cover v2.0 features. For now, it's best to follow the instructions below to get started.\n\n## Prerequisites ✅\n\nThis guide presents installing AlgoKit using an OS agnostic procedure. For OS specific instructions take a look at the [AlgoKit install](https://github.com/algorandfoundation/algokit-cli/blob/main/README.md#install) guide.\n\nBefore proceeding, ensure you have the following components installed:\n\n- [Python 3.10 - 3.14](https://www.python.org/downloads/)\n- [pipx](https://pypa.github.io/pipx/#on-linux-install-via-pip-requires-pip-190-or-later)\n- [git](https://github.com/git-guides/install-git#install-git)\n- [Docker](https://docs.docker.com/desktop/install/mac-install/) (or [Podman](https://podman.io/getting-started/installation/), see [details](../features/localnet.md#podman-support))\n- [VSCode](https://code.visualstudio.com/download)\n\n## Install AlgoKit 🛠\n\nTo install AlgoKit, run the following command from a terminal.\n\n```shell\npipx install algokit\n```\n\nAfter the installation completes, **restart the terminal**.\n\nFor more detailed installation documentation, see the [official installation 
guide](https://github.com/algorandfoundation/algokit-cli#install).\n\n> Please note, `pipx` is only one of the supported installation methods. You can also install AlgoKit via `brew` and _soon_ `winget` and `snap` as pre-built binaries. Refer to the official installation guide.\n\n## Verify the Installation ✔\n\nTo verify AlgoKit installed correctly, run the following.\n\n```shell\nalgokit --version\n```\n\nOutput similar to the following should be displayed:\n\n```shell\nalgokit, version 2.0.3 # or higher\n```\n\n## Start a LocalNet 🌐\n\nAlgoKit supports using a [local version of the Algorand blockchain](../features/localnet.md). To start an instance of this LocalNet run the following command from the terminal:\n\n```shell\nalgokit localnet start\n```\n\nThis should start an instance of the LocalNet within docker. If you open the Docker Desktop application you should see something similar to the following:\n\n![Docker Desktop LocalNet Instance](../imgs/localnet.png)\n\n## Create an AlgoKit project 🆕\n\nNow that AlgoKit is installed, you can rapidly create a new project to get started quickly. This can be done by running:\n\n```shell\nalgokit init\n```\n\nThis will launch a guided menu system to create a specific project tailored to your needs. The templates are basic starter applications for various Algorand development scenarios. To read more about templates check out the AlgoKit detailed documentation.\n\nFor now we'll use the `python` template, which is a lightweight starting point for learning Algorand smart contract development using Algorand Python. You can initialize a project using this template by running:\n\n```shell\nalgokit init -t python -a preset_name \"starter\"\n```\n\nNext, you will be prompted for the name of your project. 
Finally, select the default value for the rest of the prompts (enter).\n\nOnce finished, (if you have it installed) VS Code should automatically be opened with the initialised project and you will be prompted to install appropriate VS Code extensions. This starter app will contain one smart contract (built using the [Algorand Python](https://algorandfoundation.github.io/puya/) language) named `contract.py`, in the `smart_contracts/hello_world` folder, with one method (`hello`) that takes a `String` and returns a `String`. Notice that within the contract folder, there is a `deploy_config.py` which defines the parameters for deployment of your smart contract to the target chain (Algorand TestNet, Algorand MainNet, Algorand LocalNet or else).\n\n![AlgoKit Playground Contract](../imgs/algokitplayground.png)\n\n## AlgoKit Project structure 🏗\n\nThe structure of your fresh algokit project instance will look similar to below:\n\n```bash\n.\n├── .algokit.toml # Configuration for AlgoKit projects in the workspace.\n├── README.md # Quick start guide for the AlgoKit Workspace.\n├── {your_workspace|project_name}.code-workspace\n└── projects\n    └── {your_project_name} # Root directory for the smart contract project. 
To add more projects into your algokit workspace run 'algokit init' from the root of your workspace repository.\n        ├── README.md # Quick start on Algorand Python smart contract template based project.\n        ├── .algokit # Hidden folder for AlgoKit AVM debugger and custom generators.\n        ├── .algokit.toml # Project-specific commands and custom generator references.\n        ├── poetry.lock\n        ├── poetry.toml # Dependency definitions for {your_project_name}.\n        ├── pyproject.toml # Project definitions for {your_project_name}.\n        └── smart_contracts\n            ├── README.md # Guide for adding new smart contracts.\n            ├── ...\n            ├── hello_world # Contract logic for 'hello_world'.\n            │   ├── contract.py # Contract logic.\n            │   └── deploy_config.py # Deployment logic for 'hello_world'.\n            └── helpers # Helper functions for contract build and deployment.\n```\n\nAdditionally, each official smart contract template includes an interactive codespace walkthrough powered by [CodeTour](https://marketplace.visualstudio.com/items?itemName=vsls-contrib.codetour).\n\nTo start the interactive walkthrough, install the extension and click on the green play button in the left bottom corner of the `Explorer` pane as demonstrated on the screenshot below (See `Getting Started with Your AlgoKit Project`).\n\n![CodeTour Play Button](../imgs/algokitcodetour.png)\n\nUpon execution, the walkthrough will guide you through the key components of your fresh AlgoKit project, which is an important prerequisite to learn before running and deploying your first smart contract. Certainly, not because its hard to learn, but because it will save you a lot of time and effort having the correct foundations.\n\n## Run the Demo Application 🏃‍♂️\n\nOnce the `Algorand Python` based project is created, you can get started by building and deploying your first smart contract. 
There are a few possible ways to do so as demonstrated on the following video.\n\n[![Watch the video](https://ipfs.algonode.xyz/ipfs/Qmc9mRaPoDyhUFmek4ETxVfKUKzUg9pf3Tss5xwkBGdQis)](https://ipfs.algonode.xyz/ipfs/Qma6gNqxsSFc9Jbh8kBTZyVLv5gqFj1xnrsjoeT6MAAwCw/)\n\nThe App ID of the deployed contract and its Algorand address is displayed, followed by the message returned from the smart contract call (`Hello, Python`).\n\n1. Start LocalNet\n2. Build the smart contract. Notice how a folder named `artifacts` is created with the [AVM (Algorand Virtual Machine)](https://dev.algorand.co/concepts/smart-contracts/avm/) related artifacts for deploying your smart contract.\n3. Deploy and call the `HelloWorld` smart contract (from `contract.py` file).\n\nAfter execution, the `App ID` of the deployed contract and its Algorand address are displayed, followed by the message returned from the smart contract call (`Hello, world`).\n\nAt this point you have deployed a simple contract to a LocalNet Algorand network (running in your Docker environment) and called it successfully!\n\nAdditionally, you can find the native AVM related artifacts and the appropriate smart contract manifest JSON files have been output to the `artifacts` folder. 
Note that in this example, AVM bytecode is compiled using the `PuyaPy` compiler, which is what `Algorand Python` relies on to compile your Python smart contract code to TEAL ([Transaction Execution Approval Language](https://dev.algorand.co/concepts/smart-contracts/languages/teal)).\n\n### Using AlgoKit CLI to build and deploy contracts 🛠️\n\nWhen using official AlgoKit templates via `algokit init`, you can manage your projects using the AlgoKit CLI at the convenience of your terminal.\n\nFor example, to achieve the same results as in the demo earlier, build and deploy the `HelloWorld` contract from the `contract.py` file by running the following command from the terminal:\n\n```shell\nalgokit project run build\n```\n\nThis command will recompile your python contracts from `contract.py` and produce the necessary artefacts for deploying your contract.\n\n```shell\nalgokit project deploy localnet\n```\n\nThis will then deploy to your LocalNet instance and display the same output as in the earlier demo.\n\n> Please note, as highlighted in the [structure](#algokit-project-structure) section above, the `project run ...` commands are defined in the respective `.algokit.toml` file under `[project]` sections.\n\n## Next steps 🚶‍♂️\n\nWe have only covered a tiny fraction of the capabilities of the AlgoKit CLI and its related ecosystem of templates and utilities for an efficient developer experience.\n\n- To get the most out of `AlgoKit`, we recommend to get started with learning more about AlgoKit and what you can do with it by checking out our extensive [AlgoKit CLI documentation](../algokit.md).\n- Explore the `README.md` files at the root of any project created via `algokit init`. 
All official AlgoKit templates include detailed quick start guides, an interactive code tour and various presets which can be customized to your needs.\n- To learn more about `Algorand Python`, take a look at the [documentation](https://algorandfoundation.github.io/puya/).\n- To learn more about the commands demonstrated in this tutorial, refer to [`init`](../features/init.md) and [`project`](../features/project.md) to get a comprehensive understanding of their further capabilities.\n- If you'd like to learn more on structuring your `AlgoKit Project` as a monorepo, refer to `workspace` mode as described in [`init`](../features/init.md#workspaces-vs-standalone-projects). You can also pass the `--no-workspace` flag to set up a standalone algokit project, if preferred.\n- If you'd like to **create your own** `AlgoKit` template, refer to the [template tutorial](./algokit-template.md).\n- More information on Algorand smart contracts is also available in the [smart contract documentation](https://dev.algorand.co/concepts/smart-contracts/overview).\n"
  },
  {
    "path": "docs/tutorials/smart-contracts.md",
    "content": "# Smart Contract Tutorial\n\n_TODO_\n\n[mental model image]\n\n- Lifecycle\n- Deploy-time immutability and permanence controls\n- Deployment automation\n- ...\n\n## Next steps\n\n- Read the architecture decision\n"
  },
  {
    "path": "entitlements.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n        <key>com.apple.security.cs.allow-jit</key>\n        <true/>\n        <key>com.apple.security.cs.allow-unsigned-executable-memory</key>\n        <true/>\n</dict>\n</plist>\n"
  },
  {
    "path": "misc/multiformats_config/multibase-table.json",
    "content": "[\n    {\n        \"name\": \"identity\",\n        \"code\": \"0x00\",\n        \"status\": \"reserved\",\n        \"description\": \"(No base encoding)\"\n    },\n    {\n        \"name\": \"base2\",\n        \"code\": \"0\",\n        \"status\": \"experimental\",\n        \"description\": \"Binary (01010101)\"\n    },\n    {\n        \"name\": \"base8\",\n        \"code\": \"7\",\n        \"status\": \"draft\",\n        \"description\": \"Octal\"\n    },\n    {\n        \"name\": \"base10\",\n        \"code\": \"9\",\n        \"status\": \"draft\",\n        \"description\": \"Decimal\"\n    },\n    {\n        \"name\": \"base32upper\",\n        \"code\": \"B\",\n        \"status\": \"final\",\n        \"description\": \"RFC4648 case-insensitive - no padding\"\n    },\n    {\n        \"name\": \"base32padupper\",\n        \"code\": \"C\",\n        \"status\": \"draft\",\n        \"description\": \"RFC4648 case-insensitive - with padding\"\n    },\n    {\n        \"name\": \"base16upper\",\n        \"code\": \"F\",\n        \"status\": \"final\",\n        \"description\": \"Hexadecimal (uppercase)\"\n    },\n    {\n        \"name\": \"base36upper\",\n        \"code\": \"K\",\n        \"status\": \"draft\",\n        \"description\": \"Base36 [0-9a-z] case-insensitive - no padding\"\n    },\n    {\n        \"name\": \"base64pad\",\n        \"code\": \"M\",\n        \"status\": \"experimental\",\n        \"description\": \"RFC4648 with padding - MIME encoding\"\n    },\n    {\n        \"name\": \"base32hexpadupper\",\n        \"code\": \"T\",\n        \"status\": \"experimental\",\n        \"description\": \"RFC4648 case-insensitive - with padding\"\n    },\n    {\n        \"name\": \"base64urlpad\",\n        \"code\": \"U\",\n        \"status\": \"final\",\n        \"description\": \"RFC4648 with padding\"\n    },\n    {\n        \"name\": \"base32hexupper\",\n        \"code\": \"V\",\n        \"status\": \"experimental\",\n        \"description\": 
\"RFC4648 case-insensitive - no padding - highest char\"\n    },\n    {\n        \"name\": \"base58flickr\",\n        \"code\": \"Z\",\n        \"status\": \"experimental\",\n        \"description\": \"Base58 Flicker\"\n    },\n    {\n        \"name\": \"base32\",\n        \"code\": \"b\",\n        \"status\": \"final\",\n        \"description\": \"RFC4648 case-insensitive - no padding\"\n    },\n    {\n        \"name\": \"base32pad\",\n        \"code\": \"c\",\n        \"status\": \"draft\",\n        \"description\": \"RFC4648 case-insensitive - with padding\"\n    },\n    {\n        \"name\": \"base16\",\n        \"code\": \"f\",\n        \"status\": \"final\",\n        \"description\": \"Hexadecimal (lowercase)\"\n    },\n    {\n        \"name\": \"base32z\",\n        \"code\": \"h\",\n        \"status\": \"draft\",\n        \"description\": \"z-base-32 (used by Tahoe-LAFS)\"\n    },\n    {\n        \"name\": \"base36\",\n        \"code\": \"k\",\n        \"status\": \"draft\",\n        \"description\": \"Base36 [0-9a-z] case-insensitive - no padding\"\n    },\n    {\n        \"name\": \"base64\",\n        \"code\": \"m\",\n        \"status\": \"final\",\n        \"description\": \"RFC4648 no padding\"\n    },\n    {\n        \"name\": \"proquint\",\n        \"code\": \"p\",\n        \"status\": \"experimental\",\n        \"description\": \"Proquint (https://arxiv.org/html/0901.4016)\"\n    },\n    {\n        \"name\": \"base32hexpad\",\n        \"code\": \"t\",\n        \"status\": \"experimental\",\n        \"description\": \"RFC4648 case-insensitive - with padding\"\n    },\n    {\n        \"name\": \"base64url\",\n        \"code\": \"u\",\n        \"status\": \"final\",\n        \"description\": \"RFC4648 no padding\"\n    },\n    {\n        \"name\": \"base32hex\",\n        \"code\": \"v\",\n        \"status\": \"experimental\",\n        \"description\": \"RFC4648 case-insensitive - no padding - highest char\"\n    },\n    {\n        \"name\": 
\"base58btc\",\n        \"code\": \"z\",\n        \"status\": \"final\",\n        \"description\": \"Base58 Bitcoin\"\n    },\n    {\n        \"name\": \"base256emoji\",\n        \"code\": \"0x01F680\",\n        \"status\": \"experimental\",\n        \"description\": \"base256 with custom alphabet using variable-sized-codepoints\"\n    }\n]"
  },
  {
    "path": "misc/multiformats_config/multicodec-table.json",
    "content": "[\n    {\n        \"name\": \"identity\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x00\",\n        \"status\": \"permanent\",\n        \"description\": \"raw binary\"\n    },\n    {\n        \"name\": \"cidv1\",\n        \"tag\": \"cid\",\n        \"code\": \"0x01\",\n        \"status\": \"permanent\",\n        \"description\": \"CIDv1\"\n    },\n    {\n        \"name\": \"cidv2\",\n        \"tag\": \"cid\",\n        \"code\": \"0x02\",\n        \"status\": \"draft\",\n        \"description\": \"CIDv2\"\n    },\n    {\n        \"name\": \"cidv3\",\n        \"tag\": \"cid\",\n        \"code\": \"0x03\",\n        \"status\": \"draft\",\n        \"description\": \"CIDv3\"\n    },\n    {\n        \"name\": \"ip4\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x04\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"tcp\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x06\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha1\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x11\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha2-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x12\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha2-512\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x13\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha3-512\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x14\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha3-384\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x15\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha3-256\",\n        \"tag\": 
\"multihash\",\n        \"code\": \"0x16\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha3-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x17\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"shake-128\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x18\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"shake-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x19\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"keccak-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1a\",\n        \"status\": \"draft\",\n        \"description\": \"keccak has variable output length. The number specifies the core length\"\n    },\n    {\n        \"name\": \"keccak-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"keccak-384\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"keccak-512\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake3\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1e\",\n        \"status\": \"draft\",\n        \"description\": \"BLAKE3 has a default 32 byte output length. 
The maximum length is (2^64)-1 bytes.\"\n    },\n    {\n        \"name\": \"sha2-384\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x20\",\n        \"status\": \"permanent\",\n        \"description\": \"aka SHA-384; as specified by FIPS 180-4.\"\n    },\n    {\n        \"name\": \"dccp\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x21\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"murmur3-x64-64\",\n        \"tag\": \"hash\",\n        \"code\": \"0x22\",\n        \"status\": \"permanent\",\n        \"description\": \"The first 64-bits of a murmur3-x64-128 - used for UnixFS directory sharding.\"\n    },\n    {\n        \"name\": \"murmur3-32\",\n        \"tag\": \"hash\",\n        \"code\": \"0x23\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"ip6\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x29\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"ip6zone\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x2a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"ipcidr\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x2b\",\n        \"status\": \"draft\",\n        \"description\": \"CIDR mask for IP addresses\"\n    },\n    {\n        \"name\": \"path\",\n        \"tag\": \"namespace\",\n        \"code\": \"0x2f\",\n        \"status\": \"permanent\",\n        \"description\": \"Namespace for string paths. 
Corresponds to `/` in ASCII.\"\n    },\n    {\n        \"name\": \"multicodec\",\n        \"tag\": \"multiformat\",\n        \"code\": \"0x30\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"multihash\",\n        \"tag\": \"multiformat\",\n        \"code\": \"0x31\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"multiaddr\",\n        \"tag\": \"multiformat\",\n        \"code\": \"0x32\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"multibase\",\n        \"tag\": \"multiformat\",\n        \"code\": \"0x33\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"varsig\",\n        \"tag\": \"multiformat\",\n        \"code\": \"0x34\",\n        \"status\": \"draft\",\n        \"description\": \"Variable signature (varsig) multiformat\"\n    },\n    {\n        \"name\": \"dns\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x35\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"dns4\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x36\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"dns6\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x37\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"dnsaddr\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x38\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"protobuf\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x50\",\n        \"status\": \"draft\",\n        \"description\": \"Protocol Buffers\"\n    },\n    {\n        \"name\": \"cbor\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x51\",\n        \"status\": \"permanent\",\n        \"description\": 
\"CBOR\"\n    },\n    {\n        \"name\": \"raw\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x55\",\n        \"status\": \"permanent\",\n        \"description\": \"raw binary\"\n    },\n    {\n        \"name\": \"dbl-sha2-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x56\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"rlp\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x60\",\n        \"status\": \"draft\",\n        \"description\": \"recursive length prefix\"\n    },\n    {\n        \"name\": \"bencode\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x63\",\n        \"status\": \"draft\",\n        \"description\": \"bencode\"\n    },\n    {\n        \"name\": \"dag-pb\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x70\",\n        \"status\": \"permanent\",\n        \"description\": \"MerkleDAG protobuf\"\n    },\n    {\n        \"name\": \"dag-cbor\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x71\",\n        \"status\": \"permanent\",\n        \"description\": \"MerkleDAG cbor\"\n    },\n    {\n        \"name\": \"libp2p-key\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x72\",\n        \"status\": \"permanent\",\n        \"description\": \"Libp2p Public Key\"\n    },\n    {\n        \"name\": \"git-raw\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x78\",\n        \"status\": \"permanent\",\n        \"description\": \"Raw Git object\"\n    },\n    {\n        \"name\": \"torrent-info\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x7b\",\n        \"status\": \"draft\",\n        \"description\": \"Torrent file info field (bencoded)\"\n    },\n    {\n        \"name\": \"torrent-file\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x7c\",\n        \"status\": \"draft\",\n        \"description\": \"Torrent file (bencoded)\"\n    },\n    {\n        \"name\": \"leofcoin-block\",\n        \"tag\": \"ipld\",\n        \"code\": 
\"0x81\",\n        \"status\": \"draft\",\n        \"description\": \"Leofcoin Block\"\n    },\n    {\n        \"name\": \"leofcoin-tx\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x82\",\n        \"status\": \"draft\",\n        \"description\": \"Leofcoin Transaction\"\n    },\n    {\n        \"name\": \"leofcoin-pr\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x83\",\n        \"status\": \"draft\",\n        \"description\": \"Leofcoin Peer Reputation\"\n    },\n    {\n        \"name\": \"sctp\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x84\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"dag-jose\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x85\",\n        \"status\": \"draft\",\n        \"description\": \"MerkleDAG JOSE\"\n    },\n    {\n        \"name\": \"dag-cose\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x86\",\n        \"status\": \"draft\",\n        \"description\": \"MerkleDAG COSE\"\n    },\n    {\n        \"name\": \"lbry\",\n        \"tag\": \"namespace\",\n        \"code\": \"0x8c\",\n        \"status\": \"draft\",\n        \"description\": \"LBRY Address\"\n    },\n    {\n        \"name\": \"eth-block\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x90\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Header (RLP)\"\n    },\n    {\n        \"name\": \"eth-block-list\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x91\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Header List (RLP)\"\n    },\n    {\n        \"name\": \"eth-tx-trie\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x92\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Transaction Trie (Eth-Trie)\"\n    },\n    {\n        \"name\": \"eth-tx\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x93\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Transaction 
(MarshalBinary)\"\n    },\n    {\n        \"name\": \"eth-tx-receipt-trie\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x94\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Transaction Receipt Trie (Eth-Trie)\"\n    },\n    {\n        \"name\": \"eth-tx-receipt\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x95\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Transaction Receipt (MarshalBinary)\"\n    },\n    {\n        \"name\": \"eth-state-trie\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x96\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum State Trie (Eth-Secure-Trie)\"\n    },\n    {\n        \"name\": \"eth-account-snapshot\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x97\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Account Snapshot (RLP)\"\n    },\n    {\n        \"name\": \"eth-storage-trie\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x98\",\n        \"status\": \"permanent\",\n        \"description\": \"Ethereum Contract Storage Trie (Eth-Secure-Trie)\"\n    },\n    {\n        \"name\": \"eth-receipt-log-trie\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x99\",\n        \"status\": \"draft\",\n        \"description\": \"Ethereum Transaction Receipt Log Trie (Eth-Trie)\"\n    },\n    {\n        \"name\": \"eth-receipt-log\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x9a\",\n        \"status\": \"draft\",\n        \"description\": \"Ethereum Transaction Receipt Log (RLP)\"\n    },\n    {\n        \"name\": \"aes-128\",\n        \"tag\": \"key\",\n        \"code\": \"0xa0\",\n        \"status\": \"draft\",\n        \"description\": \"128-bit AES symmetric key\"\n    },\n    {\n        \"name\": \"aes-192\",\n        \"tag\": \"key\",\n        \"code\": \"0xa1\",\n        \"status\": \"draft\",\n        \"description\": \"192-bit AES symmetric key\"\n    },\n    {\n        \"name\": \"aes-256\",\n    
    \"tag\": \"key\",\n        \"code\": \"0xa2\",\n        \"status\": \"draft\",\n        \"description\": \"256-bit AES symmetric key\"\n    },\n    {\n        \"name\": \"chacha-128\",\n        \"tag\": \"key\",\n        \"code\": \"0xa3\",\n        \"status\": \"draft\",\n        \"description\": \"128-bit ChaCha symmetric key\"\n    },\n    {\n        \"name\": \"chacha-256\",\n        \"tag\": \"key\",\n        \"code\": \"0xa4\",\n        \"status\": \"draft\",\n        \"description\": \"256-bit ChaCha symmetric key\"\n    },\n    {\n        \"name\": \"bitcoin-block\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xb0\",\n        \"status\": \"permanent\",\n        \"description\": \"Bitcoin Block\"\n    },\n    {\n        \"name\": \"bitcoin-tx\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xb1\",\n        \"status\": \"permanent\",\n        \"description\": \"Bitcoin Tx\"\n    },\n    {\n        \"name\": \"bitcoin-witness-commitment\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xb2\",\n        \"status\": \"permanent\",\n        \"description\": \"Bitcoin Witness Commitment\"\n    },\n    {\n        \"name\": \"zcash-block\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xc0\",\n        \"status\": \"permanent\",\n        \"description\": \"Zcash Block\"\n    },\n    {\n        \"name\": \"zcash-tx\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xc1\",\n        \"status\": \"permanent\",\n        \"description\": \"Zcash Tx\"\n    },\n    {\n        \"name\": \"caip-50\",\n        \"tag\": \"multiformat\",\n        \"code\": \"0xca\",\n        \"status\": \"draft\",\n        \"description\": \"CAIP-50 multi-chain account id\"\n    },\n    {\n        \"name\": \"streamid\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xce\",\n        \"status\": \"draft\",\n        \"description\": \"Ceramic Stream Id\"\n    },\n    {\n        \"name\": \"stellar-block\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xd0\",\n      
  \"status\": \"draft\",\n        \"description\": \"Stellar Block\"\n    },\n    {\n        \"name\": \"stellar-tx\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xd1\",\n        \"status\": \"draft\",\n        \"description\": \"Stellar Tx\"\n    },\n    {\n        \"name\": \"md4\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xd4\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"md5\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xd5\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"decred-block\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xe0\",\n        \"status\": \"draft\",\n        \"description\": \"Decred Block\"\n    },\n    {\n        \"name\": \"decred-tx\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xe1\",\n        \"status\": \"draft\",\n        \"description\": \"Decred Tx\"\n    },\n    {\n        \"name\": \"ipld\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xe2\",\n        \"status\": \"draft\",\n        \"description\": \"IPLD path\"\n    },\n    {\n        \"name\": \"ipfs\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xe3\",\n        \"status\": \"draft\",\n        \"description\": \"IPFS path\"\n    },\n    {\n        \"name\": \"swarm\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xe4\",\n        \"status\": \"draft\",\n        \"description\": \"Swarm path\"\n    },\n    {\n        \"name\": \"ipns\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xe5\",\n        \"status\": \"draft\",\n        \"description\": \"IPNS path\"\n    },\n    {\n        \"name\": \"zeronet\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xe6\",\n        \"status\": \"draft\",\n        \"description\": \"ZeroNet site address\"\n    },\n    {\n        \"name\": \"secp256k1-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xe7\",\n        \"status\": \"draft\",\n     
   \"description\": \"Secp256k1 public key (compressed)\"\n    },\n    {\n        \"name\": \"dnslink\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xe8\",\n        \"status\": \"permanent\",\n        \"description\": \"DNSLink path\"\n    },\n    {\n        \"name\": \"bls12_381-g1-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xea\",\n        \"status\": \"draft\",\n        \"description\": \"BLS12-381 public key in the G1 field\"\n    },\n    {\n        \"name\": \"bls12_381-g2-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xeb\",\n        \"status\": \"draft\",\n        \"description\": \"BLS12-381 public key in the G2 field\"\n    },\n    {\n        \"name\": \"x25519-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xec\",\n        \"status\": \"draft\",\n        \"description\": \"Curve25519 public key\"\n    },\n    {\n        \"name\": \"ed25519-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xed\",\n        \"status\": \"draft\",\n        \"description\": \"Ed25519 public key\"\n    },\n    {\n        \"name\": \"bls12_381-g1g2-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xee\",\n        \"status\": \"draft\",\n        \"description\": \"BLS12-381 concatenated public keys in both the G1 and G2 fields\"\n    },\n    {\n        \"name\": \"sr25519-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xef\",\n        \"status\": \"draft\",\n        \"description\": \"Sr25519 public key\"\n    },\n    {\n        \"name\": \"dash-block\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xf0\",\n        \"status\": \"draft\",\n        \"description\": \"Dash Block\"\n    },\n    {\n        \"name\": \"dash-tx\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xf1\",\n        \"status\": \"draft\",\n        \"description\": \"Dash Tx\"\n    },\n    {\n        \"name\": \"swarm-manifest\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xfa\",\n        \"status\": \"draft\",\n        \"description\": 
\"Swarm Manifest\"\n    },\n    {\n        \"name\": \"swarm-feed\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xfb\",\n        \"status\": \"draft\",\n        \"description\": \"Swarm Feed\"\n    },\n    {\n        \"name\": \"beeson\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xfc\",\n        \"status\": \"draft\",\n        \"description\": \"Swarm BeeSon\"\n    },\n    {\n        \"name\": \"udp\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0111\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"p2p-webrtc-star\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0113\",\n        \"status\": \"deprecated\",\n        \"description\": \"Use webrtc or webrtc-direct instead\"\n    },\n    {\n        \"name\": \"p2p-webrtc-direct\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0114\",\n        \"status\": \"deprecated\",\n        \"description\": \"Use webrtc or webrtc-direct instead\"\n    },\n    {\n        \"name\": \"p2p-stardust\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0115\",\n        \"status\": \"deprecated\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"webrtc-direct\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0118\",\n        \"status\": \"draft\",\n        \"description\": \"ICE-lite webrtc transport with SDP munging during connection establishment and without use of a STUN server\"\n    },\n    {\n        \"name\": \"webrtc\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0119\",\n        \"status\": \"draft\",\n        \"description\": \"webrtc transport where connection establishment is according to w3c spec\"\n    },\n    {\n        \"name\": \"p2p-circuit\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0122\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"dag-json\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x0129\",\n 
       \"status\": \"permanent\",\n        \"description\": \"MerkleDAG json\"\n    },\n    {\n        \"name\": \"udt\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x012d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"utp\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x012e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"crc32\",\n        \"tag\": \"hash\",\n        \"code\": \"0x0132\",\n        \"status\": \"draft\",\n        \"description\": \"CRC-32 non-cryptographic hash algorithm (IEEE 802.3)\"\n    },\n    {\n        \"name\": \"crc64-ecma\",\n        \"tag\": \"hash\",\n        \"code\": \"0x0164\",\n        \"status\": \"draft\",\n        \"description\": \"CRC-64 non-cryptographic hash algorithm (ECMA-182 - Annex B)\"\n    },\n    {\n        \"name\": \"unix\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0190\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"thread\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x0196\",\n        \"status\": \"draft\",\n        \"description\": \"Textile Thread\"\n    },\n    {\n        \"name\": \"p2p\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01a5\",\n        \"status\": \"permanent\",\n        \"description\": \"libp2p\"\n    },\n    {\n        \"name\": \"https\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01bb\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"onion\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01bc\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"onion3\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01bd\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"garlic64\",\n        \"tag\": 
\"multiaddr\",\n        \"code\": \"0x01be\",\n        \"status\": \"draft\",\n        \"description\": \"I2P base64 (raw public key)\"\n    },\n    {\n        \"name\": \"garlic32\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01bf\",\n        \"status\": \"draft\",\n        \"description\": \"I2P base32 (hashed public key or encoded public key/checksum+optional secret)\"\n    },\n    {\n        \"name\": \"tls\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01c0\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sni\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01c1\",\n        \"status\": \"draft\",\n        \"description\": \"Server Name Indication RFC 6066 \\u00a7 3\"\n    },\n    {\n        \"name\": \"noise\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01c6\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"shs\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01c8\",\n        \"status\": \"draft\",\n        \"description\": \"Secure Scuttlebutt - Secret Handshake Stream\"\n    },\n    {\n        \"name\": \"quic\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01cc\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"quic-v1\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01cd\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"webtransport\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01d1\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"certhash\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01d2\",\n        \"status\": \"draft\",\n        \"description\": \"TLS certificate's fingerprint as a multihash\"\n    },\n    {\n        \"name\": \"ws\",\n        \"tag\": \"multiaddr\",\n        \"code\": 
\"0x01dd\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"wss\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01de\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"p2p-websocket-star\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01df\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"http\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x01e0\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"swhid-1-snp\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x01f0\",\n        \"status\": \"draft\",\n        \"description\": \"SoftWare Heritage persistent IDentifier version 1 snapshot\"\n    },\n    {\n        \"name\": \"json\",\n        \"tag\": \"ipld\",\n        \"code\": \"0x0200\",\n        \"status\": \"permanent\",\n        \"description\": \"JSON (UTF-8-encoded)\"\n    },\n    {\n        \"name\": \"messagepack\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x0201\",\n        \"status\": \"draft\",\n        \"description\": \"MessagePack\"\n    },\n    {\n        \"name\": \"car\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x0202\",\n        \"status\": \"draft\",\n        \"description\": \"Content Addressable aRchive (CAR)\"\n    },\n    {\n        \"name\": \"ipns-record\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x0300\",\n        \"status\": \"permanent\",\n        \"description\": \"Signed IPNS Record\"\n    },\n    {\n        \"name\": \"libp2p-peer-record\",\n        \"tag\": \"libp2p\",\n        \"code\": \"0x0301\",\n        \"status\": \"permanent\",\n        \"description\": \"libp2p peer record type\"\n    },\n    {\n        \"name\": \"libp2p-relay-rsvp\",\n        \"tag\": \"libp2p\",\n        \"code\": \"0x0302\",\n        \"status\": 
\"permanent\",\n        \"description\": \"libp2p relay reservation voucher\"\n    },\n    {\n        \"name\": \"memorytransport\",\n        \"tag\": \"libp2p\",\n        \"code\": \"0x0309\",\n        \"status\": \"permanent\",\n        \"description\": \"in memory transport for self-dialing and testing; arbitrary\"\n    },\n    {\n        \"name\": \"car-index-sorted\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x0400\",\n        \"status\": \"draft\",\n        \"description\": \"CARv2 IndexSorted index format\"\n    },\n    {\n        \"name\": \"car-multihash-index-sorted\",\n        \"tag\": \"serialization\",\n        \"code\": \"0x0401\",\n        \"status\": \"draft\",\n        \"description\": \"CARv2 MultihashIndexSorted index format\"\n    },\n    {\n        \"name\": \"transport-bitswap\",\n        \"tag\": \"transport\",\n        \"code\": \"0x0900\",\n        \"status\": \"draft\",\n        \"description\": \"Bitswap datatransfer\"\n    },\n    {\n        \"name\": \"transport-graphsync-filecoinv1\",\n        \"tag\": \"transport\",\n        \"code\": \"0x0910\",\n        \"status\": \"draft\",\n        \"description\": \"Filecoin graphsync datatransfer\"\n    },\n    {\n        \"name\": \"transport-ipfs-gateway-http\",\n        \"tag\": \"transport\",\n        \"code\": \"0x0920\",\n        \"status\": \"draft\",\n        \"description\": \"HTTP IPFS Gateway trustless datatransfer\"\n    },\n    {\n        \"name\": \"multidid\",\n        \"tag\": \"multiformat\",\n        \"code\": \"0x0d1d\",\n        \"status\": \"draft\",\n        \"description\": \"Compact encoding for Decentralized Identifers\"\n    },\n    {\n        \"name\": \"sha2-256-trunc254-padded\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1012\",\n        \"status\": \"permanent\",\n        \"description\": \"SHA2-256 with the two most significant bits from the last byte zeroed (as via a mask with 0b00111111) - used for proving trees as in Filecoin\"\n  
  },\n    {\n        \"name\": \"sha2-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1013\",\n        \"status\": \"permanent\",\n        \"description\": \"aka SHA-224; as specified by FIPS 180-4.\"\n    },\n    {\n        \"name\": \"sha2-512-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1014\",\n        \"status\": \"permanent\",\n        \"description\": \"aka SHA-512/224; as specified by FIPS 180-4.\"\n    },\n    {\n        \"name\": \"sha2-512-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1015\",\n        \"status\": \"permanent\",\n        \"description\": \"aka SHA-512/256; as specified by FIPS 180-4.\"\n    },\n    {\n        \"name\": \"murmur3-x64-128\",\n        \"tag\": \"hash\",\n        \"code\": \"0x1022\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"ripemd-128\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1052\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"ripemd-160\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1053\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"ripemd-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1054\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"ripemd-320\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1055\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"x11\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1100\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"p256-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0x1200\",\n        \"status\": \"draft\",\n        \"description\": \"P-256 public Key (compressed)\"\n    },\n    {\n        \"name\": \"p384-pub\",\n        \"tag\": \"key\",\n 
       \"code\": \"0x1201\",\n        \"status\": \"draft\",\n        \"description\": \"P-384 public Key (compressed)\"\n    },\n    {\n        \"name\": \"p521-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0x1202\",\n        \"status\": \"draft\",\n        \"description\": \"P-521 public Key (compressed)\"\n    },\n    {\n        \"name\": \"ed448-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0x1203\",\n        \"status\": \"draft\",\n        \"description\": \"Ed448 public Key\"\n    },\n    {\n        \"name\": \"x448-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0x1204\",\n        \"status\": \"draft\",\n        \"description\": \"X448 public Key\"\n    },\n    {\n        \"name\": \"rsa-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0x1205\",\n        \"status\": \"draft\",\n        \"description\": \"RSA public key. DER-encoded ASN.1 type RSAPublicKey according to IETF RFC 8017 (PKCS #1)\"\n    },\n    {\n        \"name\": \"sm2-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0x1206\",\n        \"status\": \"draft\",\n        \"description\": \"SM2 public key (compressed)\"\n    },\n    {\n        \"name\": \"ed25519-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1300\",\n        \"status\": \"draft\",\n        \"description\": \"Ed25519 private key\"\n    },\n    {\n        \"name\": \"secp256k1-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1301\",\n        \"status\": \"draft\",\n        \"description\": \"Secp256k1 private key\"\n    },\n    {\n        \"name\": \"x25519-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1302\",\n        \"status\": \"draft\",\n        \"description\": \"Curve25519 private key\"\n    },\n    {\n        \"name\": \"sr25519-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1303\",\n        \"status\": \"draft\",\n        \"description\": \"Sr25519 private key\"\n    },\n    {\n        \"name\": \"rsa-priv\",\n        \"tag\": \"key\",\n        
\"code\": \"0x1305\",\n        \"status\": \"draft\",\n        \"description\": \"RSA private key\"\n    },\n    {\n        \"name\": \"p256-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1306\",\n        \"status\": \"draft\",\n        \"description\": \"P-256 private key\"\n    },\n    {\n        \"name\": \"p384-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1307\",\n        \"status\": \"draft\",\n        \"description\": \"P-384 private key\"\n    },\n    {\n        \"name\": \"p521-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1308\",\n        \"status\": \"draft\",\n        \"description\": \"P-521 private key\"\n    },\n    {\n        \"name\": \"bls12_381-g1-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x1309\",\n        \"status\": \"draft\",\n        \"description\": \"BLS12-381 G1 private key\"\n    },\n    {\n        \"name\": \"bls12_381-g2-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x130a\",\n        \"status\": \"draft\",\n        \"description\": \"BLS12-381 G2 private key\"\n    },\n    {\n        \"name\": \"bls12_381-g1g2-priv\",\n        \"tag\": \"key\",\n        \"code\": \"0x130b\",\n        \"status\": \"draft\",\n        \"description\": \"BLS12-381 G1 and G2 private key\"\n    },\n    {\n        \"name\": \"kangarootwelve\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x1d01\",\n        \"status\": \"draft\",\n        \"description\": \"KangarooTwelve is an extendable-output hash function based on Keccak-p\"\n    },\n    {\n        \"name\": \"aes-gcm-256\",\n        \"tag\": \"encryption\",\n        \"code\": \"0x2000\",\n        \"status\": \"draft\",\n        \"description\": \"AES Galois/Counter Mode with 256-bit key and 12-byte IV\"\n    },\n    {\n        \"name\": \"silverpine\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x3f42\",\n        \"status\": \"draft\",\n        \"description\": \"Experimental QUIC over yggdrasil and ironwood routing 
protocol\"\n    },\n    {\n        \"name\": \"sm3-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0x534d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"sha256a\",\n        \"tag\": \"hash\",\n        \"code\": \"0x7012\",\n        \"status\": \"draft\",\n        \"description\": \"The sum of multiple sha2-256 hashes; as specified by Ceramic CIP-124.\"\n    },\n    {\n        \"name\": \"blake2b-8\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb201\",\n        \"status\": \"draft\",\n        \"description\": \"Blake2b consists of 64 output lengths that give different hashes\"\n    },\n    {\n        \"name\": \"blake2b-16\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb202\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-24\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb203\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-32\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb204\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-40\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb205\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-48\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb206\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-56\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb207\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-64\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb208\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-72\",\n        \"tag\": \"multihash\",\n        
\"code\": \"0xb209\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-80\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb20a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-88\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb20b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-96\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb20c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-104\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb20d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-112\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb20e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-120\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb20f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-128\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb210\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-136\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb211\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-144\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb212\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-152\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb213\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-160\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb214\",\n        \"status\": 
\"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-168\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb215\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-176\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb216\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-184\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb217\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-192\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb218\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-200\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb219\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-208\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb21a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-216\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb21b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb21c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-232\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb21d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-240\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb21e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-248\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb21f\",\n        \"status\": \"draft\",\n        \"description\": 
\"\"\n    },\n    {\n        \"name\": \"blake2b-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb220\",\n        \"status\": \"permanent\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-264\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb221\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-272\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb222\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-280\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb223\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-288\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb224\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-296\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb225\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-304\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb226\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-312\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb227\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-320\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb228\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-328\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb229\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-336\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb22a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        
\"name\": \"blake2b-344\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb22b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-352\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb22c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-360\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb22d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-368\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb22e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-376\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb22f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-384\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb230\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-392\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb231\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-400\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb232\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-408\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb233\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-416\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb234\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-424\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb235\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-432\",\n        
\"tag\": \"multihash\",\n        \"code\": \"0xb236\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-440\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb237\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-448\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb238\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-456\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb239\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-464\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb23a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-472\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb23b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-480\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb23c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-488\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb23d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-496\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb23e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-504\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb23f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2b-512\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb240\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-8\",\n        \"tag\": \"multihash\",\n        \"code\": 
\"0xb241\",\n        \"status\": \"draft\",\n        \"description\": \"Blake2s consists of 32 output lengths that give different hashes\"\n    },\n    {\n        \"name\": \"blake2s-16\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb242\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-24\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb243\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-32\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb244\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-40\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb245\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-48\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb246\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-56\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb247\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-64\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb248\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-72\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb249\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-80\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb24a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-88\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb24b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-96\",\n        \"tag\": \"multihash\",\n       
 \"code\": \"0xb24c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-104\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb24d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-112\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb24e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-120\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb24f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-128\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb250\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-136\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb251\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-144\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb252\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-152\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb253\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-160\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb254\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-168\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb255\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-176\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb256\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-184\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb257\",\n        
\"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-192\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb258\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-200\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb259\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-208\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb25a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-216\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb25b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb25c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-232\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb25d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-240\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb25e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-248\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb25f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"blake2s-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb260\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-8\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb301\",\n        \"status\": \"draft\",\n        \"description\": \"Skein256 consists of 32 output lengths that give different hashes\"\n    },\n    {\n        \"name\": \"skein256-16\",\n        \"tag\": \"multihash\",\n        
\"code\": \"0xb302\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-24\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb303\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-32\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb304\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-40\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb305\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-48\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb306\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-56\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb307\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-64\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb308\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-72\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb309\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-80\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb30a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-88\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb30b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-96\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb30c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-104\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb30d\",\n        
\"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-112\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb30e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-120\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb30f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-128\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb310\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-136\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb311\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-144\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb312\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-152\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb313\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-160\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb314\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-168\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb315\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-176\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb316\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-184\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb317\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-192\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb318\",\n        \"status\": \"draft\",\n       
 \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-200\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb319\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-208\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb31a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-216\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb31b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb31c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-232\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb31d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-240\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb31e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-248\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb31f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein256-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb320\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-8\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb321\",\n        \"status\": \"draft\",\n        \"description\": \"Skein512 consists of 64 output lengths that give different hashes\"\n    },\n    {\n        \"name\": \"skein512-16\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb322\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-24\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb323\",\n        
\"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-32\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb324\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-40\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb325\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-48\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb326\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-56\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb327\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-64\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb328\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-72\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb329\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-80\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb32a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-88\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb32b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-96\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb32c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-104\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb32d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-112\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb32e\",\n        \"status\": \"draft\",\n        
\"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-120\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb32f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-128\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb330\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-136\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb331\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-144\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb332\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-152\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb333\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-160\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb334\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-168\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb335\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-176\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb336\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-184\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb337\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-192\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb338\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-200\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb339\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    
},\n    {\n        \"name\": \"skein512-208\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb33a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-216\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb33b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-224\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb33c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-232\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb33d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-240\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb33e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-248\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb33f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb340\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-264\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb341\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-272\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb342\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-280\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb343\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-288\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb344\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": 
\"skein512-296\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb345\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-304\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb346\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-312\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb347\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-320\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb348\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-328\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb349\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-336\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb34a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-344\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb34b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-352\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb34c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-360\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb34d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-368\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb34e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-376\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb34f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-384\",\n        
\"tag\": \"multihash\",\n        \"code\": \"0xb350\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-392\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb351\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-400\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb352\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-408\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb353\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-416\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb354\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-424\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb355\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-432\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb356\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-440\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb357\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-448\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb358\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-456\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb359\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-464\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb35a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-472\",\n        \"tag\": \"multihash\",\n      
  \"code\": \"0xb35b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-480\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb35c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-488\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb35d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-496\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb35e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-504\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb35f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein512-512\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb360\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-8\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb361\",\n        \"status\": \"draft\",\n        \"description\": \"Skein1024 consists of 128 output lengths that give different hashes\"\n    },\n    {\n        \"name\": \"skein1024-16\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb362\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-24\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb363\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-32\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb364\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-40\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb365\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-48\",\n    
    \"tag\": \"multihash\",\n        \"code\": \"0xb366\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-56\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb367\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-64\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb368\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-72\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb369\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-80\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb36a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-88\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb36b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-96\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb36c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-104\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb36d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-112\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb36e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-120\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb36f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-128\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb370\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-136\",\n        \"tag\": 
\"multihash\",\n        \"code\": \"0xb371\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-144\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb372\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-152\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb373\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-160\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb374\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-168\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb375\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-176\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb376\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-184\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb377\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-192\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb378\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-200\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb379\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-208\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb37a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-216\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb37b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-224\",\n        \"tag\": \"multihash\",\n    
    \"code\": \"0xb37c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-232\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb37d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-240\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb37e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-248\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb37f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-256\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb380\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-264\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb381\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-272\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb382\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-280\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb383\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-288\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb384\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-296\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb385\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-304\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb386\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-312\",\n        \"tag\": \"multihash\",\n        \"code\": 
\"0xb387\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-320\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb388\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-328\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb389\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-336\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb38a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-344\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb38b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-352\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb38c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-360\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb38d\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-368\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb38e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-376\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb38f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-384\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb390\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-392\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb391\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-400\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb392\",\n       
 \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-408\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb393\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-416\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb394\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-424\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb395\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-432\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb396\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-440\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb397\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-448\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb398\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-456\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb399\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-464\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb39a\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-472\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb39b\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-480\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb39c\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-488\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb39d\",\n        \"status\": 
\"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-496\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb39e\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-504\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb39f\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-512\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a0\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-520\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a1\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-528\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a2\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-536\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a3\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-544\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a4\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-552\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a5\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-560\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a6\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-568\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a7\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-576\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a8\",\n        \"status\": \"draft\",\n        
\"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-584\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3a9\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-592\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3aa\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-600\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3ab\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-608\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3ac\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-616\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3ad\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-624\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3ae\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-632\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3af\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-640\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b0\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-648\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b1\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-656\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b2\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-664\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b3\",\n        \"status\": \"draft\",\n        \"description\": 
\"\"\n    },\n    {\n        \"name\": \"skein1024-672\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b4\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-680\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b5\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-688\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b6\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-696\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b7\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-704\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b8\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-712\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3b9\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-720\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3ba\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-728\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3bb\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-736\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3bc\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-744\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3bd\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-752\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3be\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    
{\n        \"name\": \"skein1024-760\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3bf\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-768\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c0\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-776\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c1\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-784\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c2\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-792\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c3\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-800\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c4\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-808\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c5\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-816\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c6\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-824\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c7\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-832\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c8\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-840\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3c9\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        
\"name\": \"skein1024-848\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3ca\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-856\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3cb\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-864\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3cc\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-872\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3cd\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-880\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3ce\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-888\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3cf\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-896\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d0\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-904\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d1\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-912\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d2\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-920\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d3\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-928\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d4\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": 
\"skein1024-936\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d5\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-944\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d6\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-952\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d7\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-960\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d8\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-968\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3d9\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-976\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3da\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-984\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3db\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-992\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3dc\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-1000\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3dd\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-1008\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3de\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"skein1024-1016\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3df\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": 
\"skein1024-1024\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb3e0\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"xxh-32\",\n        \"tag\": \"hash\",\n        \"code\": \"0xb3e1\",\n        \"status\": \"draft\",\n        \"description\": \"Extremely fast non-cryptographic hash algorithm\"\n    },\n    {\n        \"name\": \"xxh-64\",\n        \"tag\": \"hash\",\n        \"code\": \"0xb3e2\",\n        \"status\": \"draft\",\n        \"description\": \"Extremely fast non-cryptographic hash algorithm\"\n    },\n    {\n        \"name\": \"xxh3-64\",\n        \"tag\": \"hash\",\n        \"code\": \"0xb3e3\",\n        \"status\": \"draft\",\n        \"description\": \"Extremely fast non-cryptographic hash algorithm\"\n    },\n    {\n        \"name\": \"xxh3-128\",\n        \"tag\": \"hash\",\n        \"code\": \"0xb3e4\",\n        \"status\": \"draft\",\n        \"description\": \"Extremely fast non-cryptographic hash algorithm\"\n    },\n    {\n        \"name\": \"poseidon-bls12_381-a2-fc1\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb401\",\n        \"status\": \"permanent\",\n        \"description\": \"Poseidon using BLS12-381 and arity of 2 with Filecoin parameters\"\n    },\n    {\n        \"name\": \"poseidon-bls12_381-a2-fc1-sc\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb402\",\n        \"status\": \"draft\",\n        \"description\": \"Poseidon using BLS12-381 and arity of 2 with Filecoin parameters - high-security variant\"\n    },\n    {\n        \"name\": \"rdfc-1\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xb403\",\n        \"status\": \"draft\",\n        \"description\": \"The result of canonicalizing an input according to RDFC-1.0 and then expressing its hash value as a multihash value.\"\n    },\n    {\n        \"name\": \"ssz\",\n        \"tag\": \"serialization\",\n        \"code\": \"0xb501\",\n        \"status\": \"draft\",\n        
\"description\": \"SimpleSerialize (SSZ) serialization\"\n    },\n    {\n        \"name\": \"ssz-sha2-256-bmt\",\n        \"tag\": \"multihash\",\n        \"code\": \"0xb502\",\n        \"status\": \"draft\",\n        \"description\": \"SSZ Merkle tree root using SHA2-256 as the hashing function and SSZ serialization for the block binary\"\n    },\n    {\n        \"name\": \"json-jcs\",\n        \"tag\": \"ipld\",\n        \"code\": \"0xb601\",\n        \"status\": \"draft\",\n        \"description\": \"The result of canonicalizing an input according to JCS - JSON Canonicalisation Scheme (RFC 8785)\"\n    },\n    {\n        \"name\": \"iscc\",\n        \"tag\": \"softhash\",\n        \"code\": \"0xcc01\",\n        \"status\": \"draft\",\n        \"description\": \"ISCC (International Standard Content Code) - similarity preserving hash\"\n    },\n    {\n        \"name\": \"zeroxcert-imprint-256\",\n        \"tag\": \"zeroxcert\",\n        \"code\": \"0xce11\",\n        \"status\": \"draft\",\n        \"description\": \"0xcert Asset Imprint (root hash)\"\n    },\n    {\n        \"name\": \"nonstandard-sig\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd000\",\n        \"status\": \"deprecated\",\n        \"description\": \"Namespace for all not yet standard signature algorithms\"\n    },\n    {\n        \"name\": \"es256k\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd0e7\",\n        \"status\": \"draft\",\n        \"description\": \"ES256K Siganture Algorithm (secp256k1)\"\n    },\n    {\n        \"name\": \"bls-12381-g1-sig\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd0ea\",\n        \"status\": \"draft\",\n        \"description\": \"G1 signature for BLS-12381-G2\"\n    },\n    {\n        \"name\": \"bls-12381-g2-sig\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd0eb\",\n        \"status\": \"draft\",\n        \"description\": \"G2 signature for BLS-12381-G1\"\n    },\n    {\n        \"name\": \"eddsa\",\n        \"tag\": 
\"varsig\",\n        \"code\": \"0xd0ed\",\n        \"status\": \"draft\",\n        \"description\": \"Edwards-Curve Digital Signature Algorithm\"\n    },\n    {\n        \"name\": \"eip-191\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd191\",\n        \"status\": \"draft\",\n        \"description\": \"EIP-191 Ethereum Signed Data Standard\"\n    },\n    {\n        \"name\": \"jwk_jcs-pub\",\n        \"tag\": \"key\",\n        \"code\": \"0xeb51\",\n        \"status\": \"draft\",\n        \"description\": \"JSON object containing only the required members of a JWK (RFC 7518 and RFC 7517) representing the public key. Serialisation based on JCS (RFC 8785)\"\n    },\n    {\n        \"name\": \"fil-commitment-unsealed\",\n        \"tag\": \"filecoin\",\n        \"code\": \"0xf101\",\n        \"status\": \"permanent\",\n        \"description\": \"Filecoin piece or sector data commitment merkle node/root (CommP & CommD)\"\n    },\n    {\n        \"name\": \"fil-commitment-sealed\",\n        \"tag\": \"filecoin\",\n        \"code\": \"0xf102\",\n        \"status\": \"permanent\",\n        \"description\": \"Filecoin sector data commitment merkle node/root - sealed and replicated (CommR)\"\n    },\n    {\n        \"name\": \"plaintextv2\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0x706c61\",\n        \"status\": \"draft\",\n        \"description\": \"\"\n    },\n    {\n        \"name\": \"holochain-adr-v0\",\n        \"tag\": \"holochain\",\n        \"code\": \"0x807124\",\n        \"status\": \"draft\",\n        \"description\": \"Holochain v0 address    + 8 R-S (63 x Base-32)\"\n    },\n    {\n        \"name\": \"holochain-adr-v1\",\n        \"tag\": \"holochain\",\n        \"code\": \"0x817124\",\n        \"status\": \"draft\",\n        \"description\": \"Holochain v1 address    + 8 R-S (63 x Base-32)\"\n    },\n    {\n        \"name\": \"holochain-key-v0\",\n        \"tag\": \"holochain\",\n        \"code\": \"0x947124\",\n        \"status\": 
\"draft\",\n        \"description\": \"Holochain v0 public key + 8 R-S (63 x Base-32)\"\n    },\n    {\n        \"name\": \"holochain-key-v1\",\n        \"tag\": \"holochain\",\n        \"code\": \"0x957124\",\n        \"status\": \"draft\",\n        \"description\": \"Holochain v1 public key + 8 R-S (63 x Base-32)\"\n    },\n    {\n        \"name\": \"holochain-sig-v0\",\n        \"tag\": \"holochain\",\n        \"code\": \"0xa27124\",\n        \"status\": \"draft\",\n        \"description\": \"Holochain v0 signature  + 8 R-S (63 x Base-32)\"\n    },\n    {\n        \"name\": \"holochain-sig-v1\",\n        \"tag\": \"holochain\",\n        \"code\": \"0xa37124\",\n        \"status\": \"draft\",\n        \"description\": \"Holochain v1 signature  + 8 R-S (63 x Base-32)\"\n    },\n    {\n        \"name\": \"skynet-ns\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xb19910\",\n        \"status\": \"draft\",\n        \"description\": \"Skynet Namespace\"\n    },\n    {\n        \"name\": \"arweave-ns\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xb29910\",\n        \"status\": \"draft\",\n        \"description\": \"Arweave Namespace\"\n    },\n    {\n        \"name\": \"subspace-ns\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xb39910\",\n        \"status\": \"draft\",\n        \"description\": \"Subspace Network Namespace\"\n    },\n    {\n        \"name\": \"kumandra-ns\",\n        \"tag\": \"namespace\",\n        \"code\": \"0xb49910\",\n        \"status\": \"draft\",\n        \"description\": \"Kumandra Network Namespace\"\n    },\n    {\n        \"name\": \"es256\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd01200\",\n        \"status\": \"draft\",\n        \"description\": \"ES256 Signature Algorithm\"\n    },\n    {\n        \"name\": \"es284\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd01201\",\n        \"status\": \"draft\",\n        \"description\": \"ES384 Signature Algorithm\"\n    },\n    {\n        
\"name\": \"es512\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd01202\",\n        \"status\": \"draft\",\n        \"description\": \"ES512 Signature Algorithm\"\n    },\n    {\n        \"name\": \"rs256\",\n        \"tag\": \"varsig\",\n        \"code\": \"0xd01205\",\n        \"status\": \"draft\",\n        \"description\": \"RS256 Signature Algorithm\"\n    },\n    {\n        \"name\": \"scion\",\n        \"tag\": \"multiaddr\",\n        \"code\": \"0xd02000\",\n        \"status\": \"draft\",\n        \"description\": \"SCION Internet architecture\"\n    }\n]"
  },
  {
    "path": "poetry.toml",
    "content": "[virtualenvs]\nin-project = true\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[tool.poetry]\nname = \"algokit\"\nversion = \"2.10.2\"\ndescription = \"Algorand development kit command-line interface\"\nauthors = [\"Algorand Foundation <contact@algorand.foundation>\"]\nlicense = \"MIT\"\nreadme = \"README.md\"\n\n[tool.poetry.dependencies]\npython = \">=3.10,<3.15\"\nclick = \"8.1.8\" # TODO: 8.2.0 upgrade requires refinements in click_invoker.py due to breaking changes\nhttpx = \">=0.23.1,<=0.28.1\"\ncopier = \"^9.11.3\" # Using caret to allow patch updates. Major/minor updates should be tested before bumping.\nquestionary = \"^1.10.0\"\npyclip = \"^0.7.0\"\nshellingham = \"^1.5.4\"\ntomli = { version = \"^2.0.1\", python = \"<3.11\" }\npython-dotenv = \"^1.0.0\"\nmslex = \"^1.1.0\"\nkeyring = \"25.6.0\"\n# pyjwt is locked to version ^2.10.1 because its explicitly\n# vendored from auth0 repo, to reduce depedency on auth0 package that caused many adhoc transitive dependency errors in cli\n# see header in src/algokit/core/_vendor/auth0/authentication/token_verifier.py\n# this version has been tested to work with the vendored file\npyjwt = \"^2.10.1\" \ncryptography = \"^44.0.2\" # pyjwt has a weak dependency on cryptography and explicitly requires it in the vendored file, hence the lock\nalgokit-utils = \"^4.0.1\"\nmultiformats = \"0.3.1\"\nmultiformats_config = \"0.3.1\" # pinned this to be in lockstep with multiformats\njsondiff = \"^2.0.0\"\nrequests = \"^2.31.0\"\ntextual = \"^3.0.1\"\nprompt_toolkit = \"3.0.51\" # dependency of questionary, pinned to avoid 3.0.52 which has breaking changes\npywin32 = { version = \"^311\", markers = \"sys_platform == 'win32'\" } # transitive dep of pyclip, pinned to support Python 3.14 on Windows\n\n[tool.poetry.group.dev.dependencies]\npyinstaller = {version = \"^6.10.0\"}\npytest = \"^8\"\nruff = \">=0.1.6,<0.13.0\"\npip-audit = \"^2.4.7\"\napprovaltests = \"^7.2.0\"\npytest-mock = \"^3.10.0\"\nmypy = \"^1.0.0\"\npytest-httpx = \"^0.35.0\"\npython-semantic-release = 
\"^7.32.2\"\npytest-cov = \"^4.0.0\"\npre-commit = \">=2.20,<4.0\"\nsphinx = \"^6.0.0\"\nsphinx-click = \"^4.4.0\"\npoethepoet = \">=0.17.1,<0.37.0\"\ngfm-toc = \"^0.0.7\"\npytest-xdist = \"^3.4.0\"\npytest-sugar = \"^1.0.0\"\ntypes-pyyaml = \"^6.0.12.20250402\"\n\n[tool.poetry.group.docs]\noptional = true\n\n[tool.poetry.group.docs.dependencies]\nsphinxnotes-markdown-builder = \"^0.5.6\"\n\n[build-system]\nrequires = [\"poetry-core\"]\nbuild-backend = \"poetry.core.masonry.api\"\n\n[tool.poetry.scripts]\nalgokit = \"algokit.cli:algokit\"\n\n[tool.poe.tasks]\ndocs_generate = \"sphinx-build -b markdown -E docs/sphinx docs/cli\"\ndocs_toc = \"gfm-toc docs/cli/index.md -e 3\"\ndocs_title = {shell = \"(echo \\\"# AlgoKit CLI Reference Documentation\\\\n\\\\n\\\"; cat docs/cli/index.md) > docs/cli/temp.md && mv docs/cli/temp.md docs/cli/index.md\"}\ndocs = [\"docs_generate\", \"docs_toc\", \"docs_title\"]\npackage_unix = \"pyinstaller --clean --onedir --hidden-import jinja2_ansible_filters --hidden-import multiformats_config --copy-metadata algokit --name algokit --noconfirm src/algokit/__main__.py --add-data './misc/multiformats_config:multiformats_config/' --add-data './src/algokit/resources:algokit/resources/'\"\npackage_windows = { cmd = \"scripts/package_windows.bat\" }\npackage_mac = { cmd = \"scripts/package_mac.sh\" }\n\n[tool.ruff]\nline-length = 120\nlint.select = [\n  # all possible codes as of this ruff version are listed here,\n  # ones we don't want/need are commented out to make it clear\n  # which have been omitted on purpose vs which ones get added\n  # in new ruff releases and should be considered for enabling\n  \"F\",      # pyflakes\n  \"E\", \"W\", # pycodestyle\n  \"C90\",    # mccabe\n  \"I\",      # isort\n  \"N\",      # PEP8 naming\n  \"UP\",     # pyupgrade\n  \"YTT\",    # flake8-2020\n  \"ANN\",    # flake8-annotations\n  # \"S\",    # flake8-bandit\n  # \"BLE\",  # flake8-blind-except\n  \"FBT\",    # flake8-boolean-trap\n  \"B\",      # 
flake8-bugbear\n  \"A\",      # flake8-builtins\n  # \"COM\",  # flake8-commas\n  \"C4\",     # flake8-comprehensions\n  \"DTZ\",    # flake8-datetimez\n  \"T10\",    # flake8-debugger\n  # \"DJ\",   # flake8-django\n  # \"EM\",   # flake8-errmsg\n  # \"EXE\",  # flake8-executable\n  \"ISC\",    # flake8-implicit-str-concat\n  \"ICN\",    # flake8-import-conventions\n  # \"G\",    # flake8-logging-format\n  # \"INP\",  # flake8-no-pep420\n  \"PIE\",    # flake8-pie\n  \"T20\",    # flake8-print\n  \"PYI\",    # flake8-pyi\n  \"PT\",     # flake8-pytest-style\n  \"Q\",      # flake8-quotes\n  \"RSE\",    # flake8-raise\n  \"RET\",    # flake8-return\n  \"SLF\",    # flake8-self\n  \"SIM\",    # flake8-simplify\n  \"TID\",    # flake8-tidy-imports\n  \"TCH\",    # flake8-type-checking\n  \"ARG\",    # flake8-unused-arguments\n  \"PTH\",    # flake8-use-pathlib\n  \"ERA\",    # eradicate\n  # \"PD\",   # pandas-vet\n  \"PGH\",    # pygrep-hooks\n  \"PL\",     # pylint\n  # \"TRY\",  # tryceratops\n  # \"NPY\",  # NumPy-specific rules\n  \"RUF\",    # Ruff-specific rules\n]\nlint.ignore = [\n  \"PLC0415\", # allow lazy imports\n  \"RET505\", # allow else after return\n  \"SIM108\", # allow if-else in place of ternary\n  # To avoid conflict with ruff formatter. 
More details on https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules\n  \"E111\", # indentation is not a multiple of four\n  \"E117\", # over-indented\n  \"ISC001\", # single line implicit string concatenation\n  \"ISC002\", # multi line implicit string concatenation\n  \"Q000\", # bad quotes inline string\n  \"Q001\", # bad quotes multiline string\n  \"Q002\", # bad quotes docstring\n  \"Q003\", # avoidable escaped quotes\n  \"W191\", # indentation contains tabs\n]\n# Exclude a variety of commonly ignored directories.\nexclude = [\n    \".direnv\",\n    \".git\",\n    \".mypy_cache\",\n    \".ruff_cache\",\n    \".venv\",\n    \"__pypackages__\",\n    \"_build\",\n    \"build\",\n    \"dist\",\n    \"node_modules\",\n    \"venv\",\n    \"docs/sphinx\",\n    \"src/algokit/core/_vendor\",\n]\n# Allow unused variables when underscore-prefixed.\nlint.dummy-variable-rgx = \"^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$\"\n# Assume Python 3.10.\ntarget-version = \"py310\"\n\n[tool.ruff.lint.per-file-ignores]\n\"tests/**/test_*.py\" = [\"PLR0913\"] # too many args\n\n[tool.ruff.lint.flake8-annotations]\nallow-star-arg-any = true\nsuppress-none-returning = true\n\n[tool.pytest.ini_options]\npythonpath = [\"src\", \"tests\"]\nmarkers = [\n    \"mock_platform_system\",\n    \"pyinstaller_binary_tests\",\n    \"use_real_image_version_cache: opt-out of the auto-mocked image version cache checks\"\n\n]\naddopts = \"-m 'not pyinstaller_binary_tests'\" # by default, exclude pyinstaller_binary_tests\n[tool.mypy]\nfiles = [\"src\", \"tests\"]\nexclude = [\"dist\", \"src/algokit/core/_vendor/\"]\npython_version = \"3.10\"\nwarn_unused_ignores = true\nwarn_redundant_casts = true\nwarn_unused_configs = true\nwarn_unreachable = true\nwarn_return_any = true\nstrict = true\ndisallow_untyped_decorators = true\ndisallow_any_generics = false\nimplicit_reexport = false\n\n[[tool.mypy.overrides]]\nmodule = [\"approvaltests.*\", \"jsondiff.*\"]\nignore_missing_imports = 
true\n\n[tool.semantic_release]\nversion_toml = \"pyproject.toml:tool.poetry.version\"\nremove_dist = false\nbuild_command = \"poetry build --format wheel\"\nversion_source = \"tag\"\nmajor_on_zero = true\nupload_to_repository = false\ntag_commit = true\nbranch = \"main\"\ncommit_message = \"{version}\\n\\n[skip ci]\"\n"
  },
  {
    "path": "scripts/package_mac.sh",
    "content": "#!/bin/bash\n\nCMD=\"pyinstaller --clean --onedir --hidden-import jinja2_ansible_filters --hidden-import multiformats_config --copy-metadata algokit --name algokit --noconfirm src/algokit/__main__.py --add-data './misc/multiformats_config/multibase-table.json:multiformats_config/' --add-data './misc/multiformats_config/multicodec-table.json:multiformats_config/' --add-data './src/algokit/resources:algokit/resources/'\"\n\nif [ ! -z \"$APPLE_BUNDLE_ID\" ]; then\n    CMD=\"$CMD --osx-bundle-identifier \\\"$APPLE_BUNDLE_ID\\\"\"\nfi\n\nif [ ! -z \"$APPLE_CERT_ID\" ]; then\n    CMD=\"$CMD --codesign-identity \\\"$APPLE_CERT_ID\\\"\"\nfi\n\nif [ -f \"./entitlements.xml\" ]; then\n    CMD=\"$CMD --osx-entitlements-file './entitlements.xml'\"\nfi\n\neval $CMD \n"
  },
  {
    "path": "scripts/package_windows.bat",
    "content": "@echo off\npyinstaller --clean --onedir --hidden-import jinja2_ansible_filters --hidden-import multiformats_config --copy-metadata algokit --name algokit --noconfirm src/algokit/__main__.py --add-data ./misc/multiformats_config;multiformats_config/ --add-data ./src/algokit/resources;algokit/resources/\n"
  },
  {
    "path": "scripts/snap/create-snapcraft-yaml.sh",
    "content": "#!/bin/bash\n\n# Ensure the script fails on errors\nset -e\n\n# Check if the correct number of arguments are passed\nif [ \"$#\" -ne 4 ]; then\n  echo \"Usage: $0 <destination_directory> <release_version> <artifact_path> <grade>\"\n  exit 1\nfi\n\n# Assign arguments to variables\nDESTINATION_DIR=\"$1\"\nRELEASE_VERSION=\"$2\"\nARTIFACT_PATH=\"$3\"\nGRADE=\"$4\"\n\n# Ensure the destination directory exists\nmkdir -p \"${DESTINATION_DIR}/snap\"\n\n# Use the provided ARTIFACT_PATH\nSOURCE=\"$ARTIFACT_PATH\"\n\n# Create the snapcraft.yaml file\ncat > \"${DESTINATION_DIR}/snap/snapcraft.yaml\" <<EOF\nname: algokit\nversion: \"$RELEASE_VERSION\"\nsummary: The AlgoKit CLI is the one-stop shop tool for developers building on Algorand\ndescription: |\n  AlgoKit gets developers of all levels up and running with a familiar, \n  fun and productive development environment in minutes. \n  The goal of AlgoKit is to help developers build and launch \n  secure, automated production-ready applications rapidly.\n\nbase: core22\nconfinement: classic\ngrade: $GRADE\n\nparts:\n  algokit:\n    plugin: dump\n    source: $SOURCE\n\napps:\n  algokit:\n    command: algokit\nEOF\n\necho \"snapcraft.yaml has been created at ${DESTINATION_DIR}/snap/snapcraft.yaml\"\n   "
  },
  {
    "path": "scripts/update-brew-cask.sh",
    "content": "#!/bin/bash\n\n#script arguments\nwheel_files=( $1 )\nwheel_file=${wheel_files[0]}\narm_artifacts=( $2 )\narm_artifact=${arm_artifacts[0]}\nintel_artifacts=( $3 )\nintel_artifact=${intel_artifacts[0]}\nhomebrew_tap_repo=$4\n\n#globals\ncommand=algokit\n\n#error codes\nMISSING_WHEEL=1\nMISSING_EXECUTABLE=2\nCASK_GENERATION_FAILED=3\nPR_CREATION_FAILED=4\n\nif [[ ! -f $wheel_file ]]; then\n  >&2 echo \"$wheel_file not found. 🚫\"\n  exit $MISSING_WHEEL\nelse\n  echo \"Found $wheel_file 🎉\"\nfi\n\nif [[ ! -f $arm_artifact ]]; then\n  >&2 echo \"$arm_artifact not found. 🚫\"\n  exit $MISSING_EXECUTABLE\nelse\n  echo \"Found $arm_artifact 🎉\"\nfi\n\nif [[ ! -f $intel_artifact ]]; then\n  >&2 echo \"$intel_artifact not found. 🚫\"\n  exit $MISSING_EXECUTABLE\nelse\n  echo \"Found $intel_artifact 🎉\"\nfi\n\n\nget_metadata() {\n  local field=$1\n  grep \"^$field:\" $metadata | cut -f 2 -d : | xargs\n}\n\ncreate_cask() {\n  repo=\"https://github.com/${GITHUB_REPOSITORY}\"\n  homepage=\"$repo\"\n  \n  echo \"Creating brew cask\"\n\n  # determine package_name, version and release tag from .whl\n  wheel=`basename $wheel_file`\n  package_name=`echo $wheel | cut -d- -f1`\n\n  version=None\n  version_regex=\"-([0-9]+\\.[0-9]+\\.[0-9]+)b?([0-9]*)-\"\n  if [[ $wheel_file =~ $version_regex ]]; then\n    version=${BASH_REMATCH[1]}\n    version_beta=${BASH_REMATCH[2]}\n  fi\n\n  release_tag=\"v${version}\"\n  if [[ -n $version_beta ]]; then\n    release_tag=${release_tag}-beta.${version_beta}\n  fi\n\n  echo Version: $version\n  echo Release Tag: $release_tag\n\n  # get other metadata from wheel\n  unzip -o $wheel_file -d . 
>/dev/null 2>&1\n  metadata=`echo $wheel | cut -f 1,2 -d \"-\"`.dist-info/METADATA\n\n  desc=`get_metadata Summary`\n  license=`get_metadata License`\n\n  arm_binary_url=\"$repo/releases/download/$release_tag/$(basename $arm_artifact)\"\n  echo \"Calculating sha256 of $arm_binary_url...\"\n  arm_sha256=`curl -s -L $arm_binary_url | sha256sum | cut -f 1 -d ' '`\n\n  intel_binary_url=\"$repo/releases/download/$release_tag/$(basename $intel_artifact)\"\n  echo \"Calculating sha256 of $intel_binary_url...\"\n  intel_sha256=`curl -s -L $intel_binary_url | sha256sum | cut -f 1 -d ' '`\n\n  cask_file=${command}.rb\n  \n  echo \"Outputting $cask_file...\"\n\ncat << EOF > $cask_file\ncask \"$package_name\" do\n  arch arm: \"arm64\", intel: \"x64\"\n\n  version \"$version\"\n  sha256 arm:   \"$arm_sha256\",\n         intel: \"$intel_sha256\"\n\n  url \"$repo/releases/download/v#{version}/algokit-#{version}-macos_#{arch}-brew.tar.gz\"\n  name \"$package_name\"\n  desc \"$desc\"\n  homepage \"$homepage\"\n\n  binary \"#{staged_path}/#{token}\"\nend\nEOF\n\n  if [[ ! 
-f $cask_file ]]; then\n    >&2 echo \"Failed to generate $cask_file 🚫\"\n    exit $CASK_GENERATION_FAILED\n  else\n    echo \"Created $cask_file 🎉\"\n  fi\n}\n\ncreate_pr() {\n  local full_cask_filepath=`realpath $cask_file`  \n  echo \"Cloning $homebrew_tap_repo...\"\n  clone_dir=`mktemp -d`\n  git clone \"https://oauth2:${TAP_GITHUB_TOKEN}@github.com/${homebrew_tap_repo}.git\" $clone_dir\n\n  echo \"Committing Casks/$cask_file...\"\n  pushd $clone_dir\n  dest_branch=\"$command-update-$version\"\n  git checkout -b $dest_branch\n  mkdir -p $clone_dir/Casks\n  cp $full_cask_filepath $clone_dir/Casks\n  message=\"Updating $command to $version\"\n  git add .\n  git commit --message \"$message\"\n\n  echo \"Pushing $dest_branch...\"\n  git push -u origin HEAD:$dest_branch\n\n  echo \"Creating a pull request...\"\n  # can't use gh because it doesn't support fine grained access tokens yet https://github.com/github/roadmap/issues/622\ncat << EOF > pr_body.json\n{\n  \"title\": \"${message}\",\n  \"head\": \"${dest_branch}\",\n  \"base\": \"main\"\n}\nEOF\n\n  curl \\\n    --fail \\\n    -X POST \\\n    -H \"Accept: application/vnd.github+json\" \\\n    -H \"Authorization: Bearer $TAP_GITHUB_TOKEN\"\\\n    -H \"X-GitHub-Api-Version: 2022-11-28\" \\\n    https://api.github.com/repos/${homebrew_tap_repo}/pulls \\\n    -d @pr_body.json\n  pr_exit_code=$?\n\n  popd\n\n  echo \"Cleanup.\"\n  rm -rf $clone_dir\n\n  if [[ $pr_exit_code != 0 ]]; then\n    >&2 echo \"PR creation failed 🚫\"\n    exit $PR_CREATION_FAILED\n  else\n    echo \"PR creation successful 🎉\"\n  fi\n}\n\ncreate_cask\ncreate_pr\n\necho Done.\necho\n"
  },
  {
    "path": "scripts/winget/build-installer.ps1",
    "content": "Param(\n  [Parameter(Mandatory = $true)]\n  [String]\n  $binaryDir,\n\n  [Parameter(Mandatory = $false)]\n  [AllowEmptyString()]\n  [String]\n  $releaseVersion,\n\n  [Parameter(Mandatory = $true)]\n  [String]\n  $outputFile\n)\n\nFunction ThrowOnNonZeroExit {\n  Param( [String]$Message )\n  If ($LastExitCode -ne 0) {\n    Throw $Message\n  }\n}\n\n$ErrorActionPreference = 'Stop'\n\nRemove-Item -Path build -Recurse -ErrorAction Ignore\n$buildDir = New-Item -ItemType Directory -Path .\\build\\winget\\installer\n$installerContentDir = '.\\scripts\\winget\\installer'\n\n# Add installer assets\n$assetsDir = New-Item -ItemType Directory -Path (Join-Path $buildDir assets)\nCopy-Item -Path \"$installerContentDir\\assets\\*\" -Destination $assetsDir -Recurse | Out-Null\n\n# Add manifest file\n$version = if ($releaseVersion) { \n  # Strip the pre-release meta, as it's not valid\n  $releaseVersion -replace '-\\w+(\\.\\d+)?|\\+.+$', ''\n}\nelse {\n  '0.0.1'\n} \n(Get-Content (Resolve-Path \"$installerContentDir\\AppxManifest.xml\")).Replace('\"0.0.1.0\"', \"`\"$version.0`\"\") | Set-Content (Join-Path $buildDir AppxManifest.xml)\n\n# Generate pri resource map for installer assets\n$priConfig = (Resolve-Path \"$installerContentDir\\priconfig.xml\")\nPush-Location $buildDir\nmakepri new /ProjectRoot $buildDir /ConfigXml $priConfig | Out-Null\nThrowOnNonZeroExit \"Failed to create pri file\"\nPop-Location\n\n# Add algokit binaries\nCopy-Item -Path (Join-Path $binaryDir *) -Destination $buildDir -Recurse | Out-Null\n\n# Generate msix\nmakeappx pack /o /h SHA256 /d $buildDir /p $outputFile | Out-Null\nThrowOnNonZeroExit \"Failed to build msix\"\n\n"
  },
  {
    "path": "scripts/winget/installer/AppxManifest.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Package xmlns=\"http://schemas.microsoft.com/appx/manifest/foundation/windows10\" xmlns:uap=\"http://schemas.microsoft.com/appx/manifest/uap/windows10\" xmlns:uap10=\"http://schemas.microsoft.com/appx/manifest/uap/windows10/10\" xmlns:uap3=\"http://schemas.microsoft.com/appx/manifest/uap/windows10/3\" xmlns:rescap=\"http://schemas.microsoft.com/appx/manifest/foundation/windows10/restrictedcapabilities\" xmlns:desktop=\"http://schemas.microsoft.com/appx/manifest/desktop/windows10\" IgnorableNamespaces=\"uap uap3 uap10 desktop rescap\">\n  <Identity Name=\"algokit\" Publisher=\"CN=Algorand Foundation, O=Algorand Foundation, L=Singapore, C=SG\" Version=\"0.0.1.0\" ProcessorArchitecture=\"x64\" />\n  <Properties>\n    <DisplayName>AlgoKit</DisplayName>\n    <PublisherDisplayName>Algorand Foundation</PublisherDisplayName>\n    <Description>The Algorand AlgoKit CLI is the one-stop shop tool for developers building on the Algorand network.</Description>\n    <Logo>assets\\Square70x70Logo.png</Logo>\n  </Properties>\n  <Resources>\n    <Resource Language=\"en-us\" />\n  </Resources>\n  <Dependencies>\n    <TargetDeviceFamily Name=\"Windows.Universal\" MinVersion=\"10.0.17763.0\" MaxVersionTested=\"10.0.22000.0\" />\n  </Dependencies>\n  <Applications>\n    <Application Id=\"algokit\" Executable=\"algokit.exe\" EntryPoint=\"Windows.FullTrustApplication\">\n      <uap:VisualElements BackgroundColor=\"transparent\" DisplayName=\"Algorand AlgoKit CLI\" Square150x150Logo=\"assets\\Square150x150Logo.png\" Square44x44Logo=\"assets\\Square44x44Logo.png\" Description=\"The Algorand AlgoKit CLI is the one-stop shop tool for developers building on the Algorand network.\">\n        <uap:DefaultTile ShortName=\"AlgoKit\" Square71x71Logo=\"assets\\Square70x70Logo.png\" />\n      </uap:VisualElements>\n      <Extensions>\n        <uap3:Extension Category=\"windows.appExecutionAlias\">\n          <uap3:AppExecutionAlias>\n       
     <desktop:ExecutionAlias Alias=\"algokit.exe\" />\n          </uap3:AppExecutionAlias>\n        </uap3:Extension>\n      </Extensions>\n    </Application>\n  </Applications>\n  <Capabilities>\n    <rescap:Capability Name=\"runFullTrust\" />\n  </Capabilities>\n</Package>\n"
  },
  {
    "path": "scripts/winget/installer/priconfig.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<resources targetOsVersion=\"10.0.0\" majorVersion=\"1\">\n\t<packaging>\n\t\t<autoResourcePackage qualifier=\"Language\"/>\n\t\t<autoResourcePackage qualifier=\"Scale\"/>\n\t\t<autoResourcePackage qualifier=\"DXFeatureLevel\"/>\n\t</packaging>\n\t<index root=\"\\\" startIndexAt=\"\\\">\n\t\t<default>\n\t\t\t<qualifier name=\"Language\" value=\"en-US\"/>\n\t\t\t<qualifier name=\"Contrast\" value=\"standard\"/>\n\t\t\t<qualifier name=\"Scale\" value=\"100\"/>\n\t\t\t<qualifier name=\"HomeRegion\" value=\"001\"/>\n\t\t\t<qualifier name=\"TargetSize\" value=\"256\"/>\n\t\t\t<qualifier name=\"LayoutDirection\" value=\"LTR\"/>\n\t\t\t<qualifier name=\"Theme\" value=\"dark\"/>\n\t\t\t<qualifier name=\"AlternateForm\" value=\"\"/>\n\t\t\t<qualifier name=\"DXFeatureLevel\" value=\"DX9\"/>\n\t\t\t<qualifier name=\"Configuration\" value=\"\"/>\n\t\t\t<qualifier name=\"DeviceFamily\" value=\"Universal\"/>\n\t\t\t<qualifier name=\"Custom\" value=\"\"/>\n\t\t</default>\n\t\t<indexer-config type=\"folder\" foldernameAsQualifier=\"true\" filenameAsQualifier=\"true\" qualifierDelimiter=\".\"/>\n\t\t<indexer-config type=\"resw\" convertDotsToSlashes=\"true\" initialPath=\"\"/>\n\t\t<indexer-config type=\"resjson\" initialPath=\"\"/>\n\t\t<indexer-config type=\"PRI\"/>\n\t</index>\n</resources>\n"
  },
  {
    "path": "scripts/winget/update-package.ps1",
    "content": "Param(\n  [Parameter(Mandatory = $true)]\n  [String]\n  $releaseVersion\n)\n\nFunction ThrowOnNonZeroExit {\n  Param( [String]$Message )\n  If ($LastExitCode -ne 0) {\n    Throw $Message\n  }\n}\n\n$wingetPackage = 'AlgorandFoundation.AlgoKit'\n$release = Invoke-RestMethod -uri \"https://api.github.com/repos/algorandfoundation/algokit-cli/releases/tags/v$releaseVersion\"\n$installerUrl = $release | Select -ExpandProperty assets -First 1 | Where-Object -Property name -match '-windows_x64-winget\\.msix$' | Select -ExpandProperty browser_download_url\n\n$wingetDir = New-Item -Force -ItemType Directory -Path .\\build\\winget\n$wingetExecutable = \"$wingetDir\\wingetcreate.exe\"\nInvoke-WebRequest https://aka.ms/wingetcreate/latest -OutFile $wingetExecutable\n& $wingetExecutable update $wingetPackage -s -v $releaseVersion -u \"$installerUrl\" -t \"$env:WINGET_GITHUB_TOKEN\"\nThrowOnNonZeroExit \"Failed to update winget package\"\n"
  },
  {
    "path": "src/algokit/__init__.py",
    "content": "import platform\nimport sys\n\n# this isn't beautiful, but to avoid confusing user errors we need this check before we start importing our own modules\nif sys.version_info < (3, 10, 0):  # noqa: UP036\n    print(  # noqa: T201\n        f\"Unsupported CPython version: {platform.python_version()} detected.\\n\"\n        \"The minimum version of Python supported is CPython 3.10.\\n\"\n        \"If you need help installing then this is a good starting point: \\n\"\n        \"https://www.python.org/about/gettingstarted/\",\n        file=sys.stderr,\n    )\n    sys.exit(-1)\n\ntry:\n    from algokit.core.log_handlers import initialise_logging, uncaught_exception_logging_handler\nexcept ImportError as ex:\n    # the above should succeed both in importing \"algokit\" itself, and we also know that \"click\" will\n    # be imported too, if those basic packages aren't present, something is very wrong\n    print(  # noqa: T201\n        f\"{ex}\\nUnable to import require package(s), your install may be broken :(\",\n        file=sys.stderr,\n    )\n    sys.exit(-1)\n\n\ninitialise_logging()\nsys.excepthook = uncaught_exception_logging_handler\n\n\nif __name__ == \"__main__\":\n    from algokit.cli import algokit\n\n    algokit()\n"
  },
  {
    "path": "src/algokit/__main__.py",
    "content": "from multiprocessing import freeze_support\n\nfrom algokit.cli import algokit\n\nfreeze_support()\nalgokit()\n"
  },
  {
    "path": "src/algokit/cli/__init__.py",
    "content": "import click\n\nfrom algokit.cli.compile import compile_group\nfrom algokit.cli.completions import completions_group\nfrom algokit.cli.config import config_group\nfrom algokit.cli.dispenser import dispenser_group\nfrom algokit.cli.doctor import doctor_command\nfrom algokit.cli.explore import explore_command\nfrom algokit.cli.generate import generate_group\nfrom algokit.cli.goal import goal_command\nfrom algokit.cli.init import init_group\nfrom algokit.cli.localnet import localnet_group\nfrom algokit.cli.project import project_group\nfrom algokit.cli.project.bootstrap import bootstrap_group\nfrom algokit.cli.project.deploy import deploy_command\nfrom algokit.cli.task import task_group\nfrom algokit.core.conf import PACKAGE_NAME\nfrom algokit.core.config_commands.version_prompt import do_version_prompt, skip_version_check_option\nfrom algokit.core.log_handlers import color_option, verbose_option\n\nHIDDEN_COMMANDS: dict[str, click.Command] = {\"deploy\": deploy_command, \"bootstrap\": bootstrap_group}\n\n\nclass CustomGroup(click.Group):\n    def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:\n        rv = click.Group.get_command(self, ctx, cmd_name)\n        if rv is not None:\n            return rv\n\n        # ensures hidden commands are still invocable yet not visible in help\n        if cmd_name in HIDDEN_COMMANDS:\n            return HIDDEN_COMMANDS[cmd_name]\n\n        return None\n\n\n@click.group(\n    context_settings={\n        \"help_option_names\": [\"-h\", \"--help\"],\n        \"max_content_width\": 120,\n    },\n    cls=CustomGroup,\n)\n@click.version_option(package_name=PACKAGE_NAME)\n@verbose_option\n@color_option\n@skip_version_check_option\ndef algokit(*, skip_version_check: bool) -> None:\n    \"\"\"\n    AlgoKit is your one-stop shop to develop applications on the Algorand blockchain.\n\n    If you are getting started, please see the quick start tutorial: 
https://dev.algorand.co/getting-started/algokit-quick-start/.\n    \"\"\"\n    if not skip_version_check:\n        do_version_prompt()\n\n\nalgokit.add_command(completions_group)\nalgokit.add_command(config_group)\nalgokit.add_command(doctor_command)\nalgokit.add_command(explore_command)\nalgokit.add_command(goal_command)\nalgokit.add_command(init_group)\nalgokit.add_command(localnet_group)\nalgokit.add_command(generate_group)\nalgokit.add_command(dispenser_group)\nalgokit.add_command(task_group)\nalgokit.add_command(compile_group)\nalgokit.add_command(project_group)\n"
  },
  {
    "path": "src/algokit/cli/codespace.py",
    "content": "import logging\nimport subprocess\nfrom time import time\nfrom typing import Any\n\nimport click\n\nfrom algokit.core import questionary_extensions\nfrom algokit.core.codespace import (\n    CODESPACE_FORWARD_TIMEOUT_MAX,\n    CODESPACE_FORWARD_TIMEOUT_MIN,\n    CODESPACE_NAME_PREFIX,\n    authenticate_with_github,\n    create_codespace,\n    delete_codespace,\n    delete_codespaces_with_prefix,\n    ensure_github_cli_installed,\n    forward_ports_for_codespace,\n    is_codespace_ready,\n    list_github_codespaces,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _validate_run_timeout(_ctx: click.Context, _param: click.Parameter, value: int) -> int:\n    if value < CODESPACE_FORWARD_TIMEOUT_MIN or value > CODESPACE_FORWARD_TIMEOUT_MAX:\n        raise click.BadParameter(\n            f\"Timeout must be between {CODESPACE_FORWARD_TIMEOUT_MIN} and {CODESPACE_FORWARD_TIMEOUT_MAX} minutes.\"\n        )\n    return value\n\n\n@click.command(\"codespace\")\n@click.option(\n    \"-m\",\n    \"--machine\",\n    type=click.Choice(\n        [\"basicLinux32gb\", \"standardLinux32gb\", \"premiumLinux\", \"largePremiumLinux\"], case_sensitive=True\n    ),\n    default=\"basicLinux32gb\",\n    required=False,\n    help=\"The GitHub Codespace machine type to use. Defaults to base tier.\",\n)\n@click.option(\n    \"-a\", \"--algod-port\", default=4001, required=False, help=\"The port for the Algorand daemon. Defaults to 4001.\"\n)\n@click.option(\n    \"-i\", \"--indexer-port\", default=8980, required=False, help=\"The port for the Algorand indexer. Defaults to 8980.\"\n)\n@click.option(\"-k\", \"--kmd-port\", default=4002, required=False, help=\"The port for the Algorand kmd. Defaults to 4002.\")\n@click.option(\n    \"-n\",\n    \"--codespace-name\",\n    default=\"\",\n    required=False,\n    help=f\"The name of the codespace. 
Defaults to '{CODESPACE_NAME_PREFIX}_timestamp'.\",\n)\n@click.option(\n    \"-r\",\n    \"--repo-url\",\n    required=False,\n    default=\"algorandfoundation/algokit-base-template\",\n    help=\"The URL of the repository. Defaults to algokit base template repo.\",\n)\n@click.option(\n    \"-t\",\n    \"--timeout\",\n    \"timeout_minutes\",\n    default=240,\n    required=False,\n    callback=_validate_run_timeout,\n    help=\"Default max runtime timeout in minutes. Upon hitting the timeout a codespace will be shutdown to \"\n    \"prevent accidental spending over GitHub Codespaces quota. Defaults to 4 hours.\",\n)\n@click.option(\n    \"--force\",\n    \"-f\",\n    is_flag=True,\n    required=False,\n    default=False,\n    type=click.BOOL,\n    help=(\n        f\"Force delete previously used codespaces with `{CODESPACE_NAME_PREFIX}*` name prefix and skip prompts. \"\n        \"Defaults to explicitly prompting for confirmation.\"\n    ),\n)\ndef codespace_command(  # noqa: PLR0913\n    *,\n    machine: str,\n    algod_port: int,\n    indexer_port: int,\n    kmd_port: int,\n    codespace_name: str,\n    repo_url: str,\n    timeout_minutes: int,\n    force: bool,\n) -> None:\n    \"\"\"Manage the AlgoKit LocalNet in GitHub Codespaces.\"\"\"\n    ensure_github_cli_installed()\n\n    if not authenticate_with_github():\n        return\n\n    codespaces = list_github_codespaces()\n\n    # Delete existing codespaces with the default name\n    if codespaces and (\n        force\n        or questionary_extensions.prompt_confirm(\n            f\"Delete previously used codespaces with `{CODESPACE_NAME_PREFIX}*` name prefix?\", default=True\n        )\n    ):\n        delete_codespaces_with_prefix(codespaces, CODESPACE_NAME_PREFIX)\n\n    # Create a new codespace\n    codespace_name = codespace_name or f\"{CODESPACE_NAME_PREFIX}_{int(time())}\"\n    # Add a 5 minute timeout buffer, so the codespace doesn't terminate before the port forwarding\n    codespace_timeout = (\n     
   (timeout_minutes + 5)\n        if (timeout_minutes + 5) < CODESPACE_FORWARD_TIMEOUT_MAX\n        else CODESPACE_FORWARD_TIMEOUT_MAX\n    )\n    create_codespace(\n        repo_url,\n        codespace_name,\n        machine,\n        codespace_timeout,\n    )\n\n    codespace_data: dict[str, Any] | None = None\n\n    try:\n        logger.info(f\"Waiting for codespace {codespace_name} to be ready...\")\n        codespace_data = is_codespace_ready(codespace_name)\n        if not codespace_data:\n            raise RuntimeError(\"Error creating codespace. Please check your internet connection and try again.\")\n\n        logger.info(f\"Codespace {codespace_name} is now ready.\")\n        logger.warning(\n            \"Keep the terminal open during the LocalNet session. \"\n            \"Terminating the session will delete the codespace instance.\"\n        )\n\n        forward_ports_for_codespace(\n            codespace_data[\"name\"], algod_port, kmd_port, indexer_port, timeout=timeout_minutes * 60\n        )\n        logger.info(\"LocalNet started in GitHub Codespace\")\n\n    except subprocess.TimeoutExpired:\n        logger.warning(\"Timeout reached. Shutting down the codespace...\")\n    except KeyboardInterrupt:\n        logger.warning(\"Keyboard interrupt received. Shutting down the codespace...\")\n    except Exception as e:\n        logger.error(e)\n    finally:\n        logger.info(\"Exiting...\")\n        if codespace_data:\n            delete_codespace(codespace_data=codespace_data, force=force)\n"
  },
  {
    "path": "src/algokit/cli/common/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/cli/common/constants.py",
    "content": "# Common constants, variables and enums used by the CLI.\n\nfrom enum import Enum\n\n\n# >= Py 3.11, there is a built-in StrEnum, however,\n# we still support older versions of Python.\n# Hence, a custom implementation for now.\nclass StringEnum(str, Enum):\n    def __str__(self) -> str:\n        return str(self.value)\n\n    @classmethod\n    def to_list(cls) -> list[str]:\n        return [member.value for member in cls]\n\n\nclass ExplorerEntityType(StringEnum):\n    \"\"\"\n    Used to indicate type of entity when used with `get_explorer_url` function.\n    \"\"\"\n\n    TRANSACTION = \"transaction\"\n    ASSET = \"asset\"\n    ADDRESS = \"account\"\n\n\nclass AlgorandNetwork(StringEnum):\n    \"\"\"\n    Used to indicate the Algorand network.\n    \"\"\"\n\n    LOCALNET = \"localnet\"\n    TESTNET = \"testnet\"\n    MAINNET = \"mainnet\"\n"
  },
  {
    "path": "src/algokit/cli/common/utils.py",
    "content": "# Cli/Click related helper functions and classes\n\n\nimport typing as t\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.cli.common.constants import ExplorerEntityType\nfrom algokit.core.utils import is_windows\n\n\nclass MutuallyExclusiveOption(click.Option):\n    \"\"\"\n    A Click option that defines mutually exclusive command line options.\n\n    Args:\n        *args: Positional arguments passed to the parent class constructor.\n        **kwargs: Keyword arguments passed to the parent class constructor.\n            not_required_if (list): A list of options that the current option is mutually exclusive with.\n\n    Attributes:\n        not_required_if (list): A list of options that the current option is mutually exclusive with.\n\n    Raises:\n        AssertionError: If the `not_required_if` parameter is not provided.\n\n    Example:\n        ```python\n        @click.command()\n        @click.option('--option1', help='Option 1')\n        @click.option('--option2', help='Option 2')\n        @click.option('--option3', help='Option 3', cls=MutuallyExclusiveOption, not_required_if=['option1', 'option2'])\n        def my_command(option1, option2, option3):\n            # Command logic here\n            pass\n        ```\n\n        In the example above, the `MutuallyExclusiveOption` class is used to define the `option3` command line option.\n        This option is mutually exclusive with `option1` and `option2`,\n        meaning that if either `option1` or `option2` is provided, `option3` cannot be used.\n        If `option3` is provided along with `option1` or `option2`, a `click.UsageError` is raised.\n    \"\"\"\n\n    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:\n        self.not_required_if: list = kwargs.pop(\"not_required_if\")\n\n        assert self.not_required_if, \"'not_required_if' parameter required\"\n        kwargs[\"help\"] = (\n            kwargs.get(\"help\", \"\") + \" Option is mutually exclusive with \" + 
\", \".join(self.not_required_if) + \".\"\n        ).strip()\n        super().__init__(*args, **kwargs)\n\n    def handle_parse_result(\n        self, ctx: click.Context, opts: t.Mapping[str, t.Any], args: list[str]\n    ) -> tuple[t.Any, list[str]]:\n        \"\"\"\n        Overrides the `handle_parse_result` method of the `click.Option` class.\n\n        This method checks if the current option is present in the provided options (`opts`)\n        and if any of the mutually exclusive options are also present.\n        If both the current option and a mutually exclusive option are present, it raises a `click.UsageError`.\n        Otherwise, it returns the result of the parent `handle_parse_result` method.\n\n        Args:\n            ctx (click.Context): The Click context object.\n            opts (dict): The dictionary of parsed options.\n            args (list): The list of remaining arguments.\n\n        Returns:\n            The result of the parent `handle_parse_result` method.\n\n        Raises:\n            click.UsageError: If the current option and a mutually exclusive option are both present.\n        \"\"\"\n\n        current_opt: bool = self.name in opts\n        for mutex_opt in self.not_required_if:\n            if mutex_opt in opts:\n                if current_opt:\n                    raise click.UsageError(\n                        \"Illegal usage: '\" + str(self.name) + \"' is mutually exclusive with \" + str(mutex_opt) + \".\"\n                    )\n                self.prompt = None\n        return super().handle_parse_result(ctx, opts, args)\n\n\ndef get_explorer_url(identifier: str | int, network: str, entity_type: ExplorerEntityType) -> str:\n    \"\"\"\n    Returns a URL for exploring a specified type (transaction, asset, address) on the specified network.\n\n    Args:\n        identifier (str | int): The ID of the transaction, asset, or address to explore.\n        network (str): The name of the network (e.g., \"localnet\", \"testnet\", 
\"mainnet\").\n        entity_type (ExplorerEntityType): The type to explore (e.g., ExplorerEntityType.TRANSACTION,\n        ExplorerEntityType.ASSET, ExplorerEntityType.ADDRESS).\n\n    Returns:\n        str: The URL for exploring the specified type on the specified network.\n\n    Raises:\n        ValueError: If the network or explorer type is invalid.\n    \"\"\"\n\n    return f\"https://explore.algokit.io/{network}/{entity_type.value}/{identifier}\"\n\n\ndef sanitize_extra_args(extra_args: t.Sequence[str]) -> tuple[str, ...]:\n    \"\"\"\n    Sanitizes and formats extra arguments for command execution across different OSes.\n\n    Args:\n        extra_args (t.Sequence[str]): A sequence of extra arguments to sanitize.\n\n    Returns:\n        tuple[str, ...]: A sanitized list of extra arguments.\n\n    Examples:\n        >>> sanitize_extra_args([\"arg1\", \"arg with spaces\", \"--flag=value\"])\n        ['arg1', 'arg with spaces', '--flag=value']\n        >>> sanitize_extra_args((\"--extra bla bla bla\",))\n        ['--extra', 'bla', 'bla', 'bla']\n        >>> sanitize_extra_args([\"--complex='quoted value'\", \"--multi word\"])\n        [\"--complex='quoted value'\", '--multi', 'word']\n        >>> sanitize_extra_args([r\"C:\\\\Program Files\\\\My App\", \"%PATH%\"])\n        ['C:\\\\Program Files\\\\My App', '%PATH%']\n    \"\"\"\n\n    lex = __import__(\"mslex\" if is_windows() else \"shlex\")\n\n    def sanitize_arg(arg: str) -> str:\n        # Normalize path separators\n        arg = str(Path(arg))\n\n        # Handle environment variables\n        if arg.startswith(\"%\") and arg.endswith(\"%\"):\n            return arg  # Keep Windows-style env vars as-is\n        elif arg.startswith(\"$\"):\n            return arg  # Keep Unix-style env vars as-is\n\n        # Escape special characters and handle Unicode\n        return lex.quote(arg)  # type: ignore[no-any-return]\n\n    sanitized_args: tuple[str, ...] 
= ()\n    for arg in extra_args:\n        # Split the argument if it contains multiple space-separated values\n        split_args = lex.split(arg)\n        for split_arg in split_args:\n            sanitized_arg = sanitize_arg(split_arg)\n            sanitized_args += (sanitized_arg,)\n\n    return sanitized_args\n"
  },
  {
    "path": "src/algokit/cli/compile.py",
    "content": "import logging\n\nimport click\n\nfrom algokit.cli.compilers.python import py, python\nfrom algokit.cli.compilers.typescript import ts, typescript\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group(\n    \"compile\",\n    short_help=(\n        \"Compile smart contracts and smart signatures written in a supported high-level language \"\n        \"to a format deployable on the Algorand Virtual Machine (AVM).\"\n    ),\n)\n@click.option(\n    \"-v\",\n    \"--version\",\n    \"version\",\n    required=False,\n    default=None,\n    help=(\n        \"The compiler version to pin to, for example, 1.0.0. \"\n        \"If no version is specified, AlgoKit checks if the compiler is installed and runs the installed version. \"\n        \"If the compiler is not installed, AlgoKit runs the latest version. \"\n        \"If a version is specified, AlgoKit checks if an installed version matches and runs the installed version. \"\n        \"Otherwise, AlgoKit runs the specified version.\"\n    ),\n)\n@click.pass_context\ndef compile_group(context: click.Context, version: str | None) -> None:\n    \"\"\"\n    Compile smart contracts and smart signatures written in a supported high-level language\n    to a format deployable on the Algorand Virtual Machine (AVM).\n    \"\"\"\n    context.ensure_object(dict)\n    context.obj[\"version\"] = version\n\n\ncompile_group.add_command(python, \"python\")\ncompile_group.add_command(py, \"py\")\ncompile_group.add_command(typescript, \"typescript\")\ncompile_group.add_command(ts, \"ts\")\n"
  },
  {
    "path": "src/algokit/cli/compilers/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/cli/compilers/python.py",
    "content": "import logging\nimport os\nfrom collections.abc import Callable\nfrom typing import Any\n\nimport click\n\nfrom algokit.core.compilers.python import find_valid_puyapy_command\nfrom algokit.core.proc import run\n\nlogger = logging.getLogger(__name__)\n_AnyCallable = Callable[..., Any]\n\n\ndef invoke_puyapy(context: click.Context, puyapy_args: list[str]) -> None:\n    version = str(context.obj[\"version\"]) if context.obj[\"version\"] else None\n\n    puyapy_command = find_valid_puyapy_command(version)\n\n    run_result = run(\n        [\n            *puyapy_command,\n            *puyapy_args,\n        ],\n        env=(dict(os.environ) | {\"NO_COLOR\": \"1\"}) if context.color is False else None,\n    )\n    click.echo(run_result.output)\n\n    if run_result.exit_code != 0:\n        click.secho(\n            \"An error occurred during compile. Please ensure that any supplied arguments are valid \"\n            \"and any files passed are valid Algorand Python code before retrying.\",\n            err=True,\n            fg=\"red\",\n        )\n        raise click.exceptions.Exit(run_result.exit_code)\n\n\ndef common_puyapy_command_options(function: _AnyCallable) -> click.Command:\n    function = click.argument(\"puyapy_args\", nargs=-1, type=click.UNPROCESSED)(function)\n    function = click.pass_context(function)\n    return click.command(\n        context_settings={\n            \"ignore_unknown_options\": True,\n        },\n        add_help_option=False,\n        help=\"Compile Algorand Python contract(s) using the PuyaPy compiler.\",\n    )(function)\n\n\n@common_puyapy_command_options\ndef python(context: click.Context, puyapy_args: list[str]) -> None:\n    invoke_puyapy(context, puyapy_args)\n\n\n@common_puyapy_command_options\ndef py(context: click.Context, puyapy_args: list[str]) -> None:\n    invoke_puyapy(context, puyapy_args)\n"
  },
  {
    "path": "src/algokit/cli/compilers/typescript.py",
    "content": "import logging\nimport os\nfrom collections.abc import Callable\nfrom typing import Any\n\nimport click\n\nfrom algokit.core.compilers.typescript import find_valid_puyats_command\nfrom algokit.core.proc import run\nfrom algokit.core.utils import extract_semantic_version\n\nlogger = logging.getLogger(__name__)\n_AnyCallable = Callable[..., Any]\n\n\ndef invoke_puyats(context: click.Context, puyats_args: list[str]) -> None:\n    version = extract_semantic_version(str(context.obj[\"version\"])) if context.obj[\"version\"] else None\n\n    puyats_command = find_valid_puyats_command(version)\n\n    run_result = run(\n        [\n            *puyats_command,\n            *puyats_args,\n        ],\n        env=(dict(os.environ) | {\"NO_COLOR\": \"1\"}) if context.color is False else None,\n    )\n    click.echo(run_result.output)\n\n    if run_result.exit_code != 0:\n        click.secho(\n            \"An error occurred during compile. Please ensure that any supplied arguments are valid \"\n            \"and any files passed are valid Algorand TypeScript code before retrying.\",\n            err=True,\n            fg=\"red\",\n        )\n        raise click.exceptions.Exit(run_result.exit_code)\n\n\ndef common_puyats_command_options(function: _AnyCallable) -> click.Command:\n    function = click.argument(\"puyats_args\", nargs=-1, type=click.UNPROCESSED)(function)\n    function = click.pass_context(function)\n    return click.command(\n        context_settings={\n            \"ignore_unknown_options\": True,\n        },\n        add_help_option=False,\n        help=\"Compile Algorand TypeScript contract(s) using the PuyaTs compiler.\",\n    )(function)\n\n\n@common_puyats_command_options\ndef typescript(context: click.Context, puyats_args: list[str]) -> None:\n    invoke_puyats(context, puyats_args)\n\n\n@common_puyats_command_options\ndef ts(context: click.Context, puyats_args: list[str]) -> None:\n    invoke_puyats(context, puyats_args)\n"
  },
  {
    "path": "src/algokit/cli/completions.py",
    "content": "import logging\nfrom pathlib import Path\n\nimport click\nimport click.shell_completion\nimport shellingham  # type: ignore[import-untyped]\n\nfrom algokit.core.atomic_write import atomic_write\nfrom algokit.core.conf import get_app_config_dir\n\nlogger = logging.getLogger(__name__)\n\nSUPPORTED_SHELLS = [\"bash\", \"zsh\"]\n\n\n@click.group(\"completions\", short_help=\"Install and Uninstall AlgoKit shell integrations.\")\ndef completions_group() -> None:\n    pass\n\n\nshell_option = click.option(\n    \"--shell\", type=click.Choice(SUPPORTED_SHELLS), help=\"Specify shell to install algokit completions for.\"\n)\n\n\n@completions_group.command(\n    \"install\",\n    short_help=\"Install shell completions\",\n)\n@shell_option\ndef install(shell: str | None) -> None:\n    \"\"\"Install shell completions, this command will attempt to update the interactive profile script\n    for the current shell to support algokit completions. To specify a specific shell use --shell.\"\"\"\n    shell_completion = ShellCompletion(shell)\n    shell_completion.install()\n\n\n@completions_group.command(\n    \"uninstall\",\n    short_help=\"Uninstall shell completions\",\n)\n@shell_option\ndef uninstall(shell: str | None) -> None:\n    \"\"\"Uninstall shell completions, this command will attempt to update the interactive profile script\n    for the current shell to remove any algokit completions that have been added.\n    To specify a specific shell use --shell.\"\"\"\n    shell_completion = ShellCompletion(shell)\n    shell_completion.uninstall()\n\n\nclass ShellCompletion:\n    def __init__(self, shell: str | None) -> None:\n        shell = shell or _get_current_shell()\n        self.shell = shell\n        self.source_path = get_app_config_dir() / f\".algokit-completions.{shell}\"\n        self.profile_path = Path(f\"~/.{shell}rc\").expanduser()\n        home_based_source_path = _get_home_based_path(self.source_path)\n        self.profile_line = f\". 
{home_based_source_path}\\n\"\n\n    def install(self) -> None:\n        try:\n            self._save_source()\n        except click.exceptions.Exit:\n            raise\n        if self._insert_profile_line():\n            logger.info(f\"AlgoKit completions installed for {self.shell} 🎉\")\n        else:\n            logger.info(f\"{self.profile_path} already contains completion source 🤔\")\n        home_based_profile_path = _get_home_based_path(self.profile_path)\n        logger.info(f\"Restart shell or run `. {home_based_profile_path}` to enable completions\")\n\n    def uninstall(self) -> None:\n        self._remove_source()\n        if self._remove_profile_line():\n            logger.info(f\"AlgoKit completions uninstalled for {self.shell} 🎉\")\n        else:\n            logger.info(f\"AlgoKit completions not installed for {self.shell} 🤔\")\n\n    @property\n    def source(self) -> str:\n        completion_class = click.shell_completion.get_completion_class(self.shell)\n        if completion_class is None:\n            raise click.ClickException(f\"Unsupported shell for completions: {self.shell}\")\n        completion = completion_class(\n            # class is only instantiated to get source snippet, so don't need to pass a real command\n            cli=None,  # type: ignore[arg-type]\n            ctx_args={},\n            prog_name=\"algokit\",\n            complete_var=\"_ALGOKIT_COMPLETE\",\n        )\n        try:\n            return completion.source()\n        except RuntimeError as ex:\n            logger.debug(f\"Failed to generate completion source. 
{ex}\")\n            if self.shell == \"bash\":\n                logger.error(\"Shell completion is not supported for Bash versions older than 4.4.\")\n            else:\n                logger.error(\"Failed to install completions 😢.\")\n            raise click.exceptions.Exit(code=1) from ex\n\n    def _save_source(self) -> None:\n        # grab source before attempting to write file in case it fails\n        source = self.source\n        logger.debug(f\"Writing source script {self.source_path}\")\n        self.source_path.write_text(source, encoding=\"utf-8\")\n\n    def _remove_source(self) -> None:\n        logger.debug(f\"Removing source script {self.source_path}\")\n        self.source_path.unlink(missing_ok=True)\n\n    def _insert_profile_line(self) -> bool:\n        try:\n            content = self.profile_path.read_text(encoding=\"utf-8\")\n        except FileNotFoundError:\n            pass\n        else:\n            if self.profile_line in content:\n                # profile already contains source of completion script. 
nothing to do\n                return False\n\n        logger.debug(f\"Appending completion source to {self.profile_path}\")\n        # got to end of file, so append profile line\n        atomic_write(self.profile_line, self.profile_path, \"a\")\n        return True\n\n    def _remove_profile_line(self) -> bool:\n        try:\n            content = self.profile_path.read_text(encoding=\"utf-8\")\n        except FileNotFoundError:\n            logger.debug(f\"{self.profile_path} not found\")\n            return False\n        # see if profile script contains profile_line, if it does remove it\n        if self.profile_line not in content:\n            return False\n        logger.debug(f\"Completion source found in {self.profile_path}\")\n        content = content.replace(self.profile_line, \"\")\n\n        logger.debug(f\"Removing completion source found in {self.profile_path}\")\n        atomic_write(content, self.profile_path, \"w\")\n        return True\n\n\ndef _get_home_based_path(path: Path) -> Path:\n    home = Path.home()\n    try:\n        home_based_path = path.relative_to(home)\n    except ValueError:\n        return path\n    else:\n        return \"~\" / home_based_path\n\n\ndef _get_current_shell() -> str:\n    try:\n        shell_name, *_ = shellingham.detect_shell()\n    except Exception as ex:\n        logger.debug(\"Could not determine current shell\", exc_info=ex)\n        logger.warning(\"Could not determine current shell. Try specifying a supported shell with --shell\")\n        raise click.exceptions.Exit(code=1) from ex\n\n    if shell_name not in SUPPORTED_SHELLS:\n        logger.warning(f\"{shell_name} is not a supported shell. 😢\")\n        raise click.exceptions.Exit(code=1)\n    return str(shell_name)\n"
  },
  {
    "path": "src/algokit/cli/config.py",
    "content": "import click\n\nfrom algokit.core.config_commands.container_engine import container_engine_configuration_command\nfrom algokit.core.config_commands.js_package_manager import js_package_manager_configuration_command\nfrom algokit.core.config_commands.py_package_manager import py_package_manager_configuration_command\nfrom algokit.core.config_commands.version_prompt import version_prompt_configuration_command\n\n\n@click.group(\"config\", short_help=\"Configure AlgoKit settings.\")\ndef config_group() -> None:\n    \"\"\"Configure settings used by AlgoKit\"\"\"\n\n\nconfig_group.add_command(version_prompt_configuration_command)\nconfig_group.add_command(container_engine_configuration_command)\nconfig_group.add_command(js_package_manager_configuration_command)\nconfig_group.add_command(py_package_manager_configuration_command)\n"
  },
  {
    "path": "src/algokit/cli/dispenser.py",
    "content": "import enum\nimport logging\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.cli.common.constants import ExplorerEntityType\nfrom algokit.cli.common.utils import get_explorer_url\nfrom algokit.cli.tasks.utils import get_address\nfrom algokit.core.dispenser import (\n    DISPENSER_ACCESS_TOKEN_KEY,\n    DispenserApiAudiences,\n    clear_dispenser_credentials,\n    get_oauth_tokens,\n    is_authenticated,\n    process_dispenser_request,\n    revoke_refresh_token,\n    set_dispenser_credentials,\n)\nfrom algokit.core.utils import is_network_available\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass DispenserAsset:\n    asset_id: int\n    decimals: int\n    description: str\n\n\nclass OutputMode(enum.Enum):\n    STDOUT = \"stdout\"\n    FILE = \"file\"\n\n\nclass DispenserAssetName(enum.IntEnum):\n    ALGO = 0\n\n\nDISPENSER_ASSETS = {\n    DispenserAssetName.ALGO: DispenserAsset(\n        asset_id=0,\n        decimals=6,\n        description=\"Algo\",\n    ),\n}\n\nDEFAULT_CI_TOKEN_FILENAME = \"algokit_ci_token.txt\"\n\nNOT_AUTHENTICATED_MESSAGE = \"Please login first by running `algokit dispenser login` command\"\n\n\ndef _handle_ci_token(output_mode: str, output_filename: str, token_data: dict) -> None:\n    if output_mode == OutputMode.STDOUT.value:\n        click.echo(f\"\\n{DISPENSER_ACCESS_TOKEN_KEY} (valid for 30 days):\\n\\n{token_data['access_token']}\\n\")\n        logger.warning(\n            \"Your CI access token has been printed to stdout.\\n\"\n            \"Please ensure you keep this token safe!\\n\"\n            \"If needed, clear your terminal history after copying the token!\"\n        )\n    else:\n        with Path.open(Path(output_filename), mode=\"w\", encoding=\"utf-8\") as token_file:\n            token_file.write(token_data[\"access_token\"])\n        logger.warning(\n            f\"Your CI access token has been saved to `{output_filename}`.\\n\"\n            \"Please 
ensure you keep this file safe or remove after copying the token!\"\n        )\n\n\nclass DispenserGroup(click.Group):\n    def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:\n        return_value = super().get_command(ctx, cmd_name)\n\n        if return_value is None:\n            return None\n        elif is_network_available():\n            return return_value\n        else:\n            logger.error(\"Please connect to internet first\")\n            raise click.exceptions.Exit(code=1)\n\n\n@click.group(\"dispenser\", cls=DispenserGroup)\ndef dispenser_group() -> None:\n    \"\"\"Interact with the AlgoKit TestNet Dispenser.\"\"\"\n\n\n@dispenser_group.command(\"logout\", help=\"Logout of your Dispenser API account.\")\ndef logout_command() -> None:\n    if is_authenticated():\n        try:\n            revoke_refresh_token()\n            clear_dispenser_credentials()\n        except Exception as e:\n            logger.debug(f\"Error logging out {e}\")\n            raise click.ClickException(\"Error logging out\") from e\n        logger.info(\"Logout successful\")\n    else:\n        logger.warning(\"Already logged out\")\n\n\n@dispenser_group.command(\"login\", help=\"Login to your Dispenser API account.\")\n@click.option(\n    \"--ci\", help=\"Generate an access token for CI. Issued for 30 days.\", is_flag=True, default=False, required=False\n)\n@click.option(\n    \"--output\",\n    \"-o\",\n    \"output_mode\",\n    required=False,\n    type=click.Choice([OutputMode.STDOUT.value, OutputMode.FILE.value], case_sensitive=False),\n    default=OutputMode.STDOUT.value,\n    help=\"Choose the output method for the access token. Defaults to `stdout`. 
Only applicable when --ci flag is set.\",\n)\n@click.option(\n    \"--file\",\n    \"-f\",\n    \"output_filename\",\n    required=False,\n    type=str,\n    help=(\n        \"Output filename where you want to store the generated access token.\"\n        f\"Defaults to `{DEFAULT_CI_TOKEN_FILENAME}`. Only applicable when --ci flag is set and --output mode is `file`.\"\n    ),\n    default=DEFAULT_CI_TOKEN_FILENAME,\n)\ndef login_command(*, ci: bool, output_mode: str, output_filename: str) -> None:\n    if not ci and is_authenticated():\n        logger.info(\"You are already logged in\")\n        return\n\n    try:\n        audience = DispenserApiAudiences.CI if ci else DispenserApiAudiences.USER\n        custom_scopes = None if ci else \"offline_access\"\n        token_data = get_oauth_tokens(api_audience=audience, custom_scopes=custom_scopes)\n\n        if not token_data:\n            raise click.ClickException(\"Error obtaining auth token\")\n\n        if ci:\n            _handle_ci_token(output_mode, output_filename, token_data)\n        else:\n            set_dispenser_credentials(token_data)\n            logger.info(\"Login successful\")\n\n    except Exception as e:\n        raise click.ClickException(str(e)) from e\n\n\n@dispenser_group.command(\"fund\", help=\"Fund your wallet address with TestNet ALGOs.\")\n@click.option(\n    \"--receiver\",\n    \"-r\",\n    required=True,\n    help=\"Address or alias of the receiver to fund with TestNet ALGOs.\",\n    type=click.STRING,\n)\n@click.option(\n    \"--amount\", \"-a\", required=True, help=\"Amount to fund. Defaults to microAlgos.\", default=1000000, type=click.INT\n)\n@click.option(\n    \"--whole-units\",\n    \"whole_units\",\n    is_flag=True,\n    help=\"Use whole units (Algos) instead of smallest divisible units (microAlgos). 
Disabled by default.\",\n    default=False,\n    type=click.BOOL,\n)\ndef fund_command(*, receiver: str, amount: int, whole_units: bool) -> None:\n    if not is_authenticated():\n        logger.error(NOT_AUTHENTICATED_MESSAGE)\n        return\n\n    receiver_address = get_address(receiver)\n\n    default_asset = DISPENSER_ASSETS[DispenserAssetName.ALGO]\n    if whole_units:\n        amount = amount * (10**default_asset.decimals)\n        logger.debug(f\"Converted algos to microAlgos: {amount}\")\n\n    try:\n        response = process_dispenser_request(\n            url_suffix=f\"fund/{DISPENSER_ASSETS[DispenserAssetName.ALGO].asset_id}\",\n            data={\"receiver\": receiver_address, \"amount\": amount, \"assetID\": default_asset.asset_id},\n            method=\"POST\",\n        )\n    except Exception as e:\n        logger.error(f\"Error: {e}\")\n    else:\n        response_body = response.json()\n        processed_amount = (\n            response_body[\"amount\"] / (10**default_asset.decimals) if whole_units else response_body[\"amount\"]\n        )\n        asset_description = default_asset.description if whole_units else f\"μ{default_asset.description}\"\n        txn_url = get_explorer_url(\n            identifier=response_body[\"txID\"], network=\"testnet\", entity_type=ExplorerEntityType.TRANSACTION\n        )\n        logger.info(f\"Successfully funded {processed_amount} {asset_description}. 
Browse transaction at {txn_url}\")\n\n\n@dispenser_group.command(\"refund\", help=\"Refund ALGOs back to the dispenser wallet address.\")\n@click.option(\"--txID\", \"-t\", \"tx_id\", required=True, help=\"Transaction ID of your refund operation.\")\ndef refund_command(*, tx_id: str) -> None:\n    if not is_authenticated():\n        logger.error(NOT_AUTHENTICATED_MESSAGE)\n        return\n\n    try:\n        process_dispenser_request(url_suffix=\"refund\", data={\"refundTransactionID\": tx_id})\n    except Exception as e:\n        logger.error(f\"Error: {e}\")\n    else:\n        logger.info(\"Successfully processed refund transaction\")\n\n\n@dispenser_group.command(\"limit\", help=\"Get information about current fund limit on your account. Resets daily.\")\n@click.option(\n    \"--whole-units\",\n    \"whole_units\",\n    is_flag=True,\n    help=\"Use whole units (Algos) instead of smallest divisible units (microAlgos). Disabled by default.\",\n    default=False,\n)\ndef get_fund_limit(*, whole_units: bool) -> None:\n    if not is_authenticated():\n        logger.error(NOT_AUTHENTICATED_MESSAGE)\n        return\n\n    default_asset = DISPENSER_ASSETS[DispenserAssetName.ALGO]\n    try:\n        response = process_dispenser_request(url_suffix=f\"fund/{default_asset.asset_id}/limit\", data={}, method=\"GET\")\n    except Exception as e:\n        logger.error(f\"Error: {e}\")\n    else:\n        response_amount = response.json()[\"amount\"]\n        processed_amount = response_amount / (10**default_asset.decimals) if whole_units else response_amount\n        asset_description = default_asset.description if whole_units else f\"μ{default_asset.description}\"\n\n        logger.info(f\"Remaining daily fund limit: {processed_amount} {asset_description}\")\n"
  },
  {
    "path": "src/algokit/cli/doctor.py",
    "content": "import datetime as dt\nimport logging\nimport platform\nimport sys\n\nimport click\nimport pyclip  # type: ignore[import-untyped]\n\nfrom algokit.core.conf import get_current_package_version\nfrom algokit.core.config_commands.version_prompt import get_latest_github_version\nfrom algokit.core.doctor import DoctorResult, check_dependency\nfrom algokit.core.log_handlers import CONSOLE_LOG_HANDLER_NAME\nfrom algokit.core.sandbox import (\n    COMPOSE_VERSION_COMMAND,\n    get_min_compose_version,\n)\nfrom algokit.core.utils import is_binary_mode\nfrom algokit.core.utils import is_windows as get_is_windows\n\nlogger = logging.getLogger(__name__)\n\nWARNING_COLOR = \"yellow\"\nCRITICAL_COLOR = \"red\"\n\n\n@click.command(\n    \"doctor\",\n    short_help=\"Diagnose potential environment issues that may affect AlgoKit.\",\n    context_settings={\n        \"ignore_unknown_options\": True,\n    },\n)\n@click.option(\n    \"--copy-to-clipboard\",\n    \"-c\",\n    help=\"Copy the contents of the doctor message (in Markdown format) in your clipboard.\",\n    is_flag=True,\n    default=False,\n)\ndef doctor_command(*, copy_to_clipboard: bool) -> None:  # noqa: C901, PLR0912\n    \"\"\"Diagnose potential environment issues that may affect AlgoKit.\n\n    Will search the system for AlgoKit dependencies and show their versions, as well as identifying any\n    potential issues.\"\"\"\n    from algokit.core.config_commands.container_engine import get_container_engine\n\n    # Check if we're in verbose mode by examining the console log handler level\n    verbose = False\n    for handler in logging.getLogger().handlers:\n        if handler.name == CONSOLE_LOG_HANDLER_NAME and handler.level <= logging.DEBUG:\n            verbose = True\n            break\n\n    os_type = platform.system()\n    is_windows = get_is_windows()\n    container_engine = get_container_engine()\n    docs_url = f\"https://{container_engine}.io\"\n    compose_minimum_version = 
get_min_compose_version()\n    service_outputs = {\n        \"timestamp\": DoctorResult(ok=True, output=dt.datetime.now(dt.timezone.utc).replace(microsecond=0).isoformat()),\n        \"AlgoKit\": _get_algokit_version_output(),\n        \"AlgoKit Python\": DoctorResult(ok=True, output=f\"{sys.version} (location: {sys.prefix})\"),\n        \"OS\": DoctorResult(ok=True, output=platform.platform()),\n        container_engine: check_dependency(\n            [container_engine, \"--version\"],\n            missing_help=[f\"`{container_engine}` required to run `algokit localnet` command; install via {docs_url}\"],\n        ),\n        f\"{container_engine} compose\": check_dependency(\n            COMPOSE_VERSION_COMMAND,\n            minimum_version=compose_minimum_version,\n            minimum_version_help=[\n                f\"{container_engine.capitalize()} Compose {compose_minimum_version} required to run `algokit localnet` command;\",  # noqa: E501\n                f\"install via {docs_url}\",\n            ],\n        ),\n        \"git\": check_dependency(\n            [\"git\", \"--version\"],\n            missing_help=(\n                [\n                    \"Git required to `run algokit init`; install via `winget install -e --id Git.Git` if using winget,\",\n                    \"or via https://github.com/git-guides/install-git#install-git-on-windows\",\n                ]\n                if is_windows\n                else [\"Git required to run `algokit init`; install via https://github.com/git-guides/install-git\"]\n            ),\n        ),\n        \"python\": check_dependency([\"python\", \"--version\"], include_location=True),\n        \"python3\": check_dependency([\"python3\", \"--version\"], include_location=True),\n        \"pipx\": check_dependency(\n            [\"pipx\", \"--version\"],\n            missing_help=[\n                \"pipx is required if poetry is not installed in order to install it automatically;\",\n                \"install via 
https://pypa.github.io/pipx/\",\n            ],\n        ),\n        \"poetry\": check_dependency(\n            [\"poetry\", \"--version\"],\n            missing_help=[\n                \"Poetry is required for some Python-based templates;\",\n                \"install via `algokit project bootstrap` within project directory, or via:\",\n                \"https://python-poetry.org/docs/#installation\",\n            ],\n        ),\n        \"node\": check_dependency(\n            [\"node\", \"--version\"],\n            missing_help=[\n                \"Node.js is required for some Node.js-based templates;\",\n                \"install via `algokit project bootstrap` within project directory, or via:\",\n                \"https://nodejs.dev/en/learn/how-to-install-nodejs/\",\n            ],\n        ),\n        \"npm\": check_dependency([\"npm\" if not is_windows else \"npm.cmd\", \"--version\"]),\n    }\n    if is_windows:\n        service_outputs[\"winget\"] = check_dependency([\"winget\", \"--version\"])\n    elif os_type == \"Darwin\":\n        service_outputs[\"brew\"] = check_dependency([\"brew\", \"--version\"])\n\n    critical_services = [container_engine, f\"{container_engine} compose\", \"git\"]\n    # Print the status details\n    for key, value in service_outputs.items():\n        if value.ok:\n            color = None\n        else:\n            color = CRITICAL_COLOR if key in critical_services else WARNING_COLOR\n        msg = click.style(f\"{key}: \", bold=True) + click.style(value.output, fg=color)\n        for ln in value.extra_help or []:\n            msg += f\"\\n  {ln}\"\n        logger.info(msg)\n\n    # Get dependencies info if in verbose mode and not running from a binary\n    dependencies = {}\n    if verbose and not is_binary_mode():\n        # Add package dependencies section\n        logger.info(\"\\nCLI package dependencies:\")\n        dependencies = _get_production_dependencies()\n        for package, version in dependencies.items():\n  
          logger.info(f\"{package}: {version}\")\n\n    # print end message anyway\n    logger.info(\n        \"\\n\"\n        \"If you are experiencing a problem with AlgoKit, feel free to submit an issue via:\\n\"\n        \"https://github.com/algorandfoundation/algokit-cli/issues/new\\n\"\n        \"Please include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\"\n    )\n\n    if copy_to_clipboard:\n        output_lines = []\n\n        # Add service outputs\n        for key, value in service_outputs.items():\n            output_lines.append(f\"* {key}: \" + \"\\n  \".join([value.output, *(value.extra_help or [])]))\n\n        # Add package dependencies if verbose\n        if verbose:\n            output_lines.append(\"\\n* Package dependencies:\")\n            for package, version in dependencies.items():\n                output_lines.append(f\"  * {package}: {version}\")\n\n        pyclip.copy(\"\\n\".join(output_lines))\n\n    if any(not value.ok for value in service_outputs.values()):\n        raise click.exceptions.Exit(code=1)\n\n\ndef _get_algokit_version_output() -> DoctorResult:\n    current = get_current_package_version()\n    try:\n        latest = get_latest_github_version()\n    except Exception as ex:\n        logger.warning(\"Failed to check latest AlgoKit release version\", exc_info=ex)\n        latest = None\n    if latest is None or current == latest:\n        output = current\n    else:\n        output = click.style(current, fg=WARNING_COLOR) + f\" (latest: {latest})\"\n    return DoctorResult(ok=True, output=output)\n\n\ndef _get_production_dependencies() -> dict[str, str]:\n    \"\"\"Gets versions of all direct production dependencies.\"\"\"\n    try:\n        import importlib.metadata\n        import re\n\n        # Get package dependencies from metadata\n        dist = importlib.metadata.distribution(\"algokit\")\n        requires = dist.requires or []\n\n        result = {}\n        for req in 
requires:\n            if match := re.match(r\"^([A-Za-z0-9_\\-\\.]+)\", req):\n                dep = match.group(1)\n                try:\n                    result[dep] = importlib.metadata.version(dep)\n                except importlib.metadata.PackageNotFoundError:\n                    result[dep] = \"Not installed\"\n\n        return result\n\n    except Exception:\n        return {\"Error\": \"Could not retrieve dependencies\"}\n"
  },
  {
    "path": "src/algokit/cli/explore.py",
    "content": "import logging\nimport os\nfrom typing import TypedDict\nfrom urllib.parse import urlencode\n\nimport click\n\nfrom algokit.core.sandbox import DEFAULT_ALGOD_PORT, DEFAULT_ALGOD_SERVER, DEFAULT_ALGOD_TOKEN, DEFAULT_INDEXER_PORT\nfrom algokit.core.utils import is_wsl\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkConfigurationRequired(TypedDict):\n    algod_url: str\n    indexer_url: str\n\n\nclass NetworkConfiguration(NetworkConfigurationRequired, total=False):\n    algod_port: int\n    algod_token: str\n\n    indexer_port: int\n    indexer_token: str\n\n    kmd_token: str\n    kmd_url: str\n    kmd_port: int\n\n\nGITPOD_URL = os.environ.get(\"GITPOD_WORKSPACE_URL\")\nCODESPACE_NAME = os.environ.get(\"CODESPACE_NAME\")\n\nif GITPOD_URL:\n    algod_url = GITPOD_URL.replace(\"https://\", \"https://4001-\")\n    indexer_url = GITPOD_URL.replace(\"https://\", \"https://8980-\")\n    kmd_url = GITPOD_URL.replace(\"https://\", \"https://4002-\")\n    algod_port = 443\n    indexer_port = 443\n    kmd_port = 443\nelif CODESPACE_NAME:\n    algod_url = f\"https://{CODESPACE_NAME}-4001.app.github.dev\"\n    indexer_url = f\"https://{CODESPACE_NAME}-8980.app.github.dev\"\n    kmd_url = f\"https://{CODESPACE_NAME}-4002.app.github.dev\"\n    algod_port = 443\n    indexer_port = 443\n    kmd_port = 443\nelse:\n    algod_url = DEFAULT_ALGOD_SERVER\n    indexer_url = DEFAULT_ALGOD_SERVER\n    kmd_url = DEFAULT_ALGOD_SERVER\n    algod_port = DEFAULT_ALGOD_PORT\n    indexer_port = DEFAULT_INDEXER_PORT\n    kmd_port = DEFAULT_ALGOD_PORT + 1\n\nNETWORKS: dict[str, NetworkConfiguration] = {\n    \"localnet\": {\n        \"algod_url\": algod_url,\n        \"indexer_url\": indexer_url,\n        \"algod_port\": algod_port,\n        \"algod_token\": DEFAULT_ALGOD_TOKEN,\n        \"indexer_port\": indexer_port,\n        \"indexer_token\": DEFAULT_ALGOD_TOKEN,\n        \"kmd_token\": DEFAULT_ALGOD_TOKEN,\n        \"kmd_port\": kmd_port,\n        \"kmd_url\": 
kmd_url,\n    },  # TODO: query these instead of using constants\n    \"testnet\": {\n        \"algod_url\": \"https://testnet-api.algonode.cloud\",\n        \"indexer_url\": \"https://testnet-idx.algonode.cloud\",\n    },\n    \"mainnet\": {\n        \"algod_url\": \"https://mainnet-api.algonode.cloud\",\n        \"indexer_url\": \"https://mainnet-idx.algonode.cloud\",\n    },\n}\n\n\ndef get_algokit_url(network: str) -> str:\n    return f\"https://explore.algokit.io/{network}\"\n\n\ndef get_explore_url(network: str) -> str:\n    if network == \"localnet\" and NETWORKS[network].get(\"algod_url\") != DEFAULT_ALGOD_SERVER:\n        query_string = urlencode(\n            [\n                (key, value)\n                for key, value in NETWORKS[network].items()\n                if key in [\"algod_url\", \"algod_port\", \"indexer_url\", \"indexer_port\", \"kmd_url\", \"kmd_port\"]\n            ]\n        )\n        return f\"{get_algokit_url(network)}?{query_string}\"\n\n    return get_algokit_url(network)\n\n\n@click.command(\"explore\", help=\"Explore the specified network using lora.\")\n@click.argument(\"network\", type=click.Choice(list(NETWORKS)), default=\"localnet\", required=False)\ndef explore_command(network: str) -> None:\n    url = get_explore_url(network)\n    logger.info(f\"Opening {network} explorer in your default browser\")\n    logger.info(f\"URL: {url}\")\n\n    import webbrowser\n\n    if is_wsl():\n        warning = (\n            \"Unable to open browser from WSL environment.\\n\"\n            \"Ensure 'wslu' is installed: (https://wslutiliti.es/wslu/install.html),\\n\"\n            f\"or open the URL manually: '{url}'.\"\n        )\n        try:\n            if not webbrowser.open(url):\n                logger.warning(warning)\n        except Exception:\n            logger.warning(warning)\n    else:\n        # https://github.com/pallets/click/issues/2868 restore click.launch once bug is fixed\n        try:\n            if not 
webbrowser.open(url):\n                logger.warning(f\"Failed to open browser. Please open this URL manually: {url}\")\n        except Exception as e:\n            logger.debug(\"Error opening browser\", exc_info=e)\n            logger.warning(f\"Failed to open browser. Please open this URL manually: {url}\")\n"
  },
  {
    "path": "src/algokit/cli/generate.py",
    "content": "import logging\nimport shutil\nfrom functools import cache\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.core.generate import load_generators, run_generator\nfrom algokit.core.typed_client_generation import AppSpecsNotFoundError, ClientGenerator\n\nlogger = logging.getLogger(__name__)\n\n\n@cache\ndef _load_custom_generate_commands(project_dir: Path) -> dict[str, click.Command]:\n    \"\"\"\n    Load custom generate commands from .algokit.toml file.\n    :param project_dir: Project directory path.\n    :return: Custom generate commands.\n    \"\"\"\n\n    generators = load_generators(project_dir)\n    commands_table: dict[str, click.Command] = {}\n\n    for generator in generators:\n\n        @click.command(\n            name=generator.name, help=generator.description or \"Generator command description is not supplied.\"\n        )\n        @click.option(\n            \"answers\",\n            \"--answer\",\n            \"-a\",\n            multiple=True,\n            help=\"Answers key/value pairs to pass to the template.\",\n            nargs=2,\n            default=[],\n            metavar=\"<key> <value>\",\n        )\n        @click.option(\n            \"path\",\n            \"--path\",\n            \"-p\",\n            help=f\"Path to {generator.name} generator. (Default: {generator.path})\",\n            type=click.Path(exists=True),\n            default=generator.path,\n        )\n        @click.option(\n            \"--force\",\n            \"-f\",\n            is_flag=True,\n            required=False,\n            default=False,\n            type=click.BOOL,\n            help=\"Executes generator without confirmation. 
Use with trusted sources only.\",\n        )\n        def command(\n            *,\n            answers: list[tuple[str, str]],\n            path: Path,\n            force: bool,\n        ) -> None:\n            if not shutil.which(\"git\"):\n                raise click.ClickException(\n                    \"Git not found; please install git and add to path.\\n\"\n                    \"See https://github.com/git-guides/install-git for more information.\"\n                )\n\n            answers_dict = dict(answers)\n\n            if not force and not click.confirm(\n                \"You are about to run a generator. Please make sure it's from a \"\n                \"trusted source (for example, official AlgoKit Templates). Do you want to proceed?\",\n                default=False,\n            ):\n                logger.warning(\"Generator execution cancelled.\")\n                return None\n\n            return run_generator(answers_dict, path)\n\n        commands_table[generator.name] = command\n\n    return commands_table\n\n\nclass GeneratorGroup(click.Group):\n    def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:\n        return_value = super().get_command(ctx, cmd_name)\n\n        if return_value is not None:\n            return return_value\n\n        commands = _load_custom_generate_commands(Path.cwd())\n        return commands.get(cmd_name)\n\n    def list_commands(self, ctx: click.Context) -> list[str]:\n        predefined_command_names = super().list_commands(ctx)\n        dynamic_commands = _load_custom_generate_commands(Path.cwd())\n        dynamic_command_names = list(dynamic_commands)\n\n        return sorted(predefined_command_names + dynamic_command_names)\n\n\n@click.group(\"generate\", cls=GeneratorGroup)\ndef generate_group() -> None:\n    \"\"\"Generate code for an Algorand project.\"\"\"\n\n\n@generate_group.command(\n    \"client\",\n    context_settings={\n        \"ignore_unknown_options\": True,\n    },\n 
   add_help_option=False,\n)\n@click.argument(\n    \"app_spec_path_or_dir\",\n    type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),\n    required=False,\n)\n@click.option(\n    \"output_path_pattern\",\n    \"--output\",\n    \"-o\",\n    type=click.Path(exists=False),\n    default=None,\n    help=\"Path to the output file. The following tokens can be used to substitute into the output path:\"\n    \" {contract_name}, {app_spec_dir}\",\n)\n@click.option(\n    \"--language\",\n    \"-l\",\n    default=None,\n    type=click.Choice(ClientGenerator.languages()),\n    help=\"Programming language of the generated client code\",\n)\n@click.option(\n    \"--version\",\n    \"-v\",\n    \"version\",\n    default=None,\n    help=\"The client generator version to pin to, for example, 1.0.0. \"\n    \"If no version is specified, AlgoKit checks if the client generator is installed and runs the installed version. \"\n    \"If the client generator is not installed, AlgoKit runs the latest version. \"\n    \"If a version is specified, AlgoKit checks if an installed version matches and runs the installed version. 
\"\n    \"Otherwise, AlgoKit runs the specified version.\",\n)\n@click.argument(\"args\", nargs=-1, type=click.UNPROCESSED)\ndef generate_client(\n    app_spec_path_or_dir: Path | None,\n    output_path_pattern: str | None,\n    language: str | None,\n    version: str | None,\n    args: tuple[str, ...],\n) -> None:\n    \"\"\"Create a typed ApplicationClient from an ARC-32/56 application.json\n\n    Supply the path to an application specification file or a directory to recursively search\n    for \"application.json\" files\"\"\"\n\n    generator = None\n    if language is not None:\n        generator = ClientGenerator.create_for_language(language, version)\n    elif output_path_pattern is not None:\n        extension = Path(output_path_pattern).suffix\n        try:\n            generator = ClientGenerator.create_for_extension(extension, version)\n        except KeyError as ex:\n            raise click.ClickException(\n                \"Could not determine language from file extension, Please use the --language option to specify a \"\n                \"target language\"\n            ) from ex\n\n    help_in_args = any(_is_help_flag(arg) for arg in args)\n    help_in_positional = app_spec_path_or_dir is not None and _is_help_flag(app_spec_path_or_dir)\n\n    if help_in_positional:\n        ctx = click.get_current_context()\n        click.echo(ctx.get_help())\n    elif generator:\n        if help_in_args:\n            generator.show_help()\n            return\n\n        if app_spec_path_or_dir is None:\n            raise click.ClickException(\"Missing argument 'APP_SPEC_PATH_OR_DIR'.\")\n\n        try:\n            generator.generate_all(\n                app_spec_path_or_dir,\n                output_path_pattern,\n                list(args),\n                raise_on_path_resolution_failure=False,\n            )\n        except AppSpecsNotFoundError as ex:\n            raise click.ClickException(\"No app specs found\") from ex\n    else:\n        raise 
click.ClickException(\n            \"One of --language or --output is required to determine the client language to generate\"\n        )\n\n\nHELP_FLAGS = (\"--help\", \"-h\")\n\n\ndef _is_help_flag(value: str | Path) -> bool:\n    \"\"\"Check if a value is a help flag (--help or -h).\"\"\"\n    if isinstance(value, Path):\n        return any(str(value).endswith(flag) for flag in HELP_FLAGS)\n    return value in HELP_FLAGS\n"
  },
  {
    "path": "src/algokit/cli/goal.py",
    "content": "import logging\n\nimport click\n\nfrom algokit.core import proc\nfrom algokit.core.config_commands.container_engine import get_container_engine\nfrom algokit.core.goal import (\n    get_volume_mount_path_docker,\n    get_volume_mount_path_local,\n    post_process,\n    preprocess_command_args,\n)\nfrom algokit.core.sandbox import SANDBOX_BASE_NAME, ComposeFileStatus, ComposeSandbox\n\nlogger = logging.getLogger(__name__)\n\n\n@click.command(\n    \"goal\",\n    short_help=\"Run the Algorand goal CLI against the AlgoKit LocalNet.\",\n    context_settings={\n        \"ignore_unknown_options\": True,\n    },\n)\n@click.option(\n    \"--console\",\n    is_flag=True,\n    help=\"Open a Bash console so you can execute multiple goal commands and/or interact with a filesystem.\",\n    default=False,\n)\n@click.option(\n    \"--interactive\",\n    is_flag=True,\n    help=\"Force running the goal command in interactive mode.\",\n    default=False,\n)\n@click.argument(\"goal_args\", nargs=-1, type=click.UNPROCESSED)\ndef goal_command(*, console: bool, interactive: bool, goal_args: list[str]) -> None:  # noqa: C901, PLR0912\n    \"\"\"\n    Run the Algorand goal CLI against the AlgoKit LocalNet.\n\n    Look at https://dev.algorand.co/algokit/algokit-cli/goal for more information.\n    \"\"\"\n    goal_args = list(goal_args)\n    container_engine = get_container_engine()\n    try:\n        proc.run(\n            [container_engine, \"version\"],\n            bad_return_code_error_message=f\"{container_engine} engine isn't running; please start it.\",\n        )\n    except OSError as ex:\n        # an IOError (such as PermissionError or FileNotFoundError) will only occur if \"docker\"\n        # isn't an executable in the user's path, which means docker isn't installed\n        docs_url = (\n            \"https://www.docker.com/get-started/\" if container_engine == \"docker\" else \"https://podman.io/get-started\"\n        )\n        raise click.ClickException(\n 
           f\"{container_engine} not found; please install {container_engine} and add to path.\\n\"\n            f\"See {docs_url} for more information.\"\n        ) from ex\n\n    sandbox = ComposeSandbox.from_environment()\n    if sandbox is None:\n        sandbox = ComposeSandbox()\n\n    if sandbox.name != SANDBOX_BASE_NAME:\n        logger.info(\"A named LocalNet is running, goal command will be executed against the named LocalNet\")\n\n    volume_mount_path_local = get_volume_mount_path_local(directory_name=sandbox.name)\n    volume_mount_path_docker = get_volume_mount_path_docker()\n\n    compose_file_status = sandbox.compose_file_status()\n    if compose_file_status is not ComposeFileStatus.UP_TO_DATE and sandbox.name == SANDBOX_BASE_NAME:\n        raise click.ClickException(\"LocalNet definition is out of date; please run `algokit localnet reset` first!\")\n    ps_result = sandbox.ps(\"algod\")\n    match ps_result:\n        case [{\"State\": \"running\"}]:\n            pass\n        case _:\n            logger.info(\"LocalNet isn't running\")\n            sandbox.up()\n\n    if console:\n        if goal_args:\n            logger.warning(\"--console opens an interactive shell, remaining arguments are being ignored\")\n        logger.info(\"Opening Bash console on the algod node; execute `exit` to return to original console\")\n        result = proc.run_interactive(f\"{container_engine} exec -it -w /root algokit_{sandbox.name}_algod bash\".split())\n    else:\n        cmd = f\"{container_engine} exec {'--tty' if interactive else ''} --interactive --workdir /root algokit_{sandbox.name}_algod goal\".split()  # noqa: E501\n        input_files, output_files, goal_args = preprocess_command_args(\n            goal_args, volume_mount_path_local, volume_mount_path_docker\n        )\n        cmd = cmd + goal_args\n\n        if interactive:\n            result = proc.run_interactive(cmd)\n        else:\n            # Try non-interactive first, fallback to interactive 
if it fails with input-related error\n            result = proc.run(\n                cmd,\n                stdout_log_level=logging.INFO,\n                prefix_process=False,\n                pass_stdin=True,\n            )\n            if result.exit_code != 0 and \"inappropriate ioctl\" in (result.output or \"\"):\n                # Fallback to interactive mode if we detect TTY-related errors\n                logger.debug(\"Command failed with TTY error, retrying in interactive mode\")\n                cmd.insert(2, \"--tty\")\n                result = proc.run_interactive(cmd)\n\n        post_process(input_files, output_files, volume_mount_path_local)\n\n    if result.exit_code != 0:\n        raise click.exceptions.Exit(result.exit_code)\n"
  },
  {
    "path": "src/algokit/cli/init/__init__.py",
    "content": "import click\n\n# Import the core logic function from command.py\nfrom algokit.cli.init.command import initialize_new_project\n\n# Import the example subcommand\nfrom algokit.cli.init.example import example_command\n\n# Import necessary helpers/validators directly needed for options\nfrom algokit.cli.init.helpers import (  # Assuming helpers are moved to a separate file or kept here\n    _get_blessed_templates,\n    _validate_dir_name,\n)\n\n\n# Define the group, allow invocation without subcommand, define ALL original options here\n@click.group(\n    \"init\",\n    invoke_without_command=True,\n    short_help=\"Initializes a new project from a template; run from project parent directory.\",\n)\n@click.option(\n    \"directory_name\",\n    \"--name\",\n    \"-n\",\n    type=str,\n    help=\"Name of the project / directory / repository to create.\",\n    callback=_validate_dir_name,\n)\n@click.option(\n    \"template_name\",\n    \"--template\",\n    \"-t\",\n    type=click.Choice([k.value for k in _get_blessed_templates()]),\n    default=None,\n    help=\"Name of an official template to use. To choose interactively, run this command with no arguments.\",\n)\n@click.option(\n    \"--template-url\",\n    type=str,\n    default=None,\n    help=\"URL to a git repo with a custom project template.\",\n    metavar=\"URL\",\n)\n@click.option(\n    \"--template-url-ref\",\n    type=str,\n    default=None,\n    help=\"Specific tag, branch or commit to use on git repo specified with --template-url. 
Defaults to latest.\",\n    metavar=\"URL\",\n)\n@click.option(\n    \"--UNSAFE-SECURITY-accept-template-url\",\n    is_flag=True,\n    default=False,\n    help=(\n        \"Accept the specified template URL, \"\n        \"acknowledging the security implications of arbitrary code execution trusting an unofficial template.\"\n    ),\n)\n@click.option(\"use_git\", \"--git/--no-git\", default=None, help=\"Initialise git repository in directory after creation.\")\n@click.option(\n    \"use_defaults\",\n    \"--defaults\",\n    is_flag=True,\n    default=False,\n    help=\"Automatically choose default answers without asking when creating this template.\",\n)\n@click.option(\n    \"run_bootstrap\",\n    \"--bootstrap/--no-bootstrap\",\n    is_flag=True,\n    default=None,\n    help=\"Whether to run `algokit project bootstrap` to install and configure the new project's dependencies locally.\",\n)\n@click.option(\n    \"open_ide\",\n    \"--ide/--no-ide\",\n    is_flag=True,\n    default=True,\n    help=\"Whether to open an IDE for you if the IDE and IDE config are detected. Supported IDEs: VS Code.\",\n)\n@click.option(\n    \"use_workspace\",\n    \"--workspace/--no-workspace\",\n    is_flag=True,\n    default=True,\n    help=(\n        \"Whether to prefer structuring standalone projects as part of a workspace. 
\"\n        \"An AlgoKit workspace is a conventional project structure that allows managing \"\n        \"multiple standalone projects in a monorepo.\"\n    ),\n)\n@click.option(\n    \"answers\",\n    \"--answer\",\n    \"-a\",\n    multiple=True,\n    help=\"Answers key/value pairs to pass to the template.\",\n    nargs=2,\n    default=[],\n    metavar=\"<key> <value>\",\n)\n@click.pass_context\ndef init_group(  # noqa: PLR0913\n    ctx: click.Context,\n    *,\n    directory_name: str | None,\n    template_name: str | None,\n    template_url: str | None,\n    template_url_ref: str | None,\n    unsafe_security_accept_template_url: bool,\n    use_git: bool | None,\n    answers: list[tuple[str, str]],\n    use_defaults: bool,\n    run_bootstrap: bool | None,\n    use_workspace: bool,\n    open_ide: bool,\n) -> None:\n    \"\"\"\n    Initializes a new project from a template, including prompting\n    for template specific questions to be used in template rendering.\n\n    Templates can be default templates shipped with AlgoKit, or custom\n    templates in public Git repositories.\n\n    Includes ability to initialise Git repository, run algokit project bootstrap and\n    automatically open Visual Studio Code.\n\n    This should be run in the parent directory that you want the project folder\n    created in.\n\n    By default, the `--workspace` flag creates projects within a workspace structure or integrates them into an existing\n    one, promoting organized management of multiple projects. Alternatively,\n    to disable this behavior use the `--no-workspace` flag, which ensures\n    the new project is created in a standalone target directory. 
This is\n    suitable for isolated projects or when workspace integration is unnecessary.\n    \"\"\"\n\n    if ctx.invoked_subcommand is None:\n        # No subcommand was called, so execute the default init logic\n        # Pass all the options received by the group down to the implementation function\n        initialize_new_project(\n            directory_name=directory_name,\n            template_name=template_name,\n            template_url=template_url,\n            template_url_ref=template_url_ref,\n            unsafe_security_accept_template_url=unsafe_security_accept_template_url,\n            use_git=use_git,\n            answers=answers,\n            use_defaults=use_defaults,\n            run_bootstrap=run_bootstrap,\n            use_workspace=use_workspace,\n            open_ide=open_ide,\n        )\n    # else: a subcommand was invoked\n\n\n# Add the subcommands to the group\ninit_group.add_command(example_command)\n"
  },
  {
    "path": "src/algokit/cli/init/command.py",
    "content": "import logging\nimport re\nimport shutil\nfrom collections.abc import Callable\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import NoReturn\n\nimport click\nimport prompt_toolkit.document\nimport questionary\n\nfrom algokit.cli.init.helpers import (\n    TemplateKey,\n    TemplateSource,\n    _get_blessed_templates,\n)\nfrom algokit.core import proc, questionary_extensions\nfrom algokit.core.conf import get_algokit_config\nfrom algokit.core.init import (\n    append_project_to_vscode_workspace,\n    get_git_user_info,\n    is_valid_project_dir_name,\n    resolve_vscode_workspace_file,\n)\nfrom algokit.core.log_handlers import EXTRA_EXCLUDE_FROM_CONSOLE\nfrom algokit.core.project import ProjectType, get_workspace_project_path\nfrom algokit.core.project.bootstrap import (\n    MAX_BOOTSTRAP_DEPTH,\n    bootstrap_any_including_subdirs,\n    project_minimum_algokit_version_check,\n)\nfrom algokit.core.sandbox import DEFAULT_ALGOD_PORT, DEFAULT_ALGOD_SERVER, DEFAULT_ALGOD_TOKEN, DEFAULT_INDEXER_PORT\nfrom algokit.core.utils import get_python_paths\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_STATIC_ANSWERS: dict[str, str] = {\n    \"algod_token\": DEFAULT_ALGOD_TOKEN,\n    \"algod_server\": DEFAULT_ALGOD_SERVER,\n    \"algod_port\": str(DEFAULT_ALGOD_PORT),\n    \"indexer_token\": DEFAULT_ALGOD_TOKEN,\n    \"indexer_server\": DEFAULT_ALGOD_SERVER,\n    \"indexer_port\": str(DEFAULT_INDEXER_PORT),\n}\nDEFAULT_DYNAMIC_ANSWERS: dict[str, Callable[[], str]] = {\n    \"author_name\": lambda: get_git_user_info(\"name\") or \"John Doe\",\n    \"author_email\": lambda: get_git_user_info(\"email\") or \"my@mail.com\",\n}\n\n\ndef _get_default_answers() -> dict[str, str]:\n    \"\"\"get all default answers\"\"\"\n    return {**DEFAULT_STATIC_ANSWERS, **{k: v() for k, v in DEFAULT_DYNAMIC_ANSWERS.items()}}\n\n\n\"\"\"Answers that are not really answers, but useful to pass through to templates in case they want to make use of 
them\"\"\"\n\n\nclass TemplatePresetType(str, Enum):\n    \"\"\"\n    For distinguishing main template preset type question invoked by `algokit init`\n    \"\"\"\n\n    SMART_CONTRACT = \"Smart Contracts 📜\"\n    DAPP_FRONTEND = \"DApp Frontend 🖥️\"\n    SMART_CONTRACT_AND_DAPP_FRONTEND = \"Smart Contracts & DApp Frontend 🎛️\"\n    CUSTOM_TEMPLATE = \"Custom Template 🛠️\"\n\n\nclass ContractLanguage(Enum):\n    \"\"\"\n    For programming languages that have corresponding smart contract languages\n    \"\"\"\n\n    PYTHON = \"Python 🐍\"\n    TYPESCRIPT = \"TypeScript 📘\"\n\n\nLANGUAGE_TO_TEMPLATE_MAP = {\n    ContractLanguage.PYTHON: TemplateKey.PYTHON,\n    ContractLanguage.TYPESCRIPT: TemplateKey.TYPESCRIPT,\n}\n\n\n_unofficial_template_warning = (\n    \"Community templates have not been reviewed, and can execute arbitrary code.\\n\"\n    \"Please inspect the template repository, and pay particular attention to the \"\n    \"values of _tasks, _migrations and _jinja_extensions in copier.yml\"\n)\n\n\ndef _prevent_workspace_nesting(*, workspace_path: Path | None, project_path: Path, use_workspace: bool) -> None:\n    if not workspace_path:\n        return\n\n    if use_workspace and workspace_path != project_path.parent:\n        logger.error(\n            \"Error: Workspace nesting detected. Please run 'init' from the workspace root: \"\n            f\"'{workspace_path}'. 
For more info, refer to \"\n            \"https://github.com/algorandfoundation/algokit-cli/blob/main/docs/features/project/run.md\"\n        )\n        _fail_and_bail()\n\n\ndef initialize_new_project(  # noqa: PLR0913, C901, PLR0915\n    *,\n    directory_name: str | None,\n    template_name: str | None,\n    template_url: str | None,\n    template_url_ref: str | None,\n    unsafe_security_accept_template_url: bool,\n    use_git: bool | None,\n    answers: list[tuple[str, str]],\n    use_defaults: bool,\n    run_bootstrap: bool | None,\n    use_workspace: bool,\n    open_ide: bool,\n) -> None:\n    if not shutil.which(\"git\"):\n        raise click.ClickException(\n            \"Git not found; please install git and add to path.\\n\"\n            \"See https://github.com/git-guides/install-git for more information.\"\n        )\n\n    # parse the input early to prevent frustration - combined with some defaults but they can be overridden\n    answers_dict = _get_default_answers() | dict(answers)\n\n    template = _get_template(\n        name=template_name,\n        url=template_url,\n        commit=template_url_ref,\n        unsafe_security_accept_template_url=unsafe_security_accept_template_url,\n    )\n\n    for custom_answer in template.answers or []:\n        answers_dict.setdefault(*custom_answer)\n\n    logger.debug(f\"template source = {template}\")\n\n    # allow skipping prompt if the template is the base template to avoid redundant\n    # 're-using existing directory' warning in fullstack template init\n    project_path, overwrite_existing_dir = _get_project_path(\n        directory_name_option=directory_name, force=template == _get_blessed_templates()[TemplateKey.BASE]\n    )\n    workspace_path = get_workspace_project_path(project_path)\n    if not overwrite_existing_dir:\n        _prevent_workspace_nesting(\n            workspace_path=workspace_path, project_path=project_path, use_workspace=use_workspace\n        )\n\n    logger.debug(f\"project path 
= {project_path}\")\n    directory_name = project_path.name\n    # provide the directory name as an answer to the template, if not explicitly overridden by user\n    answers_dict.setdefault(\"project_name\", directory_name)\n\n    system_python_path = next(get_python_paths(), None)\n    if system_python_path is not None:\n        answers_dict.setdefault(\"python_path\", system_python_path)\n    else:\n        answers_dict.setdefault(\"python_path\", \"no_system_python_available\")\n\n    project_path = _resolve_workspace_project_path(\n        template_source=template, project_path=project_path, use_workspace=use_workspace\n    )\n    answers_dict.setdefault(\"use_workspace\", \"yes\" if use_workspace else \"no\")\n\n    logger.info(f\"Starting template copy and render at {project_path}...\")\n    # copier is lazy imported for two reasons\n    # 1. it is slow to import on first execution after installing\n    # 2. the import fails if git is not installed (which we check above)\n    # TODO: copier is typed, need to figure out how to force mypy to accept that or submit a PR\n    #       to their repo to include py.typed file\n    from copier._main import Worker\n\n    from algokit.core.init import populate_default_answers\n\n    expected_answers_file = project_path / \".algokit\" / \".copier-answers.yml\"\n    relative_answers_file = expected_answers_file.relative_to(project_path) if expected_answers_file.exists() else None\n\n    # Ensure target directory exists (`copier` >=9.6.0 not creating parent directories during copy automatically)\n    project_path.mkdir(parents=True, exist_ok=True)\n\n    with Worker(\n        src_path=template.url,\n        dst_path=project_path,\n        answers_file=relative_answers_file,\n        data=answers_dict,\n        quiet=True,\n        vcs_ref=template.branch or template.commit,\n        unsafe=True,\n    ) as copier_worker:\n        if use_defaults:\n            populate_default_answers(copier_worker)\n        
expanded_template_url = copier_worker.template.url_expanded\n        logger.debug(f\"final clone URL = {expanded_template_url}\")\n        copier_worker.run_copy()\n\n    logger.info(\"Template render complete!\")\n\n    # reload workspace path cause it might have been just introduced with new project instance\n    workspace_path = get_workspace_project_path(project_path)\n\n    _maybe_move_github_folder(project_path=project_path, use_workspace=use_workspace)\n\n    _maybe_bootstrap(project_path, run_bootstrap=run_bootstrap, use_defaults=use_defaults, use_workspace=use_workspace)\n\n    logger.info(\n        f\"🙌 Project initialized at `{directory_name}`! For template specific next steps, \"\n        \"consult the documentation of your selected template 🧐\"\n    )\n    if re.search(\"https?://\", expanded_template_url):\n        # if the URL looks like an HTTP URL (should be the case for blessed templates), be helpful\n        # and print it out so the user can (depending on terminal) click it to open in browser\n        logger.info(f\"Your selected template comes from:\\n➡️  {expanded_template_url.removesuffix('.git')}\")\n\n    # Check if a README file exists\n    readme_path = next(project_path.glob(\"README*\"), None)\n\n    # Check if a .workspace file exists\n    vscode_workspace_file = resolve_vscode_workspace_file(workspace_path or project_path)\n\n    if vscode_workspace_file:\n        append_project_to_vscode_workspace(project_path=project_path, workspace_path=vscode_workspace_file)\n\n    # Below must be ensured to run after all required filesystem changes are applied to ensure first commit captures\n    # all the changes introduced by init invocation\n    _maybe_git_init(\n        workspace_path or project_path,\n        use_git=use_git,\n        commit_message=f\"Project initialised with AlgoKit CLI using template: {expanded_template_url}\",\n    )\n\n    if (\n        open_ide\n        and ((project_path / \".vscode\").is_dir() or 
vscode_workspace_file)\n        and (code_cmd := shutil.which(\"code\"))\n    ):\n        target_path = str(vscode_workspace_file if vscode_workspace_file else project_path)\n\n        logger.info(\n            \"VSCode configuration detected in project directory, and 'code' command is available on path, \"\n            \"attempting to launch VSCode\"\n        )\n\n        code_cmd_and_args = [code_cmd, target_path]\n\n        if readme_path:\n            code_cmd_and_args.append(str(readme_path))\n\n        proc.run(code_cmd_and_args)\n    elif readme_path:\n        logger.info(f\"Your template includes a {readme_path.name} file, you might want to review that as a next step.\")\n\n\ndef _maybe_bootstrap(\n    project_path: Path, *, run_bootstrap: bool | None, use_defaults: bool, use_workspace: bool\n) -> None:\n    if run_bootstrap is None:\n        # if user didn't specify a bootstrap option, then assume yes if using defaults, otherwise prompt\n        run_bootstrap = use_defaults or questionary_extensions.prompt_confirm(\n            \"Do you want to run `algokit project bootstrap` for this new project? \"\n            \"This will install and configure dependencies allowing it to be run immediately.\",\n            default=True,\n        )\n    if run_bootstrap:\n        # note: we run bootstrap before git commit so that we can commit any lock files,\n        # but if something goes wrong, we don't want to block\n        try:\n            project_minimum_algokit_version_check(project_path)\n\n            # if user prefers to ignore creating the `workspace` setup, set bootstrap depth to 1 else default\n            bootstrap_depth = 1 if not use_workspace else MAX_BOOTSTRAP_DEPTH\n            bootstrap_any_including_subdirs(project_path, ci_mode=False, max_depth=bootstrap_depth)\n        except Exception as e:\n            logger.error(f\"Received an error while attempting bootstrap: {e}\")\n            logger.exception(\n                \"Bootstrap failed. 
Once any errors above are resolved, \"\n                f\"you can run `algokit project bootstrap` in {project_path}\",\n                exc_info=e,\n            )\n\n\ndef _maybe_git_init(project_path: Path, *, use_git: bool | None, commit_message: str) -> None:\n    if _should_attempt_git_init(use_git_option=use_git, project_path=project_path):\n        _git_init(project_path, commit_message=commit_message)\n\n\ndef _maybe_move_github_folder(*, project_path: Path, use_workspace: bool) -> None:\n    \"\"\"Move contents of .github folder from project_path to the root of the workspace if exists\n    and the workspace is used.\n\n    Args:\n        project_path: The path to the project directory.\n        use_workspace: A flag to indicate if the project is initialized with workspace flag\n    \"\"\"\n\n    source_dir = project_path / \".github\"\n\n    if (\n        not use_workspace\n        or not source_dir.exists()\n        or not (workspace_root := get_workspace_project_path(project_path.parent))\n    ):\n        return\n\n    target_dir = workspace_root / \".github\"\n\n    for source_file in source_dir.rglob(\"*\"):\n        if source_file.is_file():\n            target_file = target_dir / source_file.relative_to(source_dir)\n\n            if target_file.exists():\n                logger.debug(f\"Skipping move of {source_file.name} to {target_file} (duplicate exists)\")\n                continue\n\n            try:\n                target_file.parent.mkdir(parents=True, exist_ok=True)\n                shutil.move(str(source_file), str(target_file))\n            except shutil.Error as e:\n                logger.debug(f\"Skipping move of {source_file} to {target_file}: {e}\")\n\n    if any(p.is_file() for p in source_dir.rglob(\"*\")):\n        click.secho(\n            \"Failed to move all files within your project's .github folder to the workspace root. 
\"\n            \"Please review any files that remain in your project's .github folder and manually include \"\n            \"in the root .github directory as required.\",\n            fg=\"yellow\",\n        )\n    else:\n        shutil.rmtree(source_dir)\n        logger.debug(f\"No files found in .github folder after merge. Removing `.github` directory at {source_dir}...\")\n\n\ndef _fail_and_bail() -> NoReturn:\n    logger.info(\"🛑 Bailing out... 👋\")\n    raise click.exceptions.Exit(code=1)\n\n\ndef _repo_url_is_valid(url: str) -> bool:\n    \"\"\"Check the repo URL is valid according to copier\"\"\"\n    from copier._vcs import get_repo\n\n    if not url:\n        return False\n    try:\n        return get_repo(url) is not None\n    except Exception:\n        logger.exception(f\"Error parsing repo URL = {url}\", extra=EXTRA_EXCLUDE_FROM_CONSOLE)\n        return False\n\n\nclass DirectoryNameValidator(questionary.Validator):\n    def __init__(self, base_path: Path) -> None:\n        self._base_path = base_path\n\n    def validate(self, document: prompt_toolkit.document.Document) -> None:\n        name = document.text.strip()\n        new_path = self._base_path / name\n        if new_path.exists() and not new_path.is_dir():\n            raise questionary.ValidationError(\n                message=\"File with same name already exists in current directory, please enter a different name\"\n            )\n        if not is_valid_project_dir_name(document.text):\n            raise questionary.ValidationError(\n                message=\"Invalid name. 
Use letters, numbers, dashes, periods, underscores, and ensure it's unique.\",\n                cursor_position=len(document.text),\n            )\n\n\ndef _get_project_path(*, directory_name_option: str | None = None, force: bool = False) -> tuple[Path, bool]:\n    \"\"\"\n    Determines the project path based on the provided directory name option.\n\n    Args:\n        directory_name_option: The name of the directory provided by the user.\n                               If None, the user will be prompted to enter a name.\n        force: A flag to auto accept warning prompts.\n\n    Returns:\n        The path to the project directory and a flag to indicate if the user agreed to overwrite the directory.\n    \"\"\"\n\n    base_path = Path.cwd()\n    overwrite_existing_dir = force\n    directory_name = (\n        directory_name_option\n        if directory_name_option is not None\n        else questionary_extensions.prompt_text(\n            \"Name of project / directory to create the project in:\",\n            validators=[questionary_extensions.NonEmptyValidator(), DirectoryNameValidator(base_path)],\n        )\n    ).strip()\n\n    project_path = base_path / directory_name\n    if project_path.exists() and not project_path.is_dir():\n        logger.error(\"A file with the same name already exists in the current directory. 
Please use a different name.\")\n        _fail_and_bail()\n\n    if project_path.is_dir() and not force:\n        logger.warning(\n            \"Re-using existing directory, this is not recommended because if project \"\n            \"generation fails, then we can't automatically cleanup.\"\n        )\n        overwrite_existing_dir = questionary_extensions.prompt_confirm(\"Continue anyway?\", default=False)\n        if not overwrite_existing_dir:\n            return _get_project_path() if directory_name_option is None else _fail_and_bail()\n\n    return project_path, overwrite_existing_dir\n\n\ndef _get_template(\n    *,\n    name: str | None,\n    url: str | None,\n    commit: str | None,\n    unsafe_security_accept_template_url: bool,\n) -> TemplateSource:\n    if name:\n        if url:\n            raise click.ClickException(\"Cannot specify both --template and --template-url\")\n        if commit:\n            raise click.ClickException(\"--template-url-ref has no effect when template name is specified\")\n        blessed_templates = _get_blessed_templates()\n        template: TemplateSource = blessed_templates[TemplateKey(name)]\n    elif not url:\n        template = _get_template_interactive()\n    else:\n        if not _repo_url_is_valid(url):\n            logger.error(f\"Couldn't parse repo URL {url}. 
Try prefixing it with git+ ?\")\n            _fail_and_bail()\n        logger.warning(_unofficial_template_warning)\n        # note: we use unsafe_ask here (and everywhere else) so we don't have to\n        # handle None returns for KeyboardInterrupt - click will handle these nicely enough for us\n        # at the root level\n        if not (\n            unsafe_security_accept_template_url\n            or questionary_extensions.prompt_confirm(\"Continue anyway?\", default=False)\n        ):\n            _fail_and_bail()\n        template = TemplateSource(url=url, commit=commit)\n    return template\n\n\nclass GitRepoValidator(questionary.Validator):\n    def validate(self, document: prompt_toolkit.document.Document) -> None:\n        value = document.text.strip()\n        if value and not _repo_url_is_valid(value):\n            raise questionary.ValidationError(message=f\"Couldn't parse repo URL {value}. Try prefixing it with git+ ?\")\n\n\ndef _get_template_interactive() -> TemplateSource:\n    project_type = questionary_extensions.prompt_select(\n        \"Which of these options best describes the project you want to build?\",\n        *[questionary.Choice(title=p_type.value, value=p_type) for p_type in TemplatePresetType],  # Modified line\n    )\n    logger.debug(f\"selected project_type = {project_type.value}\")\n\n    template = None\n    language = None\n    if project_type in [TemplatePresetType.SMART_CONTRACT, TemplatePresetType.SMART_CONTRACT_AND_DAPP_FRONTEND]:\n        language = questionary_extensions.prompt_select(\n            \"Which language would you like to use for the smart contract?\",\n            *[questionary.Choice(title=lang.value, value=lang) for lang in ContractLanguage],\n        )\n        logger.debug(f\"selected language = {language}\")\n        template = (\n            TemplateKey.FULLSTACK\n            if project_type == TemplatePresetType.SMART_CONTRACT_AND_DAPP_FRONTEND\n            else LANGUAGE_TO_TEMPLATE_MAP[language]\n     
   )\n\n    elif project_type == TemplatePresetType.DAPP_FRONTEND:\n        template = TemplateKey.REACT\n\n    # Ensure a template has been selected\n    if not template and not project_type == TemplatePresetType.CUSTOM_TEMPLATE:\n        raise click.ClickException(\"No template selected. Please try again.\")\n\n    # Map the template string directly to the TemplateSource\n    # This is needed to be able to reuse fullstack to work with python and typescript templates\n    blessed_templates = _get_blessed_templates()\n    if template in blessed_templates:\n        selected_template_source = blessed_templates[template]\n        if template == TemplateKey.FULLSTACK and language is not None:\n            smart_contract_template = LANGUAGE_TO_TEMPLATE_MAP[language]\n            selected_template_source.answers = [(\"contract_template\", smart_contract_template)]\n        return selected_template_source\n\n    # else: user selected custom url\n    # note we print the warning but don't prompt for confirmation like we would when the URL is passed\n    # as a command line argument, instead we allow the user to return to the official selection list\n    # by entering a blank string\n    logger.warning(f\"\\n{_unofficial_template_warning}\\n\")\n    logger.info(\n        \"Enter a custom project URL, or leave blank and press enter to go back to official template selection.\\n\"\n        \"Note that you can use gh: as a shorthand for github.com and likewise gl: for gitlab.com\\n\"\n        \"Valid examples:\\n\"\n        \" - gh:copier-org/copier\\n\"\n        \" - gl:copier-org/copier\\n\"\n        \" - git@github.com:copier-org/copier.git\\n\"\n        \" - git+https://mywebsiteisagitrepo.example.com/\\n\"\n        \" - /local/path/to/git/repo\\n\"\n        \" - /local/path/to/git/bundle/file.bundle\\n\"\n        \" - ~/path/to/git/repo\\n\"\n        \" - ~/path/to/git/repo.bundle\\n\"\n    )\n    template_url = questionary_extensions.prompt_text(\"Custom template URL:\", 
validators=[GitRepoValidator()]).strip()\n    if not template_url:\n        # re-prompt if empty response\n        return _get_template_interactive()\n    return TemplateSource(url=template_url)\n\n\ndef _should_attempt_git_init(*, use_git_option: bool | None, project_path: Path) -> bool:\n    if use_git_option is False:\n        return False\n    try:\n        git_rev_parse_result = proc.run([\"git\", \"rev-parse\", \"--show-toplevel\"], cwd=project_path)\n    except FileNotFoundError:\n        logger.warning(\"git command wasn't found on your PATH, can not perform repository initialisation\")\n        return False\n    is_in_git_repo = git_rev_parse_result.exit_code == 0\n    if is_in_git_repo:\n        logger.log(\n            msg=\"Directory is already under git revision control, skipping git setup\",\n            # warning if the user explicitly requested to set up git, info otherwise\n            level=logging.WARNING if use_git_option else logging.INFO,\n        )\n        return False\n\n    return use_git_option or questionary_extensions.prompt_confirm(\n        \"Would you like to initialise a git repository and perform an initial commit?\",\n        default=True,\n    )\n\n\ndef _git_init(project_path: Path, commit_message: str) -> None:\n    def git(*args: str, bad_exit_warn_message: str) -> bool:\n        result = proc.run([\"git\", *args], cwd=project_path)\n        success = result.exit_code == 0\n        if not success:\n            logger.warning(bad_exit_warn_message)\n        return success\n\n    if (\n        git(\"init\", bad_exit_warn_message=\"Failed to initialise git repository\")\n        and git(\"checkout\", \"-b\", \"main\", bad_exit_warn_message=\"Failed to name initial branch\")\n        and git(\"add\", \"--all\", bad_exit_warn_message=\"Failed to add generated project files\")\n        and git(\"commit\", \"-m\", commit_message, bad_exit_warn_message=\"Initial commit failed\")\n    ):\n        logger.info(\"🎉 Performed initial git 
commit successfully! 🎉\")\n\n\ndef _resolve_workspace_project_path(\n    *, template_source: TemplateSource, project_path: Path, use_workspace: bool = True\n) -> Path:\n    blessed_template = _get_blessed_templates()\n\n    # If it's already a Base template, do not modify project path\n    if template_source == blessed_template[TemplateKey.BASE]:\n        return project_path\n\n    cwd = Path.cwd()\n    is_standalone = template_source != blessed_template[TemplateKey.FULLSTACK]\n    config = get_algokit_config(project_dir=cwd)\n\n    # 1. If standalone project (not fullstack) and use_workspace is True, bootstrap algokit-base-template\n    if config is None and is_standalone and use_workspace:\n        _init_base_template(target_path=project_path, is_blessed=template_source in blessed_template.values())\n\n        config = get_algokit_config(project_dir=project_path)\n        if not config:\n            logger.error(\"Failed to instantiate workspace structure for standalone project\")\n            _fail_and_bail()\n\n        sub_projects_path = config.get(\"project\", {}).get(\"projects_root_path\") or \"projects\"\n        new_project_path = cwd / project_path.name / sub_projects_path / project_path.name\n        new_project_path.mkdir(parents=True, exist_ok=True)\n\n        logger.debug(f\"Workspace structure is ready! The project is to be placed under {new_project_path}\")\n        return new_project_path\n\n    # 2. 
If it's a standalone project being instantiated inside an existing workspace project and use_workspace is True\n    # then place the new project inside expected projects folder defined by workspace toml\n    if (\n        config\n        and config.get(\"project\", {}).get(\"type\") == ProjectType.WORKSPACE.value\n        and is_standalone\n        and use_workspace\n    ):\n        sub_projects_path = config.get(\"project\", {}).get(\"projects_root_path\") or \"projects\"\n        projects_root = cwd / sub_projects_path\n        logger.debug(f\"Workspace structure detected! Moving the project to be instantiated into {projects_root}\")\n        return projects_root / project_path.name\n\n    return project_path\n\n\ndef _init_base_template(*, target_path: Path, is_blessed: bool) -> None:\n    \"\"\"\n    Instantiate the base template for a standalone project.\n    Sets up the common workspace structure for standalone projects.\n\n    Args:\n        target_path: The path to the project directory.\n        is_blessed: Whether the template is a blessed template.\n    \"\"\"\n\n    # Instantiate the base template\n    blessed_templates = _get_blessed_templates()\n    base_template = blessed_templates[TemplateKey.BASE]\n    base_template_answers = {\n        \"use_default_readme\": \"yes\",\n        \"project_name\": target_path.name,\n        \"projects_root_path\": \"projects\",\n        \"include_github_workflow_template\": not is_blessed,\n    }\n    from copier._main import Worker\n\n    with Worker(\n        src_path=base_template.url,\n        dst_path=target_path,\n        data=base_template_answers,\n        quiet=True,\n        vcs_ref=base_template.branch or base_template.commit,\n        unsafe=True,\n    ) as copier_worker:\n        copier_worker.run_copy()\n"
  },
  {
    "path": "src/algokit/cli/init/example.py",
    "content": "import shutil\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.cli.tui.init.example_selector import ExampleSelector\nfrom algokit.core.init import (\n    ALGOKIT_TEMPLATES_DIR,\n    ALGOKIT_USER_DIR,\n    _load_algokit_examples,\n    _manage_templates_repository,\n    _open_ide,\n)\n\n\n@click.command(\"example\")\n@click.argument(\"example_id\", required=False)\n@click.option(\"-l\", \"--list\", \"list_examples\", is_flag=True, help=\"List all available examples\")\ndef example_command(example_id: str, *, list_examples: bool) -> None:\n    \"\"\"Initialize a new project from an example template.\n\n    Allows you to quickly create a new project by copying one of the official AlgoKit example templates.\n    If no example ID is provided, launches an interactive selector to choose from available examples.\n    The example will be copied to a new directory in your current location.\"\"\"\n\n    _manage_templates_repository()\n\n    examples_config_path = Path.home() / ALGOKIT_USER_DIR / ALGOKIT_TEMPLATES_DIR / \"examples\" / \"examples.yml\"\n\n    if list_examples:\n        examples = _load_algokit_examples(str(examples_config_path.absolute()))\n        click.echo(\"Available examples:\")\n        for example in examples:\n            click.echo(f\"  {example['id']} - {example.get('name', '')}\")\n        return\n\n    if not example_id:\n        app = ExampleSelector()\n        app.run()\n        example_id = app.user_answers.get(\"example_id\", \"\")\n        if not example_id:\n            return\n\n    source_dir = Path.home() / ALGOKIT_USER_DIR / ALGOKIT_TEMPLATES_DIR / \"examples\" / example_id\n    target_dir = Path.cwd() / example_id\n\n    if not source_dir.exists():\n        examples = _load_algokit_examples(str(examples_config_path.absolute()))\n        click.echo(f\"Example {example_id} not found\")\n        if example_id not in [example[\"id\"] for example in examples]:\n            click.echo(\"Available example ids:\")\n        
    for example in examples:\n                click.echo(f\"  {example['id']}\")\n        return\n\n    shutil.copytree(source_dir, target_dir)\n    click.echo(f\"Created example {example_id}\")\n    _open_ide(target_dir)\n"
  },
  {
    "path": "src/algokit/cli/init/helpers.py",
    "content": "from dataclasses import dataclass\nfrom enum import Enum\n\nimport click\n\nfrom algokit.core.init import is_valid_project_dir_name\n\n\nclass TemplateKey(str, Enum):\n    \"\"\"\n    For templates included in wizard v2 by default\n    \"\"\"\n\n    BASE = \"base\"\n    PYTHON = \"python\"\n    TYPESCRIPT = \"typescript\"\n    TEALSCRIPT = \"tealscript\"\n    FULLSTACK = \"fullstack\"\n    REACT = \"react\"\n\n\n@dataclass(kw_only=True)\nclass TemplateSource:\n    url: str\n    commit: str | None = None\n    \"\"\"when adding a blessed template that is verified but not controlled by Algorand,\n    ensure a specific commit is used\"\"\"\n    branch: str | None = None\n    answers: list[tuple[str, str]] | None = None\n\n    def __str__(self) -> str:\n        if self.commit:\n            return \"@\".join([self.url, self.commit])\n        return self.url\n\n\n@dataclass(kw_only=True)\nclass BlessedTemplateSource(TemplateSource):\n    description: str\n\n    def __eq__(self, other: object) -> bool:\n        if not isinstance(other, BlessedTemplateSource):\n            return NotImplemented\n        return self.description == other.description and self.url == other.url\n\n    def __hash__(self) -> int:\n        return hash((self.description, self.url))\n\n\n# Please note, the main reason why below is a function is due to the need to patch the values in unit/approval tests\ndef _get_blessed_templates() -> dict[TemplateKey, BlessedTemplateSource]:\n    return {\n        TemplateKey.TEALSCRIPT: BlessedTemplateSource(\n            url=\"gh:algorand-devrel/tealscript-algokit-template\",\n            description=\"Official starter template for TEALScript applications.\",\n        ),\n        TemplateKey.TYPESCRIPT: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-typescript-template\",\n            description=\"Official starter template for Algorand TypeScript (Beta) applications\",\n        ),\n        TemplateKey.PYTHON: 
BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-python-template\",\n            description=\"Official starter template for Algorand Python applications\",\n        ),\n        TemplateKey.REACT: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-react-frontend-template\",\n            description=\"Official template for React frontend applications (smart contracts not included).\",\n        ),\n        TemplateKey.FULLSTACK: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-fullstack-template\",\n            description=\"Official template for starter or production fullstack applications.\",\n        ),\n        TemplateKey.BASE: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-base-template\",\n            description=\"Official base template for enforcing workspace structure for standalone AlgoKit projects.\",\n        ),\n    }\n\n\ndef _validate_dir_name(context: click.Context, param: click.Parameter, value: str | None) -> str | None:\n    if value is not None and not is_valid_project_dir_name(value):\n        raise click.BadParameter(\n            \"Invalid directory name. Ensure it's a mix of letters, numbers, dashes, \"\n            \"periods, and/or underscores, and not already used.\",\n            context,\n            param,\n        )\n    return value\n"
  },
  {
    "path": "src/algokit/cli/localnet.py",
    "content": "import logging\nimport os\nfrom pathlib import Path\n\nimport click\nimport questionary\n\nfrom algokit.cli.codespace import codespace_command\nfrom algokit.cli.explore import explore_command\nfrom algokit.cli.goal import goal_command\nfrom algokit.core import proc\nfrom algokit.core.config_commands.container_engine import get_container_engine, save_container_engine\nfrom algokit.core.sandbox import (\n    COMPOSE_VERSION_COMMAND,\n    SANDBOX_BASE_NAME,\n    ComposeFileStatus,\n    ComposeSandbox,\n    ContainerEngine,\n    fetch_algod_status_data,\n    fetch_indexer_status_data,\n    get_min_compose_version,\n)\nfrom algokit.core.utils import extract_version_triple, is_minimum_version\n\nlogger = logging.getLogger(__name__)\n\ncheck_option = click.option(\n    \"check\",\n    \"--check\",\n    is_flag=True,\n    default=False,\n    help=\"Force check the Docker registry for new LocalNet image versions, ignoring the version check cache.\",\n)\n\n\n@click.group(\"localnet\", short_help=\"Manage the AlgoKit LocalNet.\")\n@click.pass_context\ndef localnet_group(ctx: click.Context) -> None:\n    if (ctx.invoked_subcommand and \"codespace\" in ctx.invoked_subcommand) or not ctx.invoked_subcommand:\n        return\n\n    try:\n        compose_version_result = proc.run(COMPOSE_VERSION_COMMAND)\n    except OSError as ex:\n        # an IOError (such as PermissionError or FileNotFoundError) will only occur if \"docker\"\n        # isn't an executable in the user's path, which means docker isn't installed\n        raise click.ClickException(\n            \"Container engine not found; please install Docker or Podman and add to path.\"\n        ) from ex\n    if compose_version_result.exit_code != 0:\n        raise click.ClickException(\n            \"Container engine compose not found; please install Docker Compose or Podman Compose and add to path.\"\n        )\n\n    compose_minimum_version = get_min_compose_version()\n    try:\n        compose_version_str = 
extract_version_triple(compose_version_result.output)\n        compose_version_ok = is_minimum_version(compose_version_str, compose_minimum_version)\n    except Exception:\n        logger.warning(\n            \"Unable to extract compose version from output: \\n\"\n            + compose_version_result.output\n            + f\"\\nPlease ensure a minimum of compose v{compose_minimum_version} is used\",\n            exc_info=True,\n        )\n    else:\n        if not compose_version_ok:\n            raise click.ClickException(\n                f\"Minimum compose version supported: v{compose_minimum_version}, \"\n                f\"installed = v{compose_version_str}\\n\"\n                \"Please update your compose install\"\n            )\n\n    if ctx.invoked_subcommand and ctx.invoked_subcommand == \"config\":\n        return\n\n    proc.run(\n        [get_container_engine(), \"version\"],\n        bad_return_code_error_message=\"Container engine isn't running; please start it.\",\n    )\n\n\n@localnet_group.command(\"config\", short_help=\"Configure the container engine for AlgoKit LocalNet.\")\n@click.argument(\"engine\", required=False, type=click.Choice([\"docker\", \"podman\"]))\n@click.option(\n    \"--force\",\n    \"-f\",\n    is_flag=True,\n    required=False,\n    default=False,\n    type=click.BOOL,\n    help=(\"Skip confirmation prompts. 
Defaults to 'yes' to all prompts.\"),\n)\ndef config_command(*, engine: str | None, force: bool) -> None:\n    \"\"\"Set the default container engine for use by AlgoKit CLI to run LocalNet images.\"\"\"\n    if engine is None:\n        current_engine = get_container_engine()\n        choices = [\n            f\"Docker {'(Active)' if current_engine == ContainerEngine.DOCKER else ''}\".strip(),\n            f\"Podman {'(Active)' if current_engine == ContainerEngine.PODMAN else ''}\".strip(),\n        ]\n        engine = questionary.select(\"Which container engine do you prefer?\", choices=choices).ask()\n        if engine is None:\n            raise click.ClickException(\"No valid container engine selected. Aborting...\")\n        engine = engine.split()[0].lower()\n\n    sandbox = ComposeSandbox.from_environment()\n    has_active_instance = sandbox is not None and (\n        force\n        or click.confirm(\n            f\"Detected active localnet instance, would you like to restart it with '{engine}'?\",\n            default=True,\n        )\n    )\n    if sandbox and has_active_instance:\n        sandbox.down()\n        save_container_engine(engine)\n        sandbox.write_compose_file()\n        sandbox.up()\n    else:\n        save_container_engine(engine)\n\n    logger.info(f\"Container engine set to `{engine}`\")\n\n\nlocalnet_group.add_command(config_command)\n\n\n@localnet_group.command(\"start\", short_help=\"Start the AlgoKit LocalNet.\")\n@click.option(\n    \"name\",\n    \"--name\",\n    \"-n\",\n    default=None,\n    help=\"Specify a name for a custom LocalNet instance. \"\n    \"AlgoKit will not manage the configuration of named LocalNet instances, \"\n    f\"allowing developers to configure it in any way they need. 
Defaults to '{SANDBOX_BASE_NAME}'.\",\n)\n@click.option(\n    \"--config-dir\",\n    \"-P\",\n    \"config_path\",\n    type=click.Path(exists=True, readable=True, file_okay=False, resolve_path=True, path_type=Path),\n    default=lambda: os.environ.get(\"ALGOKIT_LOCALNET_CONFIG_DIR\", None),\n    required=False,\n    help=(\n        \"Specify the custom localnet configuration directory. Defaults to '~/.config' on UNIX and \"\n        \"'C:\\\\\\\\Users\\\\\\\\USERNAME\\\\\\\\AppData\\\\\\\\Roaming' on Windows.\"\n    ),\n)\n@click.option(\n    \"--dev/--no-dev\",\n    \"-d\",\n    \"algod_dev_mode\",\n    is_flag=True,\n    required=False,\n    default=True,\n    type=click.BOOL,\n    help=(\"Control whether to launch 'algod' in developer mode or not. Defaults to 'yes'.\"),\n)\n@click.option(\n    \"force\",\n    \"--force\",\n    is_flag=True,\n    default=False,\n    help=\"Ignore the prompt to stop the LocalNet if it's already running.\",\n)\n@check_option\ndef start_localnet(\n    *, name: str | None, config_path: Path | None, algod_dev_mode: bool, force: bool, check: bool\n) -> None:\n    sandbox = ComposeSandbox.from_environment()\n    full_name = f\"{SANDBOX_BASE_NAME}_{name}\" if name is not None else SANDBOX_BASE_NAME\n    if sandbox is not None and full_name != sandbox.name:\n        logger.debug(\"LocalNet is already running.\")\n        if click.confirm(\"This will stop any running AlgoKit LocalNet instance. Are you sure?\", default=True):\n            sandbox.stop()\n        else:\n            raise click.ClickException(\"LocalNet is already running. 
Please stop it first\")\n    sandbox = ComposeSandbox(SANDBOX_BASE_NAME, config_path) if name is None else ComposeSandbox(name, config_path)\n    compose_file_status = sandbox.compose_file_status()\n    sandbox.check_docker_compose_for_new_image_versions(force=check)\n    if compose_file_status is ComposeFileStatus.MISSING:\n        logger.debug(\"LocalNet compose file does not exist yet; writing it out for the first time\")\n        sandbox.write_compose_file()\n        if name is not None:\n            logger.info(\n                f\"The named LocalNet configuration has been created in {sandbox.directory}. \\n\"\n                f\"You can edit the configuration by changing those files. \"\n                f\"Running `algokit localnet reset` will ensure the configuration is applied\"\n            )\n    elif compose_file_status is ComposeFileStatus.UP_TO_DATE:\n        logger.debug(\"LocalNet compose file does not require updating\")\n    elif compose_file_status is ComposeFileStatus.OUT_OF_DATE and name is None:\n        logger.warning(\"LocalNet definition is out of date; please run `algokit localnet reset`\")\n    if name is not None:\n        logger.info(\n            \"A named LocalNet is running, update checks are disabled. If you wish to synchronize with the latest \"\n            \"version, run `algokit localnet reset --update`\"\n        )\n    if sandbox.is_algod_dev_mode() != algod_dev_mode:\n        sandbox.set_algod_dev_mode(dev_mode=algod_dev_mode)\n        logger.info(f\"Refreshed 'DevMode' flag to '{algod_dev_mode}'\")\n        if not force and click.confirm(\n            f\"Would you like to restart 'LocalNet' to apply 'DevMode' flag set to '{algod_dev_mode}'? 
\"\n            \"Otherwise, the next `algokit localnet reset` will restart with the new flag\",\n            default=True,\n        ):\n            sandbox.down()\n            sandbox.up()\n    else:\n        sandbox.up()\n\n\n@localnet_group.command(\"stop\", short_help=\"Stop the AlgoKit LocalNet.\")\ndef stop_localnet() -> None:\n    sandbox = ComposeSandbox.from_environment()\n    if sandbox is not None:\n        compose_file_status = sandbox.compose_file_status()\n        if compose_file_status is not ComposeFileStatus.MISSING:\n            sandbox.stop()\n    else:\n        logger.debug(\"LocalNet is not running; run `algokit localnet start` to start the AlgoKit LocalNet\")\n\n\n@localnet_group.command(\"reset\", short_help=\"Reset the AlgoKit LocalNet.\")\n@click.option(\n    \"--update/--no-update\",\n    default=False,\n    help=\"Enable or disable updating to the latest available LocalNet version, default: don't update\",\n)\n@click.option(\n    \"--config-dir\",\n    \"-P\",\n    \"config_path\",\n    type=click.Path(exists=True, readable=True, file_okay=False, resolve_path=True, path_type=Path),\n    default=lambda: os.environ.get(\"ALGOKIT_LOCALNET_CONFIG_DIR\", None),\n    required=False,\n    help=\"Specify the custom localnet configuration directory.\",\n)\n@check_option\ndef reset_localnet(*, update: bool, config_path: Path | None, check: bool) -> None:\n    sandbox = ComposeSandbox.from_environment()\n    if sandbox is None:\n        sandbox = ComposeSandbox(config_path=config_path)\n    compose_file_status = sandbox.compose_file_status()\n    if compose_file_status is ComposeFileStatus.MISSING:\n        logger.debug(\"Existing LocalNet not found; creating from scratch...\")\n        sandbox.write_compose_file()\n    elif sandbox.name == SANDBOX_BASE_NAME:\n        sandbox.down()\n        if compose_file_status is not ComposeFileStatus.UP_TO_DATE:\n            logger.info(\"Syncing LocalNet configuration\")\n            
sandbox.write_compose_file()\n        if update:\n            sandbox.pull()\n        else:\n            sandbox.check_docker_compose_for_new_image_versions(force=check)\n    elif update:\n        if click.confirm(\n            f\"A named LocalNet is running, are you sure you want to reset the LocalNet configuration \"\n            f\"in {sandbox.directory}?\\nThis will stop the running LocalNet and overwrite any changes \"\n            \"you've made to the configuration\",\n            default=True,\n        ):\n            sandbox.down()\n            sandbox.write_compose_file()\n            sandbox.pull()\n        else:\n            raise click.ClickException(\"LocalNet configuration has not been reset\")\n    else:\n        sandbox.down()\n    sandbox.up()\n\n\nSERVICE_NAMES = (\"algod\", \"conduit\", \"indexer-db\", \"indexer\")\n\n\n@localnet_group.command(\"status\", short_help=\"Check the status of the AlgoKit LocalNet.\")\n@check_option\ndef localnet_status(*, check: bool) -> None:\n    sandbox = ComposeSandbox.from_environment()\n    if sandbox is None:\n        sandbox = ComposeSandbox()\n\n    sandbox.check_docker_compose_for_new_image_versions(force=check)\n\n    logger.info(\"# container engine\")\n    logger.info(\n        \"Name: \" + click.style(get_container_engine(), bold=True) + \" (change with `algokit config container-engine`)\"\n    )\n\n    ps = sandbox.ps()\n    ps_by_name = {stats[\"Service\"]: stats for stats in ps}\n    # if any of the required containers does not exist (ie it's not just stopped but hasn't even been created),\n    # then they will be missing from the output dictionary\n    if set(SERVICE_NAMES) != ps_by_name.keys():\n        raise click.ClickException(\"LocalNet has not been initialized yet, please run 'algokit localnet start'\")\n    # initialise output dict by setting status\n    output_by_name = {\n        name: {\"Status\": \"Running\" if ps_by_name[name][\"State\"] == \"running\" else \"Not running\"}\n        for 
name in SERVICE_NAMES\n    }\n    # fill out remaining output_by_name[\"algod\"] values\n    if output_by_name[\"algod\"][\"Status\"] == \"Running\":\n        output_by_name[\"algod\"].update(fetch_algod_status_data(ps_by_name[\"algod\"]))\n    # fill out remaining output_by_name[\"indexer\"] values\n    if output_by_name[\"indexer\"][\"Status\"] == \"Running\":\n        output_by_name[\"indexer\"].update(fetch_indexer_status_data(ps_by_name[\"indexer\"]))\n\n    # Print the status details\n    for service_name, service_info in output_by_name.items():\n        logger.info(click.style(f\"# {service_name} status\", bold=True))\n        for key, value in service_info.items():\n            logger.info(click.style(f\"{key}:\", bold=True) + f\" {value}\")\n\n    # return non-zero if any container is not running\n    if not all(item[\"Status\"] == \"Running\" for item in output_by_name.values()):\n        raise click.ClickException(\n            \"At least one container isn't running; execute `algokit localnet start` to start the LocalNet\"\n        )\n\n\n@localnet_group.command(\n    \"console\",\n    short_help=\"Run the Algorand goal CLI against the AlgoKit LocalNet via a Bash console\"\n    \" so you can execute multiple goal commands and/or interact with a filesystem.\",\n)\n@click.pass_context\ndef localnet_console(context: click.Context) -> None:\n    context.invoke(goal_command, console=True)\n\n\n@localnet_group.command(\"explore\", short_help=\"Explore the AlgoKit LocalNet using lora.\")\n@click.pass_context\ndef localnet_explore(context: click.Context) -> None:\n    context.invoke(explore_command)\n\n\n@localnet_group.command(\n    \"logs\",\n    short_help=\"See the output of the Docker containers.\",\n)\n@click.option(\n    \"--follow/-f\",\n    is_flag=True,\n    help=\"Follow log output.\",\n    default=False,\n)\n@click.option(\n    \"--tail\",\n    default=\"all\",\n    help=\"Number of lines to show from the end of the logs for each container.\",\n    
show_default=True,\n)\n@click.pass_context\ndef localnet_logs(ctx: click.Context, *, follow: bool, tail: str) -> None:\n    sandbox = ComposeSandbox()\n    sandbox.logs(follow=follow, no_color=ctx.color is False, tail=tail)\n\n\nlocalnet_group.add_command(codespace_command)\n"
  },
  {
    "path": "src/algokit/cli/project/__init__.py",
    "content": "import logging\nimport re\n\nimport click\n\nfrom algokit.cli.project.bootstrap import bootstrap_group\nfrom algokit.cli.project.deploy import deploy_command\nfrom algokit.cli.project.link import link_command\nfrom algokit.cli.project.list import list_command\nfrom algokit.cli.project.run import run_group\nfrom algokit.core import proc\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group(\n    \"project\",\n)\ndef project_group() -> None:\n    \"\"\"Provides a suite of commands for managing your AlgoKit project.\n    This includes initializing project dependencies, deploying smart contracts,\n    and executing predefined or custom commands within your project environment.\"\"\"\n    try:\n        poetry_version_result = proc.run(\n            [\"poetry\", \"--version\"],\n        )\n    except Exception:\n        return\n\n    if re.search(r\"1\\.\\d+\\.\\d+\", poetry_version_result.output):\n        logger.warning(\n            \"You are using Poetry 1.x, which is deprecated. \"\n            \"Please upgrade to Poetry 2.x for better support and features.\"\n        )\n\n\nproject_group.add_command(deploy_command)\nproject_group.add_command(bootstrap_group)\nproject_group.add_command(run_group)\nproject_group.add_command(list_command)\nproject_group.add_command(link_command)\n"
  },
  {
    "path": "src/algokit/cli/project/bootstrap.py",
    "content": "import logging\nimport os\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.core.project import ProjectType\nfrom algokit.core.project.bootstrap import (\n    bootstrap_any_including_subdirs,\n    bootstrap_env,\n    bootstrap_npm,\n    bootstrap_pnpm,\n    bootstrap_poetry,\n    bootstrap_uv,\n    project_minimum_algokit_version_check,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@click.option(\n    \"force\", \"--force\", is_flag=True, default=False, help=\"Continue even if minimum AlgoKit version is not met\"\n)\n@click.group(\n    \"bootstrap\", short_help=\"Bootstrap local dependencies in an AlgoKit project; run from project root directory.\"\n)\n@click.pass_context\ndef bootstrap_group(ctx: click.Context, *, force: bool) -> None:\n    \"\"\"\n    Expedited initial setup for any developer by installing and configuring dependencies and other\n    key development environment setup activities.\n    \"\"\"\n\n    if ctx.parent and ctx.parent.command.name == \"algokit\":\n        click.secho(\n            \"WARNING: The 'bootstrap' command group is scheduled for deprecation in v2.x release. \"\n            \"Please migrate to using 'algokit project bootstrap' instead.\",\n            fg=\"yellow\",\n        )\n    project_minimum_algokit_version_check(Path.cwd(), ignore_version_check_fail=force)\n\n\n@bootstrap_group.command(\n    \"all\", short_help=\"Runs all bootstrap sub-commands in the current directory and immediate sub directories.\"\n)\n@click.option(\n    \"--interactive/--non-interactive\",\n    \"--no-ci/--ci\",  # this aliases --non-interactive to --ci and --interactive to --no-ci\n    default=lambda: \"CI\" not in os.environ,\n    help=\"Enable/disable interactive prompts. If the CI environment variable is set, defaults to non-interactive\",\n)\n@click.option(\n    \"project_names\",\n    \"--project-name\",\n    \"-p\",\n    multiple=True,\n    help=\"(Optional) Projects to execute the command on. 
Defaults to all projects found in the current directory.\",\n    nargs=1,\n    default=[],\n    metavar=\"<value>\",\n    required=False,\n)\n@click.option(\n    \"project_type\",\n    \"--type\",\n    \"-t\",\n    type=click.Choice([ProjectType.FRONTEND, ProjectType.CONTRACT, ProjectType.BACKEND]),\n    required=False,\n    default=None,\n    help=\"(Optional) Limit execution to specific project types if executing from workspace.\",\n)\ndef bootstrap_all(*, interactive: bool, project_names: tuple[str], project_type: str | None) -> None:\n    cwd = Path.cwd()\n    bootstrap_any_including_subdirs(\n        cwd, ci_mode=not interactive, project_names=list(project_names), project_type=project_type\n    )\n    logger.info(f\"Finished bootstrapping {cwd}\")\n\n\n@bootstrap_group.command(\n    \"env\",\n    short_help=\"Copies .env.template file to .env in the current working directory \"\n    \"and prompts for any unspecified values.\",\n)\n@click.option(\n    \"--interactive/--non-interactive\",\n    \" /--ci\",  # this aliases --non-interactive to --ci\n    default=lambda: \"CI\" not in os.environ,\n    help=\"Enable/disable interactive prompts. 
If the CI environment variable is set, defaults to non-interactive\",\n)\ndef env(*, interactive: bool) -> None:\n    bootstrap_env(Path.cwd(), ci_mode=not interactive)\n\n\n@bootstrap_group.command(\n    \"poetry\",\n    short_help=\"Installs Python Poetry (if not present) and runs `poetry install` in the \"\n    \"current working directory to install Python dependencies.\",\n)\ndef poetry() -> None:\n    bootstrap_poetry(Path.cwd())\n\n\n@bootstrap_group.command(\n    \"uv\",\n    short_help=\"Installs UV (if not present) and runs `uv sync` in the \"\n    \"current working directory to install Python dependencies.\",\n)\ndef uv() -> None:\n    bootstrap_uv(Path.cwd())\n\n\n@bootstrap_group.command(\n    \"npm\", short_help=\"Runs `npm install` in the current working directory to install Node.js dependencies.\"\n)\n@click.option(\n    \"--ci/--no-ci\",\n    is_flag=True,\n    default=lambda: \"CI\" in os.environ,\n    help=\"Run 'npm ci' instead of 'npm install' in CI mode (clean install).\",\n)\ndef npm(*, ci: bool) -> None:\n    bootstrap_npm(Path.cwd(), ci_mode=ci)\n\n\n@bootstrap_group.command(\n    \"pnpm\", short_help=\"Runs `pnpm install` in the current working directory to install Node.js dependencies.\"\n)\n@click.option(\n    \"--ci/--no-ci\",\n    is_flag=True,\n    default=lambda: \"CI\" in os.environ,\n    help=\"Run 'pnpm install --frozen-lockfile' instead of 'pnpm install' in \\\n    CI mode (clean install with frozen lockfile).\",\n)\ndef pnpm(*, ci: bool) -> None:\n    bootstrap_pnpm(Path.cwd(), ci_mode=ci)\n"
  },
  {
    "path": "src/algokit/cli/project/deploy.py",
    "content": "import logging\nimport os\nimport typing as t\nfrom pathlib import Path\n\nimport click\nfrom algosdk.mnemonic import from_private_key\n\nfrom algokit.cli.common.utils import MutuallyExclusiveOption, sanitize_extra_args\nfrom algokit.core import proc\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_algokit_config\nfrom algokit.core.project import ProjectType, get_project_configs\nfrom algokit.core.project.deploy import load_deploy_config, load_deploy_env_files\nfrom algokit.core.tasks.wallet import get_alias\nfrom algokit.core.utils import resolve_command_path, split_command_string\n\nlogger = logging.getLogger(__name__)\n\n\ndef _ensure_aliases(\n    config_env: dict[str, str],\n    deployer_alias: str | None = None,\n    dispenser_alias: str | None = None,\n) -> None:\n    \"\"\"\n    Ensures that the required aliases for the deployer and dispenser are provided and valid and\n    injects their mnemonics into env vars config.\n\n    Args:\n        config_env (dict[str, str]): A dictionary containing the environment variables.\n        deployer_alias (str | None, optional): The alias for the deployer. Defaults to None.\n        dispenser_alias (str | None, optional): The alias for the dispenser. 
Defaults to None.\n\n    Raises:\n        click.ClickException: If the alias or private key is missing.\n\n    Returns:\n        None\n    \"\"\"\n\n    for key, alias in [(\"DEPLOYER_MNEMONIC\", deployer_alias), (\"DISPENSER_MNEMONIC\", dispenser_alias)]:\n        if not alias:\n            continue\n\n        alias_data = get_alias(alias)\n        if not alias_data:\n            raise click.ClickException(f\"Error: missing {alias} alias\")\n        if not alias_data.private_key:\n            raise click.ClickException(f\"Error: missing private key for {alias} alias\")\n        config_env[key] = from_private_key(alias_data.private_key)  # type: ignore[no-untyped-call]\n        logger.debug(f\"Loaded {alias} alias mnemonic as {key} environment variable\")\n\n\ndef _ensure_environment_secrets(\n    config_env: dict[str, str],\n    environment_secrets: list[str],\n    *,\n    skip_mnemonics_prompts: bool,\n) -> None:\n    \"\"\"\n    Ensures that the required environment variables are present in the `config_env` dictionary.\n    If any of the environment variables are missing, it prompts the user to enter the missing variable.\n\n    Args:\n        config_env (dict[str, str]): A dictionary containing the current environment variables.\n        environment_secrets (list[str]): A list of strings representing the required environment variables.\n        skip_mnemonics_prompts (bool): A boolean indicating whether to skip prompting the user for missing variables.\n\n    Raises:\n        click.ClickException: If a required environment variable is missing and `skip_mnemonics_prompts` is True.\n\n    Returns:\n        None. 
The function modifies the `config_env` dictionary in-place.\n    \"\"\"\n\n    for key in environment_secrets:\n        if not config_env.get(key):\n            if skip_mnemonics_prompts:\n                raise click.ClickException(f\"Error: missing {key} environment variable\")\n            config_env[key] = click.prompt(key, hide_input=True)\n\n\ndef _execute_deploy_command(  # noqa: PLR0913\n    *,\n    path: Path,\n    environment_name: str | None,\n    command: list[str] | None,\n    interactive: bool,\n    deployer_alias: str | None,\n    dispenser_alias: str | None,\n    extra_args: tuple[str, ...],\n) -> None:\n    logger.debug(f\"Deploying from project directory: {path}\")\n    logger.debug(\"Loading deploy command from project config\")\n    config = load_deploy_config(name=environment_name, project_dir=path)\n    if command:\n        config.command = command\n    elif not config.command:\n        if environment_name is None:\n            msg = f\"No generic deploy command specified in '{ALGOKIT_CONFIG}' file.\"\n        else:\n            msg = (\n                f\"Deploy command for '{environment_name}' is not specified in '{ALGOKIT_CONFIG}' file, \"\n                \"and no generic command available.\"\n            )\n        raise click.ClickException(msg)\n    resolved_command = resolve_command_path(config.command + list(extra_args))\n    logger.info(f\"Using deploy command: {' '.join(resolved_command)}\")\n    logger.info(\"Loading deployment environment variables...\")\n    config_dotenv = load_deploy_env_files(environment_name, path)\n    # environment variables take precedence over those in .env* files\n    config_env = {**{k: v for k, v in config_dotenv.items() if v is not None}, **os.environ}\n    _ensure_aliases(config_env, deployer_alias=deployer_alias, dispenser_alias=dispenser_alias)\n\n    if config.environment_secrets:\n        _ensure_environment_secrets(\n            config_env,\n            config.environment_secrets,\n            
skip_mnemonics_prompts=not interactive,\n        )\n    logger.info(\"Deploying smart contracts from AlgoKit compliant repository 🚀\")\n    try:\n        result = proc.run(resolved_command, cwd=path, env=config_env, stdout_log_level=logging.INFO)\n    except FileNotFoundError as ex:\n        raise click.ClickException(f\"Failed to execute deploy command, '{resolved_command[0]}' wasn't found\") from ex\n    except PermissionError as ex:\n        raise click.ClickException(\n            f\"Failed to execute deploy command '{resolved_command[0]}', permission denied\"\n        ) from ex\n    else:\n        if result.exit_code != 0:\n            raise click.ClickException(f\"Deployment command exited with error code = {result.exit_code}\")\n\n\nclass _CommandParamType(click.types.StringParamType):\n    name = \"command\"\n\n    def convert(\n        self,\n        value: t.Any,  # noqa: ANN401\n        param: click.Parameter | None,\n        ctx: click.Context | None,\n    ) -> list[str]:\n        str_value = super().convert(value=value, param=param, ctx=ctx)\n        try:\n            return split_command_string(str_value)\n        except ValueError as ex:\n            logger.debug(f\"Failed to parse command string: {str_value}\", exc_info=True)\n            raise click.BadParameter(str(ex), param=param, ctx=ctx) from ex\n\n\nclass _DeployCommand(click.Command):\n    def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]:\n        # Join all args into a single string\n        full_command = \" \".join(args)\n\n        try:\n            separator_index = full_command.find(\"-- \")\n            if separator_index == -1:\n                raise ValueError(\"No separator found\")\n            main_args = args[:separator_index]\n            extra_args = args[separator_index + 1 :]\n        except Exception:\n            main_args = args\n            extra_args = []\n\n        # Ensure we have at least one argument for environment_name if extra_args exist\n    
    if extra_args and len(main_args) == 0:\n            main_args.insert(0, \"\")\n\n        # Reconstruct args list\n        args = main_args + ([\"--\"] if extra_args else []) + extra_args\n\n        return super().parse_args(ctx, args)\n\n\n@click.command(\n    \"deploy\",\n    context_settings={\"ignore_unknown_options\": True},\n    cls=_DeployCommand,\n)\n@click.argument(\n    \"environment_name\",\n    default=None,\n    required=False,\n    callback=lambda _, __, value: None if value == \"\" else value,\n)\n@click.option(\n    \"--command\",\n    \"-C\",\n    \"-c\",\n    type=_CommandParamType(),\n    default=None,\n    help=(\"Custom deploy command. If not provided, will load the deploy command from .algokit.toml file.\"),\n    required=False,\n)\n@click.option(\n    \"--interactive/--non-interactive\",\n    \" /--ci\",  # this aliases --non-interactive to --ci\n    default=lambda: \"CI\" not in os.environ,\n    help=(\n        \"Enable/disable interactive prompts. Defaults to non-interactive if the CI \"\n        \"environment variable is set. Interactive MainNet deployments prompt for confirmation.\"\n    ),\n)\n@click.option(\n    \"--path\",\n    \"-P\",\n    type=click.Path(exists=True, readable=True, file_okay=False, resolve_path=True, path_type=Path),\n    default=\".\",\n    help=\"Specify the project directory. If not provided, current working directory will be used.\",\n)\n@click.option(\n    \"--deployer\",\n    \"deployer_alias\",\n    type=click.STRING,\n    required=False,\n    help=(\n        \"(Optional) Alias of the deployer account. Otherwise, will prompt the deployer mnemonic \"\n        \"if specified in .algokit.toml file.\"\n    ),\n)\n@click.option(\n    \"--dispenser\",\n    \"dispenser_alias\",\n    type=click.STRING,\n    required=False,\n    help=(\n        \"(Optional) Alias of the dispenser account. 
Otherwise, will prompt the dispenser mnemonic \"\n        \"if specified in .algokit.toml file.\"\n    ),\n)\n@click.option(\n    \"--project-name\",\n    \"-p\",\n    \"project_names\",\n    multiple=True,\n    help=\"(Optional) Projects to execute the command on. Defaults to all projects found in the current directory.\",\n    nargs=1,\n    default=[],\n    metavar=\"<value>\",\n    required=False,\n    cls=MutuallyExclusiveOption,\n    not_required_if=[\n        \"command\",\n    ],\n)\n@click.argument(\n    \"extra_args\",\n    nargs=-1,\n    required=False,\n)\n@click.pass_context\ndef deploy_command(  # noqa: PLR0913\n    ctx: click.Context,\n    *,\n    environment_name: str | None,\n    command: list[str] | None,\n    interactive: bool,\n    path: Path,\n    deployer_alias: str | None,\n    dispenser_alias: str | None,\n    project_names: tuple[str, ...],\n    extra_args: tuple[str, ...],\n) -> None:\n    \"\"\"Deploy smart contracts from AlgoKit compliant repository.\"\"\"\n    extra_args = sanitize_extra_args(extra_args)\n\n    if ctx.parent and ctx.parent.command.name == \"algokit\":\n        click.secho(\n            \"WARNING: The 'deploy' command is scheduled for deprecation in v2.x release. 
\"\n            \"Please migrate to using 'algokit project deploy' instead.\",\n            fg=\"yellow\",\n        )\n\n    if interactive and environment_name and environment_name.lower() == \"mainnet\":\n        click.confirm(\n            click.style(\n                \"Warning: Proceed with MainNet deployment?\",\n                fg=\"yellow\",\n            ),\n            default=True,\n            abort=True,\n        )\n\n    config = get_algokit_config() or {}\n    is_workspace = config.get(\"project\", {}).get(\"type\") == ProjectType.WORKSPACE\n    project_name = config.get(\"project\", {}).get(\"name\", None)\n\n    if not is_workspace and project_names:\n        message = (\n            f\"Deploying `{project_name}`...\"\n            if project_name in project_names\n            else \"No project with the specified name found in the current directory or workspace.\"\n        )\n        if project_name in project_names:\n            click.echo(message)\n        else:\n            raise click.ClickException(message)\n\n    if is_workspace:\n        projects = get_project_configs(project_type=ProjectType.CONTRACT, project_names=project_names)\n\n        for project in projects:\n            project_name = project.get(\"project\", {}).get(\"name\", None)\n\n            if not project_name:\n                click.secho(\"WARNING: Skipping an unnamed project...\", fg=\"yellow\")\n                continue\n\n            _execute_deploy_command(\n                path=project.get(\"cwd\", Path.cwd()),\n                environment_name=environment_name,\n                command=None,\n                interactive=interactive,\n                deployer_alias=deployer_alias,\n                dispenser_alias=dispenser_alias,\n                extra_args=extra_args,\n            )\n    else:\n        _execute_deploy_command(\n            path=path,\n            environment_name=environment_name,\n            command=command,\n            interactive=interactive,\n     
       deployer_alias=deployer_alias,\n            dispenser_alias=dispenser_alias,\n            extra_args=extra_args,\n        )\n"
  },
  {
    "path": "src/algokit/cli/project/link.py",
    "content": "import logging\nimport typing\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport click\nimport questionary\n\nfrom algokit.cli.common.utils import MutuallyExclusiveOption\nfrom algokit.core import questionary_extensions\nfrom algokit.core.conf import get_algokit_config\nfrom algokit.core.project import ProjectType, get_project_configs\nfrom algokit.core.typed_client_generation import AppSpecsNotFoundError, ClientGenerator\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ContractArtifacts:\n    \"\"\"Represents the contract project artifacts.\n\n    Attributes:\n        project_name (str): The name of the project.\n        cwd (Path): The current working directory of the project.\n    \"\"\"\n\n    project_name: str\n    cwd: Path\n\n\ndef _is_frontend(project_data: dict) -> bool:\n    \"\"\"Determines if the project is a frontend project.\n\n    Args:\n        project_data (dict): The project data to evaluate.\n\n    Returns:\n        bool: True if the project is a frontend project, False otherwise.\n    \"\"\"\n    return project_data.get(\"type\") == ProjectType.FRONTEND\n\n\ndef _get_contract_projects() -> list[ContractArtifacts]:\n    \"\"\"Retrieves contract projects configurations.\n\n    Returns:\n        list[ContractArtifacts]: A list of contract project artifacts.\n    \"\"\"\n    contract_configs = []\n    try:\n        project_configs = get_project_configs(project_type=\"contract\")\n        for config in project_configs:\n            project = config.get(\"project\", {})\n            project_type = project.get(\"type\")\n            project_name = project.get(\"name\")\n            project_cwd = config.get(\"cwd\", Path.cwd())\n            contract_artifacts = project.get(\"artifacts\")\n\n            if any([not project_type, not project_name, not project_cwd, not contract_artifacts]):\n                continue\n\n            contract_configs.append(ContractArtifacts(project_name, project_cwd))\n\n     
   return contract_configs\n    except Exception:\n        return []\n\n\ndef _link_projects(\n    *,\n    frontend_clients_path: Path,\n    contract_project_root: Path,\n    language: str,\n    fail_fast: bool,\n    version: str | None = None,\n) -> None:\n    \"\"\"Links projects by generating client code.\n\n    Args:\n        frontend_clients_path (Path): The path to the frontend clients.\n        contract_project_root (Path): The root path of the contract project.\n        language (str): The programming language of the generated client code.\n        fail_fast (bool): Whether to exit immediately if a client generation process fails.\n        version (str | None): Version to pin the client generator to (Defaults to None).\n    \"\"\"\n    output_path_pattern = f\"{frontend_clients_path}/{{contract_name}}.{'ts' if language == 'typescript' else 'py'}\"\n    generator = ClientGenerator.create_for_language(language, version=version)\n\n    try:\n        generator.generate_all(\n            contract_project_root,\n            output_path_pattern,\n            None,  # no additional args for project link\n            raise_on_path_resolution_failure=fail_fast,\n        )\n    except AppSpecsNotFoundError:\n        click.secho(\n            f\"WARNING: No application.json | *.arc32.json | *.arc56.json files found in {contract_project_root}. 
\"\n            \"Skipping...\",\n            fg=\"yellow\",\n        )\n\n\ndef _prompt_contract_project() -> ContractArtifacts | None:\n    \"\"\"Prompts the user to select a contract project.\n\n    Returns:\n        ContractArtifacts | None: The selected contract project artifacts or None if no projects are available.\n    \"\"\"\n    contract_projects = _get_contract_projects()\n\n    if not contract_projects:\n        return None\n\n    return typing.cast(\n        \"ContractArtifacts\",\n        questionary_extensions.prompt_select(\n            \"Select contract project to link with\",\n            *[questionary.Choice(title=contract.project_name, value=contract) for contract in contract_projects],\n        ),\n    )\n\n\ndef _select_contract_projects_to_link(\n    *,\n    project_names: typing.Sequence[str] | None = None,\n    link_all: bool = False,\n) -> list[ContractArtifacts]:\n    \"\"\"Selects contract projects to link based on criteria.\n\n    Args:\n        project_names (typing.Sequence[str] | None): Specific project names to link. Defaults to None.\n        link_all (bool): Whether to link all projects. Defaults to False.\n\n    Returns:\n        list[ContractArtifacts]: A list of contract project artifacts to link.\n    \"\"\"\n    if link_all:\n        return _get_contract_projects()\n    elif project_names:\n        return [project for project in _get_contract_projects() if project.project_name in project_names]\n    else:\n        contract_project = _prompt_contract_project()\n        return [contract_project] if contract_project else []\n\n\n@click.command(\"link\")\n@click.option(\n    \"project_names\",\n    \"--project-name\",\n    \"-p\",\n    multiple=True,\n    help=\"Specify contract projects for the command. 
Defaults to all in the current workspace.\",\n    nargs=1,\n    default=[],\n    metavar=\"<value>\",\n    required=False,\n)\n@click.option(\n    \"--language\",\n    \"-l\",\n    default=\"typescript\",\n    type=click.Choice(ClientGenerator.languages()),\n    help=\"Programming language of the generated client code\",\n)\n@click.option(\n    \"link_all\",\n    \"--all\",\n    \"-a\",\n    help=\"Link all contract projects with the frontend project\",\n    default=False,\n    is_flag=True,\n    type=click.BOOL,\n    required=False,\n    cls=MutuallyExclusiveOption,\n    not_required_if=[\"project_name\"],\n)\n@click.option(\n    \"fail_fast\",\n    \"--fail-fast\",\n    \"-f\",\n    help=\"Exit immediately if at least one client generation process fails\",\n    default=False,\n    is_flag=True,\n    type=click.BOOL,\n    required=False,\n)\n@click.option(\n    \"--version\",\n    \"-v\",\n    \"version\",\n    default=None,\n    help=\"The client generator version to pin to, for example, 1.0.0. \"\n    \"If no version is specified, AlgoKit checks if the client generator is installed and runs the installed version. \"\n    \"If the client generator is not installed, AlgoKit runs the latest version. \"\n    \"If a version is specified, AlgoKit checks if an installed version matches and runs the installed version. \"\n    \"Otherwise, AlgoKit runs the specified version.\",\n)\ndef link_command(\n    *, project_names: tuple[str] | None, language: str, link_all: bool, fail_fast: bool, version: str | None\n) -> None:\n    \"\"\"Automatically invoke 'algokit generate client' on contract projects available in the workspace.\n    Must be invoked from the root of a standalone 'frontend' typed project.\"\"\"\n\n    config = get_algokit_config() or {}\n    project_data = config.get(\"project\", {})\n\n    if not config:\n        click.secho(\"WARNING: No .algokit.toml config found. 
Skipping...\", fg=\"yellow\")\n        return\n\n    if not _is_frontend(project_data):\n        click.secho(\"WARNING: This command is only available in projects of type `frontend`. Skipping...\", fg=\"yellow\")\n        return\n\n    frontend_artifacts_path = project_data.get(\"artifacts\")\n    if not frontend_artifacts_path:\n        raise click.ClickException(\"No `contract_clients` path specified in .algokit.toml\")\n\n    contract_projects = _select_contract_projects_to_link(\n        project_names=project_names,\n        link_all=link_all,\n    )\n\n    if not contract_projects:\n        click.secho(\n            f\"WARNING: No {' '.join(project_names) if project_names else 'contract project(s)'} found. Skipping...\",\n            fg=\"yellow\",\n        )\n        return\n\n    iteration = 1\n    total = len(contract_projects)\n    for contract_project in contract_projects:\n        _link_projects(\n            frontend_clients_path=Path.cwd() / frontend_artifacts_path,\n            contract_project_root=contract_project.cwd,\n            language=language,\n            fail_fast=fail_fast,\n            version=version,\n        )\n\n        logger.info(f\"{iteration}/{total}: Finished processing {contract_project.project_name}\")\n        iteration += 1\n"
  },
  {
    "path": "src/algokit/cli/project/list.py",
    "content": "import logging\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.core.conf import get_algokit_config\nfrom algokit.core.project import ProjectType, get_project_configs, get_workspace_project_path\n\nlogger = logging.getLogger(__name__)\n\n\nPROJECT_TYPE_ICONS = {\n    ProjectType.CONTRACT: \"📜\",\n    ProjectType.FRONTEND: \"🖥️\",\n    ProjectType.WORKSPACE: \"📁\",\n    ProjectType.BACKEND: \"⚙️\",\n}\n\n\ndef _is_workspace(workspace_path: Path | None = None) -> bool:\n    config = get_algokit_config(project_dir=get_workspace_project_path(workspace_path)) or {}\n    project = config.get(\"project\", {})\n    return bool(project.get(\"type\", None) == ProjectType.WORKSPACE)\n\n\n@click.command(\"list\")\n@click.argument(\n    \"workspace_path\",\n    type=click.Path(exists=True, resolve_path=True, file_okay=False, dir_okay=True, readable=True, path_type=Path),\n    default=\".\",\n)\ndef list_command(*, workspace_path: Path) -> None:\n    \"\"\"List all projects in the workspace\"\"\"\n\n    is_workspace = True\n    resolved_workspace_path = get_workspace_project_path(workspace_path)\n    if resolved_workspace_path is None:\n        is_workspace = False\n\n    if not is_workspace:\n        click.secho(\n            \"WARNING: No AlgoKit workspace found. Check [project.type] definition at .algokit.toml\",\n            fg=\"yellow\",\n            err=True,\n        )\n        return\n\n    configs = get_project_configs(resolved_workspace_path)\n\n    if not configs:\n        click.secho(\n            \"WARNING: No AlgoKit project(s) found in the workspace. 
Check [project.type] definition at .algokit.toml\",\n            fg=\"yellow\",\n            err=True,\n        )\n        return\n\n    click.echo(f\"workspace: {resolved_workspace_path} {PROJECT_TYPE_ICONS[ProjectType.WORKSPACE]}\")\n    for config in configs:\n        project = config.get(\"project\", {})\n        name, project_type = project.get(\"name\"), project.get(\"type\")\n        cwd = Path(config.get(\"cwd\", Path.cwd()))\n        path_label = \"this directory\" if cwd == Path.cwd() else cwd\n        icon = PROJECT_TYPE_ICONS.get(project_type, \"🔍 Unknown\")\n        click.echo(f\"  - {name} ({path_label}) {icon}\")\n"
  },
  {
    "path": "src/algokit/cli/project/run.py",
    "content": "import logging\nfrom functools import cache\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.cli.common.utils import MutuallyExclusiveOption, sanitize_extra_args\nfrom algokit.core.project import ProjectType\nfrom algokit.core.project.run import (\n    ProjectCommand,\n    WorkspaceProjectCommand,\n    load_commands,\n    run_command,\n    run_workspace_command,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@cache\ndef _load_project_commands(project_dir: Path) -> dict[str, click.Command]:\n    \"\"\"\n    Loads project commands from the .algokit.toml file located in the specified project directory.\n\n    This function reads the project directory's .algokit.toml configuration file, extracts custom commands defined\n    within it, and returns a dictionary mapping command names to their corresponding Click command objects.\n\n    Args:\n        project_dir (Path): The path to the project directory.\n\n    Returns:\n        dict[str, click.Command]: A dictionary where keys are command names and values are Click command objects.\n    \"\"\"\n\n    custom_commands = load_commands(project_dir)\n\n    if custom_commands is None:\n        return {}\n\n    commands_table: dict[str, click.Command] = {}\n\n    for custom_command in custom_commands:\n        # Define the base command function\n        def base_command(  # noqa: PLR0913\n            *,\n            custom_command: ProjectCommand | WorkspaceProjectCommand = custom_command,\n            project_names: tuple[str] | None = None,\n            list_projects: bool = False,\n            project_type: str | None = None,\n            sequential: bool = False,\n            extra_args: tuple[str, ...] 
| None = None,\n        ) -> None:\n            \"\"\"\n            Executes a base command function with optional parameters for listing projects or specifying project names.\n\n            This function serves as the base for executing both ProjectCommand and WorkspaceProjectCommand instances.\n            It handles listing projects within a workspace and executing commands for specific projects or all projects\n            within a workspace.\n\n            Args:\n                extra_args (tuple[str, ...]): The command arguments to be passed to the custom command.\n                custom_command (ProjectCommand | WorkspaceProjectCommand): The custom command to be executed.\n                project_names (list[str] | None): Optional. A list of project names to execute the command on.\n                list_projects (bool): Optional. A flag indicating whether to list projects associated\n                with a workspace command.\n                project_type (str | None): Optional. Only execute commands in projects of specified type.\n                sequential (bool): Whether to execute workspace commands sequentially. 
Defaults to False.\n            Returns:\n                None\n            \"\"\"\n            extra_args = sanitize_extra_args(extra_args or ())\n            if list_projects and isinstance(custom_command, WorkspaceProjectCommand):\n                for command in custom_command.commands:\n                    cmds = \" && \".join(\" \".join(cmd) for cmd in command.commands)\n                    logger.info(f\"ℹ️  Project: {command.project_name}, Command name: {command.name}, Command(s): {cmds}\")  # noqa: RUF001\n                return\n\n            run_command(command=custom_command, extra_args=extra_args) if isinstance(\n                custom_command, ProjectCommand\n            ) else run_workspace_command(\n                workspace_command=custom_command,\n                project_names=list(project_names or []),\n                project_type=project_type,\n                sequential=sequential,\n                extra_args=extra_args,\n            )\n\n        # Check if the command is a WorkspaceProjectCommand and conditionally decorate\n        is_workspace_command = isinstance(custom_command, WorkspaceProjectCommand)\n        command = click.argument(\"extra_args\", nargs=-1, type=click.UNPROCESSED, required=False)(base_command)\n        if is_workspace_command:\n            command = click.option(\n                \"project_names\",\n                \"--project-name\",\n                \"-p\",\n                multiple=True,\n                help=(\n                    \"Optional. Execute the command on specified projects. 
\"\n                    \"Defaults to all projects in the current directory.\"\n                ),\n                nargs=1,\n                default=[],\n                required=False,\n            )(base_command)\n            command = click.option(\n                \"list_projects\",\n                \"--list\",\n                \"-l\",\n                help=\"(Optional) List all projects associated with workspace command\",\n                default=False,\n                is_flag=True,\n                type=click.BOOL,\n                required=False,\n                cls=MutuallyExclusiveOption,\n                not_required_if=[\"project_names\"],\n            )(command)\n            command = click.option(\n                \"project_type\",\n                \"--type\",\n                \"-t\",\n                type=click.Choice([ProjectType.FRONTEND, ProjectType.CONTRACT, ProjectType.BACKEND]),\n                required=False,\n                default=None,\n                help=\"Limit execution to specific project types if executing from workspace. (Optional)\",\n            )(command)\n            command = click.option(\n                \"sequential\",\n                \"--sequential/--concurrent\",\n                \"-s/-c\",\n                help=\"Execute workspace commands sequentially. 
Defaults to concurrent.\",\n                default=False,\n                is_flag=True,\n                required=False,\n            )(command)\n\n        # Apply the click.command decorator with common options\n        command = click.command(\n            name=custom_command.name,\n            help=f\"{custom_command.description}\" or \"Command description is not supplied.\",\n            context_settings={\n                # Enables workspace commands in standalone projects without execution impact,\n                # supporting uniform GitHub workflows across official templates.\n                \"ignore_unknown_options\": not is_workspace_command,\n            },\n        )(command)\n\n        commands_table[custom_command.name] = command\n\n    return commands_table\n\n\nclass RunCommandGroup(click.Group):\n    \"\"\"\n    A custom Click command group for dynamically loading and executing project commands.\n\n    This command group overrides the default Click command loading mechanism to include dynamically loaded project\n    commands from the .algokit.toml configuration file. 
It supports both predefined and dynamically loaded commands.\n    \"\"\"\n\n    def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:\n        \"\"\"\n        Retrieves a command by name, including dynamically loaded project commands.\n\n        Args:\n            ctx (click.Context): The current Click context.\n            cmd_name (str): The name of the command to retrieve.\n\n        Returns:\n            click.Command | None: The requested command if found; otherwise, None.\n        \"\"\"\n        return_value = super().get_command(ctx, cmd_name)\n\n        if return_value is not None:\n            return return_value\n\n        return _load_project_commands(Path.cwd()).get(cmd_name)\n\n    def list_commands(self, ctx: click.Context) -> list[str]:\n        \"\"\"\n        Lists all available commands, including dynamically loaded project commands.\n\n        Args:\n            ctx (click.Context): The current Click context.\n\n        Returns:\n            list[str]: A sorted list of all available command names.\n        \"\"\"\n        predefined_command_names = super().list_commands(ctx)\n        dynamic_commands = _load_project_commands(Path.cwd())\n        dynamic_command_names = list(dynamic_commands)\n\n        return sorted(predefined_command_names + dynamic_command_names)\n\n\n@click.group(\"run\", cls=RunCommandGroup)\ndef run_group() -> None:\n    \"\"\"Define custom commands and manage their execution in your projects.\"\"\"\n"
  },
  {
    "path": "src/algokit/cli/task.py",
    "content": "import logging\n\nimport click\n\nfrom algokit.cli.tasks.analyze import analyze\nfrom algokit.cli.tasks.assets import opt_in_command, opt_out_command\nfrom algokit.cli.tasks.ipfs import ipfs_group\nfrom algokit.cli.tasks.mint import mint\nfrom algokit.cli.tasks.nfd import nfd_lookup\nfrom algokit.cli.tasks.send_transaction import send\nfrom algokit.cli.tasks.sign_transaction import sign\nfrom algokit.cli.tasks.transfer import transfer\nfrom algokit.cli.tasks.vanity_address import vanity_address\nfrom algokit.cli.tasks.wallet import wallet\n\nlogger = logging.getLogger(__name__)\n\n\nclass AliasedGroup(click.Group):\n    def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:\n        rv = click.Group.get_command(self, ctx, cmd_name)\n        if rv is not None:\n            return rv\n\n        if cmd_name == \"analyse\":\n            return click.Group.get_command(self, ctx, \"analyze\")\n\n        return None\n\n\n@click.group(name=\"task\", cls=AliasedGroup)\ndef task_group() -> None:\n    \"\"\"Collection of useful tasks to help you develop on Algorand.\"\"\"\n\n\ntask_group.add_command(wallet)\ntask_group.add_command(vanity_address)\ntask_group.add_command(transfer)\ntask_group.add_command(sign)\ntask_group.add_command(send)\ntask_group.add_command(ipfs_group)\ntask_group.add_command(nfd_lookup)\ntask_group.add_command(opt_out_command)\ntask_group.add_command(opt_in_command)\ntask_group.add_command(mint)\ntask_group.add_command(analyze)\n"
  },
  {
    "path": "src/algokit/cli/tasks/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/cli/tasks/analyze.py",
    "content": "import json\nimport logging\nimport re\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.core.tasks.analyze import (\n    TEALER_SNAPSHOTS_ROOT,\n    ensure_tealer_installed,\n    generate_report_filename,\n    generate_summaries,\n    generate_tealer_command,\n    has_baseline_diff,\n    load_tealer_report,\n    prepare_artifacts_folders,\n    run_tealer,\n)\nfrom algokit.core.utils import run_with_animation\n\nlogger = logging.getLogger(__name__)\n\n\ndef display_analysis_summary(analysis_results: dict) -> None:\n    \"\"\"\n    Display the summary of the analysis results.\n\n    Args:\n        analysis_results (dict): Dictionary containing analysis results.\n    \"\"\"\n    impact_frequency: dict = {}\n    for file_path, result_rows in analysis_results.items():\n        click.echo(f\"\\nFile: {file_path}\\n\")\n        for result in result_rows:\n            click.echo(\n                f\"Detector: {result[0]}\\n\"\n                f\"Impact: {result[1]}\\n\"\n                f\"Details: {result[2]}\\n\"\n                f\"Execution Paths (#Lines):\\n{result[3]}\\n\"\n            )\n            impact_frequency[result[1]] = impact_frequency.get(result[1], 0) + 1\n    # print summary by impact label\n    click.echo(\"\\nTotal issues:\")\n    for impact, frequency in impact_frequency.items():\n        click.secho(f\"{impact}: {frequency}\", fg=\"yellow\")\n\n\ndef has_template_vars(path: Path) -> bool:\n    \"\"\"\n    Check if the file contains template variables.\n\n    Args:\n        path (Path): The file path to check.\n\n    Returns:\n        bool: True if template variables are found, False otherwise.\n    \"\"\"\n    content = path.read_text()\n    return bool(re.search(r\"^(?!.*//.*TMPL_).*TMPL_.*\", content, flags=re.MULTILINE))\n\n\ndef get_input_files(*, input_paths: tuple[Path], recursive: bool) -> list[Path]:\n    \"\"\"\n    Get input files based on the input paths and recursive flag.\n\n    Args:\n        input_paths 
(tuple[Path]): Tuple of input paths.\n        recursive (bool): Flag to indicate recursive search.\n\n    Returns:\n        list[Path]: List of input files.\n    \"\"\"\n\n    input_files = []\n    for input_path in input_paths:\n        if input_path.is_dir():\n            pattern = \"**/*.teal\" if recursive else \"*.teal\"\n            input_files.extend(sorted(input_path.glob(pattern)))\n        else:\n            if recursive:\n                click.secho(\n                    f\"Warning: Ignoring recursive flag for {input_path} as it is not a directory.\\n\",\n                    fg=\"yellow\",\n                )\n            input_files.append(input_path)\n    return sorted(set(input_files))\n\n\n@click.command(\n    name=\"analyze\",\n    help=(\n        \"Analyze TEAL programs for common vulnerabilities using Tealer. \"\n        \"This task uses a third party tool to suggest improvements for your TEAL programs, \"\n        \"but remember to always test your smart contracts code, follow modern software engineering practices \"\n        \"and use the guidelines for smart contract development. \"\n        \"This should not be used as a substitute for an actual audit. \"\n        \"For full list of available detectors, please refer to https://github.com/crytic/tealer?tab=readme-ov-file#detectors\"\n    ),\n)\n@click.argument(\n    \"input_paths\",\n    nargs=-1,\n    type=click.Path(exists=True, dir_okay=True, file_okay=True, path_type=Path),\n    required=True,\n)\n@click.option(\n    \"-r\",\n    \"--recursive\",\n    is_flag=True,\n    help=\"Recursively search for all TEAL files within the provided directory.\",\n)\n@click.option(\n    \"--force\",\n    is_flag=True,\n    help=\"Force verification without the disclaimer confirmation prompt.\",\n)\n@click.option(\n    \"--diff\",\n    \"diff_only\",\n    is_flag=True,\n    help=(\n        \"Exit with a non-zero code if differences are found between current \"\n        \"and last reports. 
Reports are generated each run, but with this flag \"\n        \"execution fails if the current report doesn't match \"\n        \"the last report. Reports are stored in the \"\n        \".algokit/static-analysis/snapshots folder by default. Use --output for a \"\n        \"custom path.\"\n    ),\n)\n@click.option(\n    \"-o\",\n    \"--output\",\n    \"output_path\",\n    required=False,\n    default=None,\n    type=click.Path(dir_okay=True, file_okay=False, resolve_path=True, path_type=Path),\n    help=(\n        \"Directory path where to store the results of the static analysis. \"\n        \"Defaults to .algokit/static-analysis/snapshots.\"\n    ),\n)\n@click.option(\n    \"-e\",\n    \"--exclude\",\n    \"detectors_to_exclude\",\n    multiple=True,\n    default=[],\n    type=click.STRING,\n    help=\"Exclude specific vulnerabilities from the analysis. Supports multiple exclusions in a single run.\",\n)\ndef analyze(  # noqa: PLR0913, C901\n    *,\n    input_paths: tuple[Path],\n    recursive: bool,\n    force: bool,\n    diff_only: bool,\n    output_path: Path | None,\n    detectors_to_exclude: list[str],\n) -> None:\n    \"\"\"\n    Analyze TEAL programs for common vulnerabilities using Tealer.\n    \"\"\"\n\n    # Install tealer if needed\n    ensure_tealer_installed()\n\n    detectors_to_exclude = sorted(set(detectors_to_exclude))\n    input_files = get_input_files(input_paths=input_paths, recursive=recursive)\n\n    if not force:\n        click.confirm(\n            click.style(\n                \"Warning: This task uses `tealer` to suggest improvements for your TEAL programs, \"\n                \"but remember to always test your smart contracts code, follow modern software engineering practices \"\n                \"and use the guidelines for smart contract development. \"\n                \"This should not be used as a substitute for an actual audit. 
Do you understand?\",\n                fg=\"yellow\",\n            ),\n            default=True,\n            abort=True,\n        )\n\n    reports = {}\n    duplicate_files: dict[str, int] = {}\n    prepare_artifacts_folders(output_path)\n    total_files = len(input_files)\n    for index in range(total_files):\n        cur_file = input_files[index]\n        file = cur_file.resolve()\n\n        if has_template_vars(file):\n            click.secho(\n                f\"Warning: Skipping {file} due to template variables. Substitute them before scanning.\",\n                err=True,\n                fg=\"yellow\",\n            )\n            continue\n\n        filename = generate_report_filename(file, duplicate_files)\n\n        # If a custom output path is provided, store the report in the specified path\n        report_output_root = output_path or TEALER_SNAPSHOTS_ROOT\n        report_output_path = report_output_root / filename\n\n        command = generate_tealer_command(cur_file, report_output_path, detectors_to_exclude)\n        old_report = load_tealer_report(str(report_output_path)) if report_output_path.exists() and diff_only else None\n        if not old_report and diff_only:\n            click.secho(\n                f\"Unable to provide the diff since {file} report is missing. 
\"\n                \"Please run the task without the --diff flag first.\",\n                err=True,\n                fg=\"red\",\n            )\n            raise click.exceptions.Exit(1)\n\n        try:\n            run_with_animation(run_tealer, f\"Analyzing {index + 1} out of {total_files} files\", command)\n\n            if diff_only and old_report:\n                has_diff = has_baseline_diff(\n                    cur_file=cur_file, report_output_path=report_output_path, old_report=old_report\n                )\n                if has_diff:\n                    raise click.exceptions.Exit(1)\n\n            reports[str(report_output_path.absolute())] = json.load(report_output_path.open(encoding=\"utf-8\"))\n        except Exception as e:\n            if diff_only and old_report:\n                report_output_path.write_text(json.dumps(old_report.model_dump(by_alias=True), indent=2))\n\n            if isinstance(e, click.exceptions.Exit):\n                raise e\n\n            click.secho(\n                f\"An error occurred while analyzing {cur_file}. \"\n                \"Please make sure the files supplied are valid TEAL code before trying again.\",\n                err=True,\n                fg=\"red\",\n            )\n            raise click.Abort(\"Error while running tealer\") from e\n\n    summaries = generate_summaries(reports, detectors_to_exclude=detectors_to_exclude)\n\n    if summaries and not diff_only:\n        display_analysis_summary(summaries)\n        click.echo(f\"Finished analyzing {total_files} files.\")\n        raise click.exceptions.Exit(1)\n"
  },
  {
    "path": "src/algokit/cli/tasks/assets.py",
    "content": "import logging\n\nimport click\nfrom algosdk import error\nfrom algosdk.v2client.algod import AlgodClient\n\nfrom algokit.cli.common.constants import AlgorandNetwork, ExplorerEntityType\nfrom algokit.cli.common.utils import get_explorer_url\nfrom algokit.cli.tasks.utils import (\n    get_account_info,\n    get_account_with_private_key,\n    load_algod_client,\n    validate_account_balance_to_opt_in,\n    validate_address,\n)\nfrom algokit.core.utils import get_algorand_client_for_network\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_zero_balanced_assets(\n    *, provided_asset_ids: tuple[int], address: str, algod_client: AlgodClient, all_assets: bool = False\n) -> list[int]:\n    asset_ids_list = []\n    if all_assets:\n        account_info = get_account_info(algod_client, address)\n        for asset in account_info.get(\"assets\", []):\n            if asset.get(\"amount\", 0) == 0:\n                asset_ids_list.append(int(asset[\"asset-id\"]))\n    else:\n        for asset_id in provided_asset_ids:\n            asset_ids_list.append(asset_id)\n\n    return asset_ids_list\n\n\n@click.command(\n    name=\"opt-in\",\n    help=\"Opt-in to an asset(s). This is required before you can receive an asset. \"\n    \"Use -n to specify localnet, testnet, or mainnet. To supply multiple asset IDs, separate them with a whitespace.\",\n)\n@click.argument(\"asset_ids\", type=click.INT, required=True, nargs=-1)\n@click.option(\"--account\", \"-a\", type=click.STRING, required=True, help=\"Address or alias of the signer account.\")\n@click.option(\n    \"-n\",\n    \"--network\",\n    type=click.Choice(AlgorandNetwork.to_list()),\n    default=AlgorandNetwork.LOCALNET,\n    required=False,\n    help=f\"Network to use. 
Refers to `{AlgorandNetwork.LOCALNET}` by default.\",\n)\ndef opt_in_command(asset_ids: tuple[int], account: str, network: AlgorandNetwork) -> None:\n    asset_ids_list = list(asset_ids)\n\n    opt_in_account = get_account_with_private_key(account)\n    validate_address(opt_in_account.address)\n    algod_client = load_algod_client(network)\n    algorand = get_algorand_client_for_network(network)\n\n    validate_account_balance_to_opt_in(algod_client, opt_in_account, len(asset_ids_list))\n    try:\n        click.echo(\"Performing opt-in. This may take a few seconds...\")\n        response = algorand.asset.bulk_opt_in(\n            account=opt_in_account.address,\n            asset_ids=asset_ids_list,\n            signer=opt_in_account.signer,\n        )\n        click.echo(\"Successfully performed opt-in.\")\n        if len(response) > 1:\n            account_url = get_explorer_url(opt_in_account.address, network, ExplorerEntityType.ADDRESS)\n            click.echo(f\"Check latest transactions on your account at: {account_url}\")\n        else:\n            for asset_opt_int_result in response:\n                explorer_url = get_explorer_url(asset_opt_int_result.transaction_id, network, ExplorerEntityType.ASSET)\n                click.echo(f\"Check opt-in status for asset {asset_opt_int_result.asset_id} at: {explorer_url}\")\n    except error.AlgodHTTPError as err:\n        raise click.ClickException(str(err)) from err\n    except ValueError as err:\n        logger.debug(err, exc_info=True)\n        raise click.ClickException(str(err)) from err\n    except Exception as err:\n        logger.debug(err, exc_info=True)\n        raise click.ClickException(\"Failed to perform opt-in\") from err\n\n\n@click.command(\n    name=\"opt-out\",\n    help=\"opt-out of an asset(s). You can only opt out of an asset with a zero balance. \"\n    \"Use -n to specify localnet, testnet, or mainnet. 
To supply multiple asset IDs, separate them with a whitespace.\",\n)\n@click.argument(\"asset_ids\", type=click.INT, required=False, nargs=-1)\n@click.option(\"--account\", \"-a\", type=click.STRING, required=True, help=\"Address or alias of the signer account.\")\n@click.option(\n    \"--all\",\n    \"all_assets\",\n    is_flag=True,\n    type=click.BOOL,\n    help=\"Opt-out of all assets with zero balance.\",\n)\n@click.option(\n    \"-n\",\n    \"--network\",\n    type=click.Choice(AlgorandNetwork.to_list()),\n    default=AlgorandNetwork.LOCALNET,\n    required=False,\n    help=f\"Network to use. Refers to `{AlgorandNetwork.LOCALNET}` by default.\",\n)\ndef opt_out_command(*, asset_ids: tuple[int], account: str, network: AlgorandNetwork, all_assets: bool) -> None:\n    if not (all_assets or asset_ids):\n        raise click.UsageError(\"asset_ids or --all must be specified\")\n    opt_out_account = get_account_with_private_key(account)\n    validate_address(opt_out_account.address)\n    algod_client = load_algod_client(network)\n    algorand = get_algorand_client_for_network(network)\n    asset_ids_list = []\n    try:\n        asset_ids_list = _get_zero_balanced_assets(\n            provided_asset_ids=asset_ids,\n            address=opt_out_account.address,\n            algod_client=algod_client,\n            all_assets=all_assets,\n        )\n\n        if not asset_ids_list:\n            raise click.ClickException(\"No assets to opt-out of.\")\n\n        click.echo(\"Performing opt-out. 
This may take a few seconds...\")\n        response = algorand.asset.bulk_opt_out(\n            account=opt_out_account.address,\n            asset_ids=asset_ids_list,\n            signer=opt_out_account.signer,\n        )\n        click.echo(\"Successfully performed opt-out.\")\n        if len(response) > 1:\n            account_url = get_explorer_url(opt_out_account.address, network, ExplorerEntityType.ADDRESS)\n            click.echo(f\"Check latest transactions on your account at: {account_url}\")\n        else:\n            asset_opt_out_result = response[0]\n            transaction_url = get_explorer_url(\n                asset_opt_out_result.transaction_id, network, ExplorerEntityType.TRANSACTION\n            )\n            click.echo(f\"Check opt-in status for asset {asset_opt_out_result.asset_id} at: {transaction_url}\")\n    except error.AlgodHTTPError as err:\n        raise click.ClickException(str(err)) from err\n    except ConnectionRefusedError as err:\n        raise click.ClickException(str(err)) from err\n    except ValueError as err:\n        logger.debug(err, exc_info=True)\n        raise click.ClickException(str(err)) from err\n    except Exception as err:\n        logger.debug(err, exc_info=True)\n        raise click.ClickException(\"Failed to perform opt-out.\") from err\n"
  },
  {
    "path": "src/algokit/cli/tasks/ipfs.py",
    "content": "import logging\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.core.tasks.ipfs import (\n    MAX_FILE_SIZE,\n    PinataBadRequestError,\n    PinataForbiddenError,\n    PinataHttpError,\n    PinataInternalServerError,\n    PinataUnauthorizedError,\n    get_pinata_jwt,\n    set_pinata_jwt,\n    upload_to_pinata,\n)\nfrom algokit.core.utils import run_with_animation\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group(\n    \"ipfs\",\n)\ndef ipfs_group() -> None:\n    \"\"\"Upload files to IPFS using Pinata provider.\"\"\"\n\n\n@ipfs_group.command(\"login\", help=\"Login to Pinata ipfs provider. You will be prompted for your JWT.\")\ndef login_command() -> None:\n    pinata_jwt = get_pinata_jwt()\n    if pinata_jwt:\n        logger.warning(\"You are already logged in!\")\n        return\n    else:\n        logger.info(\n            \"Follow the instructions on https://docs.pinata.cloud/docs/getting-started \"\n            \"to create an account and obtain a JWT.\"\n        )\n        set_pinata_jwt(click.prompt(\"Enter pinata JWT\", hide_input=True, confirmation_prompt=True, type=str))\n        logger.info(\"Login successful\")\n\n\n@ipfs_group.command(\"logout\", help=\"Logout of Pinata ipfs provider.\")\ndef logout_command() -> None:\n    pinata_jwt = get_pinata_jwt()\n    if pinata_jwt:\n        set_pinata_jwt(None)\n        logger.info(\"Logout successful\")\n        return\n    else:\n        logger.warning(\"Already logged out\")\n\n\n@ipfs_group.command(\"upload\", help=\"Upload a file to Pinata ipfs provider. 
Please note, max file size is 100MB.\")\n@click.option(\n    \"--file\",\n    \"-f\",\n    \"file_path\",\n    required=True,\n    type=click.Path(exists=True, dir_okay=False, resolve_path=True, path_type=Path),\n    help=\"Path to the file to upload.\",\n)\n@click.option(\n    \"--name\",\n    \"-n\",\n    \"name\",\n    required=False,\n    type=click.STRING,\n    help=\"Human readable name for this upload, for use in file listings.\",\n)\ndef upload(file_path: Path, name: str | None) -> None:\n    pinata_jwt = get_pinata_jwt()\n    if not pinata_jwt:\n        raise click.ClickException(\"You are not logged in! Please login using `algokit task ipfs login`.\")\n\n    try:\n        total = file_path.stat().st_size\n        if total > MAX_FILE_SIZE:\n            raise click.ClickException(\"File size exceeds 100MB limit!\")\n\n        def upload() -> str:\n            return upload_to_pinata(file_path, pinata_jwt, name)\n\n        cid = run_with_animation(\n            target_function=upload,\n            animation_text=\"Uploading\",\n        )\n        logger.info(f\"File uploaded successfully!\\n CID: {cid}\")\n\n    except click.ClickException as ex:\n        raise ex\n    except OSError as ex:\n        logger.debug(ex)\n        raise click.ClickException(\"Failed to open file!\") from ex\n    except (\n        PinataBadRequestError,\n        PinataUnauthorizedError,\n        PinataForbiddenError,\n        PinataInternalServerError,\n        PinataHttpError,\n    ) as ex:\n        logger.debug(ex)\n        raise click.ClickException(repr(ex)) from ex\n    except Exception as ex:\n        logger.debug(ex)\n        raise click.ClickException(\"Failed to upload file!\") from ex\n"
  },
  {
    "path": "src/algokit/cli/tasks/mint.py",
    "content": "import json\nimport logging\nimport math\nfrom decimal import Decimal\nfrom pathlib import Path\n\nimport click\nfrom algokit_utils import AlgoAmount, SigningAccount\nfrom algosdk.error import AlgodHTTPError\n\nfrom algokit.cli.common.constants import AlgorandNetwork, ExplorerEntityType\nfrom algokit.cli.common.utils import get_explorer_url\nfrom algokit.cli.tasks.utils import (\n    get_account_with_private_key,\n    load_algod_client,\n    run_callback_once,\n    validate_balance,\n)\nfrom algokit.core.tasks.ipfs import (\n    PinataBadRequestError,\n    PinataForbiddenError,\n    PinataHttpError,\n    PinataInternalServerError,\n    PinataUnauthorizedError,\n    get_pinata_jwt,\n)\nfrom algokit.core.tasks.mint.mint import mint_token\nfrom algokit.core.tasks.mint.models import TokenMetadata\n\nlogger = logging.getLogger(__name__)\n\nMAX_UNIT_NAME_BYTE_LENGTH = 8\nMAX_ASSET_NAME_BYTE_LENGTH = 32\nASSET_MINTING_MBR = Decimal(\"0.2\")  # Algos, 0.1 for base account, 0.1 for asset creation\n\n\ndef _validate_supply(total: int, decimals: int) -> None:\n    \"\"\"\n    Validate the total supply and decimal places of a token.\n\n    Args:\n        total (int): The total supply of the token.\n        decimals (int): The number of decimal places for the token.\n\n    Raises:\n        click.ClickException: If the validation fails.\n    \"\"\"\n    if not (total == 1 or (total % 10 == 0 and total != 0)):\n        raise click.ClickException(\"Total must be 1 or a power of 10 larger than 1 (10, 100, 1000, ...).\")\n    if not ((total == 1 and decimals == 0) or (total != 1 and decimals == int(math.log10(total)))):\n        raise click.ClickException(\n            \"Number of digits after the decimal point must be 0 for a pure NFT, or \"\n            \"equal to the logarithm in base 10 of total number of units for a fractional NFT.\"\n        )\n\n\ndef _validate_unit_name(context: click.Context, param: click.Parameter, value: str) -> str:\n    \"\"\"\n    
Validate the unit name by checking if its byte length is less than or equal to a predefined maximum value.\n\n    Args:\n        context (click.Context): The click context.\n        param (click.Parameter): The click parameter.\n        value (str): The value of the parameter.\n\n    Returns:\n        str: The value of the parameter if it passes the validation.\n    \"\"\"\n\n    if len(value.encode(\"utf-8\")) <= MAX_UNIT_NAME_BYTE_LENGTH:\n        return value\n    else:\n        raise click.BadParameter(\n            f\"Unit name must be {MAX_UNIT_NAME_BYTE_LENGTH} bytes or less.\", ctx=context, param=param\n        )\n\n\ndef _get_and_validate_asset_name(context: click.Context, param: click.Parameter, value: str | None) -> str:\n    \"\"\"\n    Validate the asset name by checking if its byte length is less than or equal to a predefined maximum value.\n    If asset name has not been supplied in the metadata file or via an argument a prompt is displayed.\n\n    Args:\n        context (click.Context): The click context.\n        param (click.Parameter): The click parameter.\n        value (str|None): The value of the parameter.\n\n    Returns:\n        str: The value of the parameter if it passes the validation.\n    \"\"\"\n    token_metadata_path = context.params.get(\"token_metadata_path\")\n    token_name = None\n\n    if token_metadata_path is not None:\n        with Path(token_metadata_path).open(mode=\"r\", encoding=\"utf-8\") as metadata_file:\n            data = json.load(metadata_file)\n            token_name = data.get(\"name\")\n\n    if value is None:\n        if token_name is None:\n            value = click.prompt(\"Provide the asset name\", type=str)\n        else:\n            value = token_name\n    elif token_name is not None and token_name != value:\n        raise click.BadParameter(\"Token name in metadata JSON must match CLI argument providing token name!\")\n\n    if value is None:\n        raise click.BadParameter(\"Asset name cannot be 
None\")\n\n    if len(value.encode(\"utf-8\")) <= MAX_ASSET_NAME_BYTE_LENGTH:\n        return value\n    else:\n        raise click.BadParameter(\n            f\"Unit name must be {MAX_UNIT_NAME_BYTE_LENGTH} bytes or less.\", ctx=context, param=param\n        )\n\n\ndef _get_creator_account(_: click.Context, __: click.Parameter, value: str) -> SigningAccount:\n    \"\"\"\n    Validate the creator account by checking if it is a valid Algorand address.\n\n    Args:\n        context (click.Context): The click context.\n        value (str): The value of the parameter.\n\n    Returns:\n        SigningAccount: An account object with the address and private key.\n    \"\"\"\n    try:\n        return get_account_with_private_key(value)\n    except Exception as ex:\n        raise click.BadParameter(str(ex)) from ex\n\n\ndef _get_and_validate_decimals(context: click.Context, _: click.Parameter, value: int | None) -> int:\n    \"\"\"\n    Validate the number of decimal places for the token.\n    If decimals has not been supplied in the metadata file or via an argument a prompt is displayed.\n\n    Args:\n        context (click.Context): The click context.\n        value (int|None): The value of the parameter.\n\n    Returns:\n        int: The value of the parameter if it passes the validation.\n    \"\"\"\n    token_metadata_path = context.params.get(\"token_metadata_path\")\n    token_decimals = None\n    if token_metadata_path is not None:\n        with Path(token_metadata_path).open(mode=\"r\", encoding=\"utf-8\") as metadata_file:\n            data = json.load(metadata_file)\n            token_decimals = data.get(\"decimals\")\n\n    if value is None:\n        if token_decimals is None:\n            decimals: int = click.prompt(\"Provide the asset decimals\", type=int, default=0)\n            return decimals\n        return int(token_decimals)\n    else:\n        if token_decimals is not None and token_decimals != value:\n            raise click.BadParameter(\"The value 
for decimals in the metadata JSON must match the decimals argument.\")\n        return value\n\n\ndef _validate_supply_for_nft(context: click.Context, _: click.Parameter, value: bool) -> bool:  # noqa: FBT001\n    \"\"\"\n    Validate the total supply and decimal places for NFTs.\n\n    Args:\n        context (click.Context): The click context.\n        value (bool): The value of the parameter.\n\n    Returns:\n        bool: The value of the parameter if it passes the validation.\n    \"\"\"\n    if value:\n        try:\n            total = context.params.get(\"total\")\n            decimals = context.params.get(\"decimals\")\n            if total is not None and decimals is not None:\n                _validate_supply(total, decimals)\n        except click.ClickException as ex:\n            raise ex\n    return value\n\n\n@click.command(\n    name=\"mint\",\n    help=\"Mint new fungible or non-fungible assets on Algorand.\",\n)\n@click.option(\n    \"--creator\",\n    required=True,\n    prompt=\"Provide the address or alias of the asset creator\",\n    help=\"Address or alias of the asset creator.\",\n    type=click.STRING,\n    callback=run_callback_once(_get_creator_account),\n    is_eager=True,\n)\n@click.option(\n    \"--name\",\n    \"asset_name\",\n    type=click.STRING,\n    required=False,\n    callback=_get_and_validate_asset_name,\n    help=\"Asset name.\",\n    is_eager=True,\n)\n@click.option(\n    \"-u\",\n    \"--unit\",\n    \"unit_name\",\n    type=click.STRING,\n    required=True,\n    callback=run_callback_once(_validate_unit_name),\n    prompt=\"Provide the unit name\",\n    help=\"Unit name of the asset.\",\n    is_eager=True,\n)\n@click.option(\n    \"-t\",\n    \"--total\",\n    type=click.INT,\n    required=False,\n    default=1,\n    prompt=\"Provide the total supply\",\n    help=\"Total supply of the asset. 
Defaults to 1.\",\n    is_eager=True,\n)\n@click.option(\n    \"-d\",\n    \"--decimals\",\n    type=click.INT,\n    required=False,\n    callback=_get_and_validate_decimals,\n    help=\"Number of decimals. Defaults to 0.\",\n    is_eager=True,  # This option needs to be evaluated before nft option.\n)\n@click.option(\n    \"--nft/--ft\",\n    \"non_fungible\",\n    type=click.BOOL,\n    prompt=\"Validate asset as NFT? Checks values of `total` and `decimals` as per ARC3 if set to True.\",\n    default=False,\n    callback=_validate_supply_for_nft,\n    help=\"\"\"Whether the asset should be validated as NFT or FT. Refers to NFT by default and validates canonical\n    definitions of pure or fractional NFTs as per ARC3 standard.\"\"\",\n)\n@click.option(\n    \"-i\",\n    \"--image\",\n    \"image_path\",\n    type=click.Path(exists=True, dir_okay=False, file_okay=True, resolve_path=True, path_type=Path),\n    prompt=\"Provide the path to the asset image file\",\n    help=\"Path to the asset image file to be uploaded to IPFS.\",\n    required=True,\n)\n@click.option(\n    \"-m\",\n    \"--metadata\",\n    \"token_metadata_path\",\n    type=click.Path(exists=True, dir_okay=False, file_okay=True, resolve_path=True, path_type=Path),\n    help=\"\"\"Path to the ARC19 compliant asset metadata file to be uploaded to IPFS. If not provided,\n        a default metadata object will be generated automatically based on asset-name, decimals and image.\n        For more details refer to https://arc.algorand.foundation/ARCs/arc-0003#json-metadata-file-schema.\"\"\",\n    default=None,\n    required=False,\n)\n@click.option(\n    \"--mutable/--immutable\",\n    type=click.BOOL,\n    prompt=\"Would you like to make the asset mutable?\",\n    default=False,\n    help=\"Whether the asset should be mutable or immutable. 
Refers to `ARC19` by default.\",\n)\n@click.option(\n    \"-n\",\n    \"--network\",\n    type=click.Choice(AlgorandNetwork.to_list()),\n    prompt=\"Provide the network to use\",\n    default=AlgorandNetwork.LOCALNET,\n    required=False,\n    help=f\"Network to use. Refers to `{AlgorandNetwork.LOCALNET}` by default.\",\n)\ndef mint(  # noqa: PLR0913\n    *,\n    creator: SigningAccount,\n    asset_name: str,\n    unit_name: str,\n    total: int,\n    decimals: int,\n    image_path: Path,\n    token_metadata_path: Path | None,\n    mutable: bool,\n    network: AlgorandNetwork,\n    non_fungible: bool,  # noqa: ARG001\n) -> None:\n    pinata_jwt = get_pinata_jwt()\n    if not pinata_jwt:\n        raise click.ClickException(\"You are not logged in! Please login using `algokit task ipfs login`.\")\n\n    client = load_algod_client(network)\n    validate_balance(\n        client,\n        creator,\n        0,\n        AlgoAmount.from_algo(ASSET_MINTING_MBR).micro_algo,\n    )\n\n    token_metadata = TokenMetadata.from_json_file(token_metadata_path, asset_name, decimals)\n    try:\n        asset_id, txn_id = mint_token(\n            client=client,\n            jwt=pinata_jwt,\n            creator_account=creator,\n            unit_name=unit_name,\n            total=total,\n            token_metadata=token_metadata,\n            image_path=image_path,\n            mutable=mutable,\n        )\n\n        click.echo(\"\\nSuccessfully minted the asset!\")\n        click.echo(f\"Browse your asset at: {get_explorer_url(asset_id, network, ExplorerEntityType.ASSET)}\")\n        click.echo(f\"Check transaction status at: {get_explorer_url(txn_id, network, ExplorerEntityType.TRANSACTION)}\")\n    except (\n        PinataBadRequestError,\n        PinataUnauthorizedError,\n        PinataForbiddenError,\n        PinataInternalServerError,\n        PinataHttpError,\n    ) as ex:\n        logger.debug(ex)\n        raise click.ClickException(repr(ex)) from ex\n    except AlgodHTTPError 
as ex:\n        raise click.ClickException(str(ex)) from ex\n    except Exception as ex:\n        logger.debug(ex, exc_info=True)\n        raise click.ClickException(\"Failed to mint the asset!\") from ex\n"
  },
  {
    "path": "src/algokit/cli/tasks/nfd.py",
    "content": "import logging\n\nimport click\n\nfrom algokit.cli.tasks.utils import validate_address\nfrom algokit.core.tasks.nfd import NFDMatchType, nfd_lookup_by_address, nfd_lookup_by_domain\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_nfd(value: str) -> bool:\n    return value.endswith(\".algo\")\n\n\ndef is_algorand_address(value: str) -> bool:\n    try:\n        validate_address(value)\n        return True\n    except Exception:\n        return False\n\n\n@click.command(\n    name=\"nfd-lookup\",\n    help=\"Perform a lookup via NFD domain or address, returning the associated address or domain respectively.\",\n)\n@click.argument(\n    \"value\",\n    type=click.STRING,\n)\n@click.option(\n    \"--output\",\n    \"-o\",\n    required=False,\n    default=NFDMatchType.ADDRESS.value,\n    type=click.Choice([e.value for e in NFDMatchType]),\n    help=\"Output format for NFD API response. Defaults to address|domain resolved.\",\n)\ndef nfd_lookup(\n    value: str,\n    output: str,\n) -> None:\n    if not is_nfd(value) and not is_algorand_address(value):\n        raise click.ClickException(\"Invalid input. Must be either a valid NFD domain or an Algorand address.\")\n\n    try:\n        if is_nfd(value):\n            click.echo(nfd_lookup_by_domain(value, NFDMatchType(output)))\n        elif is_algorand_address(value):\n            click.echo(nfd_lookup_by_address(value, NFDMatchType(output)))\n    except Exception as err:\n        raise click.ClickException(str(err)) from err\n"
  },
  {
    "path": "src/algokit/cli/tasks/send_transaction.py",
    "content": "import json\nimport logging\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport click\nfrom algosdk import encoding, error\nfrom algosdk.transaction import SignedTransaction, retrieve_from_file\n\nfrom algokit.cli.common.constants import AlgorandNetwork, ExplorerEntityType\nfrom algokit.cli.common.utils import MutuallyExclusiveOption, get_explorer_url\nfrom algokit.cli.tasks.utils import (\n    load_algod_client,\n    stdin_has_content,\n)\n\nif TYPE_CHECKING:\n    from io import TextIOWrapper\n\nlogger = logging.getLogger(__name__)\n\n\ndef _is_sign_task_output_txn(item: dict) -> bool:\n    \"\"\"\n    Checks if a given item is a dictionary and contains the keys \"transaction_id\" and \"content\".\n\n    Args:\n        item (dict): A dictionary object to be checked.\n\n    Returns:\n        bool: True if the input item is a dictionary with the keys \"transaction_id\" and \"content\", False otherwise.\n    \"\"\"\n\n    return isinstance(item, dict) and all(key in item for key in [\"transaction_id\", \"content\"])\n\n\ndef _load_from_stdin() -> list[SignedTransaction]:\n    \"\"\"\n    Load transaction data from standard input and convert it into a list of SignedTransaction objects.\n\n    Returns:\n        A list of SignedTransaction objects representing the loaded transactions from the standard input.\n\n    Raises:\n        click.ClickException: If the piped transaction content is invalid.\n    \"\"\"\n    # Read the raw file content from the standard input\n\n    raw_file_content = cast(\"TextIOWrapper\", click.get_text_stream(\"stdin\")).read()\n\n    try:\n        # Parse the raw file content as JSON\n        file_content = json.loads(raw_file_content)\n    except json.JSONDecodeError as ex:\n        raise click.ClickException(\"Invalid piped transaction content!\") from ex\n\n    # Check if the content is a list of dicts with the required fields\n    if not isinstance(file_content, list) or not 
all(_is_sign_task_output_txn(item) for item in file_content):\n        raise click.ClickException(\"Invalid piped transaction content!\")\n\n    # Convert the content into SignedTransaction objects\n    return [encoding.msgpack_decode(item[\"content\"]) for item in file_content]  # type: ignore[no-untyped-call]\n\n\ndef _get_signed_transactions(file: Path | None = None, transaction: str | None = None) -> list[SignedTransaction]:\n    \"\"\"\n    Retrieves a list of signed transactions.\n\n    Args:\n        file (Optional[Path]): A `Path` object representing the file path from which to retrieve the transactions.\n        transaction (Optional[str]): A base64 encoded string representing a single signed transaction.\n\n    Returns:\n        list[SignedTransaction]: A list of `SignedTransaction` objects representing the retrieved signed transactions.\n\n    Raises:\n        click.ClickException: If the supplied transaction is not of type `SignedTransaction`.\n        click.ClickException: If there is an error decoding the transaction.\n\n    \"\"\"\n    try:\n        if file:\n            txns = retrieve_from_file(str(file))  # type: ignore[no-untyped-call]\n        elif transaction:\n            txns = [encoding.msgpack_decode(transaction)]  # type: ignore[no-untyped-call]\n        else:\n            txns = _load_from_stdin()\n\n        for txn in txns:\n            if not isinstance(txn, SignedTransaction):\n                raise click.ClickException(\"Supplied transaction is not signed!\")\n\n        return cast(\"list[SignedTransaction]\", txns)\n\n    except Exception as ex:\n        logger.debug(ex, exc_info=True)\n        raise click.ClickException(\n            \"Failed to decode transaction! 
If you are intending to send multiple transactions use `--file` instead.\"\n        ) from ex\n\n\ndef _send_transactions(network: AlgorandNetwork, txns: list[SignedTransaction]) -> None:\n    \"\"\"\n    Sends a list of signed transactions to the Algorand blockchain network using the AlgodClient.\n\n    Args:\n        network (AlgorandNetwork): The network to which the transactions will be sent.\n        txns (list[SignedTransaction]): A list of signed transactions to be sent.\n\n    Returns:\n        None: The function does not return any value.\n    \"\"\"\n    algod_client = load_algod_client(network)\n\n    if any(txn.transaction.group for txn in txns):\n        txid = algod_client.send_transactions(txns)\n        click.echo(f\"Transaction group successfully sent with txid: {txid}\")\n        click.echo(\n            f\"Check transaction group status at: {get_explorer_url(txid, network, ExplorerEntityType.TRANSACTION)}\"\n        )\n    else:\n        for index, txn in enumerate(txns, start=1):\n            click.echo(f\"\\nSending transaction {index}/{len(txns)}\")\n            txid = algod_client.send_transaction(txn)\n            click.echo(f\"Transaction successfully sent with txid: {txid}\")\n            click.echo(\n                f\"Check transaction status at: {get_explorer_url(txid, network, ExplorerEntityType.TRANSACTION)}\"\n            )\n\n\n@click.command(name=\"send\", help=\"Send a signed transaction to the given network.\")\n@click.option(\n    \"--file\",\n    \"-f\",\n    type=click.Path(exists=True, dir_okay=False, file_okay=True, resolve_path=True, path_type=Path),\n    help=\"Single or multiple message pack encoded signed transactions from binary file to send.\",\n    cls=MutuallyExclusiveOption,\n    not_required_if=[\"transaction\"],\n    required=False,\n)\n@click.option(\n    \"--transaction\",\n    \"-t\",\n    type=click.STRING,\n    help=\"Base64 encoded signed transaction to send.\",\n    cls=MutuallyExclusiveOption,\n    
not_required_if=[\"file\"],\n    required=False,\n)\n@click.option(\n    \"-n\",\n    \"--network\",\n    type=click.Choice(AlgorandNetwork.to_list()),\n    default=AlgorandNetwork.LOCALNET,\n    required=False,\n    help=f\"Network to use. Refers to `{AlgorandNetwork.LOCALNET}` by default.\",\n)\ndef send(*, file: Path | None, transaction: str | None, network: AlgorandNetwork) -> None:\n    if not file and not transaction and not stdin_has_content():\n        raise click.ClickException(\n            \"Please provide a file path via `--file` or a base64 encoded signed transaction via `--transaction`. \"\n            \"Alternatively, you can also pipe the output of `algokit task sign` to this command.\"\n        )\n\n    txns = _get_signed_transactions(file, transaction)\n\n    if not txns:\n        raise click.ClickException(\"No valid transactions found!\")\n\n    try:\n        _send_transactions(network, txns)\n    except error.AlgodHTTPError as ex:\n        raise click.ClickException(str(ex)) from ex\n    except Exception as ex:\n        logger.debug(ex, exc_info=True)\n        raise click.ClickException(\"Failed to send transaction!\") from ex\n"
  },
  {
    "path": "src/algokit/cli/tasks/sign_transaction.py",
    "content": "import base64\nimport json\nimport logging\nfrom pathlib import Path\nfrom typing import Any, cast\n\nimport click\nfrom algosdk import encoding\nfrom algosdk.transaction import SignedTransaction, Transaction, retrieve_from_file, write_to_file\n\nfrom algokit.cli.common.utils import MutuallyExclusiveOption\nfrom algokit.cli.tasks.utils import get_account_with_private_key\n\nlogger = logging.getLogger(__name__)\n\n\nclass TransactionBytesEncoder(json.JSONEncoder):\n    def default(self, obj: Any) -> Any:  # noqa: ANN401\n        if isinstance(obj, bytes | bytearray):\n            return base64.b64encode(obj).decode()\n        return super().default(obj)\n\n\ndef _validate_for_signed_txns(txns: list[Transaction]) -> None:\n    signed_txns = [txn for txn in txns if isinstance(txn, SignedTransaction)]\n\n    if signed_txns:\n        transaction_ids = \", \".join([txn.get_txid() for txn in signed_txns])  # type: ignore[no-untyped-call]\n        message = f\"Supplied transactions {transaction_ids} are already signed!\"\n        raise click.ClickException(message)\n\n\ndef _get_transactions(file: Path | None, transaction: str | None) -> list[Transaction]:\n    try:\n        if file:\n            txns: list[Transaction] = retrieve_from_file(str(file))  # type: ignore[no-untyped-call]\n            return txns\n        else:\n            return [cast(\"Transaction\", encoding.msgpack_decode(transaction))]  # type: ignore[no-untyped-call]\n    except Exception as ex:\n        logger.debug(ex, exc_info=True)\n        raise click.ClickException(\n            \"Failed to decode transaction! 
If you are intending to sign multiple transactions use `--file` instead.\"\n        ) from ex\n\n\ndef _confirm_transaction(txns: list[Transaction]) -> bool:\n    click.echo(\n        json.dumps(\n            [\n                {\n                    \"transaction_id\": txn.get_txid(),  # type: ignore[no-untyped-call]\n                    \"content\": txn.dictify(),  # type: ignore[no-untyped-call]\n                }\n                for txn in txns\n            ],\n            cls=TransactionBytesEncoder,\n            indent=2,\n        ),\n    )\n    response = click.prompt(\n        \"Would you like to proceed with signing the above?\", type=click.Choice([\"y\", \"n\"]), default=\"n\"\n    )\n    return bool(response == \"y\")\n\n\ndef _sign_and_output_transaction(txns: list[Transaction], private_key: str, output: Path | None) -> None:\n    signed_txns = [txn.sign(private_key) for txn in txns]  # type: ignore[no-untyped-call]\n\n    if output:\n        write_to_file(signed_txns, str(output))  # type: ignore[no-untyped-call]\n        click.echo(f\"Signed transaction written to {output}\")\n    else:\n        encoded_signed_txns = [\n            {\"transaction_id\": txn.get_txid(), \"content\": encoding.msgpack_encode(txn)}  # type: ignore[no-untyped-call]\n            for txn in signed_txns\n        ]\n        click.echo(json.dumps(encoded_signed_txns, indent=2))\n\n\n@click.command(name=\"sign\", help=\"Sign goal clerk compatible Algorand transaction(s).\")\n@click.option(\"--account\", \"-a\", type=click.STRING, required=True, help=\"Address or alias of the signer account.\")\n@click.option(\n    \"--file\",\n    \"-f\",\n    type=click.Path(exists=True, dir_okay=False, file_okay=True, resolve_path=True, path_type=Path),\n    help=\"Single or multiple message pack encoded transactions from binary file to sign.\",\n    cls=MutuallyExclusiveOption,\n    not_required_if=[\"transaction\"],\n)\n@click.option(\n    \"--transaction\",\n    \"-t\",\n    
type=click.STRING,\n    help=\"Single base64 encoded transaction object to sign.\",\n    cls=MutuallyExclusiveOption,\n    not_required_if=[\"file\"],\n)\n@click.option(\n    \"--output\",\n    \"-o\",\n    type=click.Path(resolve_path=True, dir_okay=False, file_okay=True, path_type=Path),\n    help=\"The output file path to store signed transaction(s).\",\n    required=False,\n)\n@click.option(\"--force\", is_flag=True, help=\"Force signing without confirmation.\", required=False, type=click.BOOL)\ndef sign(*, account: str, file: Path | None, transaction: str | None, output: Path | None, force: bool) -> None:\n    if not file and not transaction:\n        raise click.ClickException(\n            \"Please provide a file path via `--file` or a base64 encoded unsigned transaction via `--transaction`.\"\n        )\n\n    signer_account = get_account_with_private_key(account)\n\n    txns = _get_transactions(file, transaction)\n\n    if not txns:\n        raise click.ClickException(\"No valid transactions found!\")\n\n    _validate_for_signed_txns(txns)\n\n    if not force and not _confirm_transaction(txns):\n        return\n\n    _sign_and_output_transaction(txns, signer_account.private_key, output)\n"
  },
  {
    "path": "src/algokit/cli/tasks/transfer.py",
    "content": "import logging\n\nimport click\nfrom algokit_utils import AlgoAmount, AssetTransferParams, PaymentParams, SendAtomicTransactionComposerResults\n\nfrom algokit.cli.common.constants import AlgorandNetwork, ExplorerEntityType\nfrom algokit.cli.common.utils import get_explorer_url\nfrom algokit.cli.tasks.utils import (\n    get_account_with_private_key,\n    get_address,\n    get_asset_decimals,\n    load_algod_client,\n    validate_address,\n    validate_balance,\n)\nfrom algokit.core.utils import get_algorand_client_for_network\n\nlogger = logging.getLogger(__name__)\n\n# TODO: upon algokit nfd lookup being implemented receiver will also allow nfd lookups\n\n\n@click.command(name=\"transfer\", help=\"\"\"Transfer algos or assets from one account to another.\"\"\")\n@click.option(\"--sender\", \"-s\", type=click.STRING, help=\"Address or alias of the sender account.\", required=True)\n@click.option(\n    \"--receiver\",\n    \"-r\",\n    type=click.STRING,\n    help=\"Address or alias to an account that will receive the asset(s).\",\n    required=True,\n)\n@click.option(\n    \"--asset\",\n    \"--id\",\n    \"asset_id\",\n    type=click.INT,\n    help=\"Asset ID to transfer. Defaults to 0 (Algo).\",\n    default=0,\n    required=False,\n)\n@click.option(\"--amount\", \"-a\", type=click.INT, help=\"Amount to transfer.\", required=True)\n@click.option(\n    \"--whole-units\",\n    \"whole_units\",\n    is_flag=True,\n    type=click.BOOL,\n    help=(\n        \"Use whole units (Algos | ASAs) instead of smallest divisible units (for example, microAlgos). \"\n        \"Disabled by default.\"\n    ),\n    default=False,\n    required=False,\n)\n@click.option(\n    \"-n\",\n    \"--network\",\n    type=click.Choice([choice.value for choice in AlgorandNetwork]),\n    default=AlgorandNetwork.LOCALNET,\n    required=False,\n    help=f\"Network to use. 
Refers to `{AlgorandNetwork.LOCALNET}` by default.\",\n)\ndef transfer(  # noqa: PLR0913\n    *,\n    sender: str,\n    receiver: str,\n    asset_id: int,\n    amount: int,\n    whole_units: bool,\n    network: AlgorandNetwork,\n) -> None:\n    # Load addresses and accounts from mnemonics or aliases\n    sender_account = get_account_with_private_key(sender)\n    receiver_address = get_address(receiver)\n\n    # Get algod client\n    algod_client = load_algod_client(network)\n\n    # Convert amount to whole units if specified\n    if whole_units:\n        amount = amount * (10 ** get_asset_decimals(asset_id, algod_client))\n\n    # Validate inputs\n    validate_address(receiver_address)\n    validate_balance(algod_client, sender_account, asset_id, amount)\n    validate_balance(algod_client, receiver_address, asset_id)\n\n    # Transfer algos or assets depending on asset_id\n    txn_response: SendAtomicTransactionComposerResults | None = None\n    algorand = get_algorand_client_for_network(network)\n    try:\n        if asset_id == 0:\n            txn_response = (\n                algorand.new_group()\n                .add_payment(\n                    PaymentParams(\n                        sender=sender_account.address,\n                        receiver=receiver_address,\n                        amount=AlgoAmount(micro_algo=amount),\n                        signer=sender_account.signer,\n                    )\n                )\n                .send()\n            )\n        else:\n            txn_response = (\n                algorand.new_group()\n                .add_asset_transfer(\n                    AssetTransferParams(\n                        sender=sender_account.address,\n                        receiver=receiver_address,\n                        amount=amount,\n                        asset_id=asset_id,\n                        signer=sender_account.signer,\n                    ),\n                )\n                .send()\n            )\n\n        
txn_url = get_explorer_url(\n            identifier=txn_response.tx_ids[0],\n            network=network,\n            entity_type=ExplorerEntityType.TRANSACTION,\n        )\n        click.echo(f\"Successfully performed transfer. See details at {txn_url}\")\n\n    except Exception as err:\n        logger.debug(err, exc_info=True)\n        raise click.ClickException(\"Failed to perform transfer\") from err\n"
  },
  {
    "path": "src/algokit/cli/tasks/utils.py",
    "content": "# AlgoKit Tasks related utility functions\n\nimport logging\nimport os\nimport stat\nimport sys\nfrom collections.abc import Callable\nfrom decimal import Decimal\nfrom functools import wraps\nfrom typing import Any\n\nimport algosdk\nimport algosdk.encoding\nimport click\nfrom algokit_utils import AlgoAmount, ClientManager, SigningAccount\n\nfrom algokit.cli.common.constants import AlgorandNetwork\nfrom algokit.core.tasks.wallet import get_alias\n\nlogger = logging.getLogger(__name__)\n\n\ndef _validate_asset_balance(account_info: dict, asset_id: int, decimals: int, amount: int = 0) -> None:\n    asset_record = next((asset for asset in account_info.get(\"assets\", []) if asset[\"asset-id\"] == asset_id), None)\n\n    if not asset_record:\n        raise click.ClickException(\"SigningAccount is not opted into the asset\")\n\n    if amount > 0 and asset_record[\"amount\"] < amount:\n        required = amount / 10**decimals\n        available = asset_record[\"amount\"] / 10**decimals\n        raise click.ClickException(\n            f\"Insufficient asset balance in account, required: {required}, available: {available}\"\n        )\n\n\ndef _validate_algo_balance(account_info: dict, amount: int) -> None:\n    if account_info.get(\"amount\", 0) < amount:\n        required = AlgoAmount.from_micro_algo(amount)\n        available = AlgoAmount.from_micro_algo(account_info.get(\"amount\", 0))\n        raise click.ClickException(\n            f\"Insufficient Algos balance in account, required: {required.algo} Algos, available: {available.algo} Algos\"\n        )\n\n\ndef get_private_key_from_mnemonic() -> str:\n    \"\"\"\n    Converts a mnemonic phrase into a private key.\n\n    Returns:\n        str: The private key generated from the mnemonic phrase.\n\n    Raises:\n        click.ClickException: If the entered mnemonic phrase is invalid.\n\n    Example Usage:\n        private_key = get_private_key_from_mnemonic()\n        print(private_key)\n\n    Inputs:\n 
       None\n\n    Flow:\n        1. Prompts the user to enter a mnemonic phrase.\n        2. Converts the entered mnemonic phrase into a private key using `algosdk.mnemonic.to_private_key`.\n        3. Returns the private key.\n\n    \"\"\"\n\n    mnemonic_phrase = click.prompt(\"Enter the mnemonic phrase (25 words separated by whitespace)\", hide_input=True)\n    try:\n        return str(algosdk.mnemonic.to_private_key(mnemonic_phrase))  # type: ignore[no-untyped-call]\n    except Exception as err:\n        raise click.ClickException(\"Invalid mnemonic. Please provide a valid Algorand mnemonic.\") from err\n\n\ndef load_algod_client(network: AlgorandNetwork) -> algosdk.v2client.algod.AlgodClient:\n    \"\"\"\n    Returns an instance of the `algosdk.v2client.algod.AlgodClient` class for the specified network.\n\n    Args:\n        network (str): The network for which the `AlgodClient` instance needs to be loaded.\n\n    Returns:\n        algosdk.v2client.algod.AlgodClient: An instance of the `AlgodClient` class for the specified network.\n\n    Raises:\n        click.ClickException: If the specified network is invalid.\n    \"\"\"\n\n    config_mapping = {\n        AlgorandNetwork.LOCALNET: ClientManager.get_default_localnet_config(\"algod\"),\n        AlgorandNetwork.TESTNET: ClientManager.get_algonode_config(\"testnet\", \"algod\"),\n        AlgorandNetwork.MAINNET: ClientManager.get_algonode_config(\"mainnet\", \"algod\"),\n    }\n    try:\n        return ClientManager.get_algod_client(config_mapping[network])\n    except KeyError as err:\n        raise click.ClickException(\"Invalid network\") from err\n\n\ndef get_asset_decimals(asset_id: int, algod_client: algosdk.v2client.algod.AlgodClient) -> int:\n    \"\"\"\n    Retrieves the number of decimal places for a given asset.\n\n    Args:\n        asset_id (int): The ID of the asset for which the number of decimal places is to be retrieved. 
(0 for Algo)\n        algod_client (algosdk.v2client.algod.AlgodClient): An instance of the AlgodClient class.\n\n    Returns:\n        int: The number of decimal places for the given asset.\n\n    Raises:\n        click.ClickException: If the asset info response is invalid.\n\n    Example:\n        asset_id = 123\n        algod_client = algosdk.v2client.algod.AlgodClient(\"https://mainnet-api.algonode.cloud\", \"API_KEY\")\n        decimals = get_asset_decimals(asset_id, algod_client)\n        print(decimals)\n    \"\"\"\n\n    if asset_id == 0:\n        return 6\n\n    asset_info = algod_client.asset_info(asset_id)\n\n    if not isinstance(asset_info, dict) or \"params\" not in asset_info or \"decimals\" not in asset_info[\"params\"]:\n        raise click.ClickException(\"Invalid asset info response\")\n\n    return int(asset_info[\"params\"][\"decimals\"])\n\n\ndef validate_balance(\n    algod_client: algosdk.v2client.algod.AlgodClient, account: SigningAccount | str, asset_id: int, amount: int = 0\n) -> None:\n    \"\"\"\n    Validates the balance of an account before an operation.\n\n    Args:\n        algod_client (algosdk.v2client.algod.AlgodClient): The AlgodClient object for\n        interacting with the Algorand blockchain.\n        account (SigningAccount | str): The account object.\n        asset_id (int): The ID of the asset to be checked (0 for Algos).\n        amount (int): The amount of Algos or asset for the operation. 
Defaults to 0 implying opt-in check only.\n\n    Raises:\n        click.ClickException: If any validation check fails.\n    \"\"\"\n    address = account.address if isinstance(account, SigningAccount) else account\n    account_info = algod_client.account_info(address)\n\n    if not isinstance(account_info, dict):\n        raise click.ClickException(\"Invalid account info response\")\n\n    if asset_id == 0:\n        _validate_algo_balance(account_info, amount)\n    else:\n        decimals = get_asset_decimals(asset_id, algod_client)\n        _validate_asset_balance(account_info, asset_id, decimals, amount)\n\n\ndef validate_address(address: str) -> None:\n    \"\"\"\n    Check if a given address is a valid Algorand account address.\n\n    Args:\n        address (str): The address to be validated.\n\n    Raises:\n        click.ClickException: If the address is invalid.\n\n    Returns:\n        None\n    \"\"\"\n\n    if not algosdk.encoding.is_valid_address(address):  # type: ignore[no-untyped-call]\n        raise click.ClickException(f\"`{address}` is an invalid account address\")\n\n\ndef get_account_with_private_key(address: str) -> SigningAccount:\n    \"\"\"\n    Retrieves an account object with the private key based on the provided address.\n\n    Args:\n        address (str): The address for which to retrieve the account object.\n\n    Returns:\n        SigningAccount: An account object with the address and private key.\n\n    Raises:\n        click.ClickException: If the address is not valid or if the alias does not exist or does not have a private key.\n    \"\"\"\n\n    parsed_address = address.strip('\"')\n\n    try:\n        validate_address(parsed_address)\n        pk = get_private_key_from_mnemonic()\n        return SigningAccount(address=parsed_address, private_key=pk)\n    except click.ClickException as ex:\n        alias_data = get_alias(parsed_address)\n\n        if not alias_data:\n            raise click.ClickException(f\"Alias `{parsed_address}` 
does not exist.\") from ex\n        if not alias_data.private_key:\n            raise click.ClickException(f\"Alias \`{parsed_address}\` does not have a private key.\") from ex\n\n        return SigningAccount(address=alias_data.address, private_key=alias_data.private_key)\n\n\ndef get_address(address: str) -> str:\n    \"\"\"\n    Validates the given address and returns it if valid. If the address is not valid,\n    it checks if the address is an alias and returns the corresponding address if the alias exists.\n\n    Args:\n        address (str): The address to be validated or checked for an alias.\n\n    Returns:\n        str: The validated address or the corresponding address from an alias.\n\n    Raises:\n        click.ClickException: If the address is not valid and no corresponding alias exists.\n\n    Example:\n        address = get_address(\"ABCD1234\")\n        print(address)\n    \"\"\"\n\n    parsed_address = address.strip('\"')\n\n    try:\n        validate_address(parsed_address)\n        return parsed_address\n    except click.ClickException as ex:\n        if len(parsed_address) == algosdk.constants.address_len:\n            raise click.ClickException(f\"`{parsed_address}` is an invalid account address\") from ex\n\n        alias_data = get_alias(parsed_address)\n\n        if not alias_data:\n            raise click.ClickException(f\"Alias `{parsed_address}` does not exist.\") from ex\n\n        return alias_data.address\n\n\ndef stdin_has_content() -> bool:\n    \"\"\"\n    Checks if there is content in the standard input.\n\n    Returns:\n        bool: True if there is content in the standard input, False otherwise.\n    \"\"\"\n\n    mode = os.fstat(sys.stdin.fileno()).st_mode\n    return stat.S_ISFIFO(mode) or stat.S_ISREG(mode)\n\n\ndef validate_account_balance_to_opt_in(\n    algod_client: algosdk.v2client.algod.AlgodClient, account: SigningAccount, num_assets: int\n) -> None:\n    \"\"\"\n    Validates the balance of an account before 
opt in operation.\n    Each asset requires 0.1 Algos to opt in.\n\n    Args:\n        algod_client (algosdk.v2client.algod.AlgodClient): The AlgodClient object for\n        interacting with the Algorand blockchain.\n        account (SigningAccount | str): The account object.\n        num_assets (int): The number of the assets for opt in (0 for Algos).\n\n    Raises:\n        click.ClickException: If there is an insufficient fund in the account or account is not valid.\n    \"\"\"\n\n    address = account.address if isinstance(account, SigningAccount) else account\n    account_info = algod_client.account_info(address)\n\n    if not isinstance(account_info, dict):\n        raise click.ClickException(\"Invalid account info response\")\n\n    required_microalgos = num_assets * AlgoAmount.from_algo(Decimal(\"0.1\")).micro_algo\n    available_microalgos = account_info.get(\"amount\", 0)\n    if available_microalgos < required_microalgos:\n        required_algo = AlgoAmount.from_micro_algo(required_microalgos).algo\n        available_algos = AlgoAmount.from_micro_algo(available_microalgos).algo\n        raise click.ClickException(\n            f\"Insufficient Algos balance in account to opt in, required: {required_algo} Algos, available:\"\n            f\" {available_algos} Algos\"\n        )\n\n\ndef get_account_info(algod_client: algosdk.v2client.algod.AlgodClient, account_address: str) -> dict:\n    account_info = algod_client.account_info(account_address)\n    assert isinstance(account_info, dict)\n    return account_info\n\n\ndef run_callback_once(callback: Callable) -> Callable:\n    \"\"\"\n    Click option callbacks run twice, first to validate the prompt input,\n    and then independently from that is used to validate the value passed to the option.\n\n    In cases where the callback is expensive or has side effects(like prompting the user),\n    it's better to run it only once.\n    \"\"\"\n\n    @wraps(callback)\n    def wrapper(context: click.Context, param: 
click.Parameter, value: Any) -> Any:  # noqa: ANN401\n        if context.obj is None:\n            context.obj = {}\n\n        key = f\"{param.name}_callback_result\"\n        if key not in context.obj:\n            result = callback(context, param, value)\n            context.obj[key] = result\n            return result\n        return context.obj[key]\n\n    return wrapper\n"
  },
  {
    "path": "src/algokit/cli/tasks/vanity_address.py",
    "content": "import json\nimport logging\nimport re\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.core.tasks.vanity_address import MatchType, VanityAccount, generate_vanity_address\nfrom algokit.core.tasks.wallet import WALLET_ALIASING_MAX_LIMIT, WalletAliasingLimitError, add_alias, get_alias\n\nlogger = logging.getLogger(__name__)\n\n\ndef _validate_inputs(\n    keyword: str,\n    output: str,\n    alias: str | None,\n    output_file: Path | None,\n) -> None:\n    if not re.match(\"^[A-Z2-7]+$\", keyword):\n        raise click.ClickException(\"Invalid KEYWORD. Allowed: uppercase letters A-Z and numbers 2-7.\")\n    if output == \"alias\" and not alias:\n        raise click.ClickException(\n            \"Please provide an alias using the '--alias' option when the output is set to 'alias'.\"\n        )\n    if output == \"file\" and not output_file:\n        raise click.ClickException(\n            \"Please provide an output filename using the '--file-path' option when the output is set to 'file'.\"\n        )\n\n\ndef _store_vanity_to_alias(*, alias: str, vanity_account: VanityAccount, force: bool) -> None:\n    logger.info(f\"Adding {vanity_account.address} to wallet alias named {alias}\")\n    if get_alias(alias) and not force:\n        response = click.prompt(\n            f\"Alias '{alias}' already exists. 
Overwrite?\",\n            type=click.Choice([\"y\", \"n\"]),\n            default=\"n\",\n        )\n        if response == \"n\":\n            return\n\n    try:\n        add_alias(alias, vanity_account.address, vanity_account.private_key)\n    except WalletAliasingLimitError as ex:\n        raise click.ClickException(f\"Reached the max of {WALLET_ALIASING_MAX_LIMIT} aliases.\") from ex\n    except Exception as ex:\n        raise click.ClickException(\"Failed to add alias\") from ex\n    else:\n        click.echo(f\"Alias '{alias}' added successfully.\")\n\n\n@click.command(\n    name=\"vanity-address\",\n    help=\"\"\"Generate a vanity Algorand address. Your KEYWORD can only include letters A - Z and numbers 2 - 7.\n    Keeping your KEYWORD under 5 characters will usually result in faster generation.\n    Note: The longer the KEYWORD, the longer it may take to generate a matching address.\n    Please be patient if you choose a long keyword.\n    \"\"\",\n)\n@click.argument(\"keyword\")\n@click.option(\n    \"--match\",\n    \"-m\",\n    default=MatchType.START.value,\n    type=click.Choice([e.value for e in MatchType]),\n    help=\"Location where the keyword will be included. Default is start.\",\n)\n@click.option(\n    \"--output\",\n    \"-o\",\n    required=False,\n    default=\"stdout\",\n    type=click.Choice([\"stdout\", \"alias\", \"file\"]),\n    help=\"How the output will be presented.\",\n)\n@click.option(\n    \"--alias\",\n    \"-a\",\n    required=False,\n    default=None,\n    help='Alias for the address. Required if output is \"alias\".',\n    type=click.STRING,\n)\n@click.option(\n    \"--file-path\",\n    \"output_file_path\",\n    required=False,\n    default=None,\n    type=click.Path(dir_okay=False, file_okay=True, resolve_path=True, path_type=Path),\n    help='File path where to dump the output. 
Required if output is \"file\".',\n)\n@click.option(\n    \"--force\",\n    \"-f\",\n    is_flag=True,\n    required=False,\n    default=False,\n    type=click.BOOL,\n    help=\"Allow overwriting an aliases without confirmation, if output option is 'alias'.\",\n)\ndef vanity_address(  # noqa: PLR0913\n    *,\n    keyword: str,\n    match: MatchType,\n    output: str,\n    alias: str | None,\n    output_file_path: Path | None,\n    force: bool,\n) -> None:\n    if output_file_path and output != \"file\":\n        raise click.ClickException(\"File path can only be set when the output is set to 'file'.\")\n    if alias and output != \"alias\":\n        raise click.ClickException(\"Alias can only be set when the output is set to 'alias'.\")\n\n    match = MatchType(match)  # Force cast since click does not yet support enums as types\n    _validate_inputs(keyword, output, alias, output_file_path)\n\n    try:\n        vanity_account = generate_vanity_address(keyword, match)\n    except KeyboardInterrupt as ex:\n        click.echo(\"\\nAborting vanity address generation...\")\n        raise click.Abort from ex\n\n    if output == \"stdout\":\n        logger.warning(\n            \"WARNING: Your mnemonic is displayed on the console. \"\n            \"Ensure its security by keeping it confidential.\"\n            \"Consider clearing your terminal history after noting down the token.\\n\"\n        )\n        click.echo(vanity_account.__dict__)\n\n    elif output == \"alias\" and alias:\n        _store_vanity_to_alias(alias=alias, vanity_account=vanity_account, force=force)\n    elif output == \"file\" and output_file_path is not None:\n        with output_file_path.open(mode=\"w\", encoding=\"utf-8\") as f:\n            json.dump(vanity_account.__dict__, f, indent=4)\n            click.echo(f\"Output written to {output_file_path.absolute()}\")\n"
  },
  {
    "path": "src/algokit/cli/tasks/wallet.py",
    "content": "import json\nimport re\n\nimport click\nfrom algosdk import account\n\nfrom algokit.cli.tasks.utils import get_private_key_from_mnemonic, validate_address\nfrom algokit.core.tasks.wallet import (\n    WALLET_ALIASING_MAX_LIMIT,\n    WalletAliasingLimitError,\n    add_alias,\n    get_alias,\n    get_aliases,\n    remove_alias,\n)\n\n\ndef _validate_alias_name(alias_name: str) -> None:\n    pattern = r\"^[\\w-]{1,20}$\"\n    if not re.match(pattern, alias_name):\n        raise click.ClickException(\n            \"Invalid alias name. It should have at most 20 characters consisting of numbers, \"\n            \"letters, dashes, or underscores.\"\n        )\n\n\n@click.group()\ndef wallet() -> None:\n    \"\"\"Create short aliases for your addresses and accounts on AlgoKit CLI.\"\"\"\n\n\n@wallet.command(\"add\")\n@click.argument(\"alias_name\", type=click.STRING)\n@click.option(\"--address\", \"-a\", type=click.STRING, required=True, help=\"The address of the account.\")\n@click.option(\n    \"--mnemonic\",\n    \"-m\",\n    \"use_mnemonic\",\n    is_flag=True,\n    help=\"If specified then prompt the user for a mnemonic phrase interactively using masked input.\",\n)\n@click.option(\"--force\", \"-f\", is_flag=True, help=\"Allow overwriting an existing alias.\", type=click.BOOL)\ndef add(*, alias_name: str, address: str, use_mnemonic: bool, force: bool) -> None:\n    \"\"\"Add an address or account to be stored against a named alias (at most 50 aliases).\"\"\"\n\n    _validate_alias_name(alias_name)\n    validate_address(address)\n\n    private_key = get_private_key_from_mnemonic() if use_mnemonic else None\n\n    if use_mnemonic:\n        derived_address = account.address_from_private_key(private_key)  # type: ignore[no-untyped-call]\n        if derived_address != address:\n            click.echo(\n                \"Warning: Address from the mnemonic doesn't match the provided address. 
\"\n                \"It won't work unless the account has been rekeyed.\"\n            )\n\n    if get_alias(alias_name) and not force:\n        response = click.prompt(\n            f\"Alias '{alias_name}' already exists. Overwrite?\",\n            type=click.Choice([\"y\", \"n\"]),\n            default=\"n\",\n        )\n        if response == \"n\":\n            return\n\n    try:\n        add_alias(alias_name, address, private_key)\n    except WalletAliasingLimitError as ex:\n        raise click.ClickException(f\"Reached the max of {WALLET_ALIASING_MAX_LIMIT} aliases.\") from ex\n    except Exception as ex:\n        raise click.ClickException(\"Failed to add alias\") from ex\n    else:\n        click.echo(f\"Alias '{alias_name}' added successfully.\")\n\n\n@wallet.command(\"get\")\n@click.argument(\"alias\", type=click.STRING)\ndef get(alias: str) -> None:\n    \"\"\"Get an address or account stored against a named alias.\"\"\"\n    alias_data = get_alias(alias)\n\n    if not alias_data:\n        raise click.ClickException(f\"Alias `{alias}` does not exist.\")\n\n    click.echo(\n        f\"Address for alias `{alias}`: {alias_data.address}\"\n        f\"{' (🔐 includes private key)' if alias_data.private_key else ''}\"\n    )\n\n\n@wallet.command(\"list\")\ndef list_all() -> None:\n    \"\"\"List all addresses and accounts stored against a named alias.\"\"\"\n\n    aliases = get_aliases()\n\n    output = [\n        {\n            \"alias\": alias_data.alias,\n            \"address\": alias_data.address,\n            \"has_private_key\": bool(alias_data.private_key),\n        }\n        for alias_data in aliases\n    ]\n\n    content = (\n        json.dumps(output, indent=2)\n        if output\n        else \"You don't have any aliases stored yet. 
Create one using `algokit task wallet add`.\"\n    )\n\n    click.echo(content)\n\n\n@wallet.command(\"remove\")\n@click.argument(\"alias\", type=click.STRING)\n@click.option(\"--force\", \"-f\", is_flag=True, help=\"Allow removing an alias without confirmation.\")\ndef remove(*, alias: str, force: bool) -> None:\n    \"\"\"Remove an address or account stored against a named alias.\"\"\"\n\n    alias_data = get_alias(alias)\n\n    if not alias_data:\n        raise click.ClickException(f\"Alias `{alias}` does not exist.\")\n\n    if not force:\n        response = click.prompt(\n            f\"🚨 This is a destructive action that will remove the `{alias_data.alias}` alias. Are you sure?\",\n            type=click.Choice([\"y\", \"n\"]),\n            default=\"n\",\n        )\n\n        if response == \"n\":\n            return\n\n    remove_alias(alias)\n\n    click.echo(f\"Alias `{alias}` removed successfully.\")\n\n\n@wallet.command(\"reset\")\n@click.option(\"--force\", \"-f\", is_flag=True, help=\"Allow removing all aliases without confirmation.\")\ndef reset(*, force: bool) -> None:\n    \"\"\"Remove all aliases.\"\"\"\n\n    aliases = get_aliases()\n\n    if not aliases:\n        click.echo(\"Warning: No aliases available to reset.\")\n        return\n\n    if not force:\n        response = click.prompt(\n            \"🚨 This is a destructive action that will clear all aliases. Are you sure?\",\n            type=click.Choice([\"y\", \"n\"]),\n            default=\"n\",\n        )\n\n        if response == \"n\":\n            return\n\n    for alias_data in aliases:\n        try:\n            remove_alias(alias_data.alias)\n        except Exception as ex:\n            raise click.ClickException(f\"Failed to remove alias {alias_data.alias}\") from ex\n\n    click.echo(\"All aliases have been cleared.\")\n"
  },
  {
    "path": "src/algokit/cli/tui/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/cli/tui/init/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/cli/tui/init/example_selector.py",
    "content": "from pathlib import Path\nfrom typing import Any, ClassVar\n\nfrom textual.app import App\n\nfrom algokit.core.init import ALGOKIT_TEMPLATES_DIR, ALGOKIT_USER_DIR\n\nfrom .screens.example_selector_screen import ChooseExampleScreen\n\nexamples_config_path = str(\n    (Path.home() / ALGOKIT_USER_DIR / ALGOKIT_TEMPLATES_DIR / \"examples\" / \"examples.yml\").absolute()\n)\n\nUserAnswers = dict[str, Any]\n\n\nclass ExampleSelector(App):\n    SCREENS: ClassVar[dict] = {\n        \"example_selector\": lambda: ChooseExampleScreen(examples_config_path),\n    }\n    ENABLE_COMMAND_PALETTE = False\n    BINDINGS: ClassVar[list] = [\n        (\"b\", \"toggle_dark\", \"Toggle theme\"),\n        (\"q\", \"quit\", \"Quit\"),\n    ]\n    TITLE: str = \"AlgoKit Examples\"\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.user_answers: UserAnswers = {}\n\n    def on_mount(self) -> None:\n        # Disable the palette by not calling the default palette setup\n        self.push_screen(\"example_selector\")\n"
  },
  {
    "path": "src/algokit/cli/tui/init/screens/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/cli/tui/init/screens/example_selector_screen.py",
    "content": "from typing import TYPE_CHECKING\n\nfrom textual import on\nfrom textual.app import ComposeResult\nfrom textual.containers import Vertical\nfrom textual.screen import Screen\nfrom textual.widgets import Footer, Header, Label, ListItem, ListView\n\nfrom algokit.core.init import _load_algokit_examples\n\nif TYPE_CHECKING:\n    from algokit.cli.tui.init.example_selector import ExampleSelector\n\n\nclass ChooseExampleScreen(Screen):\n    app: \"ExampleSelector\"\n    \"\"\"A screen that displays available example templates based on chosen framework.\"\"\"\n\n    CSS = \"\"\"\n    .margin-bottom-1 {\n        margin-bottom: 1;\n    }\n    .examples-screen {\n        height: auto;\n        overflow-y: auto;\n        width: 100%;\n    }\n    \"\"\"\n    SUB_TITLE = \"Initialize Example\"\n\n    def __init__(self, examples_path: str) -> None:\n        super().__init__()\n        self.examples_path = examples_path\n        # Use the imported load_examples function\n        self.examples = _load_algokit_examples(examples_path)\n\n    def compose(self) -> ComposeResult:\n        yield Header(icon=\"📚\")\n\n        with Vertical(classes=\"examples-screen\") as examples_container:\n            self.examples_container = examples_container\n            yield Label(\"Choose an example:\", classes=\"margin-bottom-1\")\n            yield ListView(\n                *[\n                    ListItem(Label(f\"{example['name']} - {example['type']}\"), id=example[\"id\"])\n                    for example in self.examples\n                ],\n                id=\"input-example-choice\",\n            )\n\n        yield Footer()\n\n    @on(ListView.Selected, \"#input-example-choice\")\n    def handle_example_selection(self, event: ListView.Selected) -> None:\n        \"\"\"Handle the selection of an example using the keyboard.\"\"\"\n        selected_item = event.item.id\n        if selected_item:\n            self.app.user_answers[\"example_id\"] = selected_item\n            
self.app.exit()\n"
  },
  {
    "path": "src/algokit/core/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/core/_toml.py",
    "content": "from __future__ import annotations\n\nimport importlib\nimport sys\nimport typing as t\n\n_TOML_MODULE = \"tomllib\" if sys.version_info >= (3, 11) else \"tomli\"\n_toml = importlib.import_module(_TOML_MODULE)\n\n\ndef loads(data: str) -> dict[str, t.Any]:\n    return t.cast(\"dict[str, t.Any]\", _toml.loads(data))\n"
  },
  {
    "path": "src/algokit/core/_vendor/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/core/_vendor/auth0/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/core/_vendor/auth0/authentication/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/core/_vendor/auth0/authentication/token_verifier.py",
    "content": "\"\"\"Token Verifier module\nCommit ref: https://github.com/auth0/auth0-python/commit/423f9b35faf0b673bfb2cd68d73717964080b5c9\nFile ref: https://github.com/auth0/auth0-python/blob/master/auth0/authentication/token_verifier.py\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport time\nfrom typing import TYPE_CHECKING, Any, ClassVar\n\nimport jwt\nimport requests  # type: ignore[import-untyped]\n\nif TYPE_CHECKING:\n    from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey\n\n\nclass TokenValidationError(Exception):\n    pass\n\n\nclass SignatureVerifier:\n    \"\"\"Abstract class that will verify a given JSON web token's signature\n    using the key fetched internally given its key id.\n\n    Args:\n        algorithm (str): The expected signing algorithm (e.g. RS256).\n    \"\"\"\n\n    DISABLE_JWT_CHECKS: ClassVar[dict[str, bool]] = {\n        \"verify_signature\": True,\n        \"verify_exp\": False,\n        \"verify_nbf\": False,\n        \"verify_iat\": False,\n        \"verify_aud\": False,\n        \"verify_iss\": False,\n        \"require_exp\": False,\n        \"require_iat\": False,\n        \"require_nbf\": False,\n    }\n\n    def __init__(self, algorithm: str) -> None:\n        if not algorithm or type(algorithm) != str:\n            raise ValueError(\"algorithm must be specified.\")\n        self._algorithm = algorithm\n\n    def _fetch_key(self, key_id: str) -> str | RSAPublicKey:\n        \"\"\"Obtains the key associated to the given key id.\n        Must be implemented by subclasses.\n\n        Args:\n            key_id (str): The id of the key to fetch.\n\n        Returns:\n            the key to use for verifying a cryptographic signature\n        \"\"\"\n        raise NotImplementedError\n\n    def _get_kid(self, token: str) -> str | None:\n        \"\"\"Gets the key id from the kid claim of the header of the token\n\n        Args:\n            token (str): The JWT to get the header from.\n\n     
   Raises:\n            TokenValidationError: if the token cannot be decoded, the algorithm is invalid\n            or the token's signature doesn't match the calculated one.\n\n        Returns:\n            the key id or None\n        \"\"\"\n        try:\n            header = jwt.get_unverified_header(token)\n        except jwt.exceptions.DecodeError:\n            raise TokenValidationError(\"token could not be decoded.\")\n\n        alg = header.get(\"alg\", None)\n        if alg != self._algorithm:\n            raise TokenValidationError(\n                f'Signature algorithm of \"{alg}\" is not supported. Expected the token '\n                f'to be signed with \"{self._algorithm}\"'\n            )\n\n        return header.get(\"kid\", None)\n\n    def _decode_jwt(self, token: str, secret_or_certificate: str) -> dict[str, Any]:\n        \"\"\"Verifies and decodes the given JSON web token with the given public key or shared secret.\n\n        Args:\n            token (str): The JWT to get its signature verified.\n            secret_or_certificate (str): The public key or shared secret.\n\n        Raises:\n            TokenValidationError: if the token cannot be decoded, the algorithm is invalid\n            or the token's signature doesn't match the calculated one.\n        \"\"\"\n        try:\n            decoded = jwt.decode(\n                jwt=token,\n                key=secret_or_certificate,\n                algorithms=[self._algorithm],\n                options=self.DISABLE_JWT_CHECKS,\n            )\n        except jwt.exceptions.InvalidSignatureError:\n            raise TokenValidationError(\"Invalid token signature.\")\n        return decoded\n\n    def verify_signature(self, token: str) -> dict[str, Any]:\n        \"\"\"Verifies the signature of the given JSON web token.\n\n        Args:\n            token (str): The JWT to get its signature verified.\n\n        Raises:\n            TokenValidationError: if the token cannot be decoded, the 
algorithm is invalid\n            or the token's signature doesn't match the calculated one.\n        \"\"\"\n        kid = self._get_kid(token)\n        if kid is None:\n            kid = \"\"\n        secret_or_certificate = self._fetch_key(key_id=kid)\n\n        return self._decode_jwt(token, secret_or_certificate)  # type: ignore[arg-type]\n\n\nclass SymmetricSignatureVerifier(SignatureVerifier):\n    \"\"\"Verifier for HMAC signatures, which rely on shared secrets.\n\n    Args:\n        shared_secret (str): The shared secret used to decode the token.\n        algorithm (str, optional): The expected signing algorithm. Defaults to \"HS256\".\n    \"\"\"\n\n    def __init__(self, shared_secret: str, algorithm: str = \"HS256\") -> None:\n        super().__init__(algorithm)\n        self._shared_secret = shared_secret\n\n    def _fetch_key(self, key_id: str = \"\") -> str:\n        return self._shared_secret\n\n\nclass JwksFetcher:\n    \"\"\"Class that fetches and holds a JSON web key set.\n    This class makes use of an in-memory cache. For it to work properly, define this instance once and re-use it.\n\n    Args:\n        jwks_url (str): The url where the JWK set is located.\n        cache_ttl (str, optional): The lifetime of the JWK set cache in seconds. 
Defaults to 600 seconds.\n    \"\"\"\n\n    CACHE_TTL: ClassVar[int] = 600  # 10 min cache lifetime\n\n    def __init__(self, jwks_url: str, cache_ttl: int = CACHE_TTL) -> None:\n        self._jwks_url = jwks_url\n        self._init_cache(cache_ttl)\n\n    def _init_cache(self, cache_ttl: int) -> None:\n        self._cache_value: dict[str, RSAPublicKey] = {}\n        self._cache_date = 0.0\n        self._cache_ttl = cache_ttl\n        self._cache_is_fresh = False\n\n    def _cache_expired(self) -> bool:\n        \"\"\"Checks if the cache is expired\n\n        Returns:\n            True if it should use the cache.\n        \"\"\"\n        return self._cache_date + self._cache_ttl < time.time()\n\n    def _cache_jwks(self, jwks: dict[str, Any]) -> None:\n        \"\"\"Cache the response of the JWKS request\n\n        Args:\n            jwks (dict): The JWKS\n        \"\"\"\n        self._cache_value = self._parse_jwks(jwks)\n        self._cache_is_fresh = True\n        self._cache_date = time.time()\n\n    def _fetch_jwks(self, force: bool = False) -> dict[str, RSAPublicKey]:\n        \"\"\"Attempts to obtain the JWK set from the cache, as long as it's still valid.\n        When not, it will perform a network request to the jwks_url to obtain a fresh result\n        and update the cache value with it.\n\n        Args:\n            force (bool, optional): whether to ignore the cache and force a network request or not. 
Defaults to False.\n        \"\"\"\n        if force or self._cache_expired():\n            self._cache_value = {}\n            response = requests.get(self._jwks_url)\n            if response.ok:\n                jwks: dict[str, Any] = response.json()\n                self._cache_jwks(jwks)\n            return self._cache_value\n\n        self._cache_is_fresh = False\n        return self._cache_value\n\n    @staticmethod\n    def _parse_jwks(jwks: dict[str, Any]) -> dict[str, RSAPublicKey]:\n        \"\"\"\n        Converts a JWK string representation into a binary certificate in PEM format.\n        \"\"\"\n        keys: dict[str, RSAPublicKey] = {}\n\n        for key in jwks[\"keys\"]:\n            # noinspection PyUnresolvedReferences\n            # requirement already includes cryptography -> pyjwt[crypto]\n            rsa_key: RSAPublicKey = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(key))\n            keys[key[\"kid\"]] = rsa_key\n        return keys\n\n    def get_key(self, key_id: str) -> RSAPublicKey:\n        \"\"\"Obtains the JWK associated with the given key id.\n\n        Args:\n            key_id (str): The id of the key to fetch.\n\n        Returns:\n            the JWK associated with the given key id.\n\n        Raises:\n            TokenValidationError: when a key with that id cannot be found\n        \"\"\"\n        keys = self._fetch_jwks()\n\n        if keys and key_id in keys:\n            return keys[key_id]\n\n        if not self._cache_is_fresh:\n            keys = self._fetch_jwks(force=True)\n            if keys and key_id in keys:\n                return keys[key_id]\n        raise TokenValidationError(f'RSA Public Key with ID \"{key_id}\" was not found.')\n\n\nclass AsymmetricSignatureVerifier(SignatureVerifier):\n    \"\"\"Verifier for RSA signatures, which rely on public key certificates.\n\n    Args:\n        jwks_url (str): The url where the JWK set is located.\n        algorithm (str, optional): The expected signing 
algorithm. Defaults to \"RS256\".\n        cache_ttl (int, optional): The lifetime of the JWK set cache in seconds. Defaults to 600 seconds.\n    \"\"\"\n\n    def __init__(\n        self,\n        jwks_url: str,\n        algorithm: str = \"RS256\",\n        cache_ttl: int = JwksFetcher.CACHE_TTL,\n    ) -> None:\n        super().__init__(algorithm)\n        self._fetcher = JwksFetcher(jwks_url, cache_ttl)\n\n    def _fetch_key(self, key_id: str) -> RSAPublicKey:\n        return self._fetcher.get_key(key_id)\n\n\nclass TokenVerifier:\n    \"\"\"Class that verifies ID tokens following the steps defined in the OpenID Connect spec.\n    An OpenID Connect ID token is not meant to be consumed until it's verified.\n\n    Args:\n        signature_verifier (SignatureVerifier): The instance that knows how to verify the signature.\n        issuer (str): The expected issuer claim value.\n        audience (str): The expected audience claim value.\n        leeway (int, optional): The clock skew to accept when verifying date related claims in seconds.\n        Defaults to 60 seconds.\n    \"\"\"\n\n    def __init__(\n        self,\n        signature_verifier: SignatureVerifier,\n        issuer: str,\n        audience: str,\n        leeway: int = 0,\n    ) -> None:\n        if not signature_verifier or not isinstance(signature_verifier, SignatureVerifier):\n            raise TypeError(\"signature_verifier must be an instance of SignatureVerifier.\")\n\n        self.iss = issuer\n        self.aud = audience\n        self.leeway = leeway\n        self._sv = signature_verifier\n        self._clock = None  # visible for testing\n\n    def verify(\n        self,\n        token: str,\n        nonce: str | None = None,\n        max_age: int | None = None,\n        organization: str | None = None,\n    ) -> dict[str, Any]:\n        \"\"\"Attempts to verify the given ID token, following the steps defined in the OpenID Connect spec.\n\n        Args:\n            token (str): The JWT to 
verify.\n            nonce (str, optional): The nonce value sent during authentication.\n            max_age (int, optional): The max_age value sent during authentication.\n            organization (str, optional): The expected organization ID (org_id) or organization name (org_name) claim value. This should be specified\n            when logging in to an organization.\n\n        Returns:\n            the decoded payload from the token\n\n        Raises:\n            TokenValidationError: when the token cannot be decoded, the token signing algorithm is not the expected one,\n            the token signature is invalid or the token has a claim missing or with unexpected value.\n        \"\"\"\n\n        # Verify token presence\n        if not token or not isinstance(token, str):\n            raise TokenValidationError(\"ID token is required but missing.\")\n\n        # Verify algorithm and signature\n        payload = self._sv.verify_signature(token)\n\n        # Verify claims\n        self._verify_payload(payload, nonce, max_age, organization)\n\n        return payload\n\n    def _verify_payload(\n        self,\n        payload: dict[str, Any],\n        nonce: str | None = None,\n        max_age: int | None = None,\n        organization: str | None = None,\n    ) -> None:\n        # Issuer\n        if \"iss\" not in payload or not isinstance(payload[\"iss\"], str):\n            raise TokenValidationError(\"Issuer (iss) claim must be a string present in the ID token\")\n        if payload[\"iss\"] != self.iss:\n            raise TokenValidationError(\n                'Issuer (iss) claim mismatch in the ID token; expected \"{}\", ' 'found \"{}\"'.format(\n                    self.iss, payload[\"iss\"]\n                )\n            )\n\n        # Subject\n        if \"sub\" not in payload or not isinstance(payload[\"sub\"], str):\n            raise TokenValidationError(\"Subject (sub) claim must be a string present in the ID token\")\n\n        # Audience\n        if 
\"aud\" not in payload or not isinstance(payload[\"aud\"], (str, list)):\n            raise TokenValidationError(\n                \"Audience (aud) claim must be a string or array of strings present in\" \" the ID token\"\n            )\n\n        if isinstance(payload[\"aud\"], list) and self.aud not in payload[\"aud\"]:\n            payload_audiences = \", \".join(payload[\"aud\"])\n            raise TokenValidationError(\n                f'Audience (aud) claim mismatch in the ID token; expected \"{self.aud}\" but was '\n                f'not one of \"{payload_audiences}\"'\n            )\n        elif isinstance(payload[\"aud\"], str) and payload[\"aud\"] != self.aud:\n            raise TokenValidationError(\n                'Audience (aud) claim mismatch in the ID token; expected \"{}\" ' 'but found \"{}\"'.format(\n                    self.aud, payload[\"aud\"]\n                )\n            )\n\n        # --Time validation (epoch)--\n        now = self._clock or time.time()\n        leeway = self.leeway\n\n        # Expires at\n        if \"exp\" not in payload or not isinstance(payload[\"exp\"], int):\n            raise TokenValidationError(\"Expiration Time (exp) claim must be a number present in the ID token\")\n\n        exp_time = payload[\"exp\"] + leeway\n        if now > exp_time:\n            raise TokenValidationError(\n                f\"Expiration Time (exp) claim error in the ID token; current time ({now})\"\n                f\" is after expiration time ({exp_time})\"\n            )\n\n        # Issued at\n        if \"iat\" not in payload or not isinstance(payload[\"iat\"], int):\n            raise TokenValidationError(\"Issued At (iat) claim must be a number present in the ID token\")\n\n        # Nonce\n        if nonce:\n            if \"nonce\" not in payload or not isinstance(payload[\"nonce\"], str):\n                raise TokenValidationError(\"Nonce (nonce) claim must be a string present in the ID token\")\n            if 
payload[\"nonce\"] != nonce:\n                raise TokenValidationError(\n                    'Nonce (nonce) claim mismatch in the ID token; expected \"{}\", ' 'found \"{}\"'.format(\n                        nonce, payload[\"nonce\"]\n                    )\n                )\n\n        # Organization\n        if organization:\n            if organization.startswith(\"org_\"):\n                if \"org_id\" not in payload or not isinstance(payload[\"org_id\"], str):\n                    raise TokenValidationError(\n                        \"Organization (org_id) claim must be a string present in the ID\" \" token\"\n                    )\n                if payload[\"org_id\"] != organization:\n                    raise TokenValidationError(\n                        \"Organization (org_id) claim mismatch in the ID token; expected\" ' \"{}\", found \"{}\"'.format(\n                            organization, payload[\"org_id\"]\n                        )\n                    )\n            else:\n                if \"org_name\" not in payload or not isinstance(payload[\"org_name\"], str):\n                    raise TokenValidationError(\n                        \"Organization (org_name) claim must be a string present in the ID\" \" token\"\n                    )\n                if payload[\"org_name\"] != organization.lower():\n                    raise TokenValidationError(\n                        \"Organization (org_name) claim mismatch in the ID token; expected\" ' \"{}\", found \"{}\"'.format(\n                            organization, payload[\"org_name\"]\n                        )\n                    )\n\n        # Authorized party\n        if isinstance(payload[\"aud\"], list) and len(payload[\"aud\"]) > 1:\n            if \"azp\" not in payload or not isinstance(payload[\"azp\"], str):\n                raise TokenValidationError(\n                    \"Authorized Party (azp) claim must be a string present in the ID\"\n                    \" token when 
Audience (aud) claim has multiple values\"\n                )\n            if payload[\"azp\"] != self.aud:\n                raise TokenValidationError(\n                    \"Authorized Party (azp) claim mismatch in the ID token; expected\" ' \"{}\", found \"{}\"'.format(\n                        self.aud, payload[\"azp\"]\n                    )\n                )\n\n        # Authentication time\n        if max_age:\n            if \"auth_time\" not in payload or not isinstance(payload[\"auth_time\"], int):\n                raise TokenValidationError(\n                    \"Authentication Time (auth_time) claim must be a number present in\"\n                    \" the ID token when Max Age (max_age) is specified\"\n                )\n\n            auth_valid_until = payload[\"auth_time\"] + max_age + leeway\n            if now > auth_valid_until:\n                raise TokenValidationError(\n                    \"Authentication Time (auth_time) claim in the ID token indicates\"\n                    \" that too much time has passed since the last end-user\"\n                    f\" authentication. Current time ({now}) is after last auth at ({auth_valid_until})\"\n                )\n"
  },
  {
    "path": "src/algokit/core/atomic_write.py",
    "content": "import contextlib\nimport os\nimport shutil\nimport stat\nfrom pathlib import Path\nfrom typing import Literal\n\n\ndef atomic_write(file_contents: str, target_file_path: Path, mode: Literal[\"a\", \"w\"] = \"w\") -> None:\n    # if target path is a symlink, we want to use the real path as the replacement target,\n    # otherwise we'd just be overwriting the symlink\n    target_file_path = target_file_path.resolve()\n    temp_file_path = target_file_path.with_suffix(f\"{target_file_path.suffix}.algokit~\")\n    try:\n        # preserve file metadata if it already exists\n        with contextlib.suppress(FileNotFoundError):\n            _copy_with_metadata(target_file_path, temp_file_path)\n        # write content to new temp file\n        with temp_file_path.open(mode=mode, encoding=\"utf-8\") as fp:\n            fp.write(file_contents)\n        # overwrite destination with the temp file\n        temp_file_path.replace(target_file_path)\n    finally:\n        temp_file_path.unlink(missing_ok=True)\n\n\ndef _copy_with_metadata(source: Path, target: Path) -> None:\n    # copy content, stat-info (mode too), timestamps...\n    shutil.copy2(source, target)\n    # try copy owner+group if platform supports it\n    if hasattr(os, \"chown\"):\n        # copy owner and group\n        st = source.stat()\n        os.chown(target, st[stat.ST_UID], st[stat.ST_GID])\n"
  },
  {
    "path": "src/algokit/core/codespace.py",
    "content": "import json\nimport logging\nimport subprocess\nimport tempfile\nimport time\nfrom datetime import datetime\nfrom functools import cache\nfrom pathlib import Path\nfrom typing import Any\n\nimport click\nimport httpx\n\nfrom algokit.core import proc, questionary_extensions\nfrom algokit.core.utils import is_windows, run_with_animation\n\nlogger = logging.getLogger(__name__)\n\nGH_WEBI_INSTALLER_URL = \"https://webi.sh/gh\"\nCODESPACE_PORT_FORWARD_RETRY_SECONDS = 5\nCODESPACE_NAME_PREFIX = \"algokit-localnet\"\nCODESPACE_CREATE_TIMEOUT = 60\nCODESPACE_CREATE_RETRY_TIMEOUT = 10\nCODESPACE_CONTAINER_AVAILABLE = \"Available\"\nCODESPACE_TOO_MANY_ERROR_MSG = \"too many codespaces\"\nCODESPACE_LOADING_MSG = \"Provisioning a new codespace instance...\"\n\n# https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces\nCODESPACE_FORWARD_TIMEOUT_MIN = 1\nCODESPACE_FORWARD_TIMEOUT_MAX = 240\n\n\ndef _is_port_in_use(port: int) -> bool:\n    import socket\n\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        return s.connect_ex((\"localhost\", port)) == 0\n\n\ndef _find_next_available_port(start_port: int, ignore_ports: list[int]) -> int:\n    port = start_port\n    while _is_port_in_use(port) or port in ignore_ports:\n        port += 1\n    return port\n\n\ndef _try_forward_ports_once(ports: list[tuple[int, int]], codespace_name: str, timeout: int) -> bool:\n    command = [\n        \"gh\",\n        \"codespace\",\n        \"ports\",\n        \"forward\",\n        \"--codespace\",\n        codespace_name,\n        *(f\"{external_port}:{internal_port}\" for internal_port, external_port in ports),\n    ]\n\n    try:\n        logger.info(\n            f\"NOTE: This codespace port-forwarding attempt will auto shut down at \"\n            f\"{datetime.fromtimestamp(time.time() + timeout).astimezone().strftime('%Y-%m-%d %H:%M:%S %Z')}.\"\n            \"  See 
https://docs.github.com/en/codespaces/overview#pricing for more details.\"\n        )\n        response = proc.run_interactive(command, timeout=timeout)\n        return response.exit_code == 0\n    except subprocess.TimeoutExpired as e:\n        logger.debug(f\"Timed out trying to forward ports for codespace {codespace_name} {e}\")\n        raise e\n    except Exception as e:\n        logger.error(f\"Port forwarding attempt failed with error: {e}\")\n        return False\n\n\ndef _write_temp_script(script_content: str, script_extension: str) -> Path:\n    \"\"\"\n    Writes the script content to a temporary file and returns the file path.\n    \"\"\"\n    with tempfile.NamedTemporaryFile(delete=False, suffix=f\".{script_extension}\", mode=\"w\") as tmp_file:\n        script_path = Path(tmp_file.name)\n        script_path.write_text(script_content)\n        script_path.chmod(0o755)\n    return script_path\n\n\ndef _run_powershell_script(script_path: Path) -> None:\n    \"\"\"\n    Runs the PowerShell script.\n    \"\"\"\n    _ensure_command_available(\n        [\"powershell\", \"-command\", \"(Get-Variable PSVersionTable -ValueOnly).PSVersion\"],\n        \"PowerShell is required but not found on this system. 
Refer to `https://aka.ms/install-powershell` \"\n        \"for details.\",\n    )\n    proc.run([\"powershell\", \"-File\", str(script_path)])\n\n\ndef _run_unix_script(script_path: Path) -> None:\n    \"\"\"\n    Runs the Unix shell script.\n    \"\"\"\n    shell = _find_available_shell()\n    proc.run([shell, str(script_path)])\n\n\ndef _ensure_command_available(command: list[str], error_message: str) -> None:\n    \"\"\"\n    Ensures that the specified command is available on the system.\n    \"\"\"\n    try:\n        proc.run(command)\n    except Exception as e:\n        raise RuntimeError(error_message) from e\n\n\ndef _find_available_shell() -> str:\n    \"\"\"\n    Finds an available shell (bash or zsh) on the system.\n    \"\"\"\n    try:\n        _ensure_command_available(\n            [\"bash\", \"--version\"],\n            \"Bash is required but not found on this system. Checking whether zsh is available...\",\n        )\n        return \"bash\"\n    except RuntimeError:\n        _ensure_command_available(\n            [\"zsh\", \"--version\"],\n            \"Neither Bash nor Zsh is found on this Linux system. 
\"\n            \"Please make sure to install one of them before running \"\n            \"`algokit localnet codespace`.\",\n        )\n\n        return \"zsh\"\n\n\ndef ensure_github_cli_installed() -> None:\n    \"\"\"\n    Ensures GitHub CLI (`gh`) is installed, installing it if necessary.\n    \"\"\"\n    try:\n        proc.run([\"gh\", \"--version\"])\n    except Exception as err:\n        logger.info(\"Installing gh...\")\n        try:\n            install_github_cli_via_webi()\n        except Exception as e:\n            logger.error(f\"Failed to automatically install gh cli: {e}\")\n            logger.error(\n                \"Please install `gh cli` manually by following official documentation at https://cli.github.com/\"\n            )\n            raise\n        logger.info(\"gh installed successfully!\")\n        logger.warning(\n            \"Restart your terminal to activate the `gh` CLI and re-run `algokit localnet codespace` to get started...\"\n        )\n        raise click.exceptions.Exit(code=0) from err\n\n\ndef install_github_cli_via_webi() -> None:\n    \"\"\"\n    Installs `gh` using the `webi.sh` script.\n    \"\"\"\n    response = httpx.get(f\"https://webi.{'ms' if is_windows() else 'sh'}/gh\")\n    response.raise_for_status()\n\n    script_extension = \"ps1\" if is_windows() else \"sh\"\n    script_path = _write_temp_script(response.text, script_extension)\n\n    if is_windows():\n        _run_powershell_script(script_path)\n    else:\n        _run_unix_script(script_path)\n\n\n@cache\ndef is_github_cli_authenticated() -> bool:\n    \"\"\"\n    Checks if the user is authenticated with GitHub CLI and has the 'codespace' scope.\n    \"\"\"\n    try:\n        result = proc.run([\"gh\", \"auth\", \"status\"])\n\n        # Normalize output for easier parsing\n        normalized_output = \" \".join(result.output.splitlines()).lower()\n\n        # Check for authentication and 'codespace' scope\n        authenticated = \"logged in\" in 
normalized_output\n        has_codespace_scope = \"codespace\" in normalized_output\n\n        if not authenticated:\n            logger.error(\"GitHub CLI authentication check failed. Please login with `gh auth login -s codespace`.\")\n        if not has_codespace_scope:\n            logger.error(\n                \"Required 'codespace' scope is missing. \"\n                \"Please ensure you have the 'codespace' scope by running \"\n                \"`gh auth refresh-token -s codespace`.\"\n            )\n\n        return authenticated and has_codespace_scope\n    except subprocess.CalledProcessError:\n        logger.error(\"GitHub CLI authentication check failed. Please login with `gh auth login -s codespace`.\")\n        return False\n\n\ndef authenticate_with_github() -> bool:\n    \"\"\"\n    Logs the user into GitHub Codespace.\n    \"\"\"\n    if is_github_cli_authenticated():\n        return True\n\n    result = proc.run_interactive(\n        [\"gh\", \"auth\", \"login\", \"-s\", \"codespace\"],\n    )\n    if result.exit_code != 0:\n        logger.error(\"Failed to start LocalNet in GitHub Codespace\")\n        return False\n    logger.info(\"Logged in to GitHub Codespace\")\n    return True\n\n\ndef list_github_codespaces() -> list[str]:\n    \"\"\"\n    Lists available GitHub Codespaces.\n    \"\"\"\n    if not is_github_cli_authenticated():\n        return []\n\n    result = proc.run([\"gh\", \"codespace\", \"list\"], pass_stdin=True)\n\n    if result.exit_code != 0:\n        logger.error(\"Failed to log in to GitHub Codespaces. 
Run with -v flag for more details.\")\n        logger.debug(result.output, result.exit_code)\n        return []\n\n    return [line.split(\"\\t\")[0] for line in result.output.splitlines()]\n\n\ndef forward_ports_for_codespace(  # noqa: PLR0913\n    codespace_name: str,\n    algod_port: int,\n    kmd_port: int,\n    indexer_port: int,\n    *,\n    max_retries: int = 3,\n    timeout: int = CODESPACE_FORWARD_TIMEOUT_MAX * 60,\n) -> None:\n    \"\"\"\n    Forwards specified ports for a GitHub Codespace with retries.\n    \"\"\"\n    ports = [\n        (algod_port, 4001),\n        (kmd_port, 4002),\n        (indexer_port, 8980),\n    ]\n\n    occupied_ports = [port for port in [algod_port, kmd_port, indexer_port] if _is_port_in_use(port)]\n\n    if occupied_ports:\n        logger.warning(f\"Ports {', '.join(map(str, occupied_ports))} are already in use!\")\n        if questionary_extensions.prompt_confirm(\"Retry on next available ports?\", default=True):\n            logger.warning(\n                \"NOTE: Ensure to update the port numbers in your Algorand related configuration files (if any).\"\n            )\n            next_algod_port = _find_next_available_port(algod_port, occupied_ports)\n            next_kmd_port = _find_next_available_port(kmd_port, [next_algod_port, *occupied_ports])\n            next_indexer_port = _find_next_available_port(\n                indexer_port, [next_algod_port, next_kmd_port, *occupied_ports]\n            )\n            logger.info(\n                f\"Retrying with ports {next_algod_port} (was {algod_port}), \"\n                f\"{next_kmd_port} (was {kmd_port}), {next_indexer_port} (was {indexer_port})\"\n            )\n            return forward_ports_for_codespace(\n                codespace_name,\n                next_algod_port if algod_port in occupied_ports else algod_port,\n                next_kmd_port if kmd_port in occupied_ports else kmd_port,\n                next_indexer_port if indexer_port in occupied_ports 
else indexer_port,\n                max_retries=max_retries,\n                timeout=timeout,\n            )\n        return None\n\n    initial_timestamp = time.time()\n    for attempt in reversed(range(1, max_retries + 1)):\n        new_timeout = timeout - (time.time() - initial_timestamp)\n        if new_timeout < 0:\n            raise subprocess.TimeoutExpired(cmd=\"gh codespace ports forward\", timeout=timeout)\n        if _try_forward_ports_once(ports, codespace_name, int(new_timeout)):\n            logger.info(\"Port forwarding successful.\")\n            break\n        logger.error(\"Port forwarding failed!\")\n        if attempt > 1:\n            run_with_animation(\n                time.sleep, f\"Retrying ({attempt - 1} attempts left)...\", CODESPACE_PORT_FORWARD_RETRY_SECONDS\n            )\n    else:\n        raise Exception(\n            \"Port forwarding failed! Make sure you are not already running a localnet container on those ports.\"\n        )\n\n\ndef delete_codespaces_with_prefix(codespaces: list[str], default_name: str) -> None:\n    \"\"\"\n    Deletes GitHub Codespaces that start with the specified default name.\n\n    Args:\n        codespaces (list[str]): List of codespace names.\n        default_name (str): The prefix to match for deletion.\n    \"\"\"\n    for codespace in filter(lambda cs: cs.startswith(default_name), codespaces):\n        proc.run([\"gh\", \"codespace\", \"delete\", \"--codespace\", codespace, \"--force\"], pass_stdin=True)\n        logger.info(f\"Deleted unused codespace {codespace}\")\n\n\ndef is_codespace_ready(codespace_name: str) -> dict[str, Any]:\n    \"\"\"\n    Checks if the specified codespace is ready.\n\n    Args:\n        codespace_name (str): The name of the codespace to check.\n\n    Returns:\n        dict[str, Any] | None: The codespace data if ready, None otherwise.\n    \"\"\"\n    max_retries = 10\n    while max_retries > 0:\n        max_retries -= 1\n\n        status_result = proc.run(\n            
[\"gh\", \"codespace\", \"list\", \"--json\", \"displayName\", \"--json\", \"state\", \"--json\", \"name\"],\n            pass_stdin=True,\n        )\n        try:\n            codespace_data: dict[str, Any] = next(\n                data for data in json.loads(status_result.output.strip()) if data[\"displayName\"] == codespace_name\n            )\n        except StopIteration:\n            run_with_animation(\n                time.sleep,\n                CODESPACE_LOADING_MSG,\n                CODESPACE_CREATE_RETRY_TIMEOUT,\n            )\n            continue\n\n        if status_result.exit_code == 0 and codespace_data and codespace_data[\"state\"] == CODESPACE_CONTAINER_AVAILABLE:\n            return codespace_data\n    raise RuntimeError(\n        \"After 10 attempts, codespace isn't ready. Avoid codespace deletion and retry with --codespace-name.\"\n    )\n\n\ndef delete_codespace(*, codespace_data: dict[str, Any], force: bool) -> None:\n    \"\"\"\n    Deletes the specified codespace.\n\n    Args:\n        codespace_data (dict[str, Any]): The codespace data.\n        force (bool): Whether to force deletion without confirmation.\n    \"\"\"\n    if codespace_data and (force or questionary_extensions.prompt_confirm(\"Delete the codespace?\", default=True)):\n        logger.warning(f\"Deleting the `{codespace_data['name']}` codespace...\")\n        proc.run(\n            [\"gh\", \"codespace\", \"delete\", \"--codespace\", codespace_data[\"name\"], \"--force\"],\n            pass_stdin=True,\n        )\n\n\ndef create_codespace(repo_url: str, codespace_name: str, machine: str, timeout: int) -> None:\n    \"\"\"\n    Creates a GitHub Codespace with the specified repository, display name, and machine type.\n\n    Args:\n        repo_url (str): The URL of the repository for the codespace.\n        codespace_name (str): The display name for the codespace.\n        machine (str): The machine type for the codespace.\n    \"\"\"\n    response = proc.run(\n        [\n  
          \"gh\",\n            \"codespace\",\n            \"create\",\n            \"--repo\",\n            repo_url,\n            \"--display-name\",\n            codespace_name,\n            \"--machine\",\n            machine,\n            \"--idle-timeout\",\n            f\"{timeout}m\",\n        ],\n        pass_stdin=True,\n    )\n    if response.exit_code != 0 and CODESPACE_TOO_MANY_ERROR_MSG in response.output.lower():\n        raise Exception(\n            \"Creation failed: User's codespace limit reached. Delete unused codespaces using `gh` cli and try again.\"\n        )\n\n    run_with_animation(\n        time.sleep,\n        CODESPACE_LOADING_MSG,\n        CODESPACE_CREATE_TIMEOUT,\n    )\n"
  },
  {
    "path": "src/algokit/core/compilers/python.py",
    "content": "from collections.abc import Iterator\n\nfrom algokit.core.proc import run\nfrom algokit.core.utils import extract_version_triple, find_valid_pipx_command\n\n\ndef find_valid_puyapy_command(version: str | None) -> list[str]:\n    return _find_puyapy_command_at_version(version) if version is not None else _find_puyapy_command()\n\n\ndef _find_puyapy_command_at_version(version: str) -> list[str]:\n    \"\"\"\n    Find puyapy command with a specific version.\n    If the puyapy version isn't installed, install it with pipx run.\n    \"\"\"\n    for puyapy_command in _get_candidate_puyapy_commands():\n        try:\n            puyapy_version_result = run([*puyapy_command, \"--version\"])\n        except OSError:\n            pass  # in case of path/permission issues, go to next candidate\n        else:\n            if puyapy_version_result.exit_code == 0 and (\n                extract_version_triple(version) == extract_version_triple(puyapy_version_result.output)\n            ):\n                return puyapy_command\n\n    pipx_command = find_valid_pipx_command(\n        \"Unable to find pipx install so that the `PuyaPy` compiler can be run; \"\n        \"please install pipx via https://pypa.github.io/pipx/ \"\n        \"and then try `algokit compile python ...` again.\"\n    )\n\n    return [\n        *pipx_command,\n        \"run\",\n        f\"--spec=puyapy=={version}\",\n        \"puyapy\",\n    ]\n\n\ndef _find_puyapy_command() -> list[str]:\n    \"\"\"\n    Find puyapy command.\n    If puyapy isn't installed, install the latest version with pipx.\n    \"\"\"\n    for puyapy_command in _get_candidate_puyapy_commands():\n        try:\n            puyapy_help_result = run([*puyapy_command, \"--version\"])\n        except OSError:\n            pass  # in case of path/permission issues, go to next candidate\n        else:\n            if puyapy_help_result.exit_code == 0:\n                return puyapy_command\n\n    pipx_command = 
find_valid_pipx_command(\n        \"Unable to find pipx install so that the `PuyaPy` compiler can be run; \"\n        \"please install pipx via https://pypa.github.io/pipx/ \"\n        \"and then try `algokit compile python ...` again.\"\n    )\n    return [\n        *pipx_command,\n        \"run\",\n        \"--spec=puyapy\",\n        \"puyapy\",\n    ]\n\n\ndef _get_candidate_puyapy_commands() -> Iterator[list[str]]:\n    # when puyapy is installed at the project level\n    yield [\"poetry\", \"run\", \"puyapy\"]\n    # when puyapy is installed at the global level\n    yield [\"puyapy\"]\n"
  },
  {
    "path": "src/algokit/core/compilers/typescript.py",
    "content": "from algokit.core.proc import run\nfrom algokit.core.utils import extract_semantic_version, get_npm_command\n\nPUYATS_NPM_PACKAGE = \"@algorandfoundation/puya-ts\"\n\n\ndef find_valid_puyats_command(version: str | None) -> list[str]:\n    return _find_puyats_command(version)\n\n\ndef _find_project_puyats_command(\n    npm_command: list[str], npx_command: list[str], version: str | None\n) -> list[str] | None:\n    \"\"\"\n    Try to find PuyaTs command installed at the project level.\n    \"\"\"\n    try:\n        result = run([*npm_command, \"ls\", \"--no-unicode\"])\n        # Normally we would check the exit code, however `npm ls` may return a non zero exit code\n        # when certain dependencies are not met. We still want to continue processing.\n        if result.output != \"\":\n            compile_command = [*npx_command, PUYATS_NPM_PACKAGE]\n            for line in result.output.splitlines():\n                if PUYATS_NPM_PACKAGE in line:\n                    if \"UNMET DEPENDENCY\" in line:\n                        raise ModuleNotFoundError(\n                            f\"{PUYATS_NPM_PACKAGE} was detected in the project, but is not installed.\"\n                        )\n                    if version is not None:\n                        installed_version = extract_semantic_version(line)\n                        if version == installed_version:\n                            return compile_command\n                    else:\n                        return compile_command\n    except OSError:\n        pass\n    except ValueError:\n        pass\n\n    return None\n\n\ndef _find_global_puyats_command(\n    npm_command: list[str], npx_command: list[str], version: str | None\n) -> list[str] | None:\n    \"\"\"\n    Try to find PuyaTs command installed globally.\n    \"\"\"\n    return _find_project_puyats_command([*npm_command, \"--global\"], npx_command, version)\n\n\ndef _find_puyats_command(version: str | None) -> list[str]:\n    \"\"\"\n  
  Find puyats command.\n    First checks if a matching version is installed at the project level, then uses it.\n    Then checks if a matching version is installed at the global level, then uses it.\n    Otherwise, runs the matching version via npx.\n    \"\"\"\n    npm_command = get_npm_command(\n        f\"Unable to find npm install so that the `{PUYATS_NPM_PACKAGE}` can be run; \"\n        \"please install npm via https://docs.npmjs.com/downloading-and-installing-node-js-and-npm \"\n        \"and then try `algokit compile typescript ...` again.\",\n    )\n    npx_command = get_npm_command(\n        f\"Unable to find npx so that the `{PUYATS_NPM_PACKAGE}` compiler can be run; \"\n        \"please make sure `npx` is installed and try `algokit compile typescript ...` again.\"\n        \"`npx` is automatically installed with `node` starting with version 8.2.0 and above.\",\n        is_npx=True,\n    )\n\n    # Try to find at project level first\n    project_result = _find_project_puyats_command(npm_command, npx_command, version)\n    if project_result is not None:\n        try:\n            puyats_version_result = run([*project_result, \"--version\"])\n            if puyats_version_result.exit_code == 0:\n                return [*project_result]\n        except OSError:\n            pass  # In case of path/permission issues, continue to the next candidate\n\n    # Try to find at global level\n    global_result = _find_global_puyats_command(npm_command, npx_command, version)\n    if global_result is not None:\n        try:\n            puyats_version_result = run([*global_result, \"--version\"])\n            if puyats_version_result.exit_code == 0:\n                return [*global_result]\n        except OSError:\n            pass  # In case of path/permission issues, fall back to npx\n\n    # When not installed or available, run via npx\n    return [*npx_command, \"-y\", f\"{PUYATS_NPM_PACKAGE}{'@' + version if version is not None else ''}\"]\n"
  },
  {
    "path": "src/algokit/core/conf.py",
    "content": "import logging\nimport os\nimport platform\nimport typing as t\nfrom importlib import metadata\nfrom pathlib import Path\n\nfrom algokit.core._toml import loads as toml_loads\n\nPACKAGE_NAME = \"algokit\"\nALGOKIT_CONFIG = \".algokit.toml\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_app_config_dir() -> Path:\n    \"\"\"Get the application config files location - things that should persist, and potentially follow a user\"\"\"\n    os_type = platform.system().lower()\n    if os_type == \"windows\":\n        config_dir = os.getenv(\"APPDATA\")\n    else:\n        config_dir = os.getenv(\"XDG_CONFIG_HOME\")\n    if config_dir is None:\n        config_dir = \"~/.config\"\n    return _get_relative_app_path(config_dir)\n\n\ndef get_app_state_dir() -> Path:\n    \"\"\"Get the application state files location - things the user wouldn't normally interact with directly\"\"\"\n    os_type = platform.system().lower()\n    if os_type == \"windows\":\n        state_dir = os.getenv(\"LOCALAPPDATA\")\n    elif os_type == \"darwin\":\n        state_dir = \"~/Library/Application Support\"\n    else:\n        state_dir = os.getenv(\"XDG_STATE_HOME\")\n    if state_dir is None:\n        state_dir = \"~/.local/state\"\n    return _get_relative_app_path(state_dir)\n\n\ndef _get_relative_app_path(base_dir: str) -> Path:\n    path = Path(base_dir).expanduser()\n    result = path / PACKAGE_NAME\n    result.mkdir(parents=True, exist_ok=True)\n    # resolve path in case of UWP sandbox redirection\n    return result.resolve()\n\n\ndef get_current_package_version() -> str:\n    return metadata.version(PACKAGE_NAME)\n\n\ndef get_algokit_config(*, project_dir: Path | None = None, verbose_validation: bool = False) -> dict[str, t.Any] | None:\n    \"\"\"\n    Load and parse a TOML configuration file. 
Will never throw.\n    :param project_dir: Project directory path.\n    :param verbose_validation: Whether to warn user if toml validation failed.\n    :return: A dictionary containing the configuration or None if not found.\n    \"\"\"\n    project_dir = project_dir or Path.cwd()\n    config_path = project_dir / ALGOKIT_CONFIG\n    logger.debug(f\"Attempting to load project config from {config_path}\")\n    try:\n        config_text = config_path.read_text(\"utf-8\")\n    except FileNotFoundError:\n        logger.debug(f\"No {ALGOKIT_CONFIG} file found in the project directory.\")\n        return None\n    except Exception as ex:\n        logger.debug(f\"Unexpected error reading {ALGOKIT_CONFIG} file: {ex}\", exc_info=True)\n        return None\n    try:\n        return toml_loads(config_text)\n    except Exception as ex:\n        if verbose_validation:\n            logger.warning(f\"{ALGOKIT_CONFIG} file at {project_dir} is not valid toml! Skipping...\", exc_info=True)\n        else:\n            logger.debug(f\"Error parsing {ALGOKIT_CONFIG} file: {ex}\", exc_info=True)\n        return None\n"
  },
  {
    "path": "src/algokit/core/config_commands/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/core/config_commands/container_engine.py",
    "content": "import enum\nimport logging\n\nimport click\nimport questionary\n\nfrom algokit.core.conf import get_app_config_dir\n\nlogger = logging.getLogger(__name__)\n\nCONTAINER_ENGINE_CONFIG_FILE = get_app_config_dir() / \"active-container-engine\"\n\n\nclass ContainerEngine(str, enum.Enum):\n    DOCKER = \"docker\"\n    PODMAN = \"podman\"\n\n    def __str__(self) -> str:\n        return self.value\n\n\ndef get_container_engine() -> str:\n    if CONTAINER_ENGINE_CONFIG_FILE.exists():\n        return CONTAINER_ENGINE_CONFIG_FILE.read_text().strip()\n    return str(ContainerEngine.DOCKER)\n\n\ndef save_container_engine(engine: str) -> None:\n    if engine not in ContainerEngine:\n        raise ValueError(f\"Invalid container engine: {engine}\")\n    CONTAINER_ENGINE_CONFIG_FILE.write_text(engine)\n\n\n@click.command(\"container-engine\", short_help=\"Configure the container engine for AlgoKit LocalNet.\")\n@click.argument(\"engine\", required=False, type=click.Choice([\"docker\", \"podman\"]))\n@click.option(\n    \"--force\",\n    \"-f\",\n    is_flag=True,\n    required=False,\n    default=False,\n    type=click.BOOL,\n    help=(\"Skip confirmation prompts. Defaults to 'yes' to all prompts.\"),\n)\ndef container_engine_configuration_command(*, engine: str | None, force: bool) -> None:\n    \"\"\"Set the default container engine for use by AlgoKit CLI to run LocalNet images.\"\"\"\n    from algokit.core.sandbox import ComposeSandbox\n\n    if engine is None:\n        current_engine = get_container_engine()\n        choices = [\n            f\"Docker {'(Active)' if current_engine == ContainerEngine.DOCKER else ''}\".strip(),\n            f\"Podman {'(Active)' if current_engine == ContainerEngine.PODMAN else ''}\".strip(),\n        ]\n        engine = questionary.select(\"Which container engine do you prefer?\", choices=choices).ask()\n        if engine is None:\n            raise click.ClickException(\"No valid container engine selected. 
Aborting...\")\n        engine = engine.split()[0].lower()\n\n    sandbox = ComposeSandbox.from_environment()\n    has_active_instance = sandbox is not None and (\n        force\n        or click.confirm(\n            f\"Detected active localnet instance, would you like to restart it with '{engine}'?\",\n            default=True,\n        )\n    )\n    if sandbox and has_active_instance:\n        sandbox.down()\n        save_container_engine(engine)\n        sandbox.write_compose_file()\n        sandbox.up()\n    else:\n        save_container_engine(engine)\n\n    logger.info(f\"Container engine set to `{engine}`\")\n"
  },
  {
    "path": "src/algokit/core/config_commands/js_package_manager.py",
    "content": "import enum\nimport logging\nfrom pathlib import Path\n\nimport click\nimport questionary\n\nfrom algokit.core.conf import get_app_config_dir\n\nlogger = logging.getLogger(__name__)\n\n\nclass JSPackageManager(str, enum.Enum):\n    NPM = \"npm\"\n    PNPM = \"pnpm\"\n\n    def __str__(self) -> str:\n        return self.value\n\n\ndef _get_js_config_file() -> Path:\n    return get_app_config_dir() / \"default-js-package-manager\"\n\n\ndef get_js_package_manager() -> str | None:\n    \"\"\"Get the default JavaScript package manager for use by AlgoKit CLI.\n    None implies it has not been set yet, likely to be first time user.\n    \"\"\"\n\n    config_file = _get_js_config_file()\n    if config_file.exists():\n        return config_file.read_text().strip()\n    return None\n\n\ndef save_js_package_manager(manager: str) -> None:\n    if manager not in JSPackageManager:\n        raise ValueError(f\"Invalid JavaScript package manager: {manager}\")\n    config_file = _get_js_config_file()\n    config_file.write_text(manager)\n\n\n@click.command(\"js-package-manager\", short_help=\"Configure the default JavaScript package manager for AlgoKit.\")\n@click.argument(\"package_manager\", required=False, type=click.Choice([JSPackageManager.NPM, JSPackageManager.PNPM]))\ndef js_package_manager_configuration_command(*, package_manager: str | None) -> None:\n    \"\"\"Set the default JavaScript package manager for use by AlgoKit CLI.\"\"\"\n\n    if package_manager is None:\n        current_manager = get_js_package_manager() or JSPackageManager.NPM\n        choices = [\n            f\"npm {'(active)' if current_manager == JSPackageManager.NPM else ''}\".strip(),\n            f\"pnpm {'(active)' if current_manager == JSPackageManager.PNPM else ''}\".strip(),\n        ]\n        manager = questionary.select(\n            \"Which JavaScript package manager would you prefer `bootstrap` command to use by default?\", choices=choices\n        ).ask()\n        if manager 
is None:\n            raise click.ClickException(\"No valid JavaScript package manager selected. Aborting...\")\n        package_manager = manager.split()[0].lower()\n\n    save_js_package_manager(package_manager)\n    logger.info(f\"JavaScript package manager set to `{package_manager}`\")\n"
  },
  {
    "path": "src/algokit/core/config_commands/py_package_manager.py",
    "content": "import enum\nimport logging\nfrom pathlib import Path\n\nimport click\nimport questionary\n\nfrom algokit.core.conf import get_app_config_dir\n\nlogger = logging.getLogger(__name__)\n\n\nclass PyPackageManager(str, enum.Enum):\n    POETRY = \"poetry\"\n    UV = \"uv\"\n\n    def __str__(self) -> str:\n        return self.value\n\n\ndef _get_py_config_file() -> Path:\n    return get_app_config_dir() / \"default-py-package-manager\"\n\n\ndef get_py_package_manager() -> str | None:\n    \"\"\"Get the default Python package manager for use by AlgoKit CLI.\n    None implies it has not been set yet, likely to be first time user.\n    \"\"\"\n\n    config_file = _get_py_config_file()\n    if config_file.exists():\n        return config_file.read_text().strip()\n    return None\n\n\ndef save_py_package_manager(manager: str) -> None:\n    if manager not in PyPackageManager:\n        raise ValueError(f\"Invalid Python package manager: {manager}\")\n    config_file = _get_py_config_file()\n    config_file.write_text(manager)\n\n\n@click.command(\"py-package-manager\", short_help=\"Configure the default Python package manager for AlgoKit.\")\n@click.argument(\"package_manager\", required=False, type=click.Choice([PyPackageManager.POETRY, PyPackageManager.UV]))\ndef py_package_manager_configuration_command(*, package_manager: str | None) -> None:\n    \"\"\"Set the default Python package manager for use by AlgoKit CLI.\"\"\"\n\n    if package_manager is None:\n        current_manager = get_py_package_manager() or PyPackageManager.POETRY\n        choices = [\n            f\"poetry {'(active)' if current_manager == PyPackageManager.POETRY else ''}\".strip(),\n            f\"uv {'(active)' if current_manager == PyPackageManager.UV else ''}\".strip(),\n        ]\n        manager = questionary.select(\n            \"Which Python package manager would you prefer `bootstrap` command to use by default?\", choices=choices\n        ).ask()\n        if manager is None:\n   
         raise click.ClickException(\"No valid Python package manager selected. Aborting...\")\n        package_manager = manager.split()[0].lower()\n\n    save_py_package_manager(package_manager)\n    logger.info(f\"Python package manager set to `{package_manager}`\")\n"
  },
  {
    "path": "src/algokit/core/config_commands/version_prompt.py",
    "content": "import importlib.resources as importlib_resources\nimport logging\nimport re\nfrom datetime import timedelta\nfrom time import time\n\nimport click\nimport httpx\n\nfrom algokit import __name__ as algokit_name\nfrom algokit.core.conf import get_app_config_dir, get_app_state_dir, get_current_package_version\nfrom algokit.core.utils import is_binary_mode\n\nlogger = logging.getLogger(__name__)\n\nLATEST_URL = \"https://api.github.com/repos/algorandfoundation/algokit-cli/releases/latest\"\nVERSION_CHECK_INTERVAL = timedelta(weeks=1).total_seconds()\nDISABLE_CHECK_MARKER = \"disable-version-prompt\"\nDISTRIBUTION_METHOD_UPDATE_COMMAND = {\n    \"snap\": \"`snap refresh algokit`\",\n    \"winget\": \"`winget upgrade algokit`\",\n    \"brew\": \"`brew upgrade algokit`\",\n}\nUNKNOWN_DISTRIBUTION_METHOD_UPDATE_INSTRUCTION = \"the tool used to install AlgoKit\"\n# TODO: Set this version as part of releasing the binary distributions.\nBINARY_DISTRIBUTION_RELEASE_VERSION = \"99.99.99\"\n\n\ndef do_version_prompt() -> None:\n    if _skip_version_prompt():\n        logger.debug(\"Version prompt disabled\")\n        return\n\n    current_version = get_current_package_version()\n    latest_version = get_latest_version_or_cached()\n    if latest_version is None:\n        logger.debug(\"Could not determine latest version\")\n        return\n\n    current_version_sequence = _get_version_sequence(current_version)\n    if current_version_sequence < _get_version_sequence(latest_version):\n        update_instruction = UNKNOWN_DISTRIBUTION_METHOD_UPDATE_INSTRUCTION\n        if is_binary_mode():\n            distribution = _get_distribution_method()\n            update_instruction = (\n                DISTRIBUTION_METHOD_UPDATE_COMMAND.get(distribution, UNKNOWN_DISTRIBUTION_METHOD_UPDATE_INSTRUCTION)\n                if distribution\n                else UNKNOWN_DISTRIBUTION_METHOD_UPDATE_INSTRUCTION\n            )\n        # If you're not using the binary mode, then 
you've used pipx to install AlgoKit.\n        # One exception is that older versions of the brew package used pipx,\n        # however require updating via brew, so we show the default update instruction instead.\n        elif current_version_sequence >= _get_version_sequence(BINARY_DISTRIBUTION_RELEASE_VERSION):\n            update_instruction = \"`pipx upgrade algokit`\"\n\n        logger.info(\n            f\"You are using AlgoKit version {current_version}, however version {latest_version} is available. \"\n            f\"Please update using {update_instruction}.\"\n        )\n    else:\n        logger.debug(\"Current version is up to date\")\n\n\ndef _get_version_sequence(version: str) -> list[int | str]:\n    match = re.match(r\"(\\d+)\\.(\\d+)\\.(\\d+)(.*)\", version)\n    if match:\n        return [int(x) for x in match.groups()[:3]] + [match.group(4)]\n    return [version]\n\n\ndef get_latest_version_or_cached() -> str | None:\n    version_check_path = get_app_state_dir() / \"last-version-check\"\n\n    try:\n        last_checked = version_check_path.stat().st_mtime\n        version = version_check_path.read_text(encoding=\"utf-8\")\n    except OSError:\n        logger.debug(f\"{version_check_path} inaccessible\")\n        last_checked = 0\n        version = None\n    else:\n        logger.debug(f\"{version} found in cache {version_check_path}\")\n\n    if (time() - last_checked) > VERSION_CHECK_INTERVAL:\n        try:\n            version = get_latest_github_version()\n        except Exception as ex:\n            logger.debug(\"Checking for latest version failed\", exc_info=ex)\n            # update last checked time even if check failed\n            version_check_path.touch()\n        else:\n            version_check_path.write_text(version, encoding=\"utf-8\")\n    # handle case where the first check failed, so we have an empty file\n    return version or None\n\n\ndef get_latest_github_version() -> str:\n    headers = {\"ACCEPT\": 
\"application/vnd.github+json\", \"X-GitHub-Api-Version\": \"2022-11-28\"}\n\n    response = httpx.get(LATEST_URL, headers=headers)\n    response.raise_for_status()\n\n    json = response.json()\n    tag_name = json[\"tag_name\"]\n    logger.debug(f\"Latest version tag: {tag_name}\")\n    match = re.match(r\"v(\\d+\\.\\d+\\.\\d+)\", tag_name)\n    if not match:\n        raise ValueError(f\"Unable to extract version from tag_name: {tag_name}\")\n    return match.group(1)\n\n\ndef _skip_version_prompt() -> bool:\n    disable_marker = get_app_config_dir() / DISABLE_CHECK_MARKER\n    return disable_marker.exists()\n\n\ndef _get_distribution_method() -> str | None:\n    file_path = importlib_resources.files(algokit_name) / \"resources\" / \"distribution-method\"\n    with file_path.open(\"r\", encoding=\"utf-8\", errors=\"strict\") as file:\n        content = file.read().strip()\n\n        if content in [\"snap\", \"winget\", \"brew\"]:\n            return content\n        else:\n            return None\n\n\nskip_version_check_option = click.option(\n    \"--skip-version-check\",\n    is_flag=True,\n    show_default=False,\n    default=False,\n    help=\"Skip version checking and prompting.\",\n)\n\n\n@click.command(\n    \"version-prompt\", short_help=\"Enables or disables checking and prompting if a new version of AlgoKit is available\"\n)\n@click.argument(\"enable\", required=False, type=click.Choice([\"enable\", \"disable\"]), default=None)\ndef version_prompt_configuration_command(*, enable: str | None) -> None:\n    \"\"\"Controls whether AlgoKit checks and prompts for new versions.\n    Set to [disable] to prevent AlgoKit performing this check permanently, or [enable] to resume checking.\n    If no argument is provided then outputs current setting.\n\n    Also see --skip-version-check which can be used to disable check for a single command.\"\"\"\n    if enable is None:\n        logger.info(\"disable\" if _skip_version_prompt() else \"enable\")\n    else:\n       
 disable_marker = get_app_config_dir() / DISABLE_CHECK_MARKER\n        if enable == \"enable\":\n            disable_marker.unlink(missing_ok=True)\n            logger.info(\"📡 Resuming check for new versions\")\n        else:\n            disable_marker.touch()\n            logger.info(\"🚫 Will stop checking for new versions\")\n"
  },
  {
    "path": "src/algokit/core/dispenser.py",
    "content": "import base64\nimport contextlib\nimport logging\nimport os\nimport time\nfrom dataclasses import dataclass\nfrom datetime import datetime, timezone\nfrom enum import Enum\nfrom typing import Any, ClassVar\n\nimport httpx\nimport jwt\nimport keyring\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\nfrom algokit.core._vendor.auth0.authentication.token_verifier import (\n    AsymmetricSignatureVerifier,\n    TokenVerifier,\n)\n\nlogger = logging.getLogger(__name__)\n\n# Constants\nALGORITHMS = [\"RS256\"]\nDISPENSER_KEYRING_NAMESPACE = \"algokit_dispenser\"\nDISPENSER_KEYRING_ID_TOKEN_KEY = \"algokit_dispenser_id_token\"\nDISPENSER_KEYRING_ACCESS_TOKEN_KEY = \"algokit_dispenser_access_token\"\nDISPENSER_KEYRING_REFRESH_TOKEN_KEY = \"algokit_dispenser_refresh_token\"\nDISPENSER_KEYRING_USER_ID_KEY = \"algokit_dispenser_user_id\"\nDISPENSER_REQUEST_TIMEOUT = 15\nDISPENSER_ACCESS_TOKEN_KEY = \"ALGOKIT_DISPENSER_ACCESS_TOKEN\"\nDISPENSER_LOGIN_TIMEOUT = 300  # 5 minutes\n\n\nclass DispenserApiAudiences(str, Enum):\n    USER = \"user\"\n    CI = \"ci\"\n\n\n@dataclass\nclass AccountKeyringData:\n    id_token: str\n    access_token: str\n    refresh_token: str\n    user_id: str\n\n\nclass ApiConfig:\n    BASE_URL = \"https://api.dispenser.algorandfoundation.tools\"\n\n\nclass AuthConfig:\n    DOMAIN = \"dispenser-prod.eu.auth0.com\"\n    BASE_URL = f\"https://{DOMAIN}\"\n    JWKS_URL = f\"{BASE_URL}/.well-known/jwks.json\"\n    OAUTH_TOKEN_URL = f\"{BASE_URL}/oauth/token\"\n    OAUTH_DEVICE_CODE_URL = f\"{BASE_URL}/oauth/device/code\"\n    OAUTH_REVOKE_URL = f\"{BASE_URL}/oauth/revoke\"\n    AUDIENCES: ClassVar[dict[str, str]] = {\n        DispenserApiAudiences.USER: \"api-prod-dispenser-user\",\n        DispenserApiAudiences.CI: \"api-prod-dispenser-ci\",\n    }\n    CLIENT_IDS: ClassVar[dict[str, str]] = {\n        DispenserApiAudiences.USER: \"UKcJQcqFaZRQvik45QW5lsSRERUf8Ub6\",\n        DispenserApiAudiences.CI: 
\"BOZkxGUiiWkaAXZebCQ20MTIYuQSqqpI\",\n    }\n\n\nclass APIErrorCode:\n    DISPENSER_OUT_OF_FUNDS = \"dispenser_out_of_funds\"\n    FORBIDDEN = \"forbidden\"\n    FUND_LIMIT_EXCEEDED = \"fund_limit_exceeded\"\n    DISPENSER_ERROR = \"dispenser_error\"\n    MISSING_PARAMETERS = \"missing_params\"\n    AUTHORIZATION_ERROR = \"authorization_error\"\n    REPUTATION_REFRESH_FAILED = \"reputation_refresh_failed\"\n    TXN_EXPIRED = \"txn_expired\"\n    TXN_INVALID = \"txn_invalid\"\n    TXN_ALREADY_PROCESSED = \"txn_already_processed\"\n    INVALID_ASSET = \"invalid_asset\"\n    UNEXPECTED_ERROR = \"unexpected_error\"\n\n\ndef _get_dispenser_credential(key: str) -> str:\n    \"\"\"\n    Get dispenser account credentials from the keyring.\n    \"\"\"\n\n    response = keyring.get_password(DISPENSER_KEYRING_NAMESPACE, key)\n\n    if not response:\n        raise Exception(f\"No keyring data found for key: {key}\")\n\n    return response\n\n\ndef _get_dispenser_credentials() -> AccountKeyringData:\n    \"\"\"\n    Get dispenser account credentials from the keyring.\n    \"\"\"\n\n    id_token = _get_dispenser_credential(DISPENSER_KEYRING_ID_TOKEN_KEY)\n    access_token = _get_dispenser_credential(DISPENSER_KEYRING_ACCESS_TOKEN_KEY)\n    refresh_token = _get_dispenser_credential(DISPENSER_KEYRING_REFRESH_TOKEN_KEY)\n    user_id = _get_dispenser_credential(DISPENSER_KEYRING_USER_ID_KEY)\n\n    return AccountKeyringData(\n        id_token=id_token, access_token=access_token, refresh_token=refresh_token, user_id=user_id\n    )\n\n\ndef _get_auth_token() -> str:\n    \"\"\"\n    Retrieve the authorization token based on the environment.\n    CI environment variables take precedence over keyring.\n    \"\"\"\n    try:\n        ci_access_token = os.environ.get(DISPENSER_ACCESS_TOKEN_KEY)\n\n        if ci_access_token:\n            logger.debug(\"Using CI access token over keyring credentials\")\n\n        return ci_access_token if ci_access_token else 
_get_dispenser_credentials().access_token\n    except Exception as ex:\n        raise Exception(\"Token not found\") from ex\n\n\ndef _validate_jwt_id_token(id_token: str, audience: str) -> None:\n    \"\"\"\n    Validate the id token.\n    \"\"\"\n\n    sv = AsymmetricSignatureVerifier(AuthConfig.JWKS_URL)\n    tv = TokenVerifier(signature_verifier=sv, issuer=f\"{AuthConfig.BASE_URL}/\", audience=audience)\n    tv.verify(id_token)\n\n\ndef _get_access_token_rsa_pub_key(access_token: str) -> rsa.RSAPublicKey:\n    \"\"\"\n    Fetch the RSA public key based on provided access token.\n    \"\"\"\n    jwks = httpx.get(AuthConfig.JWKS_URL).json()\n    for key in jwks[\"keys\"]:\n        if key[\"kid\"] == jwt.get_unverified_header(access_token)[\"kid\"]:\n            return rsa.RSAPublicNumbers(\n                e=int.from_bytes(base64.urlsafe_b64decode(key[\"e\"] + \"==\"), byteorder=\"big\"),\n                n=int.from_bytes(base64.urlsafe_b64decode(key[\"n\"] + \"==\"), byteorder=\"big\"),\n            ).public_key()\n\n    raise Exception(\"No matching key found\")\n\n\ndef _refresh_user_access_token() -> None:\n    \"\"\"\n    Refresh the user access token.\n    \"\"\"\n\n    data = _get_dispenser_credentials()\n    headers = {\n        \"Content-Type\": \"application/x-www-form-urlencoded\",\n        \"Authorization\": f\"Bearer {data.access_token}\",\n    }\n    token_data = {\n        \"grant_type\": \"refresh_token\",\n        \"client_id\": AuthConfig.CLIENT_IDS[DispenserApiAudiences.USER],\n        \"refresh_token\": data.refresh_token,\n    }\n    response = httpx.post(\n        AuthConfig.OAUTH_TOKEN_URL, data=token_data, headers=headers, timeout=DISPENSER_REQUEST_TIMEOUT\n    )\n    response.raise_for_status()\n\n    set_dispenser_credentials(response.json())\n\n\ndef _request_device_code(api_audience: DispenserApiAudiences, custom_scopes: str | None = None) -> dict[str, Any]:\n    \"\"\"\n    Request a device code for user authentication.\n    
\"\"\"\n\n    scope = f\"openid profile email {custom_scopes or ''}\".strip()\n    device_code_payload = {\n        \"client_id\": AuthConfig.CLIENT_IDS[api_audience],\n        \"scope\": scope,\n        \"audience\": AuthConfig.AUDIENCES[api_audience],\n    }\n    response = httpx.post(AuthConfig.OAUTH_DEVICE_CODE_URL, data=device_code_payload, timeout=DISPENSER_REQUEST_TIMEOUT)\n    response.raise_for_status()\n\n    data = response.json()\n    if not isinstance(data, dict):\n        logger.debug(\"Expected a dictionary response from OAuth token request, got: %s\", type(data).__name__)\n        raise Exception(\"Unexpected response type from OAuth device code request\")\n\n    return data\n\n\ndef _get_hours_until_reset(resets_at: str) -> float:\n    now_utc = datetime.now(timezone.utc)\n    reset_date = datetime.strptime(resets_at, \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=timezone.utc)\n    return round((reset_date - now_utc).total_seconds() / 3600, 1)\n\n\ndef request_token(api_audience: DispenserApiAudiences, device_code: str) -> dict[str, Any]:\n    \"\"\"\n    Request OAuth tokens.\n    \"\"\"\n\n    token_payload = {\n        \"grant_type\": \"urn:ietf:params:oauth:grant-type:device_code\",\n        \"device_code\": device_code,\n        \"client_id\": AuthConfig.CLIENT_IDS[api_audience],\n        \"audience\": AuthConfig.AUDIENCES[api_audience],\n    }\n    response = httpx.post(AuthConfig.OAUTH_TOKEN_URL, data=token_payload, timeout=DISPENSER_REQUEST_TIMEOUT)\n\n    data = response.json()\n    if not isinstance(data, dict):\n        logger.debug(f\"Expected a dictionary response from OAuth token request, got: {type(data).__name__}\")\n        raise Exception(\"Unexpected response type from OAuth token request\")\n\n    return data\n\n\ndef process_dispenser_request(*, url_suffix: str, data: dict | None = None, method: str = \"POST\") -> httpx.Response:\n    \"\"\"\n    Generalized method to process http requests to dispenser API\n    \"\"\"\n\n    
headers = {\"Authorization\": f\"Bearer {_get_auth_token()}\"}\n\n    # Set request arguments\n    request_args = {\n        \"url\": f\"{ApiConfig.BASE_URL}/{url_suffix}\",\n        \"headers\": headers,\n        \"timeout\": DISPENSER_REQUEST_TIMEOUT,\n    }\n\n    if method.upper() != \"GET\" and data is not None:\n        request_args[\"json\"] = data\n\n    try:\n        response: httpx.Response = getattr(httpx, method.lower())(**request_args)\n        response.raise_for_status()\n        return response\n\n    except httpx.HTTPStatusError as err:\n        error_message = f\"Error processing dispenser API request: {err.response.status_code}\"\n        error_response = None\n        with contextlib.suppress(Exception):\n            error_response = err.response.json()\n\n        if error_response and error_response.get(\"code\") == APIErrorCode.FUND_LIMIT_EXCEEDED:\n            hours_until_reset = _get_hours_until_reset(error_response.get(\"resetsAt\"))\n            error_message = (\n                \"Limit exceeded. 
\"\n                f\"Try again in ~{hours_until_reset} hours if your request doesn't exceed the daily limit.\"\n            )\n\n        elif err.response.status_code == httpx.codes.BAD_REQUEST:\n            error_message = err.response.json()[\"message\"]\n\n        raise Exception(error_message) from err\n\n    except Exception as err:\n        error_message = \"Error processing dispenser API request\"\n        logger.debug(f\"{error_message}: {err}\", exc_info=True)\n        raise err\n\n\ndef set_dispenser_credentials(token_data: dict[str, str]) -> None:\n    \"\"\"\n    Set the keyring passwords.\n    \"\"\"\n\n    # Verify signature is set to false since we already validate id_tokens in _validate_jwt_id_token\n    decoded_id_token = jwt.decode(token_data[\"id_token\"], algorithms=ALGORITHMS, options={\"verify_signature\": False})\n\n    keyring.set_password(DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_ID_TOKEN_KEY, token_data[\"id_token\"])\n    keyring.set_password(DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_ACCESS_TOKEN_KEY, token_data[\"access_token\"])\n    keyring.set_password(\n        DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_REFRESH_TOKEN_KEY, token_data.get(\"refresh_token\", \"\")\n    )\n    keyring.set_password(DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_USER_ID_KEY, decoded_id_token.get(\"sub\"))\n\n\ndef clear_dispenser_credentials() -> None:\n    \"\"\"\n    Clear the keyring passwords.\n    \"\"\"\n\n    keyring.delete_password(DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_ID_TOKEN_KEY)\n    keyring.delete_password(DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_ACCESS_TOKEN_KEY)\n    keyring.delete_password(DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_REFRESH_TOKEN_KEY)\n    keyring.delete_password(DISPENSER_KEYRING_NAMESPACE, DISPENSER_KEYRING_USER_ID_KEY)\n\n\ndef is_authenticated() -> bool:\n    \"\"\"\n    Check if the user is authenticated by checking if the token is still valid.\n    If the token is expired, attempt to 
refresh it.\n    \"\"\"\n\n    try:\n        access_token = _get_auth_token()\n        rsa_pub_key = _get_access_token_rsa_pub_key(access_token)\n\n        jwt.decode(\n            access_token,\n            rsa_pub_key,\n            options={\"verify_signature\": True},\n            algorithms=ALGORITHMS,\n            audience=[\n                AuthConfig.AUDIENCES[DispenserApiAudiences.USER],\n                AuthConfig.AUDIENCES[DispenserApiAudiences.CI],\n            ],\n        )\n\n        return True\n    except jwt.ExpiredSignatureError:\n        logger.debug(\"Access token is expired. Attempting to refresh the token...\")\n\n        try:\n            _refresh_user_access_token()\n            return True\n        except Exception:\n            logger.warning(\n                \"Failed to refresh the access token. Please authenticate first before proceeding with this command.\",\n                exc_info=True,\n            )\n\n        return False\n    except Exception as ex:\n        logger.debug(f\"Access token validation error: {ex}\", exc_info=True)\n        return False\n\n\ndef revoke_refresh_token() -> None:\n    \"\"\"\n    Revoke the refresh token.\n    \"\"\"\n\n    data = _get_dispenser_credentials()\n\n    if not data.refresh_token:\n        logger.debug(\"No refresh token found, nothing to revoke.\")\n        return\n\n    payload = {\"token\": data.refresh_token, \"client_id\": AuthConfig.CLIENT_IDS[DispenserApiAudiences.USER]}\n    headers = {\"content-type\": \"application/json\"}\n\n    try:\n        response = httpx.post(AuthConfig.OAUTH_REVOKE_URL, json=payload, headers=headers)\n        response.raise_for_status()\n        logger.debug(\"Token revoked successfully\")\n    except httpx.HTTPStatusError as ex:\n        raise Exception(f\"Failed to revoke token: {ex}\") from ex\n    except Exception as ex:\n        raise Exception(f\"An unexpected error occurred: {ex}\") from ex\n\n\ndef get_oauth_tokens(api_audience: DispenserApiAudiences, 
custom_scopes: str | None = None) -> dict[str, Any] | None:\n    \"\"\"\n    Authenticate and get OAuth tokens.\n    \"\"\"\n\n    device_code_data = _request_device_code(api_audience, custom_scopes)\n\n    if not device_code_data:\n        return None\n\n    logger.info(f\"Navigate to: {device_code_data['verification_uri_complete']}\")\n    logger.info(f\"Confirm code: {device_code_data['user_code']}\")\n\n    start_time = time.time()\n    while True:\n        token_data = request_token(api_audience, device_code_data[\"device_code\"])\n\n        if \"id_token\" in token_data:\n            _validate_jwt_id_token(token_data[\"id_token\"], audience=AuthConfig.CLIENT_IDS[api_audience])\n            return token_data\n\n        error = token_data.get(\"error\", \"\")\n        if error not in (\"authorization_pending\", \"slow_down\"):\n            raise Exception(token_data.get(\"error_description\", \"\"))\n\n        # Check if 5 minutes have passed\n        if time.time() - start_time > DISPENSER_LOGIN_TIMEOUT:\n            logger.warning(\"Authentication cancelled. Timeout reached after 5 minutes of inactivity.\")\n            break\n\n        time.sleep(device_code_data.get(\"interval\", 5))\n\n    return None\n"
  },
  {
    "path": "src/algokit/core/doctor.py",
    "content": "import dataclasses\nimport logging\nimport re\nimport traceback\nfrom shutil import which\n\nfrom algokit.core import proc\nfrom algokit.core.utils import extract_version_triple, is_minimum_version\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclasses.dataclass\nclass DoctorResult:\n    ok: bool\n    output: str\n    extra_help: list[str] | None = None\n\n\ndef check_dependency(\n    cmd: list[str],\n    *,\n    missing_help: list[str] | None = None,\n    include_location: bool = False,\n    minimum_version: str | None = None,\n    minimum_version_help: list[str] | None = None,\n) -> DoctorResult:\n    \"\"\"Check a dependency by running a command.\n\n    :param cmd: command to run\n    :param missing_help: Optional additional text to display if command is not found\n    :param include_location: Include the path to `command` in the output?\n    :param minimum_version: Optional value to check minimum version against.\n    :param minimum_version_help: Custom help output if minimum version not met.\n    \"\"\"\n    result = _run_command(cmd, missing_help=missing_help)\n    if result.ok:\n        result = _process_version(\n            run_output=result.output,\n            minimum_version=minimum_version,\n            minimum_version_help=minimum_version_help,\n        )\n        if include_location:\n            try:\n                location = which(cmd[0])\n            except Exception as ex:\n                logger.debug(f\"Failed to locate {cmd[0]}: {ex}\", exc_info=True)\n                result.output += \" (location: unknown)\"\n            else:\n                result.output += f\" (location: {location})\"\n    return result\n\n\ndef _run_command(\n    cmd: list[str],\n    *,\n    missing_help: list[str] | None = None,\n) -> DoctorResult:\n    try:\n        proc_result = proc.run(cmd)\n    except FileNotFoundError:\n        logger.debug(\"Command not found\", exc_info=True)\n        return DoctorResult(ok=False, output=\"Command not 
found!\", extra_help=missing_help)\n    except PermissionError:\n        logger.debug(\"Permission denied running command\", exc_info=True)\n        return DoctorResult(ok=False, output=\"Permission denied attempting to run command\")\n    except Exception as ex:\n        logger.debug(f\"Unexpected exception running command: {ex}\", exc_info=True)\n        return DoctorResult(\n            ok=False,\n            output=\"Unexpected error running command\",\n            extra_help=_format_exception_only(ex),\n        )\n    else:\n        if proc_result.exit_code != 0:\n            return DoctorResult(\n                ok=False,\n                output=f\"Command exited with code: {proc_result.exit_code}\",\n                extra_help=proc_result.output.splitlines(),\n            )\n        return DoctorResult(ok=True, output=proc_result.output)\n\n\ndef _process_version(\n    *,\n    run_output: str,\n    minimum_version: str | None,\n    minimum_version_help: list[str] | None,\n) -> DoctorResult:\n    try:\n        version_output = _get_version_or_first_non_blank_line(run_output)\n    except Exception as ex:\n        logger.debug(f\"Unexpected error checking dependency: {ex}\", exc_info=True)\n        return DoctorResult(\n            ok=False,\n            output=\"Unexpected error checking dependency\",\n            extra_help=_format_exception_only(ex),\n        )\n    if minimum_version is not None:\n        try:\n            version_triple = extract_version_triple(version_output)\n            version_ok = is_minimum_version(version_triple, minimum_version)\n        except Exception as ex:\n            logger.debug(f\"Unexpected error parsing version: {ex}\", exc_info=True)\n            return DoctorResult(\n                ok=False,\n                output=version_output,\n                extra_help=[\n                    f'Failed to parse version from: \"{version_output}\"',\n                    f\"Error: {ex}\",\n                    f\"Unable to check 
against minimum version of {minimum_version}\",\n                ],\n            )\n        if not version_ok:\n            return DoctorResult(\n                ok=False,\n                output=version_output,\n                extra_help=(minimum_version_help or [f\"Minimum version required: {minimum_version}\"]),\n            )\n    return DoctorResult(ok=True, output=version_output)\n\n\ndef _get_version_or_first_non_blank_line(output: str) -> str:\n    match = re.search(r\"\\d+\\.\\d+\\.\\d+[^\\s'\\\"(),]*\", output)\n    if match:\n        return match.group()\n    lines = output.splitlines()\n    non_blank_lines = filter(None, (ln.strip() for ln in lines))\n    # return first non-blank line or empty string if all blank\n    return next(non_blank_lines, \"\")\n\n\ndef _format_exception_only(ex: Exception) -> list[str]:\n    return [ln.rstrip(\"\\n\") for ln in traceback.format_exception_only(type(ex), ex)]\n"
  },
  {
    "path": "src/algokit/core/generate.py",
    "content": "import dataclasses\nimport logging\nfrom pathlib import Path\n\nimport click\n\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_algokit_config\nfrom algokit.core.utils import get_python_paths\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclasses.dataclass(kw_only=True)\nclass Generator:\n    name: str\n    path: str\n    description: str | None = None\n\n\ndef _format_generator_name(name: str) -> str:\n    \"\"\"\n    Format the generator name to be used as a command name.\n    :param name: Generator name.\n    :return: Formatted generator name.\n    \"\"\"\n\n    return name.strip().replace(\" \", \"-\").replace(\"_\", \"-\")\n\n\ndef run_generator(answers: dict, path: Path) -> None:\n    \"\"\"\n    Run the custom generator with the given answers and path.\n    :param answers: Answers to pass to the generator.\n    :param path: Path to the generator.\n    \"\"\"\n\n    # Below ensures that if the generator copier.yaml relies on python_path answer\n    # it will be set to the system python path if available by algokit cli\n    answers_dict = answers.copy()\n    system_python_path = next(get_python_paths(), None)\n    if system_python_path is not None:\n        answers_dict.setdefault(\"python_path\", system_python_path)\n    else:\n        answers_dict.setdefault(\"python_path\", \"no_system_python_available\")\n\n    # copier is lazy imported for two reasons\n    # 1. it is slow to import on first execution after installing\n    # 2. 
the import fails if git is not installed (which we check above)\n    from copier._main import Worker\n\n    cwd = Path.cwd()\n    expected_answers_file = cwd / \".algokit\" / \".copier-answers.yml\"\n    relative_answers_file = expected_answers_file.relative_to(cwd) if expected_answers_file.exists() else None\n\n    with Worker(\n        answers_file=relative_answers_file,\n        src_path=str(path),\n        dst_path=cwd,\n        data=answers_dict,\n        quiet=True,\n        unsafe=True,\n    ) as copier_worker:\n        logger.debug(f\"Running generator in {copier_worker.src_path}\")\n        copier_worker.run_copy()\n\n    logger.info(f\"Generator {path} executed successfully\")\n\n\ndef load_generators(project_dir: Path) -> list[Generator]:\n    \"\"\"\n    Load the generators for the given project from .algokit.toml file.\n    :param project_dir: Project directory path.\n    :return: Generators.\n    \"\"\"\n    # Load and parse the TOML configuration file\n    config = get_algokit_config(project_dir=project_dir)\n    generators: list[Generator] = []\n\n    if not config:\n        return generators\n\n    generators_table = config.get(\"generate\", {})\n\n    if not isinstance(generators_table, dict):\n        raise click.ClickException(f\"Bad data for [generators] key in '{ALGOKIT_CONFIG}'\")\n\n    for name, generators_config in generators_table.items():\n        match generators_config:\n            case {\"path\": str(path), **remaining}:\n                if not Path(path).exists():\n                    logger.warning(f\"Path '{path}' for generator '{name}' does not exist, skipping\")\n                    continue\n\n                description = remaining.get(\"description\", None)\n                generator = Generator(\n                    name=_format_generator_name(name),\n                    description=str(description) if description else None,\n                    path=path,\n                )\n                generators.append(generator)\n\n  
          case {\"path\": _}:\n                logger.warning(f\"Missing path for generator '{name}' in '{ALGOKIT_CONFIG}', skipping\")\n\n            case _:\n                logger.debug(f'Invalid generator configuration key \"{name}\" of value \"{generators_config}\", skipping')\n\n    return generators\n"
  },
  {
    "path": "src/algokit/core/goal.py",
    "content": "import logging\nimport re\nimport shutil\nfrom pathlib import Path, PurePath\n\nfrom algokit.core.conf import get_app_config_dir\nfrom algokit.core.config_commands.container_engine import get_container_engine\nfrom algokit.core.sandbox import ContainerEngine\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_volume_mount_path_docker() -> Path:\n    return Path(\"/root/goal_mount/\")\n\n\ndef get_volume_mount_path_local(directory_name: str) -> Path:\n    path = get_app_config_dir().joinpath(directory_name, \"goal_mount\")\n    if get_container_engine() == ContainerEngine.PODMAN:\n        # Pre create the directory to avoid permission issues\n        path.mkdir(parents=True, exist_ok=True)\n    return path\n\n\nfilename_pattern = re.compile(r\"^[\\w\\-\\.]+\\.\\w+$\")\n\n\ndef is_path_or_filename(argument: str) -> bool:\n    path = PurePath(argument)\n    return len(path.parts) > 1 or (len(path.parts) == 1 and filename_pattern.match(path.parts[0]) is not None)\n\n\ndef delete_files_from_volume_mount(filename: str, volume_mount_path_docker: Path) -> None:\n    try:\n        volume_mount_path_docker.joinpath(filename).unlink()\n    except Exception as e:\n        logger.error(e)\n\n\ndef list_files_in_volume(volume_path: Path) -> list[str]:\n    file_paths = []\n    if volume_path.exists() and volume_path.is_dir():\n        for file in volume_path.rglob(\"*\"):\n            if file.is_file():\n                file_paths.append(str(file))\n    else:\n        logger.error(f\"{volume_path} does not exist or is not a directory.\")\n    return file_paths\n\n\ndef preprocess_command_args(\n    command: list[str], volume_mount_path_local: Path, docker_mount_path_local: Path\n) -> tuple[list[Path], list[Path], list[str]]:\n    input_files = []\n    output_files = []\n    try:\n        for i, arg in enumerate(command):\n            if is_path_or_filename(arg):\n                absolute_arg_path = Path(arg).expanduser().absolute()\n                arg_changed = 
docker_mount_path_local.joinpath(absolute_arg_path.name)\n                command[i] = str(arg_changed)\n\n                file_exists = absolute_arg_path.exists()\n                is_output_arg = i > 0 and command[i - 1] in [\n                    \"-o\",\n                    \"--outdir\",\n                    \"--outfile\",\n                    \"--out\",\n                    \"--result-out\",\n                    \"--lsig-out\",\n                ]\n                if file_exists and not is_output_arg:\n                    input_files.append(absolute_arg_path)\n                    shutil.copy(absolute_arg_path, volume_mount_path_local)\n                elif is_output_arg:  # it is an output file that doesn't exist yet\n                    output_files.append(absolute_arg_path)\n                else:\n                    raise FileNotFoundError(f\"{arg} does not exist.\")\n    except Exception as e:\n        logger.error(e)\n        raise e\n    return input_files, output_files, command\n\n\ndef post_process(input_files: list[Path], output_files: list[Path], volume_mount_path_local: Path) -> None:\n    for input_file in input_files:\n        delete_files_from_volume_mount(input_file.name, volume_mount_path_local)\n\n    files_in_volume_mount = {Path(file) for file in list_files_in_volume(volume_mount_path_local)}\n    for output_file in output_files:\n        stem = output_file.stem\n        ext = output_file.suffix\n\n        # Copy outputs split into multiple files. 
For example `goal clerk split -i ./input.gtxn -o ./output.txn`\n        # will produce a file (output-0.txn etc) for each transaction in the group being split.\n        r = re.compile(rf\"^(?:{stem})(?:-[0-9]+)?(?:\\{ext})$\") if ext else re.compile(rf\"^(?:{stem})(?:-[0-9]+)?$\")\n\n        matched_files_in_volume_mount = filter(lambda f: (r.match(f.name)), files_in_volume_mount)\n\n        for matched_file_in_volume_mount in matched_files_in_volume_mount:\n            shutil.copy(\n                volume_mount_path_local.joinpath(matched_file_in_volume_mount.name),\n                output_file.parent.joinpath(matched_file_in_volume_mount.name),\n            )\n            delete_files_from_volume_mount(matched_file_in_volume_mount.name, volume_mount_path_local)\n"
  },
  {
    "path": "src/algokit/core/init.py",
    "content": "import json\nimport re\nimport shutil\nimport subprocess\nfrom logging import getLogger\nfrom pathlib import Path\nfrom subprocess import check_output\nfrom typing import Any, NoReturn, cast\n\nimport click\nimport yaml\nfrom copier._main import Worker\nfrom copier._types import MISSING\nfrom copier._user_data import AnswersMap, Question\n\nfrom algokit.core.project import get_project_dir_names_from_workspace\n\nlogger = getLogger(__name__)\n\n\nDEFAULT_MIN_VERSION = \"1.8.0\"\nDEFAULT_PROJECTS_ROOT_PATH = \"projects\"\nALGOKIT_TEMPLATES_REPO_URL = \"https://github.com/algorandfoundation/algokit-templates\"\nALGOKIT_USER_DIR = \".algokit\"\nALGOKIT_TEMPLATES_DIR = \"algokit-templates\"\n\n\ndef populate_default_answers(worker: Worker) -> None:\n    \"\"\"Helper function to pre-populate Worker.data with default answers, based on Worker.answers implementation (see\n    https://github.com/copier-org/copier/blob/v7.1.0/copier/main.py#L363).\n\n    Used as a work-around for the behaviour of Worker(default=True, ...) 
which in >=7.1 raises an error instead of\n    prompting if no default is provided\"\"\"\n    answers = AnswersMap(\n        user_defaults=worker.user_defaults,\n        init=worker.data,\n        last=worker.subproject.last_answers,\n        metadata=worker.template.metadata,\n    )\n\n    for var_name, details in worker.template.questions_data.items():\n        if var_name in worker.data:\n            continue\n        question = Question(\n            answers=answers,\n            jinja_env=worker.jinja_env,\n            var_name=var_name,\n            # https://github.com/copier-org/copier/releases/tag/v9.7.0 introduces changes to Question model,\n            # which now requires passing context param.\n            context={**worker._render_context(), **answers.combined},  # noqa: SLF001\n            **details,\n        )\n        default_value = question.get_default()\n        if default_value is not MISSING:\n            worker.data[var_name] = default_value\n\n\ndef get_git_user_info(param: str) -> str | None:\n    \"\"\"Get git user info from the system. 
Returns None if git is not available.\"\"\"\n\n    if not shutil.which(\"git\"):\n        return None\n\n    try:\n        return check_output(f\"git config user.{param}\", shell=True).decode(\"utf-8\").strip()\n    except Exception:\n        logger.warning(\n            f\"Failed to get user info from git, please input your '{param}' manually or use default placeholder.\"\n        )\n        logger.debug(\"Failed to get user info from git\", exc_info=True)\n        return None\n\n\ndef is_valid_project_dir_name(value: str) -> bool:\n    \"\"\"Check if the project directory name for algokit project is valid.\"\"\"\n\n    algokit_project_names = get_project_dir_names_from_workspace()\n    if value in algokit_project_names:\n        return False\n    return bool(re.match(r\"^[\\w\\-.]+$\", value))\n\n\ndef resolve_vscode_workspace_file(project_root: Path | None) -> Path | None:\n    \"\"\"Resolve the path to the VSCode workspace file for the given project.\n    Works by looking for algokit workspace and checking if there is a matching\n    vscode config at the same level.\"\"\"\n    if not project_root:\n        return None\n    return next(project_root.glob(\"*.code-workspace\"), None)\n\n\ndef append_project_to_vscode_workspace(project_path: Path, workspace_path: Path) -> None:\n    \"\"\"Append project to the code workspace, ensuring compatibility across Windows and Unix systems.\"\"\"\n    if not workspace_path.exists():\n        raise FileNotFoundError(f\"Workspace path {workspace_path} does not exist.\")\n\n    try:\n        workspace = _load_vscode_workspace(workspace_path)\n\n        # Compute the project path relative to the workspace root\n        processed_project_path = project_path.relative_to(workspace_path.parent)\n        project_abs_path = (workspace_path.parent / processed_project_path).resolve(strict=False)\n\n        # Gather existing paths as absolute paths\n        existing_abs_paths = []\n        for folder in workspace.get(\"folders\", []):\n  
          folder_path = Path(folder.get(\"path\", \"\").replace(\"\\\\\", \"/\"))\n            existing_abs_path = (workspace_path.parent / folder_path).resolve(strict=False)\n            existing_abs_paths.append(existing_abs_path)\n\n        # Check if the project path is already in the workspace\n        if project_abs_path not in existing_abs_paths:\n            workspace.setdefault(\"folders\", []).append({\"path\": str(processed_project_path).replace(\"\\\\\", \"/\")})\n            _save_vscode_workspace(workspace_path, workspace)\n            logger.debug(f\"Appended project {project_path} to workspace {workspace_path}.\")\n        else:\n            logger.debug(f\"Project {project_path} is already in workspace {workspace_path}, not appending.\")\n\n    except json.JSONDecodeError as json_err:\n        logger.warning(f\"Invalid JSON format in the workspace file {workspace_path}. {json_err}\")\n    except Exception as e:\n        logger.warning(f\"Failed to append project {project_path} to workspace {workspace_path}. {e}\")\n\n\ndef _load_vscode_workspace(workspace_path: Path) -> dict[str, Any]:\n    \"\"\"Load the workspace file as a JSON object.\"\"\"\n    with workspace_path.open(mode=\"r\", encoding=\"utf-8\") as f:\n        data = json.load(f)\n        assert isinstance(data, dict)\n        return cast(\"dict[str, Any]\", data)\n\n\ndef _save_vscode_workspace(workspace_path: Path, workspace: dict) -> None:\n    \"\"\"Save the modified workspace back to the file.\"\"\"\n    with workspace_path.open(mode=\"w\", encoding=\"utf-8\") as f:\n        json.dump(workspace, f, indent=2)\n\n\ndef _fail_and_bail() -> NoReturn:\n    \"\"\"Exit the program with an error code\"\"\"\n    logger.info(\"🛑 Bailing out... 
👋\")\n    raise click.exceptions.Exit(code=1)\n\n\ndef _manage_templates_repository() -> None:\n    \"\"\"Manage the templates repository by cloning or updating it.\"\"\"\n    algokit_dir = Path.home() / ALGOKIT_USER_DIR\n    templates_dir = algokit_dir / ALGOKIT_TEMPLATES_DIR\n\n    try:\n        if not templates_dir.exists():\n            # Clone the repository if it doesn't exist\n            click.echo(\"Cloning templates repository...\")\n            algokit_dir.mkdir(exist_ok=True)\n            subprocess.run(\n                [\"git\", \"clone\", ALGOKIT_TEMPLATES_REPO_URL, str(templates_dir)],\n                check=True,\n                capture_output=True,\n                text=True,\n            )\n        else:\n            # Pull latest changes if the repository exists\n            subprocess.run(\n                [\"git\", \"-C\", str(templates_dir), \"pull\"],\n                check=True,\n                capture_output=True,\n                text=True,\n            )\n    except subprocess.CalledProcessError as e:\n        logger.error(f\"Failed to fetch templates: {e.stderr}\")\n        _fail_and_bail()\n\n\ndef _open_ide(project_path: Path, readme_path: Path | None = None, *, open_ide: bool = True) -> None:\n    \"\"\"Open an IDE for the given project path, preferring VSCode over PyCharm if both are available.\"\"\"\n    vscode_workspace_file = resolve_vscode_workspace_file(project_path)\n    code_cmd = shutil.which(\"code\")\n    pycharm_cmd = shutil.which(\"charm\")\n\n    if open_ide and ((project_path / \".vscode\").is_dir() or vscode_workspace_file) and code_cmd:\n        target_path = str(vscode_workspace_file if vscode_workspace_file else project_path)\n        logger.info(\n            \"VSCode configuration detected in project directory, and 'code' command is available on path, \"\n            \"attempting to launch VSCode\"\n        )\n        code_cmd_and_args = [code_cmd, target_path]\n        if readme_path:\n            
code_cmd_and_args.append(str(readme_path))\n        subprocess.run(code_cmd_and_args, check=False)\n        return\n\n    if open_ide and pycharm_cmd:\n        logger.info(\"PyCharm command is available on path, attempting to launch PyCharm\")\n        pycharm_cmd_and_args = [pycharm_cmd, str(project_path)]\n        if readme_path:\n            pycharm_cmd_and_args.append(str(readme_path))\n        subprocess.run(pycharm_cmd_and_args, check=False)\n        return\n\n    if readme_path:\n        logger.info(f\"Your template includes a {readme_path.name} file, you might want to review that as a next step.\")\n\n\ndef _load_algokit_examples(examples_config_path: str) -> list[dict]:\n    \"\"\"\n    Load and parse the examples from a YAML configuration file.\n\n    Args:\n        examples_config_path: Path to the YAML configuration file containing example templates\n\n    Returns:\n        A list of dictionaries with 'id' and 'name' of each example\n    \"\"\"\n    examples = []\n\n    config_file = Path(examples_config_path)\n    if config_file.is_file():\n        with config_file.open() as file:\n            file_content = yaml.safe_load(file)\n            for example in file_content.get(\"examples\", []):\n                examples.append(\n                    {\"id\": example.get(\"id\"), \"type\": example.get(\"type\"), \"name\": example.get(\"project_name\")}\n                )\n\n    return examples\n"
  },
  {
    "path": "src/algokit/core/log_handlers.py",
    "content": "import logging\nimport os\nimport sys\nfrom logging.handlers import RotatingFileHandler\nfrom types import TracebackType\nfrom typing import Any, ClassVar\n\nimport click\nfrom click.globals import resolve_color_default\n\nfrom .conf import get_app_state_dir\n\n__all__ = [\n    \"EXTRA_EXCLUDE_FROM_CONSOLE\",\n    \"EXTRA_EXCLUDE_FROM_LOGFILE\",\n    \"color_option\",\n    \"initialise_logging\",\n    \"uncaught_exception_logging_handler\",\n    \"verbose_option\",\n]\n\n\nclass ClickHandler(logging.Handler):\n    \"\"\"Handle console output with click.echo(...)\n\n    Slightly special in that this class acts as both a sink and an additional formatter,\n    but they're kind of intertwined for our use case of actually displaying things to the user.\n    \"\"\"\n\n    styles: ClassVar[dict[str, dict[str, Any]]] = {\n        \"critical\": {\"fg\": \"red\", \"bold\": True},\n        \"error\": {\"fg\": \"red\"},\n        \"warning\": {\"fg\": \"yellow\"},\n        \"debug\": {\"fg\": \"cyan\"},\n    }\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            msg = self.format(record)\n            level = record.levelname.lower()\n            if level in self.styles:\n                # if user hasn't disabled colors/styling, just use that\n                if resolve_color_default() is not False:\n                    level_style = self.styles[level]\n                    msg = click.style(msg, **level_style)\n                # otherwise, prefix the level name\n                else:\n                    msg = f\"{level.upper()}: {msg}\"\n            click.echo(msg)\n        except Exception:\n            self.handleError(record)\n\n\nclass NoExceptionFormatter(logging.Formatter):\n    \"\"\"Prevent automatically displaying exception/traceback info.\n    (without interfering with other formatters that might later want to add such information)\n    \"\"\"\n\n    def formatException(self, *_args: Any) -> str:  # noqa: N802\n        
return \"\"\n\n    def formatStack(self, *_args: Any) -> str:  # noqa: N802\n        return \"\"\n\n\nCONSOLE_LOG_HANDLER_NAME = \"console_log_handler\"\n\nEXCLUDE_FROM_KEY = \"exclude_from\"\nEXCLUDE_FROM_CONSOLE_VALUE = \"console\"\nEXCLUDE_FROM_LOGFILE_VALUE = \"logfile\"\n\nEXTRA_EXCLUDE_FROM_CONSOLE = {EXCLUDE_FROM_KEY: EXCLUDE_FROM_CONSOLE_VALUE}\nEXTRA_EXCLUDE_FROM_LOGFILE = {EXCLUDE_FROM_KEY: EXCLUDE_FROM_LOGFILE_VALUE}\n\n\nclass ManualExclusionFilter(logging.Filter):\n    def __init__(self, exclude_value: str):\n        super().__init__()\n        self.exclude_value = exclude_value\n\n    def filter(self, record: logging.LogRecord) -> bool:\n        return getattr(record, EXCLUDE_FROM_KEY, None) != self.exclude_value\n\n\ndef initialise_logging() -> None:\n    console_log_handler = ClickHandler()\n    # default to INFO, this case be upgraded later based on -v flag\n    console_log_handler.setLevel(logging.INFO)\n    console_log_handler.name = CONSOLE_LOG_HANDLER_NAME\n    console_log_handler.formatter = NoExceptionFormatter()\n    console_log_handler.addFilter(ManualExclusionFilter(exclude_value=EXCLUDE_FROM_CONSOLE_VALUE))\n\n    file_log_handler = RotatingFileHandler(\n        filename=get_app_state_dir() / \"cli.log\",\n        maxBytes=1 * 1024 * 1024,\n        backupCount=5,\n        encoding=\"utf-8\",\n    )\n    file_log_handler.setLevel(logging.DEBUG)\n    file_log_handler.formatter = logging.Formatter(\n        \"%(asctime)s.%(msecs)03d %(name)s %(levelname)s %(message)s\", datefmt=\"%Y-%m-%dT%H:%M:%S\"\n    )\n    file_log_handler.addFilter(ManualExclusionFilter(exclude_value=EXCLUDE_FROM_LOGFILE_VALUE))\n\n    logging.basicConfig(level=logging.DEBUG, handlers=[console_log_handler, file_log_handler], force=True)\n\n\ndef uncaught_exception_logging_handler(\n    exc_type: type[BaseException], exc_value: BaseException, exc_traceback: TracebackType | None\n) -> None:\n    \"\"\"Function to be used as sys.excepthook, which logs uncaught 
exceptions.\"\"\"\n    if issubclass(exc_type, KeyboardInterrupt):\n        # don't log ctrl-c or equivalents\n        sys.__excepthook__(exc_type, exc_value, exc_traceback)\n    else:\n        logging.critical(f\"Unhandled {exc_type.__name__}: {exc_value}\", exc_info=(exc_type, exc_value, exc_traceback))\n\n\ndef _set_verbose(_ctx: click.Context, _param: click.Option, value: bool) -> None:  # noqa: FBT001\n    if value:\n        for handler in logging.getLogger().handlers:\n            if handler.name == CONSOLE_LOG_HANDLER_NAME:\n                handler.setLevel(logging.DEBUG)\n                return\n        raise RuntimeError(f\"Couldn't locate required logger named {CONSOLE_LOG_HANDLER_NAME}\")\n\n\ndef _set_force_styles_to(ctx: click.Context, _param: click.Option, value: bool | None) -> None:  # noqa: FBT001\n    if value is not None:\n        ctx.color = value\n\n\nverbose_option = click.option(\n    \"--verbose\",\n    \"-v\",\n    is_flag=True,\n    callback=_set_verbose,\n    expose_value=False,\n    help=\"Enable logging of DEBUG messages to the console.\",\n)\n\ncolor_option = click.option(\n    \"--color/--no-color\",\n    # support NO_COLOR (ref: https://no-color.org) env var as default value,\n    default=lambda: False if os.getenv(\"NO_COLOR\") else None,\n    callback=_set_force_styles_to,\n    expose_value=False,\n    help=\"Force enable or disable of console output styling.\",\n)\n"
  },
  {
    "path": "src/algokit/core/proc.py",
    "content": "import dataclasses\nimport logging\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom subprocess import Popen\nfrom subprocess import run as subprocess_run\n\nimport click\n\nfrom algokit.core.log_handlers import EXTRA_EXCLUDE_FROM_CONSOLE\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclasses.dataclass\nclass RunResult:\n    command: str\n    exit_code: int\n    output: str\n\n\ndef run(  # noqa: PLR0913\n    command: list[str],\n    *,\n    cwd: Path | None = None,\n    env: dict[str, str] | None = None,\n    bad_return_code_error_message: str | None = None,\n    prefix_process: bool = True,\n    stdout_log_level: int = logging.DEBUG,\n    pass_stdin: bool = False,\n) -> RunResult:\n    \"\"\"Wraps subprocess.Popen() similarly to subprocess.run() but adds: logging and streaming (unicode) I/O capture\n\n    Note that not all options or usage scenarios here are covered, just some common use cases\n    \"\"\"\n    command_str = \" \".join(command)\n    logger.debug(f\"Running '{command_str}' in '{cwd or Path.cwd()}'\")\n\n    lines = []\n    exit_code = None\n    with Popen(\n        command,\n        stdout=subprocess.PIPE,  # capture stdout\n        stderr=subprocess.STDOUT,  # redirect stderr to stdout, so they're interleaved in the correct ordering\n        stdin=sys.stdin if pass_stdin else None,\n        text=True,  # make all I/O in unicode/text\n        cwd=cwd,\n        env=env,\n        bufsize=1,  # line buffering, works because text=True\n        encoding=\"utf-8\",\n    ) as proc:\n        assert proc.stdout  # type narrowing\n        while exit_code is None:\n            line = proc.stdout.readline()\n            if not line:\n                # only poll if no output, so that we consume entire output stream\n                exit_code = proc.poll()\n            else:\n                lines.append(line)\n                logger.log(\n                    level=stdout_log_level,\n                    
msg=(click.style(f\"{command[0]}:\", bold=True) if prefix_process else \"\") + f\" {line.strip()}\",\n                )\n    if exit_code == 0:\n        logger.debug(f\"'{command_str}' completed successfully\", extra=EXTRA_EXCLUDE_FROM_CONSOLE)\n    else:\n        logger.debug(f\"'{command_str}' failed, exited with code = {exit_code}\", extra=EXTRA_EXCLUDE_FROM_CONSOLE)\n        if bad_return_code_error_message:\n            raise click.ClickException(bad_return_code_error_message)\n    output = \"\".join(lines)\n    return RunResult(command=command_str, exit_code=exit_code, output=output)\n\n\ndef run_interactive(\n    command: list[str],\n    *,\n    cwd: Path | None = None,\n    env: dict[str, str] | None = None,\n    bad_return_code_error_message: str | None = None,\n    timeout: int | None = None,\n) -> RunResult:\n    \"\"\"Wraps subprocess.run() as an user interactive session and\n        also adds logging of the command being executed, but not the output\n\n    Note that not all options or usage scenarios here are covered, just some common use cases\n    \"\"\"\n    command_str = \" \".join(command)\n    logger.debug(f\"Running '{command_str}' in '{cwd or Path.cwd()}'\")\n\n    result = subprocess_run(command, cwd=cwd, env=env, check=False, timeout=timeout)\n\n    if result.returncode == 0:\n        logger.debug(f\"'{command_str}' completed successfully\", extra=EXTRA_EXCLUDE_FROM_CONSOLE)\n    else:\n        logger.debug(\n            f\"'{command_str}' failed, exited with code = {result.returncode}\", extra=EXTRA_EXCLUDE_FROM_CONSOLE\n        )\n        if bad_return_code_error_message:\n            raise click.ClickException(bad_return_code_error_message)\n    return RunResult(command=command_str, exit_code=result.returncode, output=\"\")\n"
  },
  {
    "path": "src/algokit/core/project/__init__.py",
    "content": "from enum import Enum\nfrom functools import cache\nfrom pathlib import Path\nfrom typing import Any\n\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_algokit_config\nfrom algokit.core.utils import alphanumeric_sort_key\n\nWORKSPACE_LOOKUP_LEVELS = 2\n\n\nclass ProjectType(str, Enum):\n    \"\"\"\n    Enum class for specifying the type of algokit projects.\n\n    Attributes:\n        WORKSPACE (str): Represents a workspace project type.\n        BACKEND (str): Represents a backend project type, typically for server-side operations.\n        FRONTEND (str): Represents a frontend project type, typically for client-side operations.\n        CONTRACT (str): Represents a contract project type, typically for blockchain contracts.\n    \"\"\"\n\n    WORKSPACE = \"workspace\"\n    BACKEND = \"backend\"\n    FRONTEND = \"frontend\"\n    CONTRACT = \"contract\"\n\n\ndef _get_subprojects_paths(config: dict[str, Any], project_dir: Path) -> list[Path]:\n    \"\"\"Searches for project directories within the specified workspace. It filters out directories that\n    do not contain an algokit configuration file.\n\n    Args:\n        config (dict[str, Any]): The configuration of the project.\n        working directory is used.\n        project_dir (Path): The base directory to search for project root directories. 
If None, the current\n        working directory is used.\n\n    Returns:\n        list[Path]: A list containing paths to project root directories that contain an algokit configuration file.\n    \"\"\"\n\n    projects_root = config.get(\"project\", {}).get(\"projects_root_path\", None)\n    if projects_root is None:\n        return []\n\n    project_root_path = project_dir / projects_root\n\n    if not project_root_path.exists():\n        return []\n\n    return [\n        sub_project\n        for sub_project in project_root_path.iterdir()\n        if sub_project.is_dir() and (sub_project / ALGOKIT_CONFIG).exists()\n    ]\n\n\n@cache\ndef get_project_configs(\n    project_dir: Path | None = None,\n    lookup_level: int = WORKSPACE_LOOKUP_LEVELS,\n    project_type: str | None = None,\n    project_names: tuple[str, ...] | None = None,\n) -> list[dict[str, Any]]:\n    \"\"\"Recursively finds configurations for all algokit projects within the specified directory or the\n    current working directory.\n\n    This function reads the .algokit.toml configuration file from each project directory and returns a list of\n    dictionaries, each representing a project's configuration. Additionally appends 'cwd' at the root of each dict\n    object loa\n\n    Args:\n        project_dir (Path | None): The base directory to search for project configurations. If None, the current\n        working directory is used.\n        lookup_level (int): The number of levels to go up the directory to search for workspace projects\n        project_type (str | None): The type of project to filter by. If None, all project types are returned.\n        project_names (tuple[str, ...] | None): The names of the projects to filter by. 
If None, gets all projects.\n\n    Returns:\n        list[dict[str, Any] | None]: A list of dictionaries, each containing the configuration of an algokit project.\n        Returns None for projects where the configuration could not be read.\n    \"\"\"\n\n    if lookup_level < 0:\n        return []\n\n    project_dir = project_dir or Path.cwd()\n    project_config = get_algokit_config(project_dir=project_dir)\n\n    if not project_config:\n        return get_project_configs(\n            project_dir=project_dir.parent,\n            lookup_level=lookup_level - 1,\n            project_type=project_type,\n            project_names=project_names,\n        )\n\n    configs = []\n    for sub_project_dir in _get_subprojects_paths(project_config, project_dir):\n        config = get_algokit_config(project_dir=sub_project_dir) or {}\n        type_mismatch = project_type and config.get(\"project\", {}).get(\"type\") != project_type\n        name_mismatch = project_names and config.get(\"project\", {}).get(\"name\") not in project_names\n        if not type_mismatch and not name_mismatch:\n            config[\"cwd\"] = sub_project_dir\n            configs.append(config)\n\n    # Sort configs by the directory name alphanumerically\n    sorted_configs = sorted(configs, key=lambda x: alphanumeric_sort_key(x[\"cwd\"].name))\n\n    return (\n        sorted_configs\n        if sorted_configs\n        else get_project_configs(\n            project_dir=project_dir.parent,\n            lookup_level=lookup_level - 1,\n            project_type=project_type,\n            project_names=project_names,\n        )\n    )\n\n\n@cache\ndef get_project_dir_names_from_workspace(project_dir: Path | None = None) -> list[str]:\n    \"\"\"\n    Generates a list of project names from the .algokit.toml file within the specified directory or the current\n    working directory.\n\n    This function is useful for identifying all the projects within a given workspace by their names.\n\n    Args:\n        
project_dir (Path | None): The base directory to search for project names. If None,\n        the current working directory is used.\n\n    Returns:\n        list[str]: A list of project names found within the specified directory.\n    \"\"\"\n\n    project_dir = project_dir or Path.cwd()\n    config = get_algokit_config(project_dir=project_dir)\n\n    if not config:\n        return []\n\n    return [p.name for p in _get_subprojects_paths(config, project_dir)]\n\n\ndef get_workspace_project_path(\n    project_dir: Path | None = None, lookup_level: int = WORKSPACE_LOOKUP_LEVELS\n) -> Path | None:\n    \"\"\"Recursively searches for the workspace project path within the specified directory.\n\n    Args:\n        project_dir (Path): The base directory to search for the workspace project path.\n        lookup_level (int): The number of levels to go up the directory to search for workspace projects.\n\n    Returns:\n        Path | None: The path to the workspace project directory or None if not found.\n    \"\"\"\n\n    if lookup_level < 0:\n        return None\n\n    project_dir = project_dir or Path.cwd()\n    project_config = get_algokit_config(project_dir=project_dir)\n\n    if not project_config or project_config.get(\"project\", {}).get(\"type\") != ProjectType.WORKSPACE:\n        return get_workspace_project_path(project_dir=project_dir.parent, lookup_level=lookup_level - 1)\n\n    return project_dir\n"
  },
  {
    "path": "src/algokit/core/project/bootstrap.py",
    "content": "import logging\nimport os\nimport re\nfrom pathlib import Path\n\nimport click\nimport questionary\nfrom packaging import version\n\nfrom algokit.core import proc, questionary_extensions\nfrom algokit.core._toml import loads as toml_loads\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_algokit_config, get_current_package_version\nfrom algokit.core.config_commands.js_package_manager import (\n    JSPackageManager,\n    get_js_package_manager,\n    save_js_package_manager,\n)\nfrom algokit.core.config_commands.py_package_manager import (\n    PyPackageManager,\n    get_py_package_manager,\n    save_py_package_manager,\n)\nfrom algokit.core.utils import find_valid_pipx_command, is_windows\n\nENV_TEMPLATE_PATTERN = \".env*.template\"\nMAX_BOOTSTRAP_DEPTH = 2\nPKG_MANAGER_TRANSLATIONS = {\n    JSPackageManager.PNPM: [\n        (\"npm install\", \"pnpm install\"),\n        (\"npm run \", \"pnpm run \"),\n        (\"npm test\", \"pnpm test\"),\n        (\"npm start\", \"pnpm start\"),\n        (\"npm build\", \"pnpm build\"),\n    ],\n    JSPackageManager.NPM: [\n        (\"pnpm install\", \"npm install\"),\n        (\"pnpm run \", \"npm run \"),\n        (\"pnpm test\", \"npm test\"),\n        (\"pnpm start\", \"npm start\"),\n        (\"pnpm build\", \"npm build\"),\n    ],\n    PyPackageManager.UV: [\n        (\"poetry install\", \"uv sync\"),\n        (\"poetry run \", \"uv run \"),\n        (\"poetry add \", \"uv add \"),\n        (\"poetry remove \", \"uv remove \"),\n        (\"poetry init\", \"uv init\"),\n        (\"poetry lock\", \"uv lock\"),\n    ],\n    PyPackageManager.POETRY: [\n        (\"uv sync\", \"poetry install\"),\n        (\"uv run \", \"poetry run \"),\n        (\"uv add \", \"poetry add \"),\n        (\"uv remove \", \"poetry remove \"),\n        (\"uv init\", \"poetry init\"),\n        (\"uv lock\", \"poetry lock\"),\n    ],\n}\nlogger = logging.getLogger(__name__)\n\n\ndef _has_pyproject_toml(project_dir: Path) -> bool:\n    
return (project_dir / \"pyproject.toml\").exists()\n\n\ndef _get_py_package_manager_override(project_dir: Path) -> str | None:\n    \"\"\"Get Python package manager override from .algokit.toml configuration.\"\"\"\n    algokit_config = get_algokit_config(project_dir=project_dir)\n    if algokit_config and \"package_manager\" in algokit_config and \"python\" in algokit_config[\"package_manager\"]:\n        manager = algokit_config[\"package_manager\"][\"python\"]\n        logger.debug(f\"Using Python package manager from .algokit.toml: {manager}\")\n        return str(manager)\n    return None\n\n\ndef _get_js_package_manager_override(project_dir: Path) -> str | None:\n    \"\"\"Get JavaScript package manager override from .algokit.toml configuration.\"\"\"\n    algokit_config = get_algokit_config(project_dir=project_dir)\n    if algokit_config and \"package_manager\" in algokit_config and \"javascript\" in algokit_config[\"package_manager\"]:\n        manager = algokit_config[\"package_manager\"][\"javascript\"]\n        logger.debug(f\"Using JavaScript package manager from .algokit.toml: {manager}\")\n        return str(manager)\n    return None\n\n\ndef is_uv_project(project_dir: Path) -> bool:\n    uv_path = project_dir / \"uv.lock\"\n    return uv_path.exists()\n\n\ndef _has_python_project(project_dir: Path) -> bool:\n    \"\"\"Check if the directory contains a Python project.\"\"\"\n    poetry_path = project_dir / \"poetry.toml\"\n    pyproject_path = project_dir / \"pyproject.toml\"\n    return poetry_path.exists() or pyproject_path.exists()\n\n\ndef _has_javascript_project(project_dir: Path) -> bool:\n    \"\"\"Check if the directory contains a JavaScript project.\"\"\"\n    return (project_dir / \"package.json\").exists()\n\n\ndef _determine_python_package_manager(project_dir: Path) -> str:\n    \"\"\"\n    Determine Python package manager with proper precedence:\n    1. Project override (.algokit.toml) - Explicit project configuration\n    2. 
User preference (algokit config) - User's explicit choice\n    3. Smart defaults (project structure) - Only when no preference exists\n    4. Interactive prompt - Falls back to user input and saves preference\n    \"\"\"\n\n    # 1. Project-specific override (highest priority)\n    override = _get_py_package_manager_override(project_dir)\n    if override:\n        return override\n\n    # 2. User's global preference (respects user's explicit choice)\n    user_preference = get_py_package_manager()\n    if user_preference:\n        return user_preference\n\n    # 3. Smart defaults based on project structure (only when no user preference)\n    poetry_path = project_dir / \"poetry.toml\"\n    pyproject_path = project_dir / \"pyproject.toml\"\n\n    if poetry_path.exists():\n        # Standalone poetry.toml suggests Poetry\n        return PyPackageManager.POETRY\n\n    if pyproject_path.exists() and \"[tool.poetry]\" in pyproject_path.read_text(\"utf-8\"):\n        # pyproject.toml with [tool.poetry] section suggests Poetry\n        return PyPackageManager.POETRY\n\n    # 4. Interactive prompt for first-time users\n    manager = questionary.select(\n        \"Which Python package manager would you prefer `bootstrap` command to use?\",\n        choices=[PyPackageManager.POETRY, PyPackageManager.UV],\n    ).ask()\n    if manager is None:\n        # Default to Poetry if user cancels\n        manager = PyPackageManager.POETRY\n    save_py_package_manager(manager)\n    return str(manager)\n\n\ndef _determine_javascript_package_manager(project_dir: Path) -> str:\n    \"\"\"\n    Determine JavaScript package manager with proper precedence:\n    1. Project override (.algokit.toml) - Explicit project configuration\n    2. User preference (algokit config) - User's explicit choice\n    3. Smart defaults (lock files) - Only when no preference exists\n    4. Interactive prompt - Falls back to user input and saves preference\n    \"\"\"\n\n    # 1. 
Project-specific override (highest priority)\n    override = _get_js_package_manager_override(project_dir)\n    if override:\n        return override\n\n    # 2. User's global preference (respects user's explicit choice)\n    user_preference = get_js_package_manager()\n    if user_preference:\n        return user_preference\n\n    # 3. Smart defaults based on lock files (only when no user preference)\n    if (project_dir / \"pnpm-lock.yaml\").exists():\n        return JSPackageManager.PNPM\n\n    if (project_dir / \"package-lock.json\").exists():\n        return JSPackageManager.NPM\n\n    # 4. Interactive prompt for first-time users\n    manager = questionary_extensions.prompt_select(\n        \"Which JavaScript package manager would you prefer `bootstrap` command to use?\",\n        *[questionary.Choice(title=npm.value, value=npm) for npm in JSPackageManager],\n    )\n    if manager is None:\n        # Default to NPM if user cancels\n        manager = JSPackageManager.NPM\n    save_js_package_manager(manager)\n    return str(manager)\n\n\ndef _bootstrap_python_project(project_dir: Path, manager: str) -> None:\n    \"\"\"Bootstrap a Python project with the specified package manager.\"\"\"\n    if manager == PyPackageManager.UV:\n        logger.debug(\"Running `algokit project bootstrap uv`\")\n        bootstrap_uv(project_dir)\n    else:  # Default to Poetry for backward compatibility\n        logger.debug(\"Running `algokit project bootstrap poetry`\")\n        bootstrap_poetry(project_dir)\n\n\ndef _bootstrap_javascript_project(project_dir: Path, manager: str, *, ci_mode: bool) -> None:\n    \"\"\"Bootstrap a JavaScript project with the specified package manager.\"\"\"\n    if manager == JSPackageManager.NPM:\n        logger.debug(\"Running `algokit project bootstrap npm`\")\n        bootstrap_npm(project_dir, ci_mode=ci_mode)\n    elif manager == JSPackageManager.PNPM:\n        logger.debug(\"Running `algokit project bootstrap pnpm`\")\n        
bootstrap_pnpm(project_dir, ci_mode=ci_mode)\n\n\ndef _translate_package_manager_in_toml(project_dir: Path, js_manager: str | None, py_manager: str | None) -> None:\n    \"\"\"Translate package manager commands in .algokit.toml file.\"\"\"\n    toml_path = project_dir / ALGOKIT_CONFIG\n    if not toml_path.exists():\n        return\n\n    try:\n        content = toml_path.read_text()\n        config = toml_loads(content)\n\n        # Early exit if no run commands\n        if not (run_commands := config.get(\"project\", {}).get(\"run\", {})):\n            return\n\n        original = content\n\n        # Process all commands\n        for command_config in run_commands.values():\n            if not isinstance(command_config, dict):\n                continue\n\n            for cmd in command_config.get(\"commands\", []):\n                if (translated := _translate_single_command(cmd, js_manager, py_manager)) != cmd:\n                    # Replace command preserving quotes\n                    content = re.sub(f\"([\\\"']){re.escape(cmd)}([\\\"'])\", f\"\\\\1{translated}\\\\2\", content)\n                    logger.debug(f\"Translating: '{cmd}' -> '{translated}'\")\n\n        # Write back if changed\n        if content != original:\n            toml_path.write_text(content)\n            logger.info(f\"Updated package manager commands in {ALGOKIT_CONFIG}\")\n\n    except Exception as e:\n        logger.warning(f\"Failed to translate package managers in {ALGOKIT_CONFIG}: {e}\")\n\n\ndef _warn_incompatible_commands(cmd: str, js_manager: str | None, py_manager: str | None) -> None:\n    \"\"\"Warn about commands that cannot be translated between package managers.\"\"\"\n\n    # Define incompatible command prefixes\n    py_incompatibles = {\n        PyPackageManager.UV: {\n            \"poetry show\",\n            \"poetry config\",\n            \"poetry export\",\n            \"poetry search\",\n            \"poetry check\",\n            \"poetry publish\",\n        },\n 
       PyPackageManager.POETRY: {\"uv pip\", \"uv venv\", \"uv tool\", \"uv python\"},\n    }\n\n    js_incompatibles = {\n        JSPackageManager.PNPM: {\"npm fund\", \"npm exec\", \"npx\", \"npm audit\"},\n        JSPackageManager.NPM: {\"pnpm dlx\", \"pnpm exec\", \"pnpm audit\"},\n    }\n\n    # Check for incompatible Python commands\n    if py_manager:\n        py_manager_enum = PyPackageManager(py_manager)\n        if py_manager_enum in py_incompatibles:\n            for prefix in py_incompatibles[py_manager_enum]:\n                if cmd.startswith(prefix):\n                    logger.warning(\n                        f\"⚠️  Command '{cmd}' may not be compatible with {py_manager}. \"\n                        \"The command will remain unchanged and may not work as expected.\"\n                    )\n                    return\n\n    # Check for incompatible JavaScript commands\n    if js_manager:\n        js_manager_enum = JSPackageManager(js_manager)\n        if js_manager_enum in js_incompatibles:\n            for prefix in js_incompatibles[js_manager_enum]:\n                if cmd.startswith(prefix):\n                    logger.warning(\n                        f\"⚠️  Command '{cmd}' may not be compatible with {js_manager}. 
\"\n                        \"The command will remain unchanged and may not work as expected.\"\n                    )\n                    return\n\n\ndef _translate_single_command(cmd: str, js_manager: str | None, py_manager: str | None) -> str:\n    \"\"\"Minimal translation - only for semantically equivalent commands.\"\"\"\n    if not cmd:\n        return cmd\n\n    _warn_incompatible_commands(cmd, js_manager, py_manager)\n\n    for manager in (js_manager, py_manager):\n        if manager and (translations := PKG_MANAGER_TRANSLATIONS.get(manager)):\n            for old, new in translations:\n                if old.endswith(\" \"):\n                    if cmd.startswith(old):\n                        return new + cmd[len(old) :]\n\n                elif cmd == old or cmd.startswith(f\"{old} \"):\n                    remainder = cmd[len(old) :] if cmd != old else \"\"\n                    return new + remainder\n\n    return cmd\n\n\ndef bootstrap_any(project_dir: Path, *, ci_mode: bool) -> None:\n    \"\"\"Bootstrap a project with automatic package manager selection.\"\"\"\n\n    logger.debug(f\"Checking {project_dir} for bootstrapping needs\")\n\n    # Environment files\n    if next(project_dir.glob(ENV_TEMPLATE_PATTERN), None):\n        logger.debug(\"Running `algokit project bootstrap env`\")\n        bootstrap_env(project_dir, ci_mode=ci_mode)\n\n    # Determine package managers\n    js_manager = None\n    py_manager = None\n\n    # Python projects\n    if _has_python_project(project_dir):\n        py_manager = _determine_python_package_manager(project_dir)\n        _bootstrap_python_project(project_dir, py_manager)\n\n    # JavaScript projects\n    if _has_javascript_project(project_dir):\n        js_manager = _determine_javascript_package_manager(project_dir)\n        _bootstrap_javascript_project(project_dir, js_manager, ci_mode=ci_mode)\n\n    # Translate package manager commands in .algokit.toml\n    if js_manager or py_manager:\n        
_translate_package_manager_in_toml(project_dir, js_manager, py_manager)\n\n\ndef bootstrap_any_including_subdirs(  # noqa: PLR0913\n    base_path: Path,\n    *,\n    ci_mode: bool,\n    max_depth: int = MAX_BOOTSTRAP_DEPTH,\n    depth: int = 0,\n    project_names: list[str] | None = None,\n    project_type: str | None = None,\n) -> None:\n    if depth > max_depth:\n        return\n\n    config_project = (get_algokit_config(project_dir=base_path) or {}).get(\"project\", {})\n    skip = bool(config_project) and (\n        (project_type and config_project.get(\"type\") != project_type)\n        or (project_names and config_project.get(\"name\") not in project_names)\n    )\n\n    if not skip:\n        bootstrap_any(base_path, ci_mode=ci_mode)\n\n    for sub_dir in sorted(base_path.iterdir()):  # sort needed for test output ordering\n        if sub_dir.is_dir() and sub_dir.name.lower() not in [\".venv\", \"node_modules\", \"__pycache__\"]:\n            bootstrap_any_including_subdirs(\n                sub_dir,\n                ci_mode=ci_mode,\n                max_depth=max_depth,\n                depth=depth + 1,\n                project_names=project_names,\n                project_type=project_type,\n            )\n        else:\n            logger.debug(f\"Skipping {sub_dir}\")\n\n\ndef bootstrap_env(project_dir: Path, *, ci_mode: bool) -> None:\n    # List all .env*.template files in the directory\n    env_template_paths = sorted(project_dir.glob(ENV_TEMPLATE_PATTERN))\n\n    # If no template files found, log it\n    if not env_template_paths:\n        logger.info(\"No .env or .env.{network_name}.template files found; nothing to do here, skipping bootstrap.\")\n        return\n\n    # Process each template file\n    for env_template_path in env_template_paths:\n        # Determine the output file name (strip .template suffix)\n        env_path = Path(env_template_path).with_suffix(\"\")\n\n        if env_path.exists():\n            logger.info(f\"{env_path.name} 
already exists; skipping bootstrap of {env_path.name}\")\n            continue\n\n        logger.debug(f\"{env_path} doesn't exist yet\")\n        logger.debug(f\"{env_template_path} exists\")\n        logger.info(f\"Copying {env_template_path} to {env_path} and prompting for empty values\")\n\n        # find all empty values in .env file and prompt the user for a value\n        with (\n            Path(env_template_path).open(encoding=\"utf-8\") as env_template_file,\n            env_path.open(mode=\"w\", encoding=\"utf-8\") as env_file,\n        ):\n            comment_lines: list[str] = []\n            for line in env_template_file:\n                # strip newline character(s) from end of line for simpler handling\n                stripped_line = line.strip()\n                # if it is a comment line, keep it in var and continue\n                if stripped_line.startswith(\"#\"):\n                    comment_lines.append(line)\n                    env_file.write(line)\n                # keep blank lines in output but don't accumulate them in comments\n                elif not stripped_line:\n                    env_file.write(line)\n                else:\n                    # lines not blank and not empty\n                    var_name, *var_value = stripped_line.split(\"=\", maxsplit=1)\n                    # if it is an empty value, the user should be prompted for value with the comment line above\n                    if var_value and not var_value[0]:\n                        var_name = var_name.strip()\n                        if not ci_mode:\n                            logger.info(\"\".join(comment_lines))\n                            new_value = questionary_extensions.prompt_text(f\"Please provide a value for {var_name}:\")\n                            env_file.write(f\"{var_name}={new_value}\\n\")\n                        # In CI mode, we _don't_ prompt for values, because... 
it's CI\n                        # we can omit the line entirely in the case of blank value,\n                        # and just to be nice we can check to make sure the var is defined in the current\n                        # env and if not, print a warning\n                        # note that due to the multiple env files, this might be an aberrant warning as\n                        # it might be for an .env<name>.template that is not used in the current CI process?\n                        elif var_name not in os.environ:\n                            logger.warning(f\"Prompt skipped for {var_name} due to CI mode, but this value is not set\")\n                    else:  # this is a line with value\n                        env_file.write(line)\n                    comment_lines = []\n\n\ndef bootstrap_poetry(project_dir: Path) -> None:\n    try:\n        proc.run(\n            [\"poetry\", \"--version\"],\n            bad_return_code_error_message=\"poetry --version failed, please check your poetry install\",\n        )\n        try_install_poetry = False\n    except OSError:\n        try_install_poetry = True\n\n    if try_install_poetry:\n        logger.info(\"Poetry not found; attempting to install it...\")\n        if not questionary_extensions.prompt_confirm(\n            \"We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies?\",\n            default=True,\n        ):\n            raise click.ClickException(\n                \"Unable to install poetry via pipx; please install poetry \"\n                \"manually via https://python-poetry.org/docs/ and try `algokit project bootstrap poetry` again.\"\n            )\n        pipx_command = find_valid_pipx_command(\n            \"Unable to find pipx install so that poetry can be installed; \"\n            \"please install pipx via https://pypa.github.io/pipx/ \"\n            \"and then try `algokit project bootstrap poetry` again.\"\n        )\n        proc.run(\n  
          [*pipx_command, \"install\", \"poetry\"],\n            bad_return_code_error_message=(\n                \"Unable to install poetry via pipx; please install poetry \"\n                \"manually via https://python-poetry.org/docs/ and try `algokit project bootstrap poetry` again.\"\n            ),\n        )\n\n    logger.info(\"Installing Python dependencies and setting up Python virtual environment via Poetry\")\n    try:\n        proc.run([\"poetry\", \"install\"], stdout_log_level=logging.INFO, cwd=project_dir)\n    except OSError as e:\n        if try_install_poetry:\n            raise click.ClickException(\n                \"Unable to access Poetry on PATH after installing it via pipx; \"\n                \"check pipx installations are on your path by running `pipx ensurepath` \"\n                \"and try `algokit project bootstrap poetry` again.\"\n            ) from e\n        raise  # unexpected error, we already ran without IOError before\n\n\ndef bootstrap_npm(project_dir: Path, *, ci_mode: bool) -> None:\n    def get_install_command(*, ci_mode: bool) -> list[str]:\n        has_package_lock = (project_dir / \"package-lock.json\").exists()\n        if ci_mode and not has_package_lock:\n            raise click.ClickException(\n                \"Cannot run `npm ci` because `package-lock.json` is missing. 
\"\n                \"Please run `npm install` instead and commit it to your source control.\"\n            )\n        return [\"ci\" if ci_mode else \"install\"]\n\n    package_json_path = project_dir / \"package.json\"\n    if not package_json_path.exists():\n        logger.info(f\"{package_json_path} doesn't exist; nothing to do here, skipping bootstrap of npm\")\n    else:\n        logger.info(\"Installing npm dependencies\")\n        cmd = [\"npm\" if not is_windows() else \"npm.cmd\", *get_install_command(ci_mode=ci_mode)]\n        try:\n            proc.run(\n                cmd,\n                stdout_log_level=logging.INFO,\n                cwd=project_dir,\n            )\n        except OSError as e:\n            raise click.ClickException(\n                f\"Failed to run `{' '.join(cmd)}` for {package_json_path}. Is npm installed and available on PATH?\"\n            ) from e\n\n\ndef bootstrap_pnpm(project_dir: Path, *, ci_mode: bool) -> None:\n    def get_install_command(*, ci_mode: bool) -> list[str]:\n        # PNPM auto-detects CI environments and uses appropriate behavior automatically\n        # Only check for lockfile existence in CI mode for better error messages\n        if ci_mode:\n            has_package_lock = (project_dir / \"pnpm-lock.yaml\").exists()\n            if not has_package_lock:\n                raise click.ClickException(\n                    \"Cannot run in CI mode because `pnpm-lock.yaml` is missing. 
\"\n                    \"Please run `pnpm install` to generate the lockfile and commit it to your source control.\"\n                )\n        return [\"install\"]  # Let PNPM handle CI detection automatically\n\n    package_json_path = project_dir / \"package.json\"\n    if not package_json_path.exists():\n        logger.info(f\"{package_json_path} doesn't exist; nothing to do here, skipping bootstrap of pnpm\")\n    else:\n        logger.info(\"Installing pnpm dependencies\")\n        cmd = [\"pnpm\" if not is_windows() else \"pnpm.cmd\", *get_install_command(ci_mode=ci_mode)]\n        try:\n            proc.run(cmd, stdout_log_level=logging.INFO, cwd=project_dir)\n        except OSError as e:\n            raise click.ClickException(\n                f\"Failed to run `{' '.join(cmd)}` for {package_json_path}. Is pnpm installed and available on PATH?\"\n            ) from e\n\n\ndef migrate_pyproject_to_uv(project_dir: Path) -> None:\n    pyproject_path = project_dir / \"pyproject.toml\"\n    if not pyproject_path.exists():\n        raise click.ClickException(\"pyproject.toml doesn't exist; nothing to do here, skipping migration\")\n    try:\n        proc.run([\"uvx\", \"migrate-to-uv\"], cwd=project_dir, stdout_log_level=logging.INFO)\n    except OSError as e:\n        raise click.ClickException(\n            \"Failed to run `uvx migrate-to-uv` for pyproject.toml. 
Is uv installed and available on PATH?\"\n        ) from e\n\n\ndef bootstrap_uv(project_dir: Path) -> None:  # noqa: C901\n    try:\n        proc.run(\n            [\"uv\", \"--version\"],\n            bad_return_code_error_message=\"uv --version failed, please check your uv install\",\n        )\n        try_install_uv = False\n    except OSError:\n        try_install_uv = True\n\n    if try_install_uv:\n        logger.info(\"UV not found; attempting to install it...\")\n        if not questionary_extensions.prompt_confirm(\n            \"We couldn't find `uv`; can we install it for you via curl so we can install Python dependencies?\",\n            default=True,\n        ):\n            raise click.ClickException(\n                \"Unable to install uv; please install uv \"\n                \"manually via https://github.com/astral-sh/uv and try `algokit project bootstrap uv` again.\"\n            )\n\n        # Use the standalone installer as recommended in the UV docs\n        if is_windows():\n            cmd = [\"powershell\", \"-ExecutionPolicy\", \"ByPass\", \"-c\", \"irm https://astral.sh/uv/install.ps1 | iex\"]\n            try:\n                proc.run(\n                    cmd,\n                    bad_return_code_error_message=(\n                        \"Unable to install uv; please install uv \"\n                        \"manually via https://github.com/astral-sh/uv and try `algokit project bootstrap uv` again.\"\n                    ),\n                )\n            except Exception as e:\n                raise click.ClickException(\n                    \"Failed to install uv. 
Please install it manually via \"\n                    \"https://github.com/astral-sh/uv and try `algokit project bootstrap uv` again.\"\n                ) from e\n        else:\n            # For Unix platforms, use proc.run with sh -c to handle the pipe safely\n            try:\n                proc.run(\n                    [\"sh\", \"-c\", \"curl -LsSf https://astral.sh/uv/install.sh | sh\"],\n                    bad_return_code_error_message=(\n                        \"Unable to install uv; please install uv \"\n                        \"manually via https://github.com/astral-sh/uv and try `algokit project bootstrap uv` again.\"\n                    ),\n                )\n            except Exception as e:\n                raise click.ClickException(\n                    \"Failed to install uv. Please install it manually via \"\n                    \"https://github.com/astral-sh/uv and try `algokit project bootstrap uv` again.\"\n                ) from e\n\n    # Check if pyproject.toml contains poetry configuration\n    pyproject_path = project_dir / \"pyproject.toml\"\n    is_poetry_project = pyproject_path.exists() and \"[tool.poetry]\" in pyproject_path.read_text(\"utf-8\")\n    if is_poetry_project:\n        if questionary_extensions.prompt_confirm(\n            \"Would you like to attempt to migrate the pyproject configuration to uv compliant format?\\n\"\n            \"⚠️  This will run a third-party tool (https://mkniewallner.github.io/migrate-to-uv/) \"\n            \"that will attempt to convert your poetry project to uv. \"\n            \"You are advised to double check the migrated file and it's recommended to run this \"\n            \"in a version controlled repository to revert changes if needed.\",\n            default=False,\n        ):\n            migrate_pyproject_to_uv(project_dir)\n        else:\n            raise click.ClickException(\n                \"This project is configured to use Poetry. 
Please use `algokit project bootstrap poetry`, \"\n                \"set poetry as default package manager via `algokit config py-package-manager`, \"\n                \"or modify your pyproject.toml to be compatible with UV.\"\n            )\n\n    logger.info(\"Installing Python dependencies and setting up Python virtual environment via UV\")\n    try:\n        # Sync will create/update the virtual environment and install dependencies\n        proc.run([\"uv\", \"sync\"], stdout_log_level=logging.INFO, cwd=project_dir)\n    except OSError as e:\n        if try_install_uv:\n            raise click.ClickException(\n                \"Unable to access UV on PATH after installing it; \"\n                \"try restarting your terminal and running `algokit project bootstrap uv` again.\"\n            ) from e\n        raise  # unexpected error, we already ran without IOError before\n\n\ndef get_min_algokit_version(project_dir: Path) -> str | None:\n    config = get_algokit_config(project_dir=project_dir)\n    if config is None:\n        return None\n    try:\n        return str(config[\"algokit\"][\"min_version\"])\n    except KeyError:\n        logger.debug(f\"No 'min_version' specified in {ALGOKIT_CONFIG} file.\")\n        return None\n    except Exception as ex:\n        logger.debug(f\"Couldn't read algokit min_version from {ALGOKIT_CONFIG} file: {ex}\", exc_info=True)\n        return None\n\n\ndef project_minimum_algokit_version_check(project_dir: Path, *, ignore_version_check_fail: bool = False) -> None:\n    \"\"\"\n    Checks the current version of AlgoKit against the minimum required version specified in the AlgoKit config file.\n    \"\"\"\n\n    min_version = get_min_algokit_version(project_dir)\n    if min_version is None:\n        return\n    algokit_version = get_current_package_version()\n    if version.parse(algokit_version) < version.parse(min_version):\n        message = (\n            f\"This template requires AlgoKit version {min_version} or higher, 
\"\n            f\"but you have AlgoKit version {algokit_version}. Please update AlgoKit.\"\n        )\n        if ignore_version_check_fail:\n            logger.warning(message)\n        else:\n            raise click.ClickException(message)\n"
  },
  {
    "path": "src/algokit/core/project/deploy.py",
    "content": "import dataclasses\nimport logging\nfrom pathlib import Path\n\nimport click\nimport dotenv\nfrom algokit_utils import ClientManager\n\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_algokit_config\nfrom algokit.core.sandbox import (\n    DEFAULT_ALGOD_PORT,\n    DEFAULT_ALGOD_SERVER,\n    DEFAULT_ALGOD_TOKEN,\n    DEFAULT_INDEXER_PORT,\n    DEFAULT_INDEXER_SERVER,\n    DEFAULT_INDEXER_TOKEN,\n)\nfrom algokit.core.utils import load_env_file, split_command_string\n\nlogger = logging.getLogger(__name__)\n\n\nclass _KnownEnvironments:\n    LOCALNET = \"localnet\"\n    MAINNET = \"mainnet\"\n    TESTNET = \"testnet\"\n\n\nDEFAULT_MAINNET_ALGOD_SERVER = ClientManager.get_algonode_config(\"mainnet\", \"algod\").server\nDEFAULT_TESTNET_ALGOD_SERVER = ClientManager.get_algonode_config(\"testnet\", \"algod\").server\nDEFAULT_MAINNET_INDEXER_SERVER = ClientManager.get_algonode_config(\"mainnet\", \"indexer\").server\nDEFAULT_TESTNET_INDEXER_SERVER = ClientManager.get_algonode_config(\"testnet\", \"indexer\").server\n\n\n_ENVIRONMENT_CONFIG: dict[str, dict[str, str | None]] = {\n    _KnownEnvironments.LOCALNET: {\n        # this file should contain environment variables specific to algokit localnet\n        \"ALGOD_TOKEN\": str(DEFAULT_ALGOD_TOKEN),\n        \"ALGOD_SERVER\": str(DEFAULT_ALGOD_SERVER),\n        \"ALGOD_PORT\": str(DEFAULT_ALGOD_PORT),\n        \"INDEXER_TOKEN\": str(DEFAULT_INDEXER_TOKEN),\n        \"INDEXER_SERVER\": str(DEFAULT_INDEXER_SERVER),\n        \"INDEXER_PORT\": str(DEFAULT_INDEXER_PORT),\n    },\n    _KnownEnvironments.MAINNET: {\n        \"ALGOD_SERVER\": DEFAULT_MAINNET_ALGOD_SERVER,\n        \"INDEXER_SERVER\": DEFAULT_MAINNET_INDEXER_SERVER,\n    },\n    _KnownEnvironments.TESTNET: {\n        \"ALGOD_SERVER\": DEFAULT_TESTNET_ALGOD_SERVER,\n        \"INDEXER_SERVER\": DEFAULT_TESTNET_INDEXER_SERVER,\n    },\n}\n\n\ndef load_deploy_env_files(name: str | None, project_dir: Path) -> dict[str, str | None]:\n    \"\"\"\n    Load 
the deploy configuration for the given network.\n    :param name: Network name.\n    :param project_dir: Project directory path.\n    \"\"\"\n    result = load_env_file(project_dir)\n    if name is not None:\n        specific_env_path = project_dir / f\".env.{name}\"\n        if specific_env_path.exists():\n            result |= dotenv.dotenv_values(specific_env_path, verbose=True)\n\n        if name in _ENVIRONMENT_CONFIG:\n            logger.debug(f\"Using default environment config for algod and indexer for network {name}\")\n            result |= _ENVIRONMENT_CONFIG[name]\n        elif not specific_env_path.exists():\n            raise click.ClickException(f\"No such file: {specific_env_path}\")\n    return result\n\n\n@dataclasses.dataclass(kw_only=True)\nclass DeployConfig:\n    command: list[str] | None = None\n    environment_secrets: list[str] | None = None\n\n\ndef load_deploy_config(name: str | None, project_dir: Path) -> DeployConfig:  # noqa: C901\n    \"\"\"\n    Load the deploy command for the given network/environment from .algokit.toml file.\n    :param name: Network or environment name.\n    :param project_dir: Project directory path.\n    :return: Deploy command.\n    \"\"\"\n\n    # Load and parse the TOML configuration file\n    config = get_algokit_config(project_dir=project_dir)\n\n    deploy_config = DeployConfig()\n\n    if config is None:\n        # in the case of no algokit toml file, we return the (empty) defaults\n        return deploy_config\n\n    # ensure there is at least some config under [project.deploy] and that it's a dict type\n    # (which should implicitly exist even if only [project.deploy.{name}] exists)\n    legacy_deploy_table = config.get(\"deploy\")\n    project_deploy_table = config.get(\"project\", {}).get(\"deploy\", {})\n    deploy_table = project_deploy_table or legacy_deploy_table\n\n    match deploy_table:\n        case dict():\n            pass  # expected case if there is a file with deploy config\n        case 
None:\n            return deploy_config  # file has no deploy config, we return with (empty) defaults\n        case _:\n            raise click.ClickException(f\"Bad data for deploy in '{ALGOKIT_CONFIG}' file: {deploy_table}\")\n\n    assert isinstance(deploy_table, dict)  # because mypy is not all-knowing\n\n    for tbl in [deploy_table, deploy_table.get(name)]:\n        match tbl:\n            case {\"command\": str(command)}:\n                try:\n                    deploy_config.command = split_command_string(command)\n                except ValueError as ex:\n                    logger.debug(f\"Failed to parse command string: {command}\", exc_info=True)\n                    raise click.ClickException(f\"Failed to parse command '{command}': {ex}\") from ex\n            case {\"command\": list(command_parts)}:\n                deploy_config.command = [str(x) for x in command_parts]\n            case {\"command\": bad_data}:\n                raise click.ClickException(f\"Invalid data provided under 'command' key: {bad_data}\")\n        match tbl:\n            case {\"environment_secrets\": list(env_names)}:\n                deploy_config.environment_secrets = [str(x) for x in env_names]\n            case {\"environment_secrets\": bad_data}:\n                raise click.ClickException(f\"Invalid data provided under 'environment_secrets' key: {bad_data}\")\n\n    return deploy_config\n"
  },
  {
    "path": "src/algokit/core/project/run.py",
    "content": "import dataclasses\nimport logging\nimport os\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any\n\nimport click\n\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_algokit_config\nfrom algokit.core.proc import run\nfrom algokit.core.project import ProjectType\nfrom algokit.core.utils import (\n    load_env_file,\n    resolve_command_path,\n    split_command_string,\n)\n\nlogger = logging.getLogger(\"rich\")\n\n\n@dataclasses.dataclass(kw_only=True)\nclass ProjectCommand:\n    \"\"\"Represents a command to be executed within a project context.\n\n    Attributes:\n        name (str): The name of the command.\n        command (list[str]): The command to be executed, as a list of strings.\n        cwd (Path | None): The current working directory from which the command should be executed.\n        description (str | None): A brief description of the command.\n        project_name (str): The name of the project associated with this command.\n    \"\"\"\n\n    name: str\n    project_type: str\n    commands: list[list[str]]\n    cwd: Path | None = None\n    description: str | None = None\n    project_name: str\n    env_file: Path | None\n\n\n@dataclasses.dataclass(kw_only=True)\nclass WorkspaceProjectCommand:\n    \"\"\"Represents a command that encompasses multiple project commands within a workspace.\n\n    Attributes:\n        name (str): The name of the workspace command.\n        description (str | None): A brief description of the workspace command.\n        commands (list[ProjectCommand]): A list of `ProjectCommand` instances to be executed.\n        execution_order (list[str]): The order in which the commands should be executed.\n    \"\"\"\n\n    name: str\n    description: str | None = None\n    commands: list[ProjectCommand]\n    execution_order: list[str]\n\n\ndef _load_commands_from_standalone(\n    config: dict[str, Any],\n    project_dir: Path,\n) -> list[ProjectCommand]:\n    \"\"\"Loads 
commands for standalone projects based on the project configuration.\n\n    Args:\n        config (dict[str, Any]): The project configuration.\n        project_dir (Path): The directory of the project.\n\n    Returns:\n        list[ProjectCommand]: A list of project commands derived from the configuration.\n\n    Raises:\n        click.ClickException: If the project configuration is invalid.\n    \"\"\"\n    commands: list[ProjectCommand] = []\n    project_config = config.get(\"project\", {})\n    project_commands = project_config.get(\"run\", {})\n    project_name = project_config.get(\"name\")  # Ensure name is present\n    project_type = project_config.get(\"type\")\n\n    if not project_name:\n        raise click.ClickException(\n            \"Project name is required in the .algokit.toml file for projects of type 'contract', 'backend' or 'frontend\"\n        )\n\n    if not isinstance(project_commands, dict):\n        raise click.ClickException(f\"Bad data for [project.commands] key in '{ALGOKIT_CONFIG}'\")\n\n    for name, command_config in project_commands.items():\n        raw_commands = command_config.get(\"commands\")\n        description = command_config.get(\"description\", \"Description not available\")\n        raw_env_file = command_config.get(\"env_file\", None)\n        env_file = Path(raw_env_file) if raw_env_file else None\n\n        if not raw_commands:\n            logger.debug(f\"Command '{name}' has no custom commands to execute, skipping...\")\n            continue\n\n        commands.append(\n            ProjectCommand(\n                name=name,\n                commands=[split_command_string(cmd) for cmd in raw_commands],\n                cwd=project_dir,  # Assumed to be Path object\n                description=description,\n                project_name=project_name,\n                env_file=env_file,\n                project_type=project_type,\n            )\n        )\n\n    return commands\n\n\ndef _load_commands_from_workspace(\n   
 config: dict[str, Any],\n    project_dir: Path,\n) -> list[WorkspaceProjectCommand]:\n    \"\"\"Loads workspace commands based on the workspace configuration.\n\n    Args:\n        config (dict[str, Any]): The workspace configuration.\n        project_dir (Path): The directory of the workspace.\n\n    Returns:\n        list[WorkspaceProjectCommand]: A list of workspace project commands derived from the configuration.\n    \"\"\"\n    workspace_commands: dict[str, WorkspaceProjectCommand] = {}\n    execution_order = config.get(\"project\", {}).get(\"run\", {})\n    sub_projects_root = config.get(\"project\", {}).get(\"projects_root_path\")\n\n    if not sub_projects_root:\n        logger.warning(\"Missing 'projects_root_path' in workspace config; skipping command loading\")\n        return []\n\n    sub_projects_root_dir = project_dir / sub_projects_root\n    if not sub_projects_root_dir.exists() or not sub_projects_root_dir.is_dir():\n        logger.warning(f\"Path {sub_projects_root_dir} does not exist or is not a directory, skipping...\")\n        return []\n\n    for subproject_dir in sorted(sub_projects_root_dir.iterdir(), key=lambda p: p.name):\n        if not subproject_dir.is_dir():\n            continue\n\n        subproject_config = get_algokit_config(project_dir=subproject_dir, verbose_validation=True)\n        if not subproject_config:\n            continue\n\n        standalone_commands = _load_commands_from_standalone(subproject_config, subproject_dir)\n\n        for standalone_cmd in standalone_commands:\n            if standalone_cmd.name not in workspace_commands:\n                workspace_commands[standalone_cmd.name] = WorkspaceProjectCommand(\n                    name=standalone_cmd.name,\n                    description=f'Run all \"{standalone_cmd.name}\" commands in the workspace project.',\n                    commands=[standalone_cmd],\n                    execution_order=execution_order.get(standalone_cmd.name, []),\n                )\n    
        else:\n                workspace_commands[standalone_cmd.name].commands.append(standalone_cmd)\n\n    return list(workspace_commands.values())\n\n\ndef run_command(\n    *, command: ProjectCommand, from_workspace: bool = False, extra_args: tuple[str, ...] | None = None\n) -> None:\n    \"\"\"Executes a specified project command.\n\n    Args:\n        command (ProjectCommand): The project command to be executed.\n        from_workspace (bool): Indicates whether the command is being executed from a workspace context.\n        extra_args (tuple[str, ...] | None): Optional; additional arguments to pass to the command.\n\n    Raises:\n        click.ClickException: If the command execution fails.\n    \"\"\"\n    is_verbose = not from_workspace or logger.level == logging.DEBUG\n\n    if is_verbose:\n        logger.info(f\"Running `{command.name}` command in {command.cwd}...\")\n\n    config_dotenv = (\n        load_env_file(command.env_file) if command.env_file else load_env_file(command.cwd) if command.cwd else {}\n    )\n    # environment variables take precedence over those in .env* files\n    config_env = {**{k: v for k, v in config_dotenv.items() if v is not None}, **os.environ}\n\n    for index, cmd in enumerate(command.commands):\n        try:\n            resolved_command = resolve_command_path(cmd)\n            if index == len(command.commands) - 1 and extra_args:\n                resolved_command.extend(extra_args)\n        except click.ClickException as e:\n            logger.error(f\"'{command.name}' failed executing: '{' '.join(cmd)}'\")\n            raise e\n\n        result = run(\n            command=resolved_command,\n            cwd=command.cwd,\n            env=config_env,\n            stdout_log_level=logging.DEBUG,\n        )\n\n        if result.exit_code != 0:\n            header = f\" project run '{command.name}' command output: \".center(80, \"·\")\n            logger.error(f\"\\n{header}\\n{result.output}\")\n            raise 
click.ClickException(\n                f\"'{command.name}' failed executing '{' '.join(cmd)}' with exit code = {result.exit_code}\"\n            )\n\n        # Log after each command if not from workspace, and also log success after the last command\n        if is_verbose:\n            log_msg = f\"Command Executed: '{' '.join(cmd)}'\\nOutput: {result.output}\\n\"\n            if index == len(command.commands) - 1:\n                if extra_args:\n                    log_msg += f\"Extra Args: '{' '.join(extra_args)}'\\n\"\n                log_msg += f\"✅ {command.project_name}: '{' '.join(cmd)}' executed successfully.\"\n            logger.info(log_msg)\n\n\ndef run_workspace_command(\n    *,\n    workspace_command: WorkspaceProjectCommand,\n    project_names: list[str] | None = None,\n    project_type: str | None = None,\n    sequential: bool = False,\n    extra_args: tuple[str, ...] | None = None,\n) -> None:\n    \"\"\"Executes a workspace command, potentially limited to specified projects.\n\n    Args:\n        workspace_command (WorkspaceProjectCommand): The workspace command to be executed.\n        project_names (list[str] | None): Optional; specifies a subset of projects to execute the command for.\n        project_type (str | None): Optional; specifies a subset of project types to execute the command for.\n        sequential (bool): Whether to execute commands sequentially. Defaults to False.\n        extra_args (tuple[str, ...] 
| None): Optional; additional arguments to pass to the command.\n    \"\"\"\n\n    def _execute_command(cmd: ProjectCommand) -> None:\n        \"\"\"Helper function to execute a single project command within the workspace context.\"\"\"\n        logger.info(f\"⏳ {cmd.project_name}: '{cmd.name}' command in progress...\")\n        try:\n            run_command(command=cmd, from_workspace=True, extra_args=extra_args or ())\n            executed_commands = \" && \".join(\" \".join(command) for command in cmd.commands)\n            if extra_args:\n                executed_commands += f\" {' '.join(extra_args)}\"\n            logger.info(f\"✅ {cmd.project_name}: '{executed_commands}' executed successfully.\")\n        except Exception as e:\n            logger.error(f\"❌ {cmd.project_name}: {e}\")\n            raise click.ClickException(f\"failed to execute '{cmd.name}' command in '{cmd.project_name}'\") from e\n\n    def _filter_command(cmd: ProjectCommand) -> bool:\n        return (not project_names or cmd.project_name in project_names) and (\n            not project_type or project_type == cmd.project_type\n        )\n\n    is_sequential = workspace_command.execution_order or sequential\n    logger.info(f\"Running commands {'sequentially' if is_sequential else 'concurrently'}.\")\n\n    filtered_commands = list(filter(_filter_command, workspace_command.commands))\n\n    if project_names:\n        existing_projects = {cmd.project_name for cmd in filtered_commands}\n        missing_projects = set(project_names) - existing_projects\n        if missing_projects:\n            logger.warning(f\"Missing projects: {', '.join(missing_projects)}. 
Proceeding with available ones.\")\n\n    if is_sequential:\n        if workspace_command.execution_order:\n            order_map = {name: i for i, name in enumerate(workspace_command.execution_order)}\n            filtered_commands.sort(key=lambda c: order_map.get(c.project_name, len(order_map)))\n\n        for cmd in filtered_commands:\n            _execute_command(cmd)\n    else:\n        with ThreadPoolExecutor() as executor:\n            list(executor.map(_execute_command, filtered_commands))\n\n\ndef load_commands(project_dir: Path) -> list[ProjectCommand] | list[WorkspaceProjectCommand] | None:\n    \"\"\"Determines and loads the appropriate project commands based on the project type.\n\n    Args:\n        project_dir (Path): The directory of the project.\n\n    Returns:\n        list[ProjectCommand] | list[WorkspaceProjectCommand] | None: A list of project or workspace commands,\n        or None if the project configuration is not found.\n    \"\"\"\n    config = get_algokit_config(project_dir=project_dir, verbose_validation=True)\n    if not config:\n        return None\n\n    project_type = config.get(\"project\", {}).get(\"type\")\n    return (\n        _load_commands_from_workspace(config, project_dir)\n        if project_type == ProjectType.WORKSPACE\n        else _load_commands_from_standalone(config, project_dir)\n    )\n"
  },
  {
    "path": "src/algokit/core/questionary_extensions.py",
    "content": "from collections.abc import Callable, Sequence\nfrom typing import Any\n\nimport prompt_toolkit.document\nimport questionary\nfrom questionary.prompts.common import build_validator\n\n\nclass NonEmptyValidator(questionary.Validator):\n    def validate(self, document: prompt_toolkit.document.Document) -> None:\n        value = document.text.strip()\n        if not value:\n            raise questionary.ValidationError(message=\"Please enter a value\")\n\n\nclass ChainedValidator(questionary.Validator):\n    def __init__(self, *validators: questionary.Validator):\n        self._validators = validators\n\n    def validate(self, document: prompt_toolkit.document.Document) -> None:\n        for validator in self._validators:\n            validator.validate(document)\n\n\ndef prompt_confirm(message: str, *, default: bool) -> bool:\n    # note: we use unsafe_ask here (and everywhere else) so we don't have to\n    # handle None returns for KeyboardInterrupt - click will handle these nicely enough for us\n    # at the root level\n    result = questionary.confirm(\n        message,\n        default=default,\n    ).unsafe_ask()\n    assert isinstance(result, bool)\n    return result\n\n\ndef prompt_text(\n    message: str,\n    *,\n    validators: Sequence[type[questionary.Validator] | questionary.Validator | Callable[[str], bool]] | None = None,\n    validate_while_typing: bool = False,\n) -> str:\n    if validators:\n        validate, *others = filter(None, map(build_validator, validators))\n        if others:\n            validate = ChainedValidator(validate, *others)\n    else:\n        validate = None\n    result = questionary.text(\n        message,\n        validate=validate,\n        validate_while_typing=validate_while_typing,\n    ).unsafe_ask()\n    assert isinstance(result, str)\n    return result\n\n\ndef prompt_select(\n    message: str,\n    *choices: str | questionary.Choice,\n) -> Any:  # noqa: ANN401\n    return questionary.select(\n        
message,\n        choices=choices,\n    ).unsafe_ask()\n"
  },
  {
    "path": "src/algokit/core/sandbox.py",
    "content": "from __future__ import annotations\n\nimport dataclasses\nimport enum\nimport json\nimport logging\nimport re\nimport time\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import Any, cast\n\nimport httpx\n\nfrom algokit.core.conf import get_app_config_dir, get_app_state_dir\nfrom algokit.core.config_commands.container_engine import get_container_engine\nfrom algokit.core.proc import RunResult, run, run_interactive\n\nlogger = logging.getLogger(__name__)\n\nDOCKER_COMPOSE_MINIMUM_VERSION = \"2.5.0\"\nPODMAN_COMPOSE_MINIMUM_VERSION = \"1.0.6\"\n\n\nSANDBOX_BASE_NAME = \"sandbox\"\nCONTAINER_ENGINE_CONFIG_FILE = get_app_config_dir() / \"active-container-engine\"\n\n\nclass ContainerEngine(str, enum.Enum):\n    DOCKER = \"docker\"\n    PODMAN = \"podman\"\n\n    def __str__(self) -> str:\n        return self.value\n\n\nclass ComposeFileStatus(enum.Enum):\n    MISSING = enum.auto()\n    UP_TO_DATE = enum.auto()\n    OUT_OF_DATE = enum.auto()\n\n\ndef get_min_compose_version() -> str:\n    container_engine = get_container_engine()\n    return (\n        DOCKER_COMPOSE_MINIMUM_VERSION if container_engine == ContainerEngine.DOCKER else PODMAN_COMPOSE_MINIMUM_VERSION\n    )\n\n\nclass ComposeSandbox:\n    def __init__(self, name: str = SANDBOX_BASE_NAME, config_path: Path | None = None) -> None:\n        self.name = SANDBOX_BASE_NAME if name == SANDBOX_BASE_NAME else f\"{SANDBOX_BASE_NAME}_{name}\"\n        self.directory = (config_path or get_app_config_dir()) / self.name\n        if not self.directory.exists():\n            logger.debug(f\"The {self.name} directory does not exist yet; creating it\")\n            self.directory.mkdir()\n        self._conduit_yaml = get_conduit_yaml()\n        self._latest_yaml = get_docker_compose_yml(name=f\"algokit_{self.name}\")\n        self._latest_config_json = get_config_json()\n        self._latest_algod_network_template = get_algod_network_template()\n\n    @property\n    def 
compose_file_path(self) -> Path:\n        return self.directory / \"docker-compose.yml\"\n\n    @property\n    def conduit_file_path(self) -> Path:\n        return self.directory / \"conduit.yml\"\n\n    @property\n    def algod_config_file_path(self) -> Path:\n        return self.directory / \"algod_config.json\"\n\n    @property\n    def algod_network_template_file_path(self) -> Path:\n        return self.directory / \"algod_network_template.json\"\n\n    @classmethod\n    def from_environment(cls) -> ComposeSandbox | None:\n        try:\n            run_results = run(\n                [get_container_engine(), \"compose\", \"ls\", \"--format\", \"json\", \"--filter\", \"name=algokit_sandbox*\"],\n                bad_return_code_error_message=\"Failed to list running LocalNet\",\n            )\n            if run_results.exit_code != 0:\n                return None\n        except Exception as err:\n            logger.debug(f\"Error checking for existing sandbox: {err}\", exc_info=True)\n            return None\n\n        try:\n            json_lines = cls._extract_json_lines(run_results.output)\n            if not json_lines:\n                return None\n\n            data = json.loads(json_lines[0])\n            return cls._create_instance_from_data(data)\n        except (json.JSONDecodeError, KeyError, IndexError) as err:\n            logger.info(f\"Error checking config file: {err}\", exc_info=True)\n            return None\n\n    @staticmethod\n    def _extract_json_lines(output: str) -> list[str]:\n        valid_json_lines = []\n        for line in output.splitlines():\n            # strip ANSI color codes\n            parsed_line = re.sub(r\"\\x1b\\[[0-9;]*[a-zA-Z]\", \"\", line)\n            try:\n                json.loads(parsed_line)\n                valid_json_lines.append(parsed_line)\n            except json.JSONDecodeError:\n                continue\n        return valid_json_lines\n\n    @classmethod\n    def _create_instance_from_data(cls, data: 
list[dict[str, Any]]) -> ComposeSandbox | None:\n        for item in data:\n            config_file = item.get(\"ConfigFiles\", \"\").split(\",\")[0]\n            config_file_path = Path(config_file)\n            full_name = config_file_path.parent.name\n            name = (\n                full_name.replace(f\"{SANDBOX_BASE_NAME}_\", \"\")\n                if full_name.startswith(f\"{SANDBOX_BASE_NAME}_\")\n                else full_name\n            )\n            config_path = config_file_path.parent.parent\n            return cls(name, config_path)\n        return None\n\n    def set_algod_dev_mode(self, *, dev_mode: bool) -> None:\n        content = self.algod_network_template_file_path.read_text()\n        new_value = \"true\" if dev_mode else \"false\"\n        new_content = re.sub(r'\"DevMode\":\\s*(true|false)', f'\"DevMode\": {new_value}', content)\n        self.algod_network_template_file_path.write_text(new_content)\n\n    def is_algod_dev_mode(self) -> bool:\n        content = self.algod_network_template_file_path.read_text()\n        search = re.search(r'\"DevMode\":\\s*(true|false)', content)\n        return search is not None and search.group(1) == \"true\"\n\n    def compose_file_status(self) -> ComposeFileStatus:\n        try:\n            compose_content = self.compose_file_path.read_text()\n            config_content = self.algod_config_file_path.read_text()\n            algod_network_template_content = self.algod_network_template_file_path.read_text()\n\n        except FileNotFoundError:\n            # treat as out of date if compose file exists but algod config doesn't\n            # so that existing setups aren't suddenly reset\n            if self.compose_file_path.exists():\n                return ComposeFileStatus.OUT_OF_DATE\n            return ComposeFileStatus.MISSING\n        else:\n            try:\n                # Perform some content normalization to ensure we correctly detect up to date files\n\n                # Ensure the 
NUM_ROUNDS placeholder is the same in both files\n                algod_network_json_content = json.loads(\n                    algod_network_template_content.replace(\"NUM_ROUNDS\", '\"NUM_ROUNDS\"')\n                )\n                latest_algod_network_json_content = json.loads(\n                    self._latest_algod_network_template.replace(\"NUM_ROUNDS\", '\"NUM_ROUNDS\"')\n                )\n\n                # Remove DevMode from comparison as the value is configurable via the `--dev` option\n                del algod_network_json_content[\"Genesis\"][\"DevMode\"]\n                del latest_algod_network_json_content[\"Genesis\"][\"DevMode\"]\n\n                if (\n                    compose_content == self._latest_yaml\n                    and config_content == self._latest_config_json\n                    and algod_network_json_content == latest_algod_network_json_content\n                ):\n                    return ComposeFileStatus.UP_TO_DATE\n                else:\n                    return ComposeFileStatus.OUT_OF_DATE\n            except (json.JSONDecodeError, KeyError):\n                # If config files are corrupted or malformed, treat as out of date\n                return ComposeFileStatus.OUT_OF_DATE\n\n    def write_compose_file(self) -> None:\n        self.conduit_file_path.write_text(self._conduit_yaml)\n        self.compose_file_path.write_text(self._latest_yaml)\n        self.algod_config_file_path.write_text(self._latest_config_json)\n        self.algod_network_template_file_path.write_text(self._latest_algod_network_template)\n\n    def _run_compose_command(\n        self,\n        compose_args: str,\n        stdout_log_level: int = logging.INFO,\n        bad_return_code_error_message: str | None = None,\n    ) -> RunResult:\n        return run(\n            [get_container_engine(), \"compose\", *compose_args.split()],\n            cwd=self.directory,\n            stdout_log_level=stdout_log_level,\n            
bad_return_code_error_message=bad_return_code_error_message,\n        )\n\n    def up(self) -> None:\n        logger.info(\"Starting AlgoKit LocalNet now...\")\n        self._run_compose_command(\n            f\"up --detach --quiet-pull{' --wait' if get_container_engine() == ContainerEngine.DOCKER else ''}\",\n            bad_return_code_error_message=\"Failed to start LocalNet\",\n        )\n        logger.debug(\"AlgoKit LocalNet started, waiting for health check\")\n        if _wait_for_algod() and _wait_for_indexer():\n            logger.info(\"Started; execute `algokit explore` to explore LocalNet in a web user interface.\")\n        else:\n            logger.warning(\"AlgoKit LocalNet failed to return a successful health check\")\n\n    def stop(self) -> None:\n        logger.info(\"Stopping AlgoKit LocalNet now...\")\n        self._run_compose_command(\"stop\", bad_return_code_error_message=\"Failed to stop LocalNet\")\n        logger.info(\"LocalNet Stopped; execute `algokit localnet start` to start it again.\")\n\n    def down(self) -> None:\n        logger.info(\"Cleaning up the running AlgoKit LocalNet...\")\n        self._run_compose_command(\"down\", stdout_log_level=logging.DEBUG)\n\n    def pull(self) -> None:\n        logger.info(\"Fetching any container updates from DockerHub...\")\n        self._run_compose_command(\"pull --ignore-pull-failures --quiet\")\n        logger.debug(\"Image version cache reset\")\n        _update_image_version_cache(indexer_outdated=False, algod_outdated=False)\n\n    def logs(self, *, follow: bool = False, no_color: bool = False, tail: str | None = None) -> None:\n        compose_args = [\"logs\"]\n        if follow:\n            compose_args += [\"--follow\"]\n        if no_color:\n            compose_args += [\"--no-color\"]\n        if tail is not None:\n            compose_args += [\"--tail\", tail]\n        run_interactive(\n            [get_container_engine(), \"compose\", *compose_args],\n            
cwd=self.directory,\n            bad_return_code_error_message=\"Failed to get logs, are the containers running?\",\n        )\n\n    def ps(self, service_name: str | None = None) -> list[dict[str, Any]]:\n        run_results = self._run_compose_command(\n            f\"ps {service_name or ''} --format json\", stdout_log_level=logging.DEBUG\n        )\n        if run_results.exit_code != 0:\n            return []\n\n        # `docker compose ps --format json` on version < 2.21.0 outputs a JSON arary\n        if run_results.output.startswith(\"[\"):\n            data = json.loads(run_results.output)\n        # `docker compose ps --format json` on version >= 2.21.0 outputs seperate JSON objects, each on a new line\n        else:\n            json_lines = self._extract_json_lines(run_results.output)\n            data = [json.loads(line) for line in json_lines]\n\n        assert isinstance(data, list)\n        return cast(\"list[dict[str, Any]]\", data)\n\n    def _get_local_image_versions(self, image_name: str) -> list[str]:\n        \"\"\"\n        Get the local versions of a Docker image. 
Note that a single image may be pulled from multiple repo digests.\n        \"\"\"\n        try:\n            arg = \"{{range .RepoDigests}}{{println .}}{{end}}\"\n            local_versions_output = run([get_container_engine(), \"image\", \"inspect\", image_name, \"--format\", arg])\n            return [line.split(\"@\")[1] if \"@\" in line else line for line in local_versions_output.output.splitlines()]\n        except Exception as e:\n            logger.debug(f\"Failed to get local image versions: {e}\", exc_info=True)\n            return []\n\n    def _get_latest_image_version(self, image_name: str) -> str | None:\n        \"\"\"\n        Get the latest version of a Docker image from Docker Hub\n        \"\"\"\n        args = image_name.split(\":\")\n        name = args[0]\n        tag = args[1] if len(args) > 1 else \"latest\"\n        url = f\"https://registry.hub.docker.com/v2/repositories/{name}/tags/{tag}\"\n        try:\n            data = httpx.get(url=url)\n            return str(data.json()[\"digest\"])\n        except Exception as err:\n            logger.debug(f\"Error checking image status: {err}\", exc_info=True)\n            return None\n\n    def is_image_up_to_date(self, image_name: str) -> bool:\n        local_versions = self._get_local_image_versions(image_name)\n        latest_version = self._get_latest_image_version(image_name)\n        return latest_version is None or latest_version in local_versions\n\n    def check_docker_compose_for_new_image_versions(self, *, force: bool = False) -> None:\n        should_check_registry = force or _should_check_image_versions()\n\n        if should_check_registry:\n            # Check Docker registry for new versions\n            is_indexer_outdated = not self.is_image_up_to_date(INDEXER_IMAGE)\n            is_algod_outdated = not self.is_image_up_to_date(ALGORAND_IMAGE)\n            _update_image_version_cache(indexer_outdated=is_indexer_outdated, algod_outdated=is_algod_outdated)\n        else:\n       
     # Use cached state\n            cached_state = _get_image_version_cache()\n            if cached_state is None:\n                return\n            is_indexer_outdated = cached_state.indexer_outdated\n            is_algod_outdated = cached_state.algod_outdated\n\n        if is_indexer_outdated:\n            logger.warning(\n                \"indexer has a new version available, run `algokit localnet reset --update` to get the latest version\"\n            )\n\n        if is_algod_outdated:\n            logger.warning(\n                \"algod has a new version available, run `algokit localnet reset --update` to get the latest version\"\n            )\n\n\nDEFAULT_ALGOD_SERVER = \"http://localhost\"\nDEFAULT_INDEXER_SERVER = \"http://localhost\"\nDEFAULT_ALGOD_TOKEN = \"a\" * 64\nDEFAULT_INDEXER_TOKEN = \"a\" * 64\nDEFAULT_ALGOD_PORT = 4001\nDEFAULT_INDEXER_PORT = 8980\nDEFAULT_WAIT_FOR_ALGOD = 60\nDEFAULT_WAIT_FOR_INDEXER = 60\nDEFAULT_HEALTH_TIMEOUT = 1\nALGOD_HEALTH_URL = f\"{DEFAULT_ALGOD_SERVER}:{DEFAULT_ALGOD_PORT}/v2/status\"\nINDEXER_HEALTH_URL = f\"{DEFAULT_INDEXER_SERVER}:{DEFAULT_INDEXER_PORT}/health\"\nINDEXER_IMAGE = \"algorand/indexer:latest\"\nALGORAND_IMAGE = \"algorand/algod:latest\"\nCONDUIT_IMAGE = \"algorandfoundation/conduit-localnet:latest\"\nIMAGE_VERSION_CHECK_INTERVAL = timedelta(weeks=1).total_seconds()\n\n\n@dataclasses.dataclass\nclass ImageVersionCache:\n    \"\"\"Cache state for image version checks.\"\"\"\n\n    indexer_outdated: bool\n    algod_outdated: bool\n\n\ndef _get_image_version_cache_path() -> Path:\n    \"\"\"Get the path to the image version check cache file.\"\"\"\n    return get_app_state_dir() / \"last-localnet-version-check\"\n\n\ndef _get_image_version_cache() -> ImageVersionCache | None:\n    \"\"\"Get the cached image version state.\n\n    Returns an ImageVersionCache with outdated flags, or None if cache is missing/invalid.\n    \"\"\"\n    cache_path = _get_image_version_cache_path()\n    try:\n        
content = cache_path.read_text(encoding=\"utf-8\")\n        data = json.loads(content)\n        return ImageVersionCache(\n            indexer_outdated=data.get(\"indexer_outdated\", False),\n            algod_outdated=data.get(\"algod_outdated\", False),\n        )\n    except (OSError, json.JSONDecodeError):\n        return None\n\n\ndef _should_check_image_versions() -> bool:\n    \"\"\"Determine if we should check for new image versions based on cache.\"\"\"\n    cache_path = _get_image_version_cache_path()\n    try:\n        last_checked = cache_path.stat().st_mtime\n    except OSError:\n        logger.debug(f\"{cache_path} inaccessible, will check for image updates\")\n        return True\n\n    elapsed = time.time() - last_checked\n    if elapsed > IMAGE_VERSION_CHECK_INTERVAL:\n        logger.debug(\"Image version cache expired, will check for updates\")\n        return True\n\n    logger.debug(f\"Skipping image version check, last checked {elapsed / 3600:.1f}h ago\")\n    return False\n\n\ndef _update_image_version_cache(*, indexer_outdated: bool, algod_outdated: bool) -> None:\n    \"\"\"Update the image version check cache with current state.\"\"\"\n    cache_path = _get_image_version_cache_path()\n    try:\n        cache_data = {\"indexer_outdated\": indexer_outdated, \"algod_outdated\": algod_outdated}\n        cache_path.write_text(json.dumps(cache_data), encoding=\"utf-8\")\n    except OSError as ex:\n        logger.debug(f\"Failed to update image version cache: {ex}\")\n\n\ndef _wait_for_service(\n    url: str,\n    token: str,\n    header_name: str,\n    service_name: str,\n    timeout: int = DEFAULT_WAIT_FOR_ALGOD,\n) -> bool:\n    \"\"\"Generic function to wait for a service to become ready via health check.\"\"\"\n    end_time = time.time() + timeout\n    last_exception: httpx.RequestError | None = None\n    while time.time() < end_time:\n        try:\n            health = httpx.get(url, timeout=DEFAULT_HEALTH_TIMEOUT, headers={header_name: 
token})\n        except httpx.RequestError as ex:\n            last_exception = ex\n        else:\n            if health.is_success:\n                logger.debug(f\"AlgoKit LocalNet health check successful, {service_name} is ready\")\n                return True\n            logger.debug(f\"AlgoKit LocalNet health check returned {health.status_code}, waiting\")\n        time.sleep(DEFAULT_HEALTH_TIMEOUT)\n    if last_exception:\n        logger.debug(f\"AlgoKit LocalNet health request failed for {service_name}\", exc_info=last_exception)\n    return False\n\n\ndef _wait_for_algod() -> bool:\n    \"\"\"Wait for algod service to become ready.\"\"\"\n    return _wait_for_service(\n        ALGOD_HEALTH_URL,\n        DEFAULT_ALGOD_TOKEN,\n        \"X-Algo-API-Token\",\n        \"algod\",\n        DEFAULT_WAIT_FOR_ALGOD,\n    )\n\n\ndef _wait_for_indexer() -> bool:\n    \"\"\"Wait for indexer service to become ready.\"\"\"\n    return _wait_for_service(\n        INDEXER_HEALTH_URL,\n        DEFAULT_INDEXER_TOKEN,\n        \"X-Indexer-API-Token\",\n        \"indexer\",\n        DEFAULT_WAIT_FOR_INDEXER,\n    )\n\n\ndef get_config_json() -> str:\n    return (\n        '{ \"GossipFanout\": 1, \"EndpointAddress\": \"0.0.0.0:8080\", \"DNSBootstrapID\": \"\",'\n        ' \"IncomingConnectionsLimit\": 0, \"Archival\":true, \"isIndexerActive\":false, \"EnableDeveloperAPI\":true,'\n        ' \"EnablePrivateNetworkAccessHeader\":true}'\n    )\n\n\ndef get_algod_network_template() -> str:\n    return \"\"\"{\n    \"Genesis\": {\n      \"NetworkName\": \"followermodenet\",\n      \"RewardsPoolBalance\": 0,\n      \"FirstPartKeyRound\": 0,\n      \"LastPartKeyRound\": NUM_ROUNDS,\n      \"Wallets\": [\n        {\n          \"Name\": \"Wallet1\",\n          \"Stake\": 40,\n          \"Online\": true\n        },\n        {\n          \"Name\": \"Wallet2\",\n          \"Stake\": 40,\n          \"Online\": true\n        },\n        {\n          \"Name\": \"Wallet3\",\n          
\"Stake\": 20,\n          \"Online\": true\n        }\n      ],\n      \"DevMode\": true\n    },\n    \"Nodes\": [\n      {\n        \"Name\": \"data\",\n        \"IsRelay\": true,\n        \"Wallets\": [\n          {\n            \"Name\": \"Wallet1\",\n            \"ParticipationOnly\": false\n          },\n          {\n            \"Name\": \"Wallet2\",\n            \"ParticipationOnly\": false\n          },\n          {\n            \"Name\": \"Wallet3\",\n            \"ParticipationOnly\": false\n          }\n        ]\n      },\n      {\n        \"Name\": \"follower\",\n        \"IsRelay\": false,\n        \"ConfigJSONOverride\":\n        \"{\\\\\"EnableFollowMode\\\\\":true,\\\\\"EndpointAddress\\\\\":\\\\\"0.0.0.0:8081\\\\\",\\\\\"MaxAcctLookback\\\\\":64,\\\\\"CatchupParallelBlocks\\\\\":64,\\\\\"CatchupBlockValidateMode\\\\\":3}\"\n      }\n    ]\n  }\n\"\"\"\n\n\ndef get_conduit_yaml() -> str:\n    return \"\"\"# Log verbosity: PANIC, FATAL, ERROR, WARN, INFO, DEBUG, TRACE\nlog-level: INFO\n\n# If no log file is provided logs are written to stdout.\n#log-file:\n\n# Number of retries to perform after a pipeline plugin error.\nretry-count: 10\n\n# Time duration to wait between retry attempts.\nretry-delay: \"5s\"\n\n# Optional filepath to use for pidfile.\n#pid-filepath: /path/to/pidfile\n\n# Whether or not to print the conduit banner on startup.\nhide-banner: false\n\n# When enabled prometheus metrics are available on '/metrics'\nmetrics:\n  mode: OFF\n  addr: \":9999\"\n  prefix: \"conduit\"\n\n# The importer is typically an algod follower node.\nimporter:\n  name: localnet_algod\n  config:\n    lead-node-url: \"http://algod:8080\"\n    follower-node-url: \"http://algod:8081\"\n    token: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n\n# Zero or more processors may be defined to manipulate what data\n# reaches the exporter.\nprocessors:\n\n# An exporter is defined to do something with the data.\nexporter:\n  name: postgresql\n  
config:\n    # Pgsql connection string\n    # See https://github.com/jackc/pgconn for more details\n    connection-string: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb\"\n\n    # Maximum connection number for connection pool\n    # This means the total number of active queries that can be running\n    # concurrently can never be more than this\n    max-conn: 20\n\"\"\"\n\n\ndef get_docker_compose_yml(\n    name: str = \"algokit_sandbox\",\n    algod_port: int = DEFAULT_ALGOD_PORT,\n    kmd_port: int = 4002,\n    tealdbg_port: int = 9392,\n) -> str:\n    return f\"\"\"name: \"{name}\"\n\nservices:\n  algod:\n    container_name: \"{name}_algod\"\n    image: {ALGORAND_IMAGE}\n    ports:\n      - {algod_port}:8080\n      - {kmd_port}:7833\n      - {tealdbg_port}:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"{name}_conduit\"\n    image: {CONDUIT_IMAGE}\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"{name}_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"{name}_indexer\"\n    image: 
{INDEXER_IMAGE}\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n\"\"\"  # noqa: E501\n\n\ndef fetch_algod_status_data(service_info: dict[str, Any]) -> dict[str, Any]:\n    results: dict[str, Any] = {}\n    try:\n        # Docker image response\n        # Search for DEFAULT_ALGOD_PORT in ports, if found use it, if not found this is an error\n        if not any(item[\"PublishedPort\"] == DEFAULT_ALGOD_PORT for item in service_info[\"Publishers\"]):\n            return {\"Status\": \"Error\"}\n\n        results[\"Port\"] = DEFAULT_ALGOD_PORT\n        # container specific response\n        with httpx.Client() as client:\n            algod_headers = {\"X-Algo-API-Token\": DEFAULT_ALGOD_TOKEN}\n            http_status_response = client.get(\n                f\"{DEFAULT_ALGOD_SERVER}:{DEFAULT_ALGOD_PORT}/v2/status\", headers=algod_headers, timeout=3\n            )\n            http_versions_response = client.get(\n                f\"{DEFAULT_ALGOD_SERVER}:{DEFAULT_ALGOD_PORT}/versions\", headers=algod_headers, timeout=3\n            )\n            if (\n                http_status_response.status_code != httpx.codes.OK\n                or http_versions_response.status_code != httpx.codes.OK\n            ):\n                return {\"Status\": \"Error\"}\n\n            # status response\n            status_response = http_status_response.json()\n            results[\"Last round\"] = status_response[\"last-round\"]\n            results[\"Time since last round\"] = \"%.1fs\" % (status_response[\"time-since-last-round\"] / 1e9)\n            # genesis response\n            genesis_response = http_versions_response.json()\n            results[\"Genesis ID\"] = genesis_response[\"genesis_id\"]\n            results[\"Genesis hash\"] = 
genesis_response[\"genesis_hash_b64\"]\n            major_version = genesis_response[\"build\"][\"major\"]\n            minor_version = genesis_response[\"build\"][\"minor\"]\n            build_version = genesis_response[\"build\"][\"build_number\"]\n            results[\"Version\"] = f\"{major_version}.{minor_version}.{build_version}\"\n        return results\n    except Exception as err:\n        logger.debug(f\"Error checking algod status: {err}\", exc_info=True)\n        return {\"Status\": \"Error\"}\n\n\ndef fetch_indexer_status_data(service_info: dict[str, Any]) -> dict[str, Any]:\n    results: dict[str, Any] = {}\n    try:\n        # Docker image response\n        if not any(item[\"PublishedPort\"] == DEFAULT_INDEXER_PORT for item in service_info[\"Publishers\"]):\n            return {\"Status\": \"Error\"}\n\n        results[\"Port\"] = DEFAULT_INDEXER_PORT\n        # container specific response\n        health_url = f\"{DEFAULT_ALGOD_SERVER}:{DEFAULT_INDEXER_PORT}/health\"\n        http_response = httpx.get(health_url, timeout=5)\n\n        if http_response.status_code != httpx.codes.OK:\n            return {\"Status\": \"Error\"}\n\n        response = http_response.json()\n        logger.debug(f\"{health_url} response: {response}\")\n        results[\"Last round\"] = response[\"round\"]\n        results[\"Version\"] = response[\"version\"]\n        return results\n    except Exception as err:\n        logger.debug(f\"Error checking indexer status: {err}\", exc_info=True)\n        return {\"Status\": \"Error\"}\n\n\nCOMPOSE_VERSION_COMMAND = [get_container_engine(), \"compose\", \"version\", \"--format\", \"json\"]\n"
  },
  {
    "path": "src/algokit/core/tasks/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/core/tasks/analyze.py",
    "content": "import json\nimport logging\nimport os\nimport re\nfrom pathlib import Path\n\nfrom jsondiff import diff\nfrom pydantic import BaseModel, Field\n\nfrom algokit.core.proc import RunResult, run\nfrom algokit.core.utils import find_valid_pipx_command\n\nlogger = logging.getLogger(__name__)\n\nTEALER_REPORTS_ROOT = Path.cwd() / \".algokit/static-analysis\"\nTEALER_SNAPSHOTS_ROOT = TEALER_REPORTS_ROOT / \"snapshots\"\nTEALER_DOT_FILES_ROOT = TEALER_REPORTS_ROOT / \"tealer\"\nTEALER_VERSION = \"0.1.2\"\n\n\nclass TealerBlock(BaseModel):\n    short: str\n    blocks: list[list[str]]\n\n\nclass TealerExecutionPath(BaseModel):\n    data_type: str = Field(alias=\"type\")\n    count: int\n    description: str\n    check: str\n    impact: str\n    confidence: str\n    data_help: str = Field(alias=\"help\")\n    paths: list[TealerBlock]\n\n\nclass TealerAnalysisReport(BaseModel):\n    success: bool\n    data_error: str | None = Field(alias=\"error\")\n    result: list[TealerExecutionPath]\n\n\ndef _extract_line(block: list[str]) -> str:\n    return f\"{int(block[0].split(':')[0])}-{int(block[-1].split(':')[0])}\"\n\n\ndef _extract_lines(block: list[list[str]]) -> str:\n    return \"->\".join([_extract_line(b) for b in block])\n\n\ndef generate_report_filename(file: Path, duplicate_files: dict[str, int]) -> str:\n    base_filename = file.stem\n    duplicate_count = duplicate_files.get(base_filename, 0)\n    duplicate_files[base_filename] = duplicate_count + 1\n    return f\"{base_filename}_{duplicate_count}.json\" if duplicate_count else f\"{base_filename}.json\"\n\n\ndef load_tealer_report(file_path: str) -> TealerAnalysisReport:\n    \"\"\"\n    Load and parse the tealer report from the specified file path.\n\n    Args:\n        file_path (str): The path to the tealer report file.\n\n    Returns:\n        TealerAnalysisReport: Parsed tealer analysis report.\n    \"\"\"\n    with Path(file_path).open(encoding=\"utf-8\") as file:\n        data = json.load(file)\n  
  return TealerAnalysisReport(**data)\n\n\ndef prepare_artifacts_folders(output_dir: Path | None) -> None:\n    \"\"\"\n    Create necessary artifacts folders if they do not exist.\n\n    Args:\n        output_dir (Path | None): The output directory path.\n    \"\"\"\n    if output_dir:\n        output_dir.mkdir(parents=True, exist_ok=True)\n\n    TEALER_REPORTS_ROOT.mkdir(parents=True, exist_ok=True)\n    TEALER_SNAPSHOTS_ROOT.mkdir(parents=True, exist_ok=True)\n    TEALER_DOT_FILES_ROOT.mkdir(parents=True, exist_ok=True)\n\n\ndef ensure_tealer_installed() -> None:\n    \"\"\"\n    Install tealer if it's not already installed.\n    \"\"\"\n    try:\n        run(\n            [\"tealer\", \"--version\"],\n            bad_return_code_error_message=\"tealer --version failed, please check your tealer install\",\n        )\n    except Exception as e:\n        logger.debug(e)\n        logger.info(\"Tealer not found; attempting to install it...\")\n        pipx_command = find_valid_pipx_command(\n            \"Unable to find pipx install so that `tealer` static analyzer can be installed; \"\n            \"please install pipx via https://pypa.github.io/pipx/ \"\n            \"and then try `algokit task analyze ...` again.\"\n        )\n        run(\n            [*pipx_command, \"install\", f\"tealer=={TEALER_VERSION}\"],\n            bad_return_code_error_message=(\n                \"Unable to install tealer via pipx; please install tealer \"\n                \"manually and try `algokit task analyze ...` again.\"\n            ),\n        )\n        logger.info(\"Tealer installed successfully via pipx!\")\n\n\ndef generate_tealer_command(cur_file: Path, report_output_path: Path, detectors_to_exclude: list[str]) -> list[str]:\n    \"\"\"\n    Generate the tealer command for analyzing TEAL programs.\n\n    Args:\n        cur_file (Path): The current file to be analyzed.\n        report_output_path (Path): The path to the report output.\n        detectors_to_exclude 
(list[str]): List of detectors to be excluded.\n\n    Returns:\n        list[str]: The generated tealer command.\n    \"\"\"\n\n    command = [\n        \"tealer\",\n        \"--json\",\n        str(report_output_path),\n        \"detect\",\n        \"--contracts\",\n        str(cur_file),\n    ]\n    if detectors_to_exclude:\n        excluded_detectors = \", \".join(detectors_to_exclude)\n        command.extend([\"--exclude\", excluded_detectors])\n    return command\n\n\ndef run_tealer(command: list[str]) -> RunResult:\n    \"\"\"\n    Run the tealer command and return the result.\n\n    Args:\n        command (list[str]): The command to be executed.\n\n    Returns:\n        RunResult: The result of running the tealer command.\n    \"\"\"\n\n    return run(\n        command,\n        cwd=Path.cwd(),\n        env={\n            \"TEALER_ROOT_OUTPUT_DIR\": str(TEALER_DOT_FILES_ROOT),\n            **os.environ,\n        },\n    )\n\n\ndef has_baseline_diff(*, cur_file: Path, report_output_path: Path, old_report: TealerAnalysisReport) -> bool:\n    \"\"\"\n    Handle the difference between the old and new reports for baseline comparison.\n\n    Args:\n        cur_file (Path): The current file being analyzed.\n        report_output_path (Path): The path to the report output.\n        old_report (TealerAnalysisReport): The old report for comparison.\n    Returns:\n        None\n    \"\"\"\n\n    new_report = load_tealer_report(str(report_output_path))\n    baseline_diff = diff(old_report.model_dump(by_alias=True), new_report.model_dump(by_alias=True))\n    if baseline_diff:\n        new_report_path = report_output_path.with_suffix(\".received.json\")\n        new_report_path.write_text(json.dumps(new_report.model_dump(by_alias=True), indent=2))\n        logger.error(\n            f\"Diff detected in {cur_file}! 
Please check the content of the snapshot report \"\n            f\"{report_output_path} against the latest received report at {new_report_path}.\"\n        )\n\n        return True\n\n    return False\n\n\ndef generate_summaries(reports: dict, detectors_to_exclude: list[str]) -> dict[Path, list[list[str]]]:\n    \"\"\"\n    Generate the summaries for STDOUT from the tealer reports.\n\n    Args:\n        reports (dict): A dictionary containing the reports.\n        detectors_to_exclude (list[str]): List of detectors to be excluded.\n\n    Returns:\n        dict[Path, list[list[str]]]: A dictionary containing the table rows.\n    \"\"\"\n\n    # Initialize an empty dictionary to store table rows.\n    table_data: dict[Path, list[list[str]]] = {}\n\n    # Iterate through each report in the reports dictionary.\n    for report_path, _ in reports.items():\n        report = load_tealer_report(report_path)\n\n        relative_path = Path(report_path).relative_to(Path.cwd())\n\n        # Process each item in the report's result.\n        for item in report.result:\n            if item.count == 0 or item.check in detectors_to_exclude:\n                continue\n\n            check_type = item.check\n            impact_level = item.impact\n            detailed_description = item.description + \" \" + item.data_help\n\n            # Extract URL from the description, if present.\n            found_url = re.search(r\"(?P<url>https?://[^\\s]+)\", detailed_description)\n            description_with_url = found_url.group(\"url\") if found_url else detailed_description\n\n            # Compile a list of paths or mark as 'N/A' if none.\n            path_details = \",\\n\".join(_extract_lines(block.blocks) for block in item.paths) or \"N/A\"\n\n            # Add the compiled data to the table_data dictionary.\n            if relative_path not in table_data:\n                table_data[relative_path] = []\n            table_data[relative_path].append([check_type, impact_level, 
description_with_url, path_details])\n\n    return table_data\n"
  },
  {
    "path": "src/algokit/core/tasks/ipfs.py",
    "content": "import json\nimport logging\nfrom pathlib import Path\n\nimport httpx\nimport keyring\n\nlogger = logging.getLogger(__name__)\n\nALGOKIT_PINATA_NAMESPACE = \"algokit_pinata\"\nALGOKIT_PINATA_TOKEN_KEY = \"algokit_pinata_access_token\"\n\nMAX_FILE_SIZE = 100 * 1024 * 1024  # 100MB\nDEFAULT_TIMEOUT = 90\n\n\nclass PinataError(Exception):\n    \"\"\"Base class for Piñata errors.\"\"\"\n\n    def __init__(self, response: httpx.Response):\n        self.response = response\n        super().__init__(f\"Pinata error: {response.status_code}\")\n\n    def __str__(self) -> str:\n        return f\"Pinata error: {self.response.status_code}. {self.response.text}\"\n\n\nclass PinataBadRequestError(PinataError):\n    pass\n\n\nclass PinataUnauthorizedError(PinataError):\n    pass\n\n\nclass PinataForbiddenError(PinataError):\n    pass\n\n\nclass PinataInternalServerError(PinataError):\n    pass\n\n\nclass PinataHttpError(PinataError):\n    pass\n\n\ndef get_pinata_jwt() -> str | None:\n    \"\"\"\n    Retrieves a password from the keyring library using the\n    ALGOKIT_PINATA_NAMESPACE and ALGOKIT_PINATA_TOKEN_KEY variables.\n\n    Returns:\n        str | None: The retrieved password from the keyring, or None if no password is found.\n    \"\"\"\n    try:\n        old_api_key = keyring.get_password(\"algokit_web3_storage\", \"algokit_web3_storage_access_token\")\n        if old_api_key:\n            logger.warning(\n                \"You are using the old Web3 Storage API key. Please login again using `algokit task ipfs login` with \"\n                \"Pinata ipfs provider. 
Follow the instructions on https://docs.pinata.cloud/docs/getting-started\"\n                \"to create an account and obtain a JWT.\"\n            )\n            keyring.delete_password(\"algokit_web3_storage\", \"algokit_web3_storage_access_token\")\n    except Exception:\n        pass\n    return keyring.get_password(ALGOKIT_PINATA_NAMESPACE, ALGOKIT_PINATA_TOKEN_KEY)\n\n\ndef set_pinata_jwt(jwt: str | None) -> None:\n    \"\"\"\n    Sets or deletes a password in the keyring library based on the provided JWT.\n\n    Args:\n        jwt (str | None): The JWT to be set in the keyring library. If None, the password will be deleted.\n\n    Returns:\n        None\n    \"\"\"\n    if jwt:\n        keyring.set_password(ALGOKIT_PINATA_NAMESPACE, ALGOKIT_PINATA_TOKEN_KEY, jwt)\n    else:\n        keyring.delete_password(ALGOKIT_PINATA_NAMESPACE, ALGOKIT_PINATA_TOKEN_KEY)\n\n\ndef upload_to_pinata(file_path: Path, jwt: str, name: str | None = None) -> str:\n    \"\"\"\n    Uploads a file to the Piñata API.\n\n    Args:\n        file_path (Path): The path to the file that needs to be uploaded.\n        jwt (str): The JWT for accessing the Piñata API.\n        name (str | None, optional): The name to be assigned to the uploaded file. If not provided,\n        the name of the file at `file_path` will be used. Defaults to None.\n        If not provided, the content will be read from the file at `file_path`. 
Defaults to None.\n\n    Returns:\n        str: The CID (Content Identifier) of the uploaded file.\n\n    Raises:\n        ValueError: If the CID is not a string.\n        PinataBadRequestError: If there is a bad request error.\n        PinataUnauthorizedError: If there is an unauthorized error.\n        PinataForbiddenError: If there is a forbidden error.\n        PinataInternalServerError: If there is an internal server error.\n        PinataHttpError: If there is an HTTP error.\n\n    Example Usage:\n        file_path = Path(\"path/to/file.txt\")\n        jwt = \"your_jwt\"\n        name = \"file.txt\"\n\n        cid = upload_to_pinata(file_path, jwt, name)\n        print(cid) # e.g. \"bafybeih6z7z2z3z4z5z6z7z8z9z0\"\n    \"\"\"\n\n    with file_path.open(\"rb\") as file:\n        file_content = file.read()\n\n    headers = {\n        \"accept\": \"application/json\",\n        \"Authorization\": f\"Bearer {jwt}\",\n    }\n\n    pinata_options = {\"cidVersion\": \"1\"}\n    data = {\"pinataOptions\": json.dumps(pinata_options)}\n    files = {\"file\": (name or file_path.name, file_content)}\n    try:\n        response = httpx.post(\n            url=\"https://api.pinata.cloud/pinning/pinFileToIPFS\",\n            data=data,\n            files=files,\n            headers=headers,\n            timeout=DEFAULT_TIMEOUT,\n        )\n\n        response.raise_for_status()\n        cid = response.json().get(\"IpfsHash\")\n        if not isinstance(cid, str):\n            raise ValueError(\"IpfsHash is not a string.\")\n        return cid\n    except httpx.HTTPStatusError as ex:\n        if ex.response.status_code == httpx.codes.BAD_REQUEST:\n            raise PinataBadRequestError(ex.response) from ex\n        if ex.response.status_code == httpx.codes.UNAUTHORIZED:\n            raise PinataUnauthorizedError(ex.response) from ex\n        if ex.response.status_code == httpx.codes.FORBIDDEN:\n            raise PinataForbiddenError(ex.response) from ex\n        if 
ex.response.status_code == httpx.codes.INTERNAL_SERVER_ERROR:\n            raise PinataInternalServerError(ex.response) from ex\n\n        raise PinataHttpError(ex.response) from ex\n"
  },
  {
    "path": "src/algokit/core/tasks/mint/__init__.py",
    "content": ""
  },
  {
    "path": "src/algokit/core/tasks/mint/mint.py",
    "content": "import base64\nimport hashlib\nimport json\nimport logging\nimport mimetypes\nimport pathlib\nimport re\nfrom dataclasses import asdict\n\nfrom algokit_utils import SigningAccount\nfrom algosdk import encoding, transaction\nfrom algosdk.transaction import wait_for_confirmation\nfrom algosdk.v2client import algod\nfrom multiformats import CID\n\nfrom algokit.core.tasks.ipfs import upload_to_pinata\nfrom algokit.core.tasks.mint.models import AssetConfigTxnParams, TokenMetadata\n\nlogger = logging.getLogger(__name__)\n\n\ndef _reserve_address_from_cid(cid: str) -> str:\n    \"\"\"\n    Returns the reserve address associated with a given CID (Content Identifier).\n\n    Args:\n        cid (str): The CID for which the reserve address needs to be determined.\n\n    Returns:\n        str: The reserve address associated with the given CID.\n    \"\"\"\n\n    # Workaround to fix `multiformats` package issue, remove first two bytes before using `encode_address`.\n    # Initial fix using `py-multiformats-cid` and `multihash.decode` was dropped due to PEP 517 incompatibility.\n    digest = CID.decode(cid).digest[2:]\n    reserve_address = str(encoding.encode_address(digest))  # type: ignore[no-untyped-call]\n    assert encoding.is_valid_address(reserve_address)  # type: ignore[no-untyped-call]\n    return reserve_address\n\n\ndef _create_url_from_cid(cid: str) -> str:\n    \"\"\"\n    Creates an ARC19 asset template URL based on the given CID (Content Identifier).\n\n    Args:\n        cid (str): The CID for which the URL needs to be created.\n\n    Returns:\n        str: The URL created based on the given CID.\n\n    Raises:\n        AssertionError: If the constructed URL does not match the expected format.\n    \"\"\"\n\n    cid_object = CID.decode(cid)\n    version = cid_object.version\n    codec = cid_object.codec.name\n    hash_function_name = cid_object.hashfun.name\n\n    url = 
f\"template-ipfs://{{ipfscid:{version}:{codec}:reserve:{hash_function_name}}}\"\n    valid = re.compile(\n        r\"template-ipfs://{ipfscid:(?P<version>[01]):(?P<codec>[a-z0-9\\-]+):(?P<field>[a-z0-9\\-]+):(?P<hash>[a-z0-9\\-]+)}\"\n    )\n    assert bool(valid.match(url))\n    return url\n\n\ndef _file_integrity(filename: pathlib.Path) -> str:\n    \"\"\"\n    Calculate the SHA-256 hash of a file to ensure its integrity.\n\n    Args:\n        filename (pathlib.Path): The path to the file for which the integrity needs to be calculated.\n\n    Returns:\n        str: The integrity of the file in the format \"sha-256<hash>\".\n    \"\"\"\n    with filename.open(\"rb\") as f:\n        file_bytes = f.read()  # read entire file as bytes\n        readable_hash = hashlib.sha256(file_bytes).hexdigest()\n        return \"sha-256\" + readable_hash\n\n\ndef _file_mimetype(filename: pathlib.Path) -> str:\n    \"\"\"\n    Returns the MIME type of a file based on its extension.\n\n    Args:\n        filename (pathlib.Path): The path to the file.\n\n    Returns:\n        str: The MIME type of the file.\n    \"\"\"\n    extension = pathlib.Path(filename).suffix\n    return mimetypes.types_map[extension]\n\n\ndef _create_asset_txn(\n    *,\n    asset_config_params: AssetConfigTxnParams,\n    token_metadata: TokenMetadata,\n    use_metadata_hash: bool = True,\n) -> transaction.AssetConfigTxn:\n    \"\"\"\n    Create an instance of the AssetConfigTxn class by setting the parameters and metadata\n    for the asset configuration transaction.\n\n    Args:\n        asset_config_params (AssetConfigTxnParams): An instance of the AssetConfigTxnParams class\n        that contains the parameters for the asset configuration transaction.\n        token_metadata (TokenMetadata): An instance of the TokenMetadata class that contains the metadata for the asset.\n        use_metadata_hash (bool, optional): A boolean flag indicating whether to use the metadata hash\n        in the asset 
configuration transaction. Defaults to True.\n\n    Returns:\n        AssetConfigTxn: An instance of the AssetConfigTxn class representing the asset configuration transaction.\n    \"\"\"\n    json_metadata = token_metadata.to_json()\n    metadata = json.loads(json_metadata)\n\n    if use_metadata_hash:\n        if \"extra_metadata\" in metadata:\n            h = hashlib.new(\"sha512_256\")\n            h.update(b\"arc0003/amj\")\n            h.update(json_metadata.encode(\"utf-8\"))\n            json_metadata_hash = h.digest()\n\n            h = hashlib.new(\"sha512_256\")\n            h.update(b\"arc0003/am\")\n\n            h.update(json_metadata_hash)\n            h.update(base64.b64decode(metadata[\"extra_metadata\"]))\n            asset_config_params.metadata_hash = h.digest()\n        else:\n            h = hashlib.new(\"sha256\")\n            h.update(json_metadata.encode(\"utf-8\"))\n            asset_config_params.metadata_hash = h.digest()\n    else:\n        asset_config_params.metadata_hash = b\"\"\n\n    return transaction.AssetConfigTxn(**asdict(asset_config_params))  # type: ignore[no-untyped-call]\n\n\ndef mint_token(  # noqa: PLR0913\n    *,\n    client: algod.AlgodClient,\n    jwt: str,\n    creator_account: SigningAccount,\n    unit_name: str,\n    total: int,\n    token_metadata: TokenMetadata,\n    mutable: bool,\n    image_path: pathlib.Path | None = None,\n) -> tuple[int, str]:\n    \"\"\"\n    Mint new token on the Algorand blockchain.\n\n    Args:\n        client (algod.AlgodClient): An instance of the `algod.AlgodClient` class representing the Algorand node.\n        jwt (str): The JWT for accessing the Piñata API.\n        creator_account (SigningAccount): An instance of the `SigningAccount` class representing the account that\n        will create the token.\n        asset_name (str): A string representing the name of the token.\n        unit_name (str): A string representing the unit name of the token.\n        total (int): An integer 
representing the total supply of the token.\n        token_metadata (TokenMetadata): An instance of the `TokenMetadata` class representing the metadata of the token.\n        mutable (bool): A boolean indicating whether the token is mutable or not.\n        image_path (pathlib.Path | None, optional): A `pathlib.Path` object representing the path to the\n        image file associated with the token. Defaults to None.\n        decimals (int | None, optional): An integer representing the number of decimal places for the token.\n        Defaults to 0.\n\n    Returns:\n        tuple[int, str]: A tuple containing the asset index and transaction ID of the minted token.\n\n    Raises:\n        ValueError: If the token name in the metadata JSON does not match the provided asset name.\n        ValueError: If the decimals in the metadata JSON does not match the provided decimals amount.\n    \"\"\"\n\n    if image_path:\n        token_metadata.image_integrity = _file_integrity(image_path)\n        token_metadata.image_mimetype = _file_mimetype(image_path)\n        logger.info(\"Uploading image to pinata...\")\n        token_metadata.image = \"ipfs://\" + upload_to_pinata(image_path, jwt=jwt)\n        logger.info(f\"Image uploaded to pinata: {token_metadata.image}\")\n\n    logger.info(\"Uploading metadata to pinata...\")\n    metadata_cid = upload_to_pinata(\n        token_metadata.to_file_path(),\n        jwt=jwt,\n    )\n    logger.info(f\"Metadata uploaded to pinata: {metadata_cid}\")\n\n    asset_config_params = AssetConfigTxnParams(\n        sender=creator_account.address,\n        sp=client.suggested_params(),\n        reserve=_reserve_address_from_cid(metadata_cid) if mutable else \"\",\n        unit_name=unit_name,\n        asset_name=token_metadata.name,\n        url=_create_url_from_cid(metadata_cid) + \"#arc3\" if mutable else \"ipfs://\" + metadata_cid + \"#arc3\",\n        manager=creator_account.address if mutable else \"\",\n        total=total,\n        
decimals=token_metadata.decimals,\n    )\n\n    logger.debug(f\"Asset config params: {asset_config_params.to_json()}\")\n    asset_config_txn = _create_asset_txn(\n        asset_config_params=asset_config_params,\n        token_metadata=token_metadata,\n        use_metadata_hash=not mutable,\n    )\n    signed_asset_config_txn = asset_config_txn.sign(creator_account.private_key)  # type: ignore[no-untyped-call]\n    asset_config_txn_id = client.send_transaction(signed_asset_config_txn)\n    response = wait_for_confirmation(client, asset_config_txn_id, 4)\n\n    return response[\"asset-index\"], asset_config_txn_id\n"
  },
  {
    "path": "src/algokit/core/tasks/mint/models.py",
    "content": "import json\nimport tempfile\nfrom dataclasses import asdict, dataclass\nfrom pathlib import Path\n\nfrom algosdk.transaction import SuggestedParams\n\nMIN_BG_COLOR_LENGTH = 6  # Based on ARC-0003 spec, must be a 6 character hex without a pre-pended #\n\n\n@dataclass\nclass Properties:\n    arbitrary_attributes: dict[str, str | int | float | dict | list]\n\n\n@dataclass\nclass LocalizationIntegrity:\n    locale_hashes: dict[str, str]\n\n\n@dataclass\nclass Localization:\n    uri: str\n    default: str\n    locales: list[str]\n    integrity: LocalizationIntegrity\n\n\n@dataclass\nclass TokenMetadata:\n    name: str\n    decimals: int\n    description: str | None = None\n    properties: Properties | None = None\n    image: str | None = None\n    image_integrity: str | None = None\n    image_mimetype: str | None = None\n    background_color: str | None = None\n    external_url: str | None = None\n    external_url_integrity: str | None = None\n    external_url_mimetype: str | None = None\n    animation_url: str | None = None\n    animation_url_integrity: str | None = None\n    animation_url_mimetype: str | None = None\n    localization: Localization | None = None\n    extra_metadata: str | None = None\n\n    def __post_init__(self) -> None:\n        if self.image_mimetype and not self.image_mimetype.startswith(\"image/\"):\n            raise ValueError(\"image_mimetype must start with 'image/'\")\n        if self.external_url_mimetype and self.external_url_mimetype != \"text/html\":\n            raise ValueError(\"external_url_mimetype must be 'text/html'\")\n        if self.background_color and (\n            len(self.background_color) != MIN_BG_COLOR_LENGTH\n            or not all(char.isdigit() or char.islower() for char in self.background_color)\n        ):\n            raise ValueError(\"background_color must be a six-character hexadecimal without a pre-pended #.\")\n\n    def to_json(self, indent: int | None = 4) -> str:\n        # Filter out None 
values before converting to JSON\n        data_dict = {k: v for k, v in asdict(self).items() if v is not None}\n        return json.dumps(data_dict, indent=indent)\n\n    # Persist to a tmp directory and return the path\n    def to_file_path(self) -> Path:\n        file_path = Path(tempfile.mkstemp()[1])\n        try:\n            with file_path.open(mode=\"w\", encoding=\"utf-8\") as file:\n                file.write(self.to_json(None))\n            return file_path\n        except FileNotFoundError as err:\n            raise ValueError(f\"No such file or directory: '{file_path}'\") from err\n        except json.JSONDecodeError as err:\n            raise ValueError(f\"Failed to decode JSON from file {file_path}: {err}\") from err\n\n    @classmethod\n    def from_json_file(cls, file_path: Path | None, name: str, decimals: int = 0) -> \"TokenMetadata\":\n        if not file_path:\n            return cls(name=name, decimals=decimals)\n\n        try:\n            with file_path.open(encoding=\"utf-8\") as file:\n                data = json.load(file)\n                data[\"name\"] = name\n                data[\"decimals\"] = decimals\n            return cls(**data)\n        except FileNotFoundError as err:\n            raise ValueError(f\"No such file or directory: '{file_path}'\") from err\n        except json.JSONDecodeError as err:\n            raise ValueError(f\"Failed to decode JSON from file {file_path}: {err}\") from err\n\n\n@dataclass\nclass AssetConfigTxnParams:\n    sender: str\n    sp: SuggestedParams\n    unit_name: str\n    asset_name: str\n    url: str\n    manager: str\n    reserve: str\n    total: int\n    freeze: str | None = \"\"\n    clawback: str | None = \"\"\n    note: str | None = \"\"\n    decimals: int = 0\n    default_frozen: bool = False\n    lease: str | None = \"\"\n    rekey_to: str | None = \"\"\n    metadata_hash: bytes | None = None\n    strict_empty_address_check: bool = False\n\n    def to_json(self, indent: int | None = 4) -> 
str:\n        # Filter out None values before converting to JSON\n        data_dict = {k: v for k, v in asdict(self).items() if v is not None and k != \"sp\"}\n        return json.dumps(data_dict, indent=indent)\n"
  },
  {
    "path": "src/algokit/core/tasks/nfd.py",
    "content": "import json\nimport logging\nfrom enum import Enum\n\nimport httpx\n\nlogger = logging.getLogger(__name__)\n\nNF_DOMAINS_API_URL = \"https://api.nf.domains\"\n\n\nclass NFDMatchType(Enum):\n    FULL = \"full\"\n    TINY = \"tiny\"\n    ADDRESS = \"address\"\n\n\ndef _process_get_request(url: str) -> dict:\n    response = httpx.get(url)\n\n    try:\n        response.raise_for_status()\n        data = response.json()\n        if not isinstance(data, dict):\n            raise ValueError(\"Response JSON is not a dictionary\")\n        return data\n    except httpx.HTTPStatusError as err:\n        logger.debug(f\"Error response: {err.response}\")\n\n        if err.response.status_code == httpx.codes.NOT_FOUND:\n            raise Exception(\"Not found!\") from err\n        if err.response.status_code == httpx.codes.BAD_REQUEST:\n            raise Exception(f\"Invalid request: {err.response.text}\") from err\n        if err.response.status_code == httpx.codes.UNAUTHORIZED:\n            raise Exception(f\"Unauthorized to access NFD API: {err.response.text}\") from err\n        if err.response.status_code == httpx.codes.FORBIDDEN:\n            raise Exception(f\"Forbidden to access NFD API: {err.response.text}\") from err\n        if err.response.status_code == httpx.codes.TOO_MANY_REQUESTS:\n            raise Exception(f\"Too many requests to NFD API: {err.response.text}\") from err\n\n        raise Exception(\n            f'NFD lookup failed with status code {err.response.status_code} and message \"{err.response.text}\"'\n        ) from err\n\n\ndef nfd_lookup_by_address(address: str, view: NFDMatchType) -> str:\n    \"\"\"\n    Perform a lookup on an API to retrieve information about a given address.\n\n    Args:\n        address (str): The address to perform the lookup on.\n        view (NFDMatchType): The type of view to retrieve from the API.\n        It can be one of the following: \"full\", \"tiny\", or \"address\".\n\n    Returns:\n        str: If 
the view is \"address\", returns the name associated with the address as a string.\n        If the view is not \"address\", returns the JSON response from the API as a string with an indentation of 2.\n\n    Raises:\n        Exception: If the content from the API is not a dictionary, raises an exception with the unexpected response.\n    \"\"\"\n\n    view_type = \"thumbnail\" if view.value == NFDMatchType.ADDRESS.value else view.value\n    url = f\"{NF_DOMAINS_API_URL}/nfd/lookup?address={address}&view={view_type}&allowUnverified=false\"\n    content = _process_get_request(url)\n    if isinstance(content, dict):\n        if view.value == NFDMatchType.ADDRESS.value:\n            return str(content[address][\"name\"])\n        else:\n            return json.dumps(content, indent=2)\n\n    raise Exception(f\"Unexpected response from NFD API: {content}\")\n\n\ndef nfd_lookup_by_domain(domain: str, view: NFDMatchType) -> str:\n    \"\"\"\n    Performs a lookup on a given domain using the NF Domains API.\n\n    Args:\n        domain (str): The domain to be looked up.\n        view (NFDMatchType): The type of information to retrieve.\n        It can be one of the following: NFDMatchType.FULL, NFDMatchType.TINY, or NFDMatchType.ADDRESS.\n\n    Returns:\n        str: If the view is NFDMatchType.ADDRESS, returns the owner of the domain as a string.\n        If the view is not NFDMatchType.ADDRESS, returns the response JSON stringified with indentation.\n\n    Raises:\n        Exception: If the response from the NF Domains API is not a dictionary.\n    \"\"\"\n\n    view_type = \"brief\" if view.value == NFDMatchType.ADDRESS.value else view.value\n    url = f\"{NF_DOMAINS_API_URL}/nfd/{domain}?view={view_type}&poll=false\"\n    content = _process_get_request(url)\n    if isinstance(content, dict):\n        if view == NFDMatchType.ADDRESS:\n            return str(content[\"owner\"])\n        else:\n            return json.dumps(content, indent=2)\n\n    raise 
Exception(f\"Unexpected response from NFD API: {content}\")\n"
  },
  {
    "path": "src/algokit/core/tasks/vanity_address.py",
    "content": "import logging\nimport multiprocessing\nimport signal\nimport time\nimport types\nimport typing\nfrom collections.abc import Callable\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom multiprocessing import Process, Queue, cpu_count\nfrom timeit import default_timer as timer\n\nimport algosdk\nfrom algosdk.mnemonic import from_private_key, to_private_key\n\nlogger = logging.getLogger(__name__)\n\nPROGRESS_REFRESH_INTERVAL_SECONDS = 5\n\n\nclass MatchType(Enum):\n    START = \"start\"\n    ANYWHERE = \"anywhere\"\n    END = \"end\"\n\n\nMatchFunction = Callable[[str, str], bool]\n\nMATCH_FUNCTIONS: dict[MatchType, MatchFunction] = {\n    MatchType.START: lambda addr, keyword: addr.startswith(keyword),\n    MatchType.ANYWHERE: lambda addr, keyword: keyword in addr,\n    MatchType.END: lambda addr, keyword: addr.endswith(keyword),\n}\n\n\n@dataclass\nclass VanityAccount:\n    mnemonic: str\n    address: str\n    private_key: str\n\n\nclass Counter:\n    def __init__(self, initial_value: int = 0):\n        self.val = multiprocessing.RawValue(\"i\", initial_value)\n        self.lock = multiprocessing.Lock()\n\n    def increment(self, value: int = 1) -> None:\n        with self.lock:\n            self.val.value += value\n\n    @property\n    def value(self) -> int:\n        return int(self.val.value)\n\n\ndef _log_progress(counter: Counter, start_time: float) -> None:\n    \"\"\"Logs progress of address matching at regular intervals.\"\"\"\n    last_log_time = start_time\n\n    try:\n        while True:\n            total_count = counter.value\n            if timer() - last_log_time >= PROGRESS_REFRESH_INTERVAL_SECONDS:\n                elapsed_time = timer() - start_time\n                message = (\n                    f\"Iterated over ~{total_count} addresses in {elapsed_time:.2f} seconds.\"\n                    if total_count > 0\n                    else f\"Elapsed time: {elapsed_time:.2f} seconds.\"\n                )\n                
logger.info(f\"Still searching for a match. {message}\")\n                last_log_time = timer()\n            time.sleep(PROGRESS_REFRESH_INTERVAL_SECONDS)\n    except KeyboardInterrupt:\n        return\n\n\ndef _search_for_matching_address(keyword: str, match: MatchType, counter: Counter, queue: Queue) -> None:\n    \"\"\"\n    Searches for a matching address based on the specified keyword and matching criteria.\n\n    Args:\n        keyword (str): The keyword to search for in the address.\n        match (MatchType): The matching criteria for the keyword. It can be \"start\" to match addresses that start with\n        the keyword, \"anywhere\" to match addresses that contain the keyword anywhere,\n        or \"end\" to match addresses that end with the keyword.\n        counter (Counter): A shared counter used to track the total number of addresses generated across processes.\n        queue (Queue): A multiprocessing queue used to report the matching (address, mnemonic) result.\n    \"\"\"\n\n    try:\n        local_count = 0\n        batch_size = 100\n\n        while True:\n            private_key, address = algosdk.account.generate_account()  # type: ignore[no-untyped-call]\n            local_count += 1\n            if local_count % batch_size == 0:\n                counter.increment(local_count)\n                local_count = 0\n\n            if MATCH_FUNCTIONS[match](address, keyword):\n                generated_mnemonic = from_private_key(private_key)  # type: ignore[no-untyped-call]\n                queue.put((address, generated_mnemonic))\n                return\n    except KeyboardInterrupt:\n        return\n\n\ndef generate_vanity_address(keyword: str, match: MatchType) -> VanityAccount:\n    \"\"\"\n    Generate a vanity address in the Algorand blockchain.\n\n    Args:\n        keyword (str): The keyword to search for in the address.\n        match (MatchType): The matching 
criteria for the keyword. It can be \"start\" to match addresses that start with\n        the keyword, \"anywhere\" to match addresses that contain the keyword anywhere,\n        or \"end\" to match addresses that end with the keyword.\n\n    Returns:\n        VanityAccount: An object containing the generated mnemonic and address\n        that match the specified keyword and matching criteria.\n    \"\"\"\n    jobs: list[Process] = []\n\n    def signal_handler(sig: int, frame: types.FrameType | None) -> typing.NoReturn:\n        logger.debug(f\"KeyboardInterrupt captured for {sig} and frame {frame}. Terminating processes...\")\n        for p in jobs:\n            p.terminate()\n        raise KeyboardInterrupt\n\n    num_processes = cpu_count()\n    logger.info(f\"Using {num_processes} processes to search for a matching address...\")\n    queue: Queue = Queue()\n    counter = Counter()\n\n    start_time: float = timer()\n    for _ in range(num_processes):\n        process = Process(target=_search_for_matching_address, args=(keyword, match, counter, queue))\n        jobs.append(process)\n        process.start()\n\n    # Start the logger process\n    logger_process = Process(target=_log_progress, args=(counter, start_time))\n    jobs.append(logger_process)\n    logger_process.start()\n\n    signal.signal(signal.SIGINT, signal_handler)  # capture ctrl-c so we can report attempts and running time\n\n    address, mnemonic = queue.get()  # this will return once one of the spawned processes finds a match\n\n    logger.info(f\"Vanity address generation time: {timer() - start_time:.2f} seconds\")\n\n    for p in jobs:\n        p.terminate()\n\n    return VanityAccount(\n        mnemonic=mnemonic,\n        address=address,\n        private_key=to_private_key(mnemonic),  # type: ignore[no-untyped-call]\n    )\n"
  },
  {
    "path": "src/algokit/core/tasks/wallet.py",
    "content": "import json\nimport logging\nfrom dataclasses import dataclass\n\nimport keyring\n\nlogger = logging.getLogger(__name__)\n\nWALLET_ALIAS_KEYRING_NAMESPACE = \"algokit_alias\"\nWALLET_ALIASES_KEYRING_NAMESPACE = \"algokit_aliases\"\nWALLET_ALIASES_KEYRING_USERNAME = \"aliases\"\n\n# Windows Credentials locker has a max limit of 1280 chars per password length.\n# Given that each alias is at most 20 chars, around ~50 alias keys can be stored within single password field.\n# Hence the limitation of 50 aliases.\nWALLET_ALIASING_MAX_LIMIT = 50\n\n\n@dataclass\nclass WalletAliasKeyringData:\n    alias: str\n    address: str\n    private_key: str | None\n\n\nclass WalletAliasingLimitError(Exception):\n    pass\n\n\ndef _get_alias_keys() -> list[str]:\n    try:\n        response = keyring.get_password(\n            service_name=WALLET_ALIASES_KEYRING_NAMESPACE, username=WALLET_ALIASES_KEYRING_USERNAME\n        )\n\n        if not response:\n            return []\n\n        alias_keys: list[str] = json.loads(response)\n        return alias_keys\n    except Exception as ex:\n        logger.debug(\"Failed to get alias keys from keyring\", exc_info=ex)\n        return []\n\n\ndef _update_alias_keys(alias_keys: list[str]) -> None:\n    keyring.set_password(\n        service_name=WALLET_ALIASES_KEYRING_NAMESPACE,\n        username=WALLET_ALIASES_KEYRING_USERNAME,\n        password=json.dumps(alias_keys, separators=(\",\", \":\")),\n    )\n\n\ndef _add_alias_key(alias_name: str) -> None:\n    alias_keys = _get_alias_keys()\n\n    if len(alias_keys) >= WALLET_ALIASING_MAX_LIMIT:\n        raise WalletAliasingLimitError(\"You have reached the maximum number of aliases.\")\n\n    if alias_name not in alias_keys:\n        alias_keys.append(alias_name)\n\n    _update_alias_keys(alias_keys)\n\n\ndef _remove_alias_key(alias_name: str) -> None:\n    alias_keys = _get_alias_keys()\n\n    if alias_name in alias_keys:\n        alias_keys.remove(alias_name)\n\n    
_update_alias_keys(alias_keys)\n\n\ndef add_alias(alias_name: str, address: str, private_key: str | None) -> None:\n    \"\"\"\n    Add an address or account to be stored against a named alias in keyring.\n\n    Args:\n        alias_name (str): The name of the alias to be added.\n        address (str): The address or account to be stored against the alias.\n        private_key (str | None): The private key associated with the address or account.\n        It can be None if no private key is available.\n\n    Raises:\n        WalletAliasingLimitError: If the maximum number of aliases has been reached.\n\n    \"\"\"\n\n    try:\n        _add_alias_key(alias_name)\n        keyring.set_password(\n            service_name=WALLET_ALIAS_KEYRING_NAMESPACE,\n            username=alias_name,\n            password=json.dumps(\n                WalletAliasKeyringData(\n                    alias=alias_name,\n                    address=address,\n                    private_key=private_key,\n                ).__dict__\n            ),\n        )\n    except Exception as ex:\n        logger.debug(\"Failed to add alias to keyring\", exc_info=ex)\n        raise ex\n\n\ndef get_alias(alias_name: str) -> WalletAliasKeyringData | None:\n    \"\"\"\n    Get the address or account stored against a named alias in the keyring.\n\n    Args:\n        alias_name (str): The name of the alias to retrieve.\n\n    Returns:\n        WalletAliasKeyringData | None: An instance of the WalletAliasKeyringData class if the alias exists,\n        otherwise None.\n\n    Example Usage:\n        alias_data = get_alias(\"my_alias\")\n        if alias_data:\n            print(alias_data.address)\n    \"\"\"\n\n    try:\n        response = keyring.get_password(service_name=WALLET_ALIAS_KEYRING_NAMESPACE, username=alias_name)\n\n        if not response:\n            return None\n\n        return WalletAliasKeyringData(**json.loads(response))\n    except Exception as ex:\n        logger.debug(f\"`{alias_name}` 
does not exist\", exc_info=ex)\n        return None\n\n\ndef get_aliases() -> list[WalletAliasKeyringData]:\n    \"\"\"\n    Retrieves a list of wallet aliases and their associated data from a keyring.\n\n    Returns:\n        A list of WalletAliasKeyringData objects representing the aliases and their associated data.\n    \"\"\"\n\n    try:\n        alias_keys = _get_alias_keys()\n        response: list[WalletAliasKeyringData] = []\n\n        for alias_name in alias_keys:\n            alias_data = get_alias(alias_name)\n            if alias_data:\n                response.append(alias_data)\n\n        return response\n    except Exception as ex:\n        logger.debug(\"Failed to get aliases from keyring\", exc_info=ex)\n        return []\n\n\ndef remove_alias(alias_name: str) -> None:\n    \"\"\"\n    Remove an address or account stored against a named alias in keyring.\n\n    :param alias_name: The name of the alias to be removed.\n    :type alias_name: str\n    \"\"\"\n\n    keyring.delete_password(service_name=WALLET_ALIAS_KEYRING_NAMESPACE, username=alias_name)\n    _remove_alias_key(alias_name)\n"
  },
  {
    "path": "src/algokit/core/typed_client_generation.py",
    "content": "import abc\nimport enum\nimport json\nimport logging\nimport re\nimport shutil  # noqa: F401\nfrom functools import reduce\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import ClassVar\n\nimport click\n\nfrom algokit.core import proc\nfrom algokit.core.utils import (\n    extract_semantic_version,\n    extract_version_triple,\n    find_valid_pipx_command,\n    get_npm_command,\n)\n\nlogger = logging.getLogger(__name__)\n\nTYPESCRIPT_NPM_PACKAGE = \"@algorandfoundation/algokit-client-generator\"\nTYPESCRIPT_GENERATE_COMMAND = \"algokitgen-ts\"\nPYTHON_PYPI_PACKAGE = \"algokit-client-generator\"\nPYTHON_GENERATE_COMMAND = \"algokitgen-py\"\n\n\ndef _snake_case(s: str) -> str:\n    s = s.replace(\"-\", \" \")\n    s = re.sub(r\"([A-Z]+)([A-Z][a-z])\", r\"\\1_\\2\", s)\n    s = re.sub(r\"([a-z\\d])([A-Z])\", r\"\\1_\\2\", s)\n    return re.sub(r\"[-\\s]\", \"_\", s).lower()\n\n\nclass AppSpecType(enum.Enum):\n    ARC32 = \"arc32\"\n    ARC56 = \"arc56\"\n\n\nclass AppSpecsNotFoundError(Exception):\n    pass\n\n\nclass ClientGenerator(abc.ABC):\n    language: ClassVar[str]\n    extension: ClassVar[str]\n    version: str | None\n\n    _by_language: ClassVar[dict[str, type[\"ClientGenerator\"]]] = {}\n    _by_extension: ClassVar[dict[str, type[\"ClientGenerator\"]]] = {}\n\n    def __init__(self, version: str | None) -> None:\n        self.command = self.find_generate_command(version)\n\n    def __init_subclass__(cls, language: str, extension: str) -> None:\n        cls.language = language\n        cls.extension = extension\n        cls._by_language[language] = cls\n        cls._by_extension[extension] = cls\n\n    @classmethod\n    def languages(cls) -> list[str]:\n        return list(cls._by_language.keys())\n\n    @classmethod\n    def create_for_language(cls, language: str, version: str | None) -> \"ClientGenerator\":\n        return cls._by_language[language](version)\n\n    @classmethod\n    def create_for_extension(cls, 
extension: str, version: str | None) -> \"ClientGenerator\":\n        return cls._by_extension[extension](version)\n\n    def resolve_output_path(self, app_spec: Path, output_path_pattern: str | None) -> tuple[Path, AppSpecType] | None:\n        try:\n            application_json = json.loads(app_spec.read_text())\n            try:\n                contract_name: str = application_json[\"name\"]  # ARC-56\n                app_spec_type: AppSpecType = AppSpecType.ARC56\n            except KeyError:\n                contract_name = application_json[\"contract\"][\"name\"]  # ARC-32\n                app_spec_type = AppSpecType.ARC32\n        except Exception:\n            logger.error(f\"Couldn't parse contract name from {app_spec}\", exc_info=True)\n            return None\n        output_resolved = (output_path_pattern or self.default_output_pattern).format(\n            contract_name=self.format_contract_name(contract_name),\n            app_spec_dir=str(app_spec.parent),\n        )\n        output_path = Path(output_resolved)\n        if output_path.exists() and not output_path.is_file():\n            logger.error(f\"Could not output to {output_path} as it already exists and is a directory\")\n            return None\n        return (output_path, app_spec_type)\n\n    @abc.abstractmethod\n    def generate(self, app_spec: Path, output: Path, args: list[str] | None = None) -> None: ...\n\n    @abc.abstractmethod\n    def show_help(self) -> None: ...\n\n    @property\n    @abc.abstractmethod\n    def default_output_pattern(self) -> str: ...\n\n    @abc.abstractmethod\n    def find_generate_command(self, version: str | None) -> list[str]: ...\n\n    def format_contract_name(self, contract_name: str) -> str:\n        return contract_name\n\n    def generate_all(\n        self,\n        app_spec_path_or_dir: Path,\n        output_path_pattern: str | None,\n        args: list[str] | None,\n        *,\n        raise_on_path_resolution_failure: bool,\n    ) -> None:\n      
  if not app_spec_path_or_dir.is_dir():\n            app_specs = [app_spec_path_or_dir]\n        else:\n            file_patterns = [\"application.json\", \"*.arc32.json\", \"*.arc56.json\"]\n            app_specs = list(set(chain.from_iterable(app_spec_path_or_dir.rglob(pattern) for pattern in file_patterns)))\n            app_specs.sort()\n            if not app_specs:\n                raise AppSpecsNotFoundError\n\n        def accumulate_items_to_generate(\n            acc: dict[Path, tuple[Path, AppSpecType]], app_spec: Path\n        ) -> dict[Path, tuple[Path, AppSpecType]]:\n            output_path_result = self.resolve_output_path(app_spec, output_path_pattern)\n            if output_path_result is None:\n                if raise_on_path_resolution_failure:\n                    raise click.ClickException(f\"Error generating client for {app_spec}\")\n                return acc\n            (output_path, app_spec_type) = output_path_result\n            if output_path in acc:\n                # ARC-56 app specs take precedence over ARC-32 app specs\n                if acc[output_path][1] == AppSpecType.ARC32 and app_spec_type == AppSpecType.ARC56:\n                    acc[output_path] = (app_spec, app_spec_type)\n            else:\n                acc[output_path] = (app_spec, app_spec_type)\n            return acc\n\n        items_to_generate: dict[Path, tuple[Path, AppSpecType]] = reduce(accumulate_items_to_generate, app_specs, {})\n        for output_path, (app_spec, _) in items_to_generate.items():\n            self.generate(app_spec, output_path, args)\n\n\nclass PythonClientGenerator(ClientGenerator, language=\"python\", extension=\".py\"):\n    def generate(self, app_spec: Path, output: Path, args: list[str] | None = None) -> None:\n        logger.info(f\"Generating Python client code for application specified in {app_spec} and writing to {output}\")\n        cmd = [\n            *self.command,\n            \"-a\",\n            str(app_spec),\n           
 \"-o\",\n            str(output),\n        ]\n        if args:\n            cmd.extend(args)\n\n        run_result = proc.run(cmd)\n        click.echo(run_result.output)\n\n        if run_result.exit_code != 0:\n            click.secho(\n                f\"Client generation failed for {app_spec}.\",\n                err=True,\n                fg=\"red\",\n            )\n            raise click.exceptions.Exit(run_result.exit_code)\n\n    def show_help(self) -> None:\n        \"\"\"Show help for the Python client generator.\"\"\"\n        cmd = [*self.command, \"--help\"]\n        run_result = proc.run(cmd)\n\n        # Filter out unwanted lines from the help output\n        filtered_lines = []\n        skip_next_lines = False\n\n        for line in run_result.output.splitlines():\n            # Skip usage line\n            if \"usage: algokitgen-py\" in line:\n                continue\n\n            # Start skipping when we encounter the -a option\n            if \"-a APP_SPEC\" in line:\n                skip_next_lines = True\n                continue\n\n            # If we're skipping and encounter a line that starts a new option (starts with a dash), stop skipping\n            if skip_next_lines:\n                is_new_option = line.lstrip().startswith(\"-\")\n                if is_new_option:\n                    skip_next_lines = False\n                else:\n                    continue\n\n            filtered_lines.append(line)\n\n        click.echo(\"\\n\".join(filtered_lines))\n        if run_result.exit_code != 0:\n            raise click.exceptions.Exit(run_result.exit_code)\n\n    @property\n    def default_output_pattern(self) -> str:\n        return f\"{{contract_name}}_client{self.extension}\"\n\n    def format_contract_name(self, contract_name: str) -> str:\n        return _snake_case(contract_name)\n\n    def find_project_generate_command(self, version: str | None) -> list[str] | None:\n        \"\"\"\n        Try find the generate command in the 
project.\n        \"\"\"\n        try:\n            # Use the tree output as it puts the package info on the first line of the output\n            result = proc.run([\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"])\n            if result.exit_code == 0:\n                generate_command = [\"poetry\", \"run\", PYTHON_GENERATE_COMMAND]\n                if version is not None:\n                    installed_version = None\n                    lines = result.output.splitlines()\n                    if len(lines) > 0:\n                        installed_version = extract_version_triple(lines[0])\n                        if extract_version_triple(version) == installed_version:\n                            return generate_command\n                else:\n                    return generate_command\n        except OSError:\n            pass\n        except ValueError:\n            pass\n\n        return None\n\n    def find_global_generate_command(self, pipx_command: list[str], version: str | None) -> list[str] | None:\n        \"\"\"\n        Try find the generate command installed globally.\n        \"\"\"\n        try:\n            result = proc.run([*pipx_command, \"list\", \"--short\"])\n            if result.exit_code == 0:\n                generate_command = [PYTHON_GENERATE_COMMAND]\n                for line in result.output.splitlines():\n                    if PYTHON_PYPI_PACKAGE in line:\n                        if version is not None:\n                            installed_version = None\n                            installed_version = extract_version_triple(line)\n                            if extract_version_triple(version) == installed_version:\n                                return generate_command\n                        else:\n                            return generate_command\n        except OSError:\n            pass\n        except ValueError:\n            pass\n\n        return None\n\n    def find_generate_command(self, version: str | None) 
-> list[str]:\n        \"\"\"\n        Find Python generator command.\n        If a matching version is installed at a project level, use that.\n        If a matching version is installed at a global level, use that.\n        Otherwise, run the matching version via pipx.\n        \"\"\"\n\n        logger.debug(\"Searching for project installed client generator\")\n        project_result = self.find_project_generate_command(version)\n        if project_result is not None:\n            return project_result\n\n        pipx_command = find_valid_pipx_command(\n            f\"Unable to find pipx install so that the `{PYTHON_PYPI_PACKAGE}` can be run; \"\n            \"please install pipx via https://pypa.github.io/pipx/ \"\n            \"and then try `algokit generate client ...` again.\"\n        )\n\n        logger.debug(\"Searching for globally installed client generator\")\n        global_result = self.find_global_generate_command(pipx_command, version)\n        if global_result is not None:\n            return global_result\n\n        # when not installed, run via pipx\n        logger.debug(\"No matching installed client generator found, run client generator via pipx\")\n        return [\n            *pipx_command,\n            \"run\",\n            f\"--spec={PYTHON_PYPI_PACKAGE}{f'=={version}' if version is not None else ''}\",\n            PYTHON_GENERATE_COMMAND,\n        ]\n\n\nclass TypeScriptClientGenerator(ClientGenerator, language=\"typescript\", extension=\".ts\"):\n    def generate(self, app_spec: Path, output: Path, args: list[str] | None = None) -> None:\n        cmd = [*self.command, \"generate\", \"-a\", str(app_spec), \"-o\", str(output)]\n        if args:\n            cmd.extend(args)\n\n        logger.info(\n            f\"Generating TypeScript client code for application specified in {app_spec} and writing to {output}\"\n        )\n        run_result = proc.run(cmd)\n        click.echo(run_result.output)\n\n        if run_result.exit_code != 0:\n 
           click.secho(\n                f\"Client generation failed for {app_spec}.\",\n                err=True,\n                fg=\"red\",\n            )\n            raise click.exceptions.Exit(run_result.exit_code)\n\n    def show_help(self) -> None:\n        \"\"\"Show help for the TypeScript client generator.\"\"\"\n        cmd = [*self.command, \"generate\", \"--help\"]\n        run_result = proc.run(cmd)\n\n        # Filter out unwanted lines from the help output\n        filtered_lines = []\n        for line in run_result.output.splitlines():\n            if \"-a\" not in line and \"Usage: algokitgen\" not in line:\n                filtered_lines.append(line)\n\n        click.echo(\"\\n\".join(filtered_lines))\n        if run_result.exit_code != 0:\n            raise click.exceptions.Exit(run_result.exit_code)\n\n    def find_project_generate_command(\n        self, npm_command: list[str], npx_command: list[str], version: str | None\n    ) -> list[str] | None:\n        try:\n            result = proc.run([*npm_command, \"ls\", \"--no-unicode\"])\n            # Normally we would check the exit code, however `npm ls` may return a non zero exit code\n            # when certain dependencies are not met. 
We still want to continue processing.\n            if result.output != \"\":\n                generate_command = [*npx_command, TYPESCRIPT_NPM_PACKAGE]\n                for line in result.output.splitlines():\n                    if TYPESCRIPT_NPM_PACKAGE in line:\n                        if \"UNMET DEPENDENCY\" in line:\n                            raise ModuleNotFoundError(\n                                f\"{TYPESCRIPT_NPM_PACKAGE} was detected in the project, but is not installed.\"\n                            )\n                        if version is not None:\n                            installed_version = extract_semantic_version(line)\n                            if extract_semantic_version(version) == installed_version:\n                                return generate_command\n                        else:\n                            return generate_command\n        except OSError:\n            pass\n        except ValueError:\n            pass\n\n        return None\n\n    def find_global_generate_command(\n        self, npm_command: list[str], npx_command: list[str], version: str | None\n    ) -> list[str] | None:\n        return self.find_project_generate_command([*npm_command, \"--global\"], npx_command, version)\n\n    def find_generate_command(self, version: str | None) -> list[str]:\n        \"\"\"\n        Find TypeScript generator command.\n        If a matching version is installed at a project level, use that.\n        If a matching version is installed at a global level, use that.\n        Otherwise, run the matching version via npx.\n        \"\"\"\n\n        npm_command = get_npm_command(\n            f\"Unable to find npm install so that the `{TYPESCRIPT_NPM_PACKAGE}` can be run; \"\n            \"please install npm via https://docs.npmjs.com/downloading-and-installing-node-js-and-npm \"\n            \"and then try `algokit generate client ...` again.\",\n        )\n        npx_command = get_npm_command(\n            f\"Unable to find npx 
install so that the `{TYPESCRIPT_NPM_PACKAGE}` can be run; \"\n            \"please install npx via https://www.npmjs.com/package/npx \"\n            \"and then try `algokit generate client ...` again.\",\n            is_npx=True,\n        )\n\n        logger.debug(\"Searching for project installed client generator\")\n        project_result = self.find_project_generate_command(npm_command, npx_command, version)\n        if project_result is not None:\n            return project_result\n\n        logger.debug(\"Searching for globally installed client generator\")\n        global_result = self.find_global_generate_command(npm_command, npx_command, version)\n        if global_result is not None:\n            return global_result\n\n        # when not installed, run via npx\n        logger.debug(\"No matching installed client generator found, run client generator via npx\")\n        return [\n            *npx_command,\n            \"--yes\",\n            f\"{TYPESCRIPT_NPM_PACKAGE}@{version if version is not None else 'latest'}\",\n        ]\n\n    @property\n    def default_output_pattern(self) -> str:\n        return f\"{{contract_name}}Client{self.extension}\"\n"
  },
  {
    "path": "src/algokit/core/utils.py",
    "content": "from __future__ import annotations\n\nimport os\nimport platform\nimport re\nimport shutil\nimport socket\nimport sys\nimport threading\nimport time\nfrom functools import cache\nfrom itertools import cycle\nfrom os import environ\nfrom pathlib import Path\nfrom shutil import which\nfrom typing import TYPE_CHECKING, Any\n\nimport click\nimport dotenv\nfrom algokit_utils import AlgorandClient\n\nfrom algokit.core import proc\n\nif TYPE_CHECKING:\n    from collections.abc import Callable, Iterator\n\n    from algokit.cli.common.constants import AlgorandNetwork\n\nCLEAR_LINE = \"\\033[K\"\nSPINNER_FRAMES = [\"⠋\", \"⠙\", \"⠹\", \"⠸\", \"⠼\", \"⠴\", \"⠦\", \"⠧\", \"⠇\", \"⠏\"]\n\n# From _WIN_DEFAULT_PATHEXT from shutils\nWIN_DEFAULT_PATHEXT = \".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC\"\n\n\ndef extract_version_triple(version_str: str) -> str:\n    match = re.search(r\"\\d+\\.\\d+\\.\\d+\", version_str)\n    if not match:\n        raise ValueError(\"Unable to parse version number\")\n    return match.group()\n\n\ndef extract_semantic_version(version_str: str) -> str:\n    match = re.search(r\"\\d+\\.\\d+\\.\\d+(?:-[a-zA-Z0-9.]+)?\", version_str)\n    if not match:\n        raise ValueError(\"Unable to parse version number\")\n    return match.group()\n\n\ndef is_minimum_version(system_version: str, minimum_version: str) -> bool:\n    system_version_as_tuple = tuple(map(int, system_version.split(\".\")))\n    minimum_version_as_tuple = tuple(map(int, minimum_version.split(\".\")))\n    return system_version_as_tuple >= minimum_version_as_tuple\n\n\ndef is_network_available(host: str = \"8.8.8.8\", port: int = 53, timeout: float = 3.0) -> bool:\n    \"\"\"\n    Check if internet is available by trying to establish a socket connection.\n    \"\"\"\n\n    try:\n        socket.setdefaulttimeout(timeout)\n        with socket.create_connection((host, port), timeout=timeout):\n            return True\n    except OSError:\n        return False\n\n\ndef 
animate(name: str, stop_event: threading.Event) -> None:\n    \"\"\"Displays an animated spinner in the console.\"\"\"\n    # Ensure sys.stdout uses UTF-8 encoding\n    if sys.stdout.encoding.lower() != \"utf-8\":\n        sys.stdout = open(sys.stdout.fileno(), mode=\"w\", encoding=\"utf-8\", buffering=1)  # noqa: SIM115, PTH123\n\n    for frame in cycle(SPINNER_FRAMES):\n        if stop_event.is_set():\n            break\n        text = f\"\\r{frame} {name}\"\n        sys.stdout.write(text)\n        sys.stdout.flush()\n        time.sleep(0.1)\n\n    sys.stdout.write(\"\\r\" + CLEAR_LINE)  # Clear the animation line\n    sys.stdout.flush()\n\n\ndef run_with_animation(\n    target_function: Callable[..., Any], animation_text: str = \"Loading\", *args: Any, **kwargs: Any\n) -> Any:  # noqa: ANN401\n    \"\"\"Executes a function while displaying an animation, handling termination.\"\"\"\n    stop_event = threading.Event()\n    animation_thread = threading.Thread(target=animate, args=(animation_text, stop_event), daemon=True)\n    animation_thread.start()\n\n    try:\n        result: Any = target_function(*args, **kwargs)\n    except Exception:\n        raise  # Re-raise to propagate the exception\n    finally:\n        stop_event.set()\n        animation_thread.join()  # Wait for animation to finish\n\n    return result\n\n\ndef find_valid_pipx_command(error_message: str) -> list[str]:\n    for pipx_command in get_candidate_pipx_commands():\n        try:\n            pipx_version_result = proc.run([*pipx_command, \"--version\"])\n        except OSError:\n            pass  # in case of path/permission issues, go to next candidate\n        else:\n            if pipx_version_result.exit_code == 0:\n                return pipx_command\n    # If pipx isn't found in global path or python -m pipx then bail out\n    #   this is an exceptional circumstance since pipx should always be present with algokit\n    #   since it's installed with brew / winget as a dependency, and 
otherwise is used to install algokit\n    raise click.ClickException(error_message)\n\n\ndef get_candidate_pipx_commands() -> Iterator[list[str]]:\n    # first try is pipx via PATH\n    yield [\"pipx\"]\n    # otherwise try getting an interpreter with pipx installed as a module,\n    # this won't work if pipx is installed in its own venv but worth a shot\n    for python_path in get_python_paths():\n        yield [python_path, \"-m\", \"pipx\"]\n\n\ndef get_npm_command(error_message: str, *, is_npx: bool = False) -> list[str]:\n    command = \"npx\" if is_npx else \"npm\"\n    path = shutil.which(command)\n    if not path:\n        raise click.ClickException(error_message)\n        # Create the npm directory inside %APPDATA% if it doesn't exist, as npx on windows needs this.\n        # See https://github.com/npm/cli/issues/7089 for more details.\n    if is_windows():\n        appdata_dir = os.getenv(\"APPDATA\")\n        if appdata_dir is not None:\n            appdata_dir_path = Path(appdata_dir).expanduser()\n            npm_dir = appdata_dir_path / \"npm\"\n            try:\n                if not npm_dir.exists():\n                    npm_dir.mkdir(parents=True)\n            except OSError as ex:\n                raise click.ClickException(\n                    f\"Failed to create the `npm` directory in {appdata_dir_path}.\\n\"\n                    \"This command uses `npx`, which requires the `npm` directory to exist \"\n                    \"in the above path, otherwise an ENOENT 4058 error will occur.\\n\"\n                    \"Please create this directory manually and try again.\"\n                ) from ex\n        return [f\"{command}.cmd\"]\n    return [command]\n\n\ndef get_python_paths() -> Iterator[str]:\n    for python_name in (\"python3\", \"python\"):\n        if python_path := which(python_name):\n            yield python_path\n    python_base_path = get_base_python_path()\n    if python_base_path is not None:\n        yield 
python_base_path\n\n\ndef get_base_python_path() -> str | None:\n    this_python: str | None = sys.executable\n    if not this_python or this_python.endswith(\"algokit\"):\n        # Note: can be empty or None... yikes! unlikely though\n        # https://docs.python.org/3.10/library/sys.html#sys.executable\n        return None\n    # not in venv... not recommended to install algokit this way, but okay\n    if sys.prefix == sys.base_prefix:\n        return this_python\n    this_python_path = Path(this_python)\n    # try resolving symlink, this should be default on *nix\n    try:\n        if this_python_path.is_symlink():\n            return str(this_python_path.resolve())\n    except (OSError, RuntimeError):\n        pass\n    # otherwise, try getting an internal value which should be set when running in a .venv\n    # this will be the value of `home = <path>` in pyvenv.cfg if it exists\n    if base_home := getattr(sys, \"_home\", None):\n        base_home_path = Path(base_home)\n        for name in (\"python\", \"python3\", f\"python3.{sys.version_info.minor}\"):\n            candidate_path = base_home_path / name\n            if is_windows():\n                candidate_path = candidate_path.with_suffix(\".exe\")\n            if candidate_path.is_file():\n                return str(candidate_path)\n    # give up, we tried...\n    return this_python\n\n\ndef is_binary_mode() -> bool:\n    \"\"\"\n    Check if the current Python interpreter is running in a native binary frozen environment.\n    return: True if running in a native binary frozen environment, False otherwise.\n    \"\"\"\n    return getattr(sys, \"frozen\", False) and hasattr(sys, \"_MEIPASS\")\n\n\ndef is_windows() -> bool:\n    return platform.system() == \"Windows\"\n\n\ndef is_wsl() -> bool:\n    \"\"\"\n    detects if Python is running in WSL\n    https://github.com/scivision/detect-windows-subsystem-for-linux\n    \"\"\"\n    return platform.uname().release.endswith((\"-Microsoft\", 
\"microsoft-standard-WSL2\"))\n\n\ndef split_command_string(command: str) -> list[str]:\n    \"\"\"\n    Parses a command string into a list of arguments, handling both shell and non-shell commands\n    \"\"\"\n\n    if platform.system() == \"Windows\":\n        import mslex\n\n        return mslex.split(command)\n    else:\n        import shlex\n\n        return shlex.split(command)\n\n\ndef resolve_command_path(\n    command: list[str],\n) -> list[str]:\n    \"\"\"\n    Encapsulates custom command resolution, promotes reusability\n\n    Args:\n        command (list[str]): The command to resolve\n        allow_chained_commands (bool): Whether to allow chained commands (e.g. \"&&\" or \"||\")\n\n    Returns:\n        list[str]: The resolved command\n    \"\"\"\n\n    cmd, *args = command\n\n    # No resolution needed if the command already has a path or is not Windows-specific\n    if Path(cmd).name != cmd:\n        return command\n\n    # Windows-specific handling if 'shutil.which' fails:\n    if is_windows():\n        for ext in environ.get(\"PATHEXT\", WIN_DEFAULT_PATHEXT).split(\";\"):\n            potential_path = shutil.which(cmd + ext)\n            if potential_path:\n                return [potential_path, *args]\n\n    # If resolves directly, return\n    if resolved_cmd := shutil.which(cmd):\n        return [resolved_cmd, *args]\n\n    # Command not found with any extension\n    raise click.ClickException(f\"Failed to resolve command path, '{cmd}' wasn't found\")\n\n\ndef load_env_file(path: Path | None) -> dict[str, str | None]:\n    \"\"\"Load the general .env configuration.\n\n    Args:\n        path (Path): Path to the .env file or directory containing the .env file.\n\n    Returns:\n        dict[str, str | None]: Dictionary with .env configurations.\n    \"\"\"\n    if path is None:\n        return {}\n\n    env_path = path if path.is_file() else path / \".env\"\n    if env_path.is_file():\n        return dotenv.dotenv_values(env_path, 
verbose=True)\n\n    return {}\n\n\ndef alphanumeric_sort_key(s: str) -> list[int | str]:\n    \"\"\"\n    Generate a key for sorting strings that contain both text and numbers.\n    For instance, ensures that \"name_digit_1\" comes before \"name_digit_2\".\n    \"\"\"\n    return [int(text) if text.isdigit() else text.lower() for text in re.split(\"([0-9]+)\", s)]\n\n\n@cache\ndef get_algorand_client_for_network(network: AlgorandNetwork) -> AlgorandClient:\n    from algokit.cli.common.constants import AlgorandNetwork\n\n    match network:\n        case AlgorandNetwork.MAINNET:\n            return AlgorandClient.mainnet()\n        case AlgorandNetwork.TESTNET:\n            return AlgorandClient.testnet()\n        case AlgorandNetwork.LOCALNET:\n            return AlgorandClient.default_localnet()\n        case _:\n            raise ValueError(f\"Unsupported network: {network}\")\n"
  },
  {
    "path": "src/algokit/py.typed",
    "content": ""
  },
  {
    "path": "src/algokit/resources/distribution-method",
    "content": ""
  },
  {
    "path": "tests/__init__.py",
    "content": "def get_combined_verify_output(stdout: str, additional_name: str, additional_output: str) -> str:\n    \"\"\"Simple way to get output combined from two sources so that approval testing still works\"\"\"\n    return f\"\"\"{stdout}----\n{additional_name}:\n----\n{additional_output}\"\"\"\n"
  },
  {
    "path": "tests/compile/__init__.py",
    "content": ""
  },
  {
    "path": "tests/compile/conftest.py",
    "content": "VALID_ALGORAND_PYTHON_CONTRACT_FILE_CONTENT = \"\"\"\nfrom algopy import Contract, Txn, log\n\n\nclass HelloWorldContract(Contract):\n    def approval_program(self) -> bool:\n        name = Txn.application_args(0)\n        log(b\"Hello, \" + name)\n        return True\n\n    def clear_state_program(self) -> bool:\n        return True\n\"\"\"\n\nINVALID_ALGORAND_PYTHON_CONTRACT_FILE_CONTENT = \"\"\"\nfrom algopy import Contract, Txn, log\n\n\nclass HelloWorldContract(Contract):\n    def approval_program(self) -> bool:\n        name = Txn.application_args_invalid(0)\n        log(b\"Hello, \" + name)\n        return True\n\n    def clear_state_program(self) -> bool:\n        return True\n\"\"\"\n\nVALID_ALGORAND_TYPESCRIPT_CONTRACT_FILE_CONTENT = \"\"\"\nimport { Contract } from '@algorandfoundation/algorand-typescript'\n\nexport class HelloWorld extends Contract {\n  public hello(name: string): string {\n    return `${this.getHello()} ${name}`\n  }\n\n  private getHello() {\n    return 'Hello'\n  }\n}\n\"\"\"\n\nINVALID_ALGORAND_TYPESCRIPT_CONTRACT_FILE_CONTENT = \"\"\"\nimport { Contract } from '@algorandfoundation/algorand-typescript'\n\nexport class HelloWorld extends ContractInvalid {\n  public hello(name: string): string {\n    return `${this.getHello()} ${name}`\n  }\n\n  private getHello() {\n    return 'Hello'\n  }\n}\n\"\"\"\n"
  },
  {
    "path": "tests/compile/test_python.py",
    "content": "import logging\nimport os\nimport sys\nfrom pathlib import Path\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom tests.compile.conftest import (\n    INVALID_ALGORAND_PYTHON_CONTRACT_FILE_CONTENT,\n    VALID_ALGORAND_PYTHON_CONTRACT_FILE_CONTENT,\n)\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\nlogger = logging.getLogger(__name__)\n\n\ndef _normalize_path(path: Path) -> str:\n    return str(path.absolute()).replace(\"\\\\\", r\"\\\\\")\n\n\n@pytest.fixture\ndef dummy_contract_path() -> Path:\n    return Path(__file__).parent / \"dummy_contract.py\"\n\n\n@pytest.fixture(autouse=True)\ndef cwd(tmp_path_factory: pytest.TempPathFactory) -> Path:\n    return tmp_path_factory.mktemp(\"cwd\", numbered=True)\n\n\n@pytest.fixture\ndef output_path(cwd: Path) -> Path:\n    return cwd / \"output\"\n\n\ndef test_compile_py_help(mocker: MockerFixture) -> None:\n    proc_mock = ProcMock()\n    proc_mock.set_output([\"poetry\", \"run\", \"puyapy\", \"--version\"], output=[\"puyapy 1.0.0\"])\n    proc_mock.set_output([\"poetry\", \"run\", \"puyapy\", \"-h\"], output=[\"Puyapy help\"])\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n    result = invoke(\"compile python -h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_puyapy_is_not_installed_anywhere(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    proc_mock = ProcMock()\n    proc_mock.should_bad_exit_on([\"poetry\", \"run\", \"puyapy\", \"--version\"], exit_code=1, output=[\"Puyapy not found\"])\n    proc_mock.should_bad_exit_on([\"puyapy\", \"--version\"], exit_code=1, output=[\"Puyapy not found\"])\n\n    proc_mock.set_output([\"pipx\", \"--version\"], [\"1.0.0\"])\n\n    proc_mock.set_output([\"pipx\", \"install\", \"puya\"], [\"Puyapy is installed\"])\n    proc_mock.set_output([\"puyapy\", str(dummy_contract_path)], [\"Done\"])\n\n 
   mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile python {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_specificed_puyapy_version_is_not_installed(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    current_version = \"1.0.0\"\n    target_version = \"1.1.0\"\n\n    proc_mock = ProcMock()\n    proc_mock.set_output([\"poetry\", \"run\", \"puyapy\", \"--version\"], output=[f\"puyapy {current_version}\"])\n    proc_mock.should_bad_exit_on([\"puyapy\", \"--version\"], exit_code=1, output=[\"Puyapy not found\"])\n\n    proc_mock.set_output([\"pipx\", \"--version\"], [\"1.0.0\"])\n    proc_mock.set_output([\"pipx\", \"run\", f\"puya=={target_version}\", str(dummy_contract_path)], [\"Done\"])\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile --version {target_version} py {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_puyapy_is_installed_in_project(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    proc_mock = ProcMock()\n    proc_mock.set_output([\"poetry\", \"run\", \"puyapy\", \"--version\"], output=[\"puyapy 1.0.0\"])\n    proc_mock.set_output([\"poetry\", \"run\", \"puyapy\", str(dummy_contract_path)], [\"Done\"])\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile python {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_puyapy_is_installed_globally(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    proc_mock = ProcMock()\n\n    proc_mock.should_bad_exit_on([\"poetry\", \"run\", \"puyapy\", \"--version\"], exit_code=1, output=[\"Puyapy not found\"])\n\n    proc_mock.set_output([\"puyapy\", \"--version\"], output=[\"puyapy 1.0.0\"])\n    
proc_mock.set_output([\"puyapy\", str(dummy_contract_path)], [\"Done\"])\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile python {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.skipif(sys.version_info < (3, 12), reason=\"PuyaPy requires python3.12 or higher\")\ndef test_valid_contract(cwd: Path, output_path: Path) -> None:\n    contract_path = cwd / \"contract.py\"\n    contract_path.write_text(VALID_ALGORAND_PYTHON_CONTRACT_FILE_CONTENT)\n\n    result = invoke(\n        f\"--no-color compile python {_normalize_path(contract_path)} --out-dir {_normalize_path(output_path)}\"\n    )\n\n    # Only check for the exit code, don't check the results from PuyaPy\n    assert result.exit_code == 0\n\n\n@pytest.mark.skipif(sys.version_info < (3, 12), reason=\"PuyaPy requires python3.12 or higher\")\ndef test_invalid_contract(cwd: Path, output_path: Path) -> None:\n    # Set NO_COLOR to 1 to avoid requirements for colorama on Windows\n    os.environ[\"NO_COLOR\"] = \"1\"\n\n    contract_path = cwd / \"contract.py\"\n    contract_path.write_text(INVALID_ALGORAND_PYTHON_CONTRACT_FILE_CONTENT)\n    result = invoke(f\"compile python {_normalize_path(contract_path)} --out-dir {_normalize_path(output_path)}\")\n\n    # Only check for the exit code and the error message from AlgoKit CLI\n    assert result.exit_code == 1\n    result.output.endswith(\n        \"An error occurred during compile. Ensure supplied files are valid PuyaPy code before retrying.\"\n    )\n"
  },
  {
    "path": "tests/compile/test_python.test_compile_py_help.approved.txt",
    "content": "DEBUG: Running 'poetry run puyapy --version' in '{current_working_directory}'\nDEBUG: poetry: puyapy 1.0.0\nDEBUG: Running 'poetry run puyapy -h' in '{current_working_directory}'\nDEBUG: poetry: Puyapy help\nPuyapy help\n"
  },
  {
    "path": "tests/compile/test_python.test_puyapy_is_installed_globally.approved.txt",
    "content": "DEBUG: Running 'poetry run puyapy --version' in '{current_working_directory}'\nDEBUG: poetry: Puyapy not found\nDEBUG: Running 'puyapy --version' in '{current_working_directory}'\nDEBUG: puyapy: puyapy 1.0.0\nDEBUG: Running 'puyapy {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: puyapy: Done\nDone\n"
  },
  {
    "path": "tests/compile/test_python.test_puyapy_is_installed_in_project.approved.txt",
    "content": "DEBUG: Running 'poetry run puyapy --version' in '{current_working_directory}'\nDEBUG: poetry: puyapy 1.0.0\nDEBUG: Running 'poetry run puyapy {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: poetry: Done\nDone\n"
  },
  {
    "path": "tests/compile/test_python.test_puyapy_is_not_installed_anywhere.approved.txt",
    "content": "DEBUG: Running 'poetry run puyapy --version' in '{current_working_directory}'\nDEBUG: poetry: Puyapy not found\nDEBUG: Running 'puyapy --version' in '{current_working_directory}'\nDEBUG: puyapy: Puyapy not found\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.0.0\nDEBUG: Running 'pipx run --spec=puyapy puyapy {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/compile/test_python.test_specificed_puyapy_version_is_not_installed.approved.txt",
    "content": "DEBUG: Running 'poetry run puyapy --version' in '{current_working_directory}'\nDEBUG: poetry: puyapy 1.0.0\nDEBUG: Running 'puyapy --version' in '{current_working_directory}'\nDEBUG: puyapy: Puyapy not found\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.0.0\nDEBUG: Running 'pipx run --spec=puyapy==1.1.0 puyapy {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/compile/test_typescript.py",
    "content": "import logging\nimport os\nimport subprocess\nfrom pathlib import Path\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.compilers.typescript import PUYATS_NPM_PACKAGE\nfrom tests.compile.conftest import (\n    INVALID_ALGORAND_TYPESCRIPT_CONTRACT_FILE_CONTENT,\n    VALID_ALGORAND_TYPESCRIPT_CONTRACT_FILE_CONTENT,\n)\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\nlogger = logging.getLogger(__name__)\n\n\ndef _normalize_path(path: Path) -> str:\n    return str(path.absolute()).replace(\"\\\\\", r\"\\\\\")\n\n\ndef _get_npm_command() -> str:\n    return \"npm\" if os.name != \"nt\" else \"npm.cmd\"\n\n\ndef _get_npx_command() -> str:\n    return \"npx\" if os.name != \"nt\" else \"npx.cmd\"\n\n\ndef _command_name_scrubber(output: str) -> str:\n    \"\"\"Scrubber to normalize command names between Windows and non-Windows platforms.\"\"\"\n    return (\n        output.replace(\"npm.cmd\", \"npm\")\n        .replace(\"npx.cmd\", \"npx\")\n        .replace(\"DEBUG: npm.cmd:\", \"DEBUG: npm:\")\n        .replace(\"DEBUG: npx.cmd:\", \"DEBUG: npx:\")\n        .replace(\"Running 'npm.cmd\", \"Running 'npm\")\n        .replace(\"Running 'npx.cmd\", \"Running 'npx\")\n    )\n\n\n@pytest.fixture\ndef dummy_contract_path() -> Path:\n    return Path(__file__).parent / \"dummy_contract.py\"\n\n\n@pytest.fixture(autouse=True)\ndef cwd(tmp_path_factory: pytest.TempPathFactory) -> Path:\n    return tmp_path_factory.mktemp(\"cwd\", numbered=True)\n\n\n@pytest.fixture\ndef output_path(cwd: Path) -> Path:\n    return cwd / \"output\"\n\n\n@pytest.fixture\ndef typescript_test_dir(tmp_path_factory: pytest.TempPathFactory) -> Path:\n    # Create a test directory\n    test_dir = tmp_path_factory.mktemp(\"ts_test\", numbered=True)\n\n    # Create package.json with required dependencies\n    # TODO: update to use latest versions once they are released out of 
beta\n    package_json_content = \"\"\"{\n        \"name\": \"algokit-test\",\n        \"version\": \"1.0.0\",\n        \"dependencies\": {\n            \"@algorandfoundation/puya-ts\": \"^1.0.1\",\n            \"@algorandfoundation/algorand-typescript\": \"^1.0.1\"\n        }\n    }\"\"\"\n\n    package_json_path = test_dir / \"package.json\"\n    package_json_path.write_text(package_json_content)\n\n    # Execute npm install in the directory\n    result = subprocess.run(\n        [_get_npm_command(), \"install\", \"--ignore-scripts\"], cwd=test_dir, capture_output=True, text=True, check=False\n    )\n    if result.returncode != 0:\n        raise RuntimeError(\n            f\"npm install failed with exit code {result.returncode}\\nstdout: {result.stdout}\\nstderr: {result.stderr}\"\n        )\n\n    return test_dir\n\n\ndef test_compile_py_help(mocker: MockerFixture) -> None:\n    proc_mock = ProcMock()\n\n    # Mock npm ls for project and global scopes with no PuyaTs found\n    proc_mock.set_output([_get_npm_command(), \"ls\"], [\"STDOUT\", \"STDERR\"])\n    proc_mock.set_output([_get_npm_command(), \"--global\", \"ls\"], [\"STDOUT\", \"STDERR\"])\n\n    # Mock the help command\n    proc_mock.set_output([_get_npx_command(), \"-y\", PUYATS_NPM_PACKAGE, \"-h\"], output=[\"PuyaTs help\"])\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n    result = invoke(\"compile typescript -h\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=_command_name_scrubber)\n\n\ndef test_puyats_is_not_installed_anywhere(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    proc_mock = ProcMock()\n\n    # Mock npm ls for project and global scopes with no PuyaTs found\n    proc_mock.set_output([_get_npm_command(), \"ls\"], [\"STDOUT\", \"STDERR\"])\n    proc_mock.set_output([_get_npm_command(), \"--global\", \"ls\"], [\"STDOUT\", \"STDERR\"])\n\n    # Mock successful npx execution\n    proc_mock.set_output(\n        
[_get_npx_command(), \"-y\", PUYATS_NPM_PACKAGE, str(dummy_contract_path)],\n        [\"Compilation successful\"],\n    )\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile typescript {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=_command_name_scrubber)\n\n\ndef test_specificed_puyats_version_is_not_installed(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    current_version = \"1.0.0\"\n    target_version = \"1.1.0\"\n\n    proc_mock = ProcMock()\n\n    # Mock npm ls for project with a different version installed\n    proc_mock.set_output([_get_npm_command(), \"ls\"], [f\"└── {PUYATS_NPM_PACKAGE}@{current_version}\"])\n\n    # Mock npm ls for global with a different version installed\n    proc_mock.set_output([_get_npm_command(), \"--global\", \"ls\"], [f\"└── {PUYATS_NPM_PACKAGE}@{current_version}\"])\n\n    # Mock successful npx execution with version-specific package\n    proc_mock.set_output(\n        [\n            _get_npx_command(),\n            \"-y\",\n            f\"{PUYATS_NPM_PACKAGE}@{target_version}\",\n            str(dummy_contract_path),\n        ],\n        [\"Compilation successful\"],\n    )\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile --version {target_version} typescript {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=_command_name_scrubber)\n\n\ndef test_puyats_is_installed_in_project(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    version = \"1.0.0\"\n    proc_mock = ProcMock()\n\n    # Mock npm ls for project with PuyaTs installed\n    proc_mock.set_output([_get_npm_command(), \"ls\"], [f\"└── {PUYATS_NPM_PACKAGE}@{version}\"])\n\n    # Ensure version check passes for project version\n    proc_mock.set_output([_get_npx_command(), PUYATS_NPM_PACKAGE, 
\"--version\"], [f\"puya-ts {version}\"])\n\n    # Mock successful compile with project installation\n    proc_mock.set_output(\n        [_get_npx_command(), PUYATS_NPM_PACKAGE, str(dummy_contract_path)],\n        [\"Compilation successful\"],\n    )\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile typescript {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=_command_name_scrubber)\n\n\ndef test_puyats_is_installed_globally(dummy_contract_path: Path, mocker: MockerFixture) -> None:\n    version = \"1.0.0\"\n    proc_mock = ProcMock()\n\n    # Mock npm ls for project with no installation\n    proc_mock.set_output([_get_npm_command(), \"ls\"], [\"STDOUT\", \"STDERR\"])\n\n    # Mock npm ls for global with PuyaTs installed\n    proc_mock.set_output([_get_npm_command(), \"--global\", \"ls\"], [f\"└── {PUYATS_NPM_PACKAGE}@{version}\"])\n\n    # Ensure version check passes for global installation\n    proc_mock.set_output([_get_npx_command(), PUYATS_NPM_PACKAGE, \"--version\"], [f\"puya-ts {version}\"])\n\n    # Mock successful compile with global installation\n    proc_mock.set_output(\n        [_get_npx_command(), PUYATS_NPM_PACKAGE, str(dummy_contract_path)],\n        [\"Compilation successful\"],\n    )\n\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n\n    result = invoke(f\"compile typescript {_normalize_path(dummy_contract_path)}\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=_command_name_scrubber)\n\n\n# Test with locally installed PuyaTs compiler\ndef test_valid_contract(typescript_test_dir: Path) -> None:\n    # Create a contract file\n    contract_path = typescript_test_dir / \"contract.algo.ts\"\n    contract_path.write_text(VALID_ALGORAND_TYPESCRIPT_CONTRACT_FILE_CONTENT)\n\n    # Run algokit compile with proper Node.js stack size setting\n    result = invoke(\n        
\"compile typescript contract.algo.ts --out-dir output\",\n        cwd=typescript_test_dir,\n    )\n\n    # Check the results\n    assert result.exit_code == 0\n    # Verify output files exist\n    assert (typescript_test_dir / \"output\" / \"HelloWorld.approval.puya.map\").exists()\n    assert (typescript_test_dir / \"output\" / \"HelloWorld.approval.teal\").exists()\n    assert (typescript_test_dir / \"output\" / \"HelloWorld.arc32.json\").exists()\n    assert (typescript_test_dir / \"output\" / \"HelloWorld.arc56.json\").exists()\n    assert (typescript_test_dir / \"output\" / \"HelloWorld.clear.puya.map\").exists()\n    assert (typescript_test_dir / \"output\" / \"HelloWorld.clear.teal\").exists()\n\n\n# Test with locally installed PuyaTs compiler\ndef test_invalid_contract(typescript_test_dir: Path) -> None:\n    # Create a contract file\n    contract_path = typescript_test_dir / \"contract.algo.ts\"\n    contract_path.write_text(INVALID_ALGORAND_TYPESCRIPT_CONTRACT_FILE_CONTENT)\n\n    # Run algokit compile with proper Node.js stack size setting\n    result = invoke(\n        \"compile typescript contract.algo.ts --out-dir output\",\n        cwd=typescript_test_dir,\n    )\n\n    # Check the results\n    assert result.exit_code == 1\n    # Verify output files exist\n    assert \"Compilation halted due to parse errors\" in result.output\n"
  },
  {
    "path": "tests/compile/test_typescript.test_compile_py_help.approved.txt",
    "content": "DEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Running 'npx -y @algorandfoundation/puya-ts -h' in '{current_working_directory}'\nDEBUG: npx: PuyaTs help\nPuyaTs help\n"
  },
  {
    "path": "tests/compile/test_typescript.test_puyats_is_installed_globally.approved.txt",
    "content": "DEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: └── @algorandfoundation/puya-ts@1.0.0\nDEBUG: Running 'npx @algorandfoundation/puya-ts --version' in '{current_working_directory}'\nDEBUG: npx: puya-ts 1.0.0\nDEBUG: Running 'npx @algorandfoundation/puya-ts {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: npx: Compilation successful\nCompilation successful\n"
  },
  {
    "path": "tests/compile/test_typescript.test_puyats_is_installed_in_project.approved.txt",
    "content": "DEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: └── @algorandfoundation/puya-ts@1.0.0\nDEBUG: Running 'npx @algorandfoundation/puya-ts --version' in '{current_working_directory}'\nDEBUG: npx: puya-ts 1.0.0\nDEBUG: Running 'npx @algorandfoundation/puya-ts {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: npx: Compilation successful\nCompilation successful\n"
  },
  {
    "path": "tests/compile/test_typescript.test_puyats_is_not_installed_anywhere.approved.txt",
    "content": "DEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Running 'npx -y @algorandfoundation/puya-ts {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: npx: Compilation successful\nCompilation successful\n"
  },
  {
    "path": "tests/compile/test_typescript.test_specificed_puyats_version_is_not_installed.approved.txt",
    "content": "DEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: └── @algorandfoundation/puya-ts@1.0.0\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: └── @algorandfoundation/puya-ts@1.0.0\nDEBUG: Running 'npx -y @algorandfoundation/puya-ts@1.1.0 {current_working_directory}/tests/compile/dummy_contract.py' in '{current_working_directory}'\nDEBUG: npx: Compilation successful\nCompilation successful\n"
  },
  {
    "path": "tests/completions/__init__.py",
    "content": ""
  },
  {
    "path": "tests/completions/test_completions.py",
    "content": "from pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nfrom approvaltests.namer import NamerFactory\nfrom pytest_mock import MockerFixture\n\nfrom algokit.cli.completions import SUPPORTED_SHELLS\nfrom tests import get_combined_verify_output\nfrom tests.utils.approvals import normalize_path, verify\nfrom tests.utils.click_invoker import ClickInvokeResult, invoke\n\nORIGINAL_PROFILE_CONTENTS = \"# ORIGINAL END OF FILE\\n\"\n\n\ndef test_completions_help() -> None:\n    # Act\n    result = invoke(\"completions\")\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.parametrize(\"command\", [\"install\", \"uninstall\"])\ndef test_completions_subcommands_help(command: str) -> None:\n    # Act\n    result = invoke(f\"completions {command} --help\")\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output, options=NamerFactory.with_parameters(command))\n\n\ndef _mock_bash_version(mocker: MockerFixture, version: str) -> None:\n    mocked_run = mocker.patch(\"subprocess.run\")\n    mocked_output = mocked_run.return_value\n    mocked_output.configure_mock(stdout=version.encode())\n\n\n@pytest.fixture(autouse=True)\ndef _mock_default_bash_version(mocker: MockerFixture) -> None:\n    _mock_bash_version(mocker, \"5.2.0\")\n\n\nclass CompletionsTestContext:\n    def __init__(self, expected_shell: str):\n        self.home = TemporaryDirectory()\n        self.home_path = Path(self.home.name).resolve()\n        self.profile_path = self.home_path / f\".{expected_shell}rc\"\n        self.config_path = self.home_path / \".config\"\n        self.source_path = self.config_path / \"algokit\" / f\".algokit-completions.{expected_shell}\"\n        self.profile_path.write_text(ORIGINAL_PROFILE_CONTENTS)\n        self.env = {\n            # posix\n            \"HOME\": str(self.home_path),\n            \"XDG_CONFIG_HOME\": str(self.config_path),\n            # windows\n            
\"USERPROFILE\": str(self.home_path),\n            \"APPDATA\": str(self.config_path),\n        }\n\n    def run_command(self, command: str, shell: str | None = None) -> ClickInvokeResult:\n        command = f\"completions {command}\"\n        if shell:\n            command += f\" --shell {shell}\"\n\n        result = invoke(command, env=self.env)\n        result.output = normalize_path(result.output, str(self.home_path), \"{home}\").replace(\"\\\\\", \"/\")\n        return result\n\n    @property\n    def profile_contents(self) -> str:\n        return self.profile_path.read_text().replace(\"\\\\\", \"/\")\n\n\n@pytest.mark.parametrize(\"shell\", SUPPORTED_SHELLS)\ndef test_completions_installs_correctly_with_specified_shell(shell: str) -> None:\n    # Arrange\n    context = CompletionsTestContext(shell)\n\n    # Act\n    result = context.run_command(\"install\", shell)\n\n    # Assert\n    assert result.exit_code == 0\n    # content of this file is defined by click, so only assert it exists not its content\n    assert context.source_path.exists()\n    assert not context.profile_path.with_suffix(\".algokit~\").exists()\n    profile = context.profile_contents\n    verify(get_combined_verify_output(result.output, \"profile\", profile), options=NamerFactory.with_parameters(shell))\n\n\ndef test_completions_installs_correctly_with_detected_shell(mocker: MockerFixture) -> None:\n    # Arrange\n    mocker.patch(\"shellingham.detect_shell\").return_value = (\"bash\", \"/bin/bash\")\n    context = CompletionsTestContext(\"bash\")\n\n    # Act\n    result = context.run_command(\"install\")\n\n    # Assert\n    assert result.exit_code == 0\n    # content of this file is defined by click, so only assert it exists not its content\n    assert context.source_path.exists()\n    profile = context.profile_contents\n    verify(get_combined_verify_output(result.output, \"profile\", profile))\n\n\n@pytest.mark.parametrize(\"shell\", SUPPORTED_SHELLS)\ndef 
test_completions_uninstalls_correctly(shell: str) -> None:\n    # Arrange\n    context = CompletionsTestContext(shell)\n\n    context.run_command(\"install\", shell)\n\n    # Act\n    result = context.run_command(\"uninstall\", shell)\n\n    # Assert\n    assert result.exit_code == 0\n    assert not context.source_path.exists()\n    profile = context.profile_contents\n    assert not context.profile_path.with_suffix(\".algokit~\").exists()\n    assert profile == ORIGINAL_PROFILE_CONTENTS\n    verify(result.output, options=NamerFactory.with_parameters(shell))\n\n\n@pytest.mark.parametrize(\"command\", [\"install\", \"uninstall\"])\ndef test_completions_subcommands_with_unknown_shell_fails_gracefully(command: str, mocker: MockerFixture) -> None:\n    # Arrange\n    mocker.patch(\"shellingham.detect_shell\").return_value = None\n\n    # Act\n    result = invoke(f\"completions {command}\")\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output, options=NamerFactory.with_parameters(command))\n\n\n@pytest.mark.parametrize(\"command\", [\"install\", \"uninstall\"])\ndef test_completions_subcommands_with_unsupported_shell_fails_gracefully(command: str, mocker: MockerFixture) -> None:\n    # Arrange\n    mocker.patch(\"shellingham.detect_shell\").return_value = (\"pwsh\", \"/bin/pwsh\")\n\n    # Act\n    result = invoke(f\"completions {command}\")\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output, options=NamerFactory.with_parameters(command))\n\n\ndef test_completions_install_is_idempotent() -> None:\n    # Arrange\n    context = CompletionsTestContext(\"bash\")\n    context.run_command(\"install\", \"bash\")\n\n    # Act\n    result = context.run_command(\"install\", \"bash\")\n\n    # Assert\n    assert result.exit_code == 0\n    # content of this file is defined by click, so only assert it exists not its content\n    assert context.source_path.exists()\n    profile = context.profile_contents\n    
verify(get_combined_verify_output(result.output, \"profile\", profile))\n\n\ndef test_completions_uninstall_is_idempotent() -> None:\n    # Arrange\n    context = CompletionsTestContext(\"bash\")\n\n    context.run_command(\"install\", \"bash\")\n    context.run_command(\"uninstall\", \"bash\")\n\n    # Act\n    result = context.run_command(\"uninstall\", \"bash\")\n\n    # Assert\n    assert result.exit_code == 0\n    assert not context.source_path.exists()\n    profile = context.profile_contents\n    assert profile == ORIGINAL_PROFILE_CONTENTS\n    verify(result.output)\n\n\ndef test_completions_install_handles_no_profile() -> None:\n    # Arrange\n    context = CompletionsTestContext(\"bash\")\n    context.profile_path.unlink()\n\n    # Act\n    result = context.run_command(\"install\", \"bash\")\n\n    # Assert\n    assert result.exit_code == 0\n    assert context.source_path.exists()\n    profile = context.profile_contents\n    verify(get_combined_verify_output(result.output, \"profile\", profile))\n\n\ndef test_completions_uninstall_handles_no_profile() -> None:\n    # Arrange\n    context = CompletionsTestContext(\"bash\")\n    context.run_command(\"install\", \"brew\")\n    context.profile_path.unlink()\n\n    # Act\n    result = context.run_command(\"uninstall\", \"bash\")\n\n    # Assert\n    assert result.exit_code == 0\n    assert not context.source_path.exists()\n    assert not context.profile_path.exists()\n    verify(result.output)\n\n\ndef test_completions_install_handles_config_outside_home() -> None:\n    # Arrange\n    context = CompletionsTestContext(\"bash\")\n    # create a different directory outside home directory for config\n    config = TemporaryDirectory()\n    context.config_path = Path(config.name).resolve()\n    context.source_path = context.config_path / \"algokit\" / \".algokit-completions.bash\"\n    context.env[\"XDG_CONFIG_HOME\"] = str(context.config_path)\n    context.env[\"APPDATA\"] = str(context.config_path)\n\n    # Act\n    
result = context.run_command(\"install\", \"bash\")\n\n    # Assert\n    assert result.exit_code == 0\n    # content of this file is defined by click, so only assert it exists not its content\n    assert context.source_path.exists()\n    output = normalize_path(result.output, str(context.config_path), \"{config}\")\n    profile = normalize_path(context.profile_contents, str(context.config_path), \"{config}\")\n    verify(get_combined_verify_output(output, \"profile\", profile))\n\n\ndef test_completions_install_handles_unsupported_bash_gracefully(mocker: MockerFixture) -> None:\n    # Arrange\n    _mock_bash_version(mocker, \"3.2.0\")\n    context = CompletionsTestContext(\"bash\")\n\n    # Act\n    result = context.run_command(\"install\", \"bash\")\n\n    # Assert\n    # NOTE: shellingham no longer throws an error when shell is not supported.\n    # However, it still prints the error message to stderr.\n    # Then it proceeds to try to install the completion script regardless.\n    assert \"Shell completion is not supported for Bash\" in result.output\n    assert context.source_path.exists()\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_help.approved.txt",
    "content": "Usage: algokit completions [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  install    Install shell completions\n  uninstall  Uninstall shell completions\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_install_handles_config_outside_home.approved.txt",
    "content": "DEBUG: Writing source script {config}/algokit/.algokit-completions.bash\nDEBUG: Appending completion source to {home}/.bashrc\nAlgoKit completions installed for bash 🎉\nRestart shell or run `. ~/.bashrc` to enable completions\n----\nprofile:\n----\n# ORIGINAL END OF FILE\n. {config}/algokit/.algokit-completions.bash\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_install_handles_no_profile.approved.txt",
    "content": "DEBUG: Writing source script {home}/.config/algokit/.algokit-completions.bash\nDEBUG: Appending completion source to {home}/.bashrc\nAlgoKit completions installed for bash 🎉\nRestart shell or run `. ~/.bashrc` to enable completions\n----\nprofile:\n----\n. ~/.config/algokit/.algokit-completions.bash\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_install_handles_unsupported_bash_gracefully.approved.txt",
    "content": "DEBUG: Failed to generate completion source. Shell completion is not supported for Bash versions older than 4.4.\nERROR: Shell completion is not supported for Bash versions older than 4.4.\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_install_is_idempotent.approved.txt",
    "content": "DEBUG: Writing source script {home}/.config/algokit/.algokit-completions.bash\n{home}/.bashrc already contains completion source 🤔\nRestart shell or run `. ~/.bashrc` to enable completions\n----\nprofile:\n----\n# ORIGINAL END OF FILE\n. ~/.config/algokit/.algokit-completions.bash\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_installs_correctly_with_detected_shell.approved.txt",
    "content": "DEBUG: Writing source script {home}/.config/algokit/.algokit-completions.bash\nDEBUG: Appending completion source to {home}/.bashrc\nAlgoKit completions installed for bash 🎉\nRestart shell or run `. ~/.bashrc` to enable completions\n----\nprofile:\n----\n# ORIGINAL END OF FILE\n. ~/.config/algokit/.algokit-completions.bash\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_installs_correctly_with_specified_shell.bash.approved.txt",
    "content": "DEBUG: Writing source script {home}/.config/algokit/.algokit-completions.bash\nDEBUG: Appending completion source to {home}/.bashrc\nAlgoKit completions installed for bash 🎉\nRestart shell or run `. ~/.bashrc` to enable completions\n----\nprofile:\n----\n# ORIGINAL END OF FILE\n. ~/.config/algokit/.algokit-completions.bash\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_installs_correctly_with_specified_shell.zsh.approved.txt",
    "content": "DEBUG: Writing source script {home}/.config/algokit/.algokit-completions.zsh\nDEBUG: Appending completion source to {home}/.zshrc\nAlgoKit completions installed for zsh 🎉\nRestart shell or run `. ~/.zshrc` to enable completions\n----\nprofile:\n----\n# ORIGINAL END OF FILE\n. ~/.config/algokit/.algokit-completions.zsh\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_subcommands_help.install.approved.txt",
    "content": "Usage: algokit completions install [OPTIONS]\n\n  Install shell completions, this command will attempt to update the interactive\n  profile script for the current shell to support algokit completions. To\n  specify a specific shell use --shell.\n\nOptions:\n  --shell [bash|zsh]  Specify shell to install algokit completions for.\n  -h, --help          Show this message and exit.\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_subcommands_help.uninstall.approved.txt",
    "content": "Usage: algokit completions uninstall [OPTIONS]\n\n  Uninstall shell completions, this command will attempt to update the\n  interactive profile script for the current shell to remove any algokit\n  completions that have been added. To specify a specific shell use --shell.\n\nOptions:\n  --shell [bash|zsh]  Specify shell to install algokit completions for.\n  -h, --help          Show this message and exit.\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_subcommands_with_unknown_shell_fails_gracefully.install.approved.txt",
    "content": "DEBUG: Could not determine current shell\nWARNING: Could not determine current shell. Try specifying a supported shell with --shell\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_subcommands_with_unknown_shell_fails_gracefully.uninstall.approved.txt",
    "content": "DEBUG: Could not determine current shell\nWARNING: Could not determine current shell. Try specifying a supported shell with --shell\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_subcommands_with_unsupported_shell_fails_gracefully.install.approved.txt",
    "content": "WARNING: pwsh is not a supported shell. 😢\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_subcommands_with_unsupported_shell_fails_gracefully.uninstall.approved.txt",
    "content": "WARNING: pwsh is not a supported shell. 😢\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_uninstall_handles_no_profile.approved.txt",
    "content": "DEBUG: Removing source script {home}/.config/algokit/.algokit-completions.bash\nDEBUG: {home}/.bashrc not found\nAlgoKit completions not installed for bash 🤔\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_uninstall_is_idempotent.approved.txt",
    "content": "DEBUG: Removing source script {home}/.config/algokit/.algokit-completions.bash\nAlgoKit completions not installed for bash 🤔\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_uninstalls_correctly.bash.approved.txt",
    "content": "DEBUG: Removing source script {home}/.config/algokit/.algokit-completions.bash\nDEBUG: Completion source found in {home}/.bashrc\nDEBUG: Removing completion source found in {home}/.bashrc\nAlgoKit completions uninstalled for bash 🎉\n"
  },
  {
    "path": "tests/completions/test_completions.test_completions_uninstalls_correctly.zsh.approved.txt",
    "content": "DEBUG: Removing source script {home}/.config/algokit/.algokit-completions.zsh\nDEBUG: Completion source found in {home}/.zshrc\nDEBUG: Removing completion source found in {home}/.zshrc\nAlgoKit completions uninstalled for zsh 🎉\n"
  },
  {
    "path": "tests/config/__init__.py",
    "content": ""
  },
  {
    "path": "tests/config/test_package_managers.py",
    "content": "\"\"\"\nEssential tests for package manager configuration commands.\nFocuses on critical user-facing functionality only.\n\"\"\"\n\nimport pytest\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n# Exit codes\nINVALID_ARGUMENT_EXIT_CODE = 2\n\n\ndef test_js_package_manager_help() -> None:\n    \"\"\"Test help output for js-package-manager command.\"\"\"\n    result = invoke(\"config js-package-manager --help\")\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_py_package_manager_help() -> None:\n    \"\"\"Test help output for py-package-manager command.\"\"\"\n    result = invoke(\"config py-package-manager --help\")\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_js_package_manager_invalid_argument() -> None:\n    \"\"\"Test error handling for invalid js package manager.\"\"\"\n    result = invoke(\"config js-package-manager invalid\")\n    assert result.exit_code == INVALID_ARGUMENT_EXIT_CODE\n    verify(result.output)\n\n\ndef test_py_package_manager_invalid_argument() -> None:\n    \"\"\"Test error handling for invalid py package manager.\"\"\"\n    result = invoke(\"config py-package-manager invalid\")\n    assert result.exit_code == INVALID_ARGUMENT_EXIT_CODE\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_js_package_manager_set_npm() -> None:\n    \"\"\"Test setting npm as js package manager.\"\"\"\n    result = invoke(\"config js-package-manager npm\")\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_js_package_manager_set_pnpm() -> None:\n    \"\"\"Test setting pnpm as js package manager.\"\"\"\n    result = invoke(\"config js-package-manager pnpm\")\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_py_package_manager_set_poetry() -> None:\n    \"\"\"Test setting poetry as 
py package manager.\"\"\"\n    result = invoke(\"config py-package-manager poetry\")\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_py_package_manager_set_uv() -> None:\n    \"\"\"Test setting uv as py package manager.\"\"\"\n    result = invoke(\"config py-package-manager uv\")\n    assert result.exit_code == 0\n    verify(result.output)\n"
  },
  {
    "path": "tests/config/test_package_managers.test_js_package_manager_help.approved.txt",
    "content": "Usage: algokit config js-package-manager [OPTIONS] [[npm|pnpm]]\n\n  Set the default JavaScript package manager for use by AlgoKit CLI.\n\nOptions:\n  -h, --help  Show this message and exit.\n"
  },
  {
    "path": "tests/config/test_package_managers.test_js_package_manager_invalid_argument.approved.txt",
    "content": "Usage: algokit config js-package-manager [OPTIONS] [[npm|pnpm]]\nTry 'algokit config js-package-manager -h' for help.\n\nError: Invalid value for '[[npm|pnpm]]': 'invalid' is not one of <JSPackageManager.NPM: 'npm'>, <JSPackageManager.PNPM: 'pnpm'>.\n"
  },
  {
    "path": "tests/config/test_package_managers.test_js_package_manager_set_npm.approved.txt",
    "content": "JavaScript package manager set to `npm`\n"
  },
  {
    "path": "tests/config/test_package_managers.test_js_package_manager_set_pnpm.approved.txt",
    "content": "JavaScript package manager set to `pnpm`\n"
  },
  {
    "path": "tests/config/test_package_managers.test_py_package_manager_help.approved.txt",
    "content": "Usage: algokit config py-package-manager [OPTIONS] [[poetry|uv]]\n\n  Set the default Python package manager for use by AlgoKit CLI.\n\nOptions:\n  -h, --help  Show this message and exit.\n"
  },
  {
    "path": "tests/config/test_package_managers.test_py_package_manager_invalid_argument.approved.txt",
    "content": "Usage: algokit config py-package-manager [OPTIONS] [[poetry|uv]]\nTry 'algokit config py-package-manager -h' for help.\n\nError: Invalid value for '[[poetry|uv]]': 'invalid' is not one of <PyPackageManager.POETRY: 'poetry'>, <PyPackageManager.UV: 'uv'>.\n"
  },
  {
    "path": "tests/config/test_package_managers.test_py_package_manager_set_poetry.approved.txt",
    "content": "Python package manager set to `poetry`\n"
  },
  {
    "path": "tests/config/test_package_managers.test_py_package_manager_set_uv.approved.txt",
    "content": "Python package manager set to `uv`\n"
  },
  {
    "path": "tests/conftest.py",
    "content": "import functools\nimport json\nimport logging\nimport os\nimport subprocess\nimport typing\nfrom collections.abc import Callable, Sequence  # noqa: RUF100, TC003\nfrom pathlib import Path\n\nimport pytest\nimport questionary\nfrom approvaltests import Reporter, reporters, set_default_reporter\nfrom approvaltests.reporters.generic_diff_reporter_config import create_config\nfrom approvaltests.reporters.generic_diff_reporter_factory import GenericDiffReporter\nfrom prompt_toolkit.application import create_app_session\nfrom prompt_toolkit.input import PipeInput, create_pipe_input\nfrom prompt_toolkit.output import DummyOutput\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core import questionary_extensions\nfrom algokit.core.project import get_project_configs, get_project_dir_names_from_workspace\nfrom tests.utils.app_dir_mock import AppDirs, tmp_app_dir\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.fixture\ndef proc_mock(mocker: MockerFixture) -> ProcMock:\n    proc_mock = ProcMock()\n    # add a default for docker compose version\n    proc_mock.set_output([\"docker\", \"compose\", \"version\", \"--format\", \"json\"], [json.dumps({\"version\": \"v2.5.0\"})])\n    mocker.patch(\"algokit.core.proc.Popen\").side_effect = proc_mock.popen\n    return proc_mock\n\n\ndef _do_platform_mock(platform_system: str, monkeypatch: pytest.MonkeyPatch) -> None:\n    import platform\n\n    monkeypatch.setattr(platform, \"system\", lambda: platform_system)\n    monkeypatch.setattr(platform, \"platform\", lambda: f\"{platform_system}-other-system-info\")\n\n\n@pytest.fixture(\n    params=[\n        pytest.param(\"Windows\", id=\"windows\"),\n        pytest.param(\"Linux\", id=\"linux\"),\n        pytest.param(\"Darwin\", id=\"macOS\"),\n    ]\n)\ndef mock_platform_system(request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch) -> str:\n    platform_system: str = request.param\n    _do_platform_mock(platform_system=platform_system, 
monkeypatch=monkeypatch)\n    return platform_system\n\n\n@pytest.fixture(autouse=True)\ndef _mock_platform_system_marker(request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch) -> None:\n    marker = request.node.get_closest_marker(\"mock_platform_system\")\n    if marker is not None:\n        _do_platform_mock(platform_system=marker.args[0], monkeypatch=monkeypatch)\n\n\n@pytest.fixture\ndef app_dir_mock(mocker: MockerFixture, tmp_path: Path) -> AppDirs:\n    return tmp_app_dir(mocker, tmp_path)\n\n\n@pytest.fixture\ndef mock_questionary_input() -> typing.Iterator[PipeInput]:\n    with create_pipe_input() as pipe_input, create_app_session(input=pipe_input, output=DummyOutput()):\n        yield pipe_input\n\n\n@pytest.fixture(autouse=True)\ndef _supress_copier_dependencies_debug_output() -> None:\n    logging.getLogger(\"plumbum.local\").setLevel(\"INFO\")\n    logging.getLogger(\"asyncio\").setLevel(\"INFO\")\n\n\nParams = typing.ParamSpec(\"Params\")\nResult = typing.TypeVar(\"Result\")\n\n\ndef intercept(\n    f: typing.Callable[Params, Result], interceptor: typing.Callable[Params, None]\n) -> typing.Callable[Params, Result]:\n    @functools.wraps(f)\n    def wrapped(*args: Params.args, **kwargs: Params.kwargs) -> Result:\n        interceptor(*args, **kwargs)\n        return f(*args, **kwargs)\n\n    return wrapped\n\n\n@pytest.fixture(autouse=True)\ndef _patch_questionary_prompts(monkeypatch: pytest.MonkeyPatch) -> None:\n    ValidatorsType = Sequence[type[questionary.Validator] | questionary.Validator | Callable[[str], bool]]  # noqa: N806\n\n    def log_prompt_text(\n        message: str,\n        *,\n        validators: ValidatorsType | None = None,  # noqa: ARG001\n        validate_while_typing: bool = False,  # noqa: ARG001\n    ) -> None:\n        print(f\"? {message}\")  # noqa: T201\n\n    def log_prompt_select(\n        message: str,\n        *choices: str | questionary.Choice,\n    ) -> None:\n        print(f\"? 
{message}\")  # noqa: T201\n        for choice in choices:\n            if isinstance(choice, questionary.Choice):\n                if isinstance(choice.title, str):\n                    print(choice.title)  # noqa: T201\n                elif isinstance(choice.title, list):\n                    print(\"\".join([token[1] for token in choice.title]))  # noqa: T201\n            else:\n                print(choice)  # noqa: T201\n\n    def log_prompt_confirm(message: str, *, default: bool) -> None:\n        if default:\n            default_text = \"(Y/n)\"\n        else:\n            default_text = \"(y/N)\"\n        print(f\"? {message} {default_text}\")  # noqa: T201\n\n    monkeypatch.setattr(\n        questionary_extensions,\n        \"prompt_text\",\n        intercept(questionary_extensions.prompt_text, log_prompt_text),\n    )\n    monkeypatch.setattr(\n        questionary_extensions,\n        \"prompt_select\",\n        intercept(questionary_extensions.prompt_select, log_prompt_select),\n    )\n    monkeypatch.setattr(\n        questionary_extensions,\n        \"prompt_confirm\",\n        intercept(questionary_extensions.prompt_confirm, log_prompt_confirm),\n    )\n\n\nif os.getenv(\"CI\"):\n    set_default_reporter(reporters.PythonNativeReporter())\nelse:\n    default_reporters: list[Reporter] = (\n        [\n            GenericDiffReporter(\n                create_config(\n                    [\n                        os.getenv(\"APPROVAL_REPORTER\"),\n                        os.getenv(\"APPROVAL_REPORTER_PATH\"),\n                        os.getenv(\"APPROVAL_REPORTER_ARGS\", \"\").split(),\n                    ]\n                )\n            )\n        ]\n        if os.getenv(\"APPROVAL_REPORTER\")\n        else []\n    )\n    default_reporters += [\n        # reporters.ReporterThatAutomaticallyApproves(),  # noqa: ERA001\n        # # uncomment to auto approve all received files, do not commit to VCS!\n        GenericDiffReporter(create_config([\"kdiff3\", 
\"/usr/bin/kdiff3\"])),\n        GenericDiffReporter(create_config([\"DiffMerge\", \"/Applications/DiffMerge.app/Contents/MacOS/DiffMerge\"])),\n        GenericDiffReporter(create_config([\"TortoiseGit\", \"{ProgramFiles}\\\\TortoiseGit\\\\bin\\\\TortoiseGitMerge.exe\"])),\n        GenericDiffReporter(create_config([\"VSCodeInsiders\", \"code-insiders\", [\"-d\"]])),\n        reporters.ReportWithBeyondCompare(),\n        reporters.ReportWithWinMerge(),\n        reporters.ReportWithVSCode(),\n        reporters.PythonNativeReporter(),\n    ]\n    set_default_reporter(reporters.FirstWorkingReporter(*default_reporters))\n\n\n@pytest.fixture\ndef mock_keyring(mocker: MockerFixture) -> typing.Generator[dict[str, str | None], None, None]:\n    credentials: dict[str, str | None] = {}\n\n    def _get_password(service_name: str, username: str) -> str | None:  # noqa: ARG001\n        return credentials[username]\n\n    def _set_password(service_name: str, username: str, password: str) -> None:  # noqa: ARG001\n        credentials[username] = password\n\n    def _delete_password(service_name: str, username: str) -> None:  # noqa: ARG001\n        del credentials[username]\n\n    mocker.patch(\"keyring.get_password\", side_effect=_get_password)\n    mocker.patch(\"keyring.set_password\", side_effect=_set_password)\n    mocker.patch(\"keyring.delete_password\", side_effect=_delete_password)\n\n    yield credentials\n\n    # Teardown step: reset the credentials\n    for key in credentials:\n        credentials[key] = None\n\n\n@pytest.fixture\ndef dummy_algokit_template_with_python_task(tmp_path_factory: pytest.TempPathFactory) -> dict[str, Path]:\n    \"\"\"\n    Used in init approval tests and binary portability tests\n    \"\"\"\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    dummy_template_path = cwd / \"dummy_template\"\n    dummy_template_path.mkdir()\n    (dummy_template_path / \"copier.yaml\").write_text(\n        \"\"\"\n        _tasks:\n            - \"echo '==== 1/1 
- Emulate fullstack template python task ===='\"\n            - '{{ python_path }} -c ''print(\"hello world\")'''\n\n        python_path:\n            type: str\n            help: Path to the sys.executable.\n        \"\"\"\n    )\n    subprocess.run([\"git\", \"init\"], cwd=dummy_template_path, check=False)\n    subprocess.run([\"git\", \"add\", \".\"], cwd=dummy_template_path, check=False)\n    subprocess.run([\"git\", \"commit\", \"-m\", \"chore: setup dummy test template\"], cwd=dummy_template_path, check=False)\n    return {\"template_path\": dummy_template_path, \"cwd\": cwd}\n\n\n@pytest.fixture(autouse=True)\ndef _clear_caches(mocker: MockerFixture) -> None:\n    get_project_dir_names_from_workspace.cache_clear()\n    get_project_configs.cache_clear()\n    mocker.patch(\"algokit.core.config_commands.container_engine.get_container_engine\", return_value=\"docker\")\n\n\n@pytest.fixture(autouse=True)\ndef _always_check_image_versions(mocker: MockerFixture, request: pytest.FixtureRequest) -> None:\n    \"\"\"Ensure image version checks always run in tests by bypassing the cache.\n\n    Tests can opt-out of this by using @pytest.mark.use_real_image_version_cache\n    \"\"\"\n    if \"use_real_image_version_cache\" in [marker.name for marker in request.node.iter_markers()]:\n        return\n    mocker.patch(\"algokit.core.sandbox._should_check_image_versions\", return_value=True)\n    mocker.patch(\"algokit.core.sandbox._update_image_version_cache\")\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_address_invalid.approved.txt",
    "content": "Error: `TZXGUW6DZ27OBB4QSGZKTYFEABCO3R7XWAXECEV73DTF3VOBNNJNAHZJJY` is an invalid account address\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_alias_invalid.approved.txt",
    "content": "DEBUG: `abc` does not exist\nError: Alias `abc` alias does not exist.\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_from_alias_successful.approved.txt",
    "content": "HTTP Request: POST https://snapshottest.dispenser.com/fund/0 \"HTTP/1.1 200 OK\"\nSuccessfully funded 1000000 μAlgo. Browse transaction at https://explore.algokit.io/testnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_http_error.approved.txt",
    "content": "ERROR: Error: Limit exceeded. Try again in ~4.0 hours if your request doesn't exceed the daily limit.\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_invalid_args.approved.txt",
    "content": "Usage: algokit dispenser fund [OPTIONS]\nTry 'algokit dispenser fund -h' for help.\n\nError: Missing option '--receiver' / '-r'.\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_not_authenticated.approved.txt",
    "content": "ERROR: Please login first by running `algokit dispenser login` command\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_success.False.False.approved.txt",
    "content": "HTTP Request: POST https://snapshottest.dispenser.com/fund/0 \"HTTP/1.1 200 OK\"\nSuccessfully funded 1000000 μAlgo. Browse transaction at https://explore.algokit.io/testnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_success.False.True.approved.txt",
    "content": "DEBUG: Converted algos to microAlgos: 1000000\nHTTP Request: POST https://snapshottest.dispenser.com/fund/0 \"HTTP/1.1 200 OK\"\nSuccessfully funded 1.0 Algo. Browse transaction at https://explore.algokit.io/testnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_success.True.False.approved.txt",
    "content": "DEBUG: Using CI access token over keyring credentials\nHTTP Request: POST https://snapshottest.dispenser.com/fund/0 \"HTTP/1.1 200 OK\"\nSuccessfully funded 1000000 μAlgo. Browse transaction at https://explore.algokit.io/testnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/dispenser/TestFundCommand.test_fund_command_success.True.True.approved.txt",
    "content": "DEBUG: Converted algos to microAlgos: 1000000\nDEBUG: Using CI access token over keyring credentials\nHTTP Request: POST https://snapshottest.dispenser.com/fund/0 \"HTTP/1.1 200 OK\"\nSuccessfully funded 1.0 Algo. Browse transaction at https://explore.algokit.io/testnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/dispenser/TestLimitCommand.test_limit_command_http_error.approved.txt",
    "content": "DEBUG: Error processing dispenser API request: Unable to process limit request\nERROR: Error: Unable to process limit request\n"
  },
  {
    "path": "tests/dispenser/TestLimitCommand.test_limit_command_not_authenticated.approved.txt",
    "content": "ERROR: Please login first by running `algokit dispenser login` command\n"
  },
  {
    "path": "tests/dispenser/TestLimitCommand.test_limit_command_success.False.False.approved.txt",
    "content": "HTTP Request: GET https://snapshottest.dispenser.com/fund/0/limit \"HTTP/1.1 200 OK\"\nRemaining daily fund limit: 1000000 μAlgo\n"
  },
  {
    "path": "tests/dispenser/TestLimitCommand.test_limit_command_success.False.True.approved.txt",
    "content": "HTTP Request: GET https://snapshottest.dispenser.com/fund/0/limit \"HTTP/1.1 200 OK\"\nRemaining daily fund limit: 1.0 Algo\n"
  },
  {
    "path": "tests/dispenser/TestLimitCommand.test_limit_command_success.True.False.approved.txt",
    "content": "DEBUG: Using CI access token over keyring credentials\nHTTP Request: GET https://snapshottest.dispenser.com/fund/0/limit \"HTTP/1.1 200 OK\"\nRemaining daily fund limit: 1000000 μAlgo\n"
  },
  {
    "path": "tests/dispenser/TestLimitCommand.test_limit_command_success.True.True.approved.txt",
    "content": "DEBUG: Using CI access token over keyring credentials\nHTTP Request: GET https://snapshottest.dispenser.com/fund/0/limit \"HTTP/1.1 200 OK\"\nRemaining daily fund limit: 1.0 Algo\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_already_logged_in.approved.txt",
    "content": "You are already logged in\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_cancelled_timeout.approved.txt",
    "content": "HTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/device/code \"HTTP/1.1 200 OK\"\nNavigate to: https://example.com/device\nConfirm code: user_code\nHTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/token \"HTTP/1.1 200 OK\"\nHTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/token \"HTTP/1.1 200 OK\"\nWARNING: Authentication cancelled. Timeout reached after 5 minutes of inactivity.\nError: Error obtaining auth token\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_expired_token_refresh.False.approved.txt",
    "content": "DEBUG: Access token is expired. Attempting to refresh the token...\nWARNING: Failed to refresh the access token. Please authenticate first before proceeding with this command.\nLogin successful\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_expired_token_refresh.True.approved.txt",
    "content": "DEBUG: Access token is expired. Attempting to refresh the token...\nHTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/token \"HTTP/1.1 200 OK\"\nYou are already logged in\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_success_ci.file.None.approved.txt",
    "content": "HTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/device/code \"HTTP/1.1 200 OK\"\nNavigate to: https://example.com/device\nConfirm code: user_code\nHTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/token \"HTTP/1.1 200 OK\"\nWARNING: Your CI access token has been saved to `algokit_ci_token.txt`.\nPlease ensure you keep this file safe or remove after copying the token!\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_success_ci.file.custom_file.txt.approved.txt",
    "content": "HTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/device/code \"HTTP/1.1 200 OK\"\nNavigate to: https://example.com/device\nConfirm code: user_code\nHTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/token \"HTTP/1.1 200 OK\"\nWARNING: Your CI access token has been saved to `custom_file.txt`.\nPlease ensure you keep this file safe or remove after copying the token!\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_success_ci.stdout.None.approved.txt",
    "content": "HTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/device/code \"HTTP/1.1 200 OK\"\nNavigate to: https://example.com/device\nConfirm code: user_code\nHTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/token \"HTTP/1.1 200 OK\"\n\nALGOKIT_DISPENSER_ACCESS_TOKEN (valid for 30 days):\n\naccess_token\n\nWARNING: Your CI access token has been printed to stdout.\nPlease ensure you keep this token safe!\nIf needed, clear your terminal history after copying the token!\n"
  },
  {
    "path": "tests/dispenser/TestLoginCommand.test_login_command_success_user.approved.txt",
    "content": "HTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/device/code \"HTTP/1.1 200 OK\"\nNavigate to: https://example.com/device\nConfirm code: user_code\nHTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/token \"HTTP/1.1 200 OK\"\nLogin successful\n"
  },
  {
    "path": "tests/dispenser/TestLogoutCommand.test_logout_command_already_logged_out.approved.txt",
    "content": "WARNING: Already logged out\n"
  },
  {
    "path": "tests/dispenser/TestLogoutCommand.test_logout_command_revoke_exception.approved.txt",
    "content": "DEBUG: Error logging out An unexpected error occurred: Error response\nError: Error logging out\n"
  },
  {
    "path": "tests/dispenser/TestLogoutCommand.test_logout_command_success.approved.txt",
    "content": "HTTP Request: POST https://dispenser-prod.eu.auth0.com/oauth/revoke \"HTTP/1.1 200 OK\"\nDEBUG: Token revoked successfully\nLogout successful\n"
  },
  {
    "path": "tests/dispenser/TestRefundCommand.test_refund_command_http_error.approved.txt",
    "content": "DEBUG: Error processing dispenser API request: Transaction was already processed\nERROR: Error: Transaction was already processed\n"
  },
  {
    "path": "tests/dispenser/TestRefundCommand.test_refund_command_invalid_args.approved.txt",
    "content": "Usage: algokit dispenser refund [OPTIONS]\nTry 'algokit dispenser refund -h' for help.\n\nError: Missing option '--txID' / '-t'.\n"
  },
  {
    "path": "tests/dispenser/TestRefundCommand.test_refund_command_not_authenticated.approved.txt",
    "content": "ERROR: Please login first by running `algokit dispenser login` command\n"
  },
  {
    "path": "tests/dispenser/TestRefundCommand.test_refund_command_success.False.approved.txt",
    "content": "HTTP Request: POST https://snapshottest.dispenser.com/refund \"HTTP/1.1 200 OK\"\nSuccessfully processed refund transaction\n"
  },
  {
    "path": "tests/dispenser/TestRefundCommand.test_refund_command_success.True.approved.txt",
    "content": "DEBUG: Using CI access token over keyring credentials\nHTTP Request: POST https://snapshottest.dispenser.com/refund \"HTTP/1.1 200 OK\"\nSuccessfully processed refund transaction\n"
  },
  {
    "path": "tests/dispenser/test_dispenser.py",
    "content": "import json\nfrom pathlib import Path\n\nimport click\nimport httpx\nimport jwt\nimport pytest\nfrom approvaltests.namer import NamerFactory\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.cli.dispenser import DEFAULT_CI_TOKEN_FILENAME, DISPENSER_ASSETS, DispenserAssetName\nfrom algokit.core.dispenser import (\n    DISPENSER_KEYRING_ACCESS_TOKEN_KEY,\n    DISPENSER_KEYRING_ID_TOKEN_KEY,\n    DISPENSER_KEYRING_REFRESH_TOKEN_KEY,\n    DISPENSER_KEYRING_USER_ID_KEY,\n    ApiConfig,\n    APIErrorCode,\n    AuthConfig,\n)\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\n@pytest.fixture(autouse=True)\ndef _mock_api_base_url(mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.dispenser.ApiConfig.BASE_URL\", \"https://snapshottest.dispenser.com\")\n\n\ndef _set_mock_keyring_credentials(\n    mock_keyring: dict, id_token: str, access_token: str, refresh_token: str, user_id: str\n) -> None:\n    mock_keyring[DISPENSER_KEYRING_ID_TOKEN_KEY] = id_token\n    mock_keyring[DISPENSER_KEYRING_ACCESS_TOKEN_KEY] = access_token\n    mock_keyring[DISPENSER_KEYRING_REFRESH_TOKEN_KEY] = refresh_token\n    mock_keyring[DISPENSER_KEYRING_USER_ID_KEY] = user_id\n\n\n@pytest.fixture\ndef cwd(tmp_path_factory: pytest.TempPathFactory) -> Path:\n    return tmp_path_factory.mktemp(\"cwd\")\n\n\n@pytest.mark.parametrize(\n    \"command\",\n    [\"logout\", \"login\", \"refund\", \"fund\", \"limit\"],\n)\ndef test_no_internet_access(command: str, mocker: MockerFixture) -> None:\n    # Arrange\n    is_network_available_mock = mocker.patch(\"algokit.cli.dispenser.is_network_available\", return_value=False)\n\n    # Act\n    result = invoke(f\"dispenser {command}\")\n\n    # Assert\n    is_network_available_mock.assert_called()\n    assert result.exit_code == 1\n    assert result.output == \"ERROR: Please connect to 
internet first\\n\"\n\n\nclass TestTokenRefresh:\n    def test_token_refresh_success(\n        self, mock_keyring: dict[str, str | None], mocker: MockerFixture, httpx_mock: HTTPXMock\n    ) -> None:\n        # Arrange\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.core.dispenser.jwt.decode\", return_value={\"sub\": \"new_user_id\"})\n        mocker.patch(\"algokit.core.dispenser._get_access_token_rsa_pub_key\")\n        httpx_mock.add_response(\n            url=AuthConfig.OAUTH_TOKEN_URL,\n            method=\"POST\",\n            json={\n                \"access_token\": \"new_access_token\",\n                \"id_token\": \"new_id_token\",\n                \"refresh_token\": \"new_refresh_token\",\n            },\n        )\n\n        # Act\n        from algokit.core.dispenser import _refresh_user_access_token\n\n        _refresh_user_access_token()\n\n        # Assert\n        assert mock_keyring[DISPENSER_KEYRING_ACCESS_TOKEN_KEY] == \"new_access_token\"\n        assert mock_keyring[DISPENSER_KEYRING_ID_TOKEN_KEY] == \"new_id_token\"\n        assert mock_keyring[DISPENSER_KEYRING_REFRESH_TOKEN_KEY] == \"new_refresh_token\"\n        assert mock_keyring[DISPENSER_KEYRING_USER_ID_KEY] == \"new_user_id\"\n\n    def test_token_refresh_failure(\n        self, mock_keyring: dict[str, str | None], mocker: MockerFixture, httpx_mock: HTTPXMock\n    ) -> None:\n        # Arrange\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.core.dispenser._get_access_token_rsa_pub_key\")\n        httpx_mock.add_exception(httpx.HTTPError(\"Error response\"), url=AuthConfig.OAUTH_TOKEN_URL)\n\n        # Act and Assert\n        from algokit.core.dispenser import _refresh_user_access_token\n\n        with pytest.raises(httpx.HTTPError):\n            _refresh_user_access_token()\n\n\n# 
Snapshot tests for dispenser commands\n\n\nclass TestLogoutCommand:\n    def test_logout_command_already_logged_out(self, mocker: MockerFixture) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=False)\n\n        # Act\n        result = invoke(\"dispenser logout\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_logout_command_success(\n        self, mock_keyring: dict[str, str | None], mocker: MockerFixture, httpx_mock: HTTPXMock\n    ) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        httpx_mock.add_response(url=AuthConfig.OAUTH_REVOKE_URL, method=\"POST\", status_code=200)\n\n        # Act\n        result = invoke(\"dispenser logout\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert not mock_keyring\n        verify(result.output)\n\n    def test_logout_command_revoke_exception(\n        self, mock_keyring: dict[str, str | None], mocker: MockerFixture, httpx_mock: HTTPXMock\n    ) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        httpx_mock.add_exception(httpx.HTTPError(\"Error response\"), url=AuthConfig.OAUTH_REVOKE_URL)\n        clear_mock = mocker.patch(\"algokit.cli.dispenser.clear_dispenser_credentials\")\n\n        # Act\n        result = invoke(\"dispenser logout\")\n\n        # Assert\n        clear_mock.assert_not_called()\n        assert result.exit_code == 1\n        verify(result.output)\n\n\nclass TestLoginCommand:\n    def test_login_command_already_logged_in(self, mocker: MockerFixture) -> None:\n        # Arrange\n        
mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n\n        # Act\n        result = invoke(\"dispenser login\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_login_command_success_user(\n        self, mock_keyring: dict[str, str | None], mocker: MockerFixture, httpx_mock: HTTPXMock\n    ) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=False)\n        mocker.patch(\"algokit.core.dispenser.jwt.decode\", return_value={\"sub\": \"user_id\"})\n        httpx_mock.add_response(\n            url=AuthConfig.OAUTH_DEVICE_CODE_URL,\n            method=\"POST\",\n            json={\n                \"device_code\": \"device_code\",\n                \"user_code\": \"user_code\",\n                \"verification_uri_complete\": \"https://example.com/device\",\n            },\n        )\n        httpx_mock.add_response(\n            url=AuthConfig.OAUTH_TOKEN_URL,\n            method=\"POST\",\n            json={\n                \"access_token\": \"access_token\",\n                \"id_token\": \"id_token\",\n                \"refresh_token\": \"refresh_token\",\n            },\n        )\n        mocker.patch(\"algokit.core.dispenser.TokenVerifier\")\n\n        # Act\n        result = invoke(\"dispenser login\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert mock_keyring[DISPENSER_KEYRING_ID_TOKEN_KEY] == \"id_token\"\n        assert mock_keyring[DISPENSER_KEYRING_ACCESS_TOKEN_KEY] == \"access_token\"\n        assert mock_keyring[DISPENSER_KEYRING_REFRESH_TOKEN_KEY] == \"refresh_token\"\n        assert mock_keyring[DISPENSER_KEYRING_USER_ID_KEY] == \"user_id\"\n        verify(result.output)\n\n    @pytest.mark.parametrize(\n        (\"output_mode\", \"output_filename\"),\n        [\n            (\"stdout\", None),\n            (\"file\", \"custom_file.txt\"),\n            (\"file\", None),\n        ],\n    
)\n    def test_login_command_success_ci(\n        self, output_mode: str, output_filename: str | None, mocker: MockerFixture, httpx_mock: HTTPXMock, cwd: Path\n    ) -> None:\n        # Arrange\n        httpx_mock.add_response(\n            url=AuthConfig.OAUTH_DEVICE_CODE_URL,\n            method=\"POST\",\n            json={\n                \"device_code\": \"device_code\",\n                \"user_code\": \"user_code\",\n                \"verification_uri_complete\": \"https://example.com/device\",\n            },\n        )\n        httpx_mock.add_response(\n            url=AuthConfig.OAUTH_TOKEN_URL,\n            method=\"POST\",\n            json={\n                \"access_token\": \"access_token\",\n                \"id_token\": \"id_token\",\n            },\n        )\n        mocker.patch(\"algokit.core.dispenser.TokenVerifier\")\n\n        # Act\n        result = invoke(\n            f\"dispenser login --ci -o {output_mode} {('-f ' + output_filename) if output_filename else ''}\", cwd=cwd\n        )\n\n        # Assert\n        assert result.exit_code == 0\n\n        if output_mode == \"file\":\n            expected_output_filename = output_filename if output_filename else DEFAULT_CI_TOKEN_FILENAME\n            output_file_path = cwd / expected_output_filename\n            assert output_file_path.exists()\n            assert output_file_path.read_text() == \"access_token\"\n\n        verify(result.output, options=NamerFactory.with_parameters(output_mode, output_filename))\n\n    def test_login_command_cancelled_timeout(self, mocker: MockerFixture, httpx_mock: HTTPXMock) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=False)\n        httpx_mock.add_response(\n            url=AuthConfig.OAUTH_DEVICE_CODE_URL,\n            method=\"POST\",\n            json={\n                \"device_code\": \"device_code\",\n                \"user_code\": \"user_code\",\n                
\"verification_uri_complete\": \"https://example.com/device\",\n            },\n            is_reusable=True,\n        )\n        httpx_mock.add_response(\n            url=AuthConfig.OAUTH_TOKEN_URL,\n            method=\"POST\",\n            json={\n                \"error\": \"authorization_pending\",\n                \"error_description\": \"The user authentication is pending.\",\n            },\n            is_reusable=True,\n        )\n        mocker.patch(\"algokit.core.dispenser.TokenVerifier\")\n        mocker.patch(\"algokit.core.dispenser.DISPENSER_LOGIN_TIMEOUT\", 1)\n\n        # Act\n        result = invoke(\"dispenser login\")\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output)\n\n    @pytest.mark.parametrize(\n        \"refresh_successful\",\n        [True, False],\n    )\n    def test_login_command_expired_token_refresh(\n        self,\n        *,\n        refresh_successful: bool,\n        mock_keyring: dict[str, str | None],\n        mocker: MockerFixture,\n        httpx_mock: HTTPXMock,\n    ) -> None:\n        # Arrange\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.core.dispenser._get_access_token_rsa_pub_key\")\n        mocker.patch(\"algokit.cli.dispenser.get_oauth_tokens\")\n        mocker.patch(\n            \"algokit.core.dispenser.jwt.decode\",\n            side_effect=[jwt.ExpiredSignatureError(\"Expired token\"), {\"sub\": \"new_user_id\"}],\n        )\n\n        if refresh_successful:\n            httpx_mock.add_response(\n                url=AuthConfig.OAUTH_TOKEN_URL,\n                method=\"POST\",\n                json={\n                    \"access_token\": \"access_token\",\n                    \"id_token\": \"id_token\",\n                    \"refresh_token\": \"refresh_token\",\n                },\n            )\n        else:\n            httpx_mock.add_exception(httpx.HTTPError(\"Error 
response\"), url=AuthConfig.OAUTH_TOKEN_URL)\n\n        # Act\n        result = invoke(\"dispenser login\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output, options=NamerFactory.with_parameters(refresh_successful))\n\n\nclass TestFundCommand:\n    def test_fund_command_invalid_args(\n        self,\n    ) -> None:\n        # Act\n        result = invoke(\"dispenser fund\")\n\n        # Assert\n        assert result.exit_code == click.UsageError.exit_code\n        verify(result.output)\n\n    @pytest.mark.parametrize(\n        (\"with_ci_token\", \"use_whole_units\"),\n        [(True, True), (True, False), (False, True), (False, False)],\n    )\n    def test_fund_command_success(\n        self,\n        *,\n        with_ci_token: bool,\n        use_whole_units: bool,\n        mock_keyring: dict[str, str | None],\n        mocker: MockerFixture,\n        httpx_mock: HTTPXMock,\n        monkeypatch: pytest.MonkeyPatch,\n    ) -> None:\n        # Arrange\n        if with_ci_token:\n            monkeypatch.setenv(\"ALGOKIT_DISPENSER_ACCESS_TOKEN\", \"ci_access_token\")\n        else:\n            _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        algo_asset = DISPENSER_ASSETS[DispenserAssetName.ALGO]\n        amount = 1 if use_whole_units else int(1e6)\n        receiver = \"TZXGUW6DZ27OBB4QSGZKTYFEABCO3R7XWAXECEV73DTFLVOBNNJNAHZJJY\"\n        httpx_mock.add_response(\n            url=f\"{ApiConfig.BASE_URL}/fund/{algo_asset.asset_id}\",\n            method=\"POST\",\n            json={\"amount\": int(1e6), \"txID\": \"dummy_tx_id\"},\n        )\n\n        # Act\n        result = invoke(f\"dispenser fund -r {receiver} -a {amount} {'--whole-units' if use_whole_units else ''}\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output, 
options=NamerFactory.with_parameters(with_ci_token, use_whole_units))\n\n    def test_fund_command_http_error(\n        self,\n        mocker: MockerFixture,\n        httpx_mock: HTTPXMock,\n    ) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        mocker.patch(\"algokit.core.dispenser._get_auth_token\", return_value=\"auth_token\")\n\n        # Mock datetime.datetime.now() to always return a specific datetime\n        mocker.patch(\"algokit.core.dispenser._get_hours_until_reset\", return_value=4.0)\n\n        algo_asset = DISPENSER_ASSETS[DispenserAssetName.ALGO]\n\n        httpx_mock.add_exception(\n            httpx.HTTPStatusError(\n                \"Limit exceeded\",\n                request=httpx.Request(\"POST\", f\"{ApiConfig.BASE_URL}/fund\"),\n                response=httpx.Response(\n                    400,\n                    request=httpx.Request(\"POST\", f\"{ApiConfig.BASE_URL}/fund\"),\n                    json={\n                        \"code\": APIErrorCode.FUND_LIMIT_EXCEEDED,\n                        \"limit\": 10_000_000,\n                        \"resetsAt\": \"2023-09-19T10:07:34.024Z\",\n                    },\n                ),\n            ),\n            url=f\"{ApiConfig.BASE_URL}/fund/{algo_asset.asset_id}\",\n            method=\"POST\",\n        )\n\n        # Act\n        result = invoke(\"dispenser fund -r TZXGUW6DZ27OBB4QSGZKTYFEABCO3R7XWAXECEV73DTFLVOBNNJNAHZJJY -a 123\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_fund_command_not_authenticated(\n        self,\n        mocker: MockerFixture,\n    ) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=False)\n\n        # Act\n        result = invoke(\"dispenser fund -r abc -a 123\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def 
test_fund_command_from_alias_successful(\n        self,\n        mocker: MockerFixture,\n        mock_keyring: dict[str, str | None],\n        httpx_mock: HTTPXMock,\n    ) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mock_keyring[alias_name] = json.dumps(\n            {\n                \"alias\": alias_name,\n                \"address\": \"TZXGUW6DZ27OBB4QSGZKTYFEABCO3R7XWAXECEV73DTFLVOBNNJNAHZJJY\",\n                \"private_key\": None,\n            }\n        )\n        mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        httpx_mock.add_response(\n            url=f\"{ApiConfig.BASE_URL}/fund/{DISPENSER_ASSETS[DispenserAssetName.ALGO].asset_id}\",\n            method=\"POST\",\n            json={\"amount\": int(1e6), \"txID\": \"dummy_tx_id\"},\n        )\n\n        # Act\n        result = invoke(\"dispenser fund -r test_alias -a 123\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_fund_command_address_invalid(self, mocker: MockerFixture, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n\n        # Act\n        result = invoke(\"dispenser fund -r TZXGUW6DZ27OBB4QSGZKTYFEABCO3R7XWAXECEV73DTF3VOBNNJNAHZJJY -a 123\")\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output)\n\n    def test_fund_command_alias_invalid(self, mocker: MockerFixture, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        
_set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n\n        # Act\n        result = invoke(\"dispenser fund -r abc -a 123\")\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output)\n\n\nclass TestRefundCommand:\n    def test_refund_command_invalid_args(\n        self,\n    ) -> None:\n        # Act\n        result = invoke(\"dispenser refund\")\n\n        # Assert\n        assert result.exit_code == click.UsageError.exit_code\n        verify(result.output)\n\n    @pytest.mark.parametrize(\n        \"with_ci_token\",\n        [True, False],\n    )\n    def test_refund_command_success(\n        self,\n        *,\n        with_ci_token: bool,\n        mock_keyring: dict[str, str | None],\n        mocker: MockerFixture,\n        httpx_mock: HTTPXMock,\n        monkeypatch: pytest.MonkeyPatch,\n    ) -> None:\n        # Arrange\n        if with_ci_token:\n            monkeypatch.setenv(\"ALGOKIT_DISPENSER_ACCESS_TOKEN\", \"ci_access_token\")\n        else:\n            _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        tx_id = \"some_transaction_id\"\n        httpx_mock.add_response(\n            url=f\"{ApiConfig.BASE_URL}/refund\",\n            method=\"POST\",\n            json={\"message\": f\"Successfully refunded transaction {tx_id}\"},\n        )\n\n        # Act\n        result = invoke(f\"dispenser refund -t {tx_id}\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output, options=NamerFactory.with_parameters(with_ci_token))\n\n    def test_refund_command_http_error(\n        self,\n        mock_keyring: dict[str, str | None],\n        mocker: MockerFixture,\n        httpx_mock: HTTPXMock,\n    ) -> None:\n        # Arrange\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", 
\"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        tx_id = \"some_transaction_id\"\n        httpx_mock.add_exception(\n            httpx.HTTPError(\"Transaction was already processed\"), url=f\"{ApiConfig.BASE_URL}/refund\", method=\"POST\"\n        )\n\n        # Act\n        result = invoke(f\"dispenser refund -t {tx_id}\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_refund_command_not_authenticated(\n        self,\n        mocker: MockerFixture,\n    ) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=False)\n        tx_id = \"some_transaction_id\"\n\n        # Act\n        result = invoke(f\"dispenser refund -t {tx_id}\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n\nclass TestLimitCommand:\n    @pytest.mark.parametrize(\n        (\"with_ci_token\", \"use_whole_units\"),\n        [(True, True), (True, False), (False, True), (False, False)],\n    )\n    def test_limit_command_success(\n        self,\n        *,\n        with_ci_token: bool,\n        use_whole_units: bool,\n        mock_keyring: dict[str, str | None],\n        mocker: MockerFixture,\n        httpx_mock: HTTPXMock,\n        monkeypatch: pytest.MonkeyPatch,\n    ) -> None:\n        # Arrange\n        if with_ci_token:\n            monkeypatch.setenv(\"ALGOKIT_DISPENSER_ACCESS_TOKEN\", \"ci_access_token\")\n        else:\n            _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        algo_asset = DISPENSER_ASSETS[DispenserAssetName.ALGO]\n        httpx_mock.add_response(\n            url=f\"{ApiConfig.BASE_URL}/fund/{algo_asset.asset_id}/limit\",\n            method=\"GET\",\n            
json={\"amount\": 1000000},\n        )\n\n        # Act\n        result = invoke(f\"dispenser limit {'--whole-units' if use_whole_units else ''}\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output, options=NamerFactory.with_parameters(with_ci_token, use_whole_units))\n\n    def test_limit_command_http_error(\n        self,\n        mock_keyring: dict[str, str | None],\n        mocker: MockerFixture,\n        httpx_mock: HTTPXMock,\n    ) -> None:\n        # Arrange\n        _set_mock_keyring_credentials(mock_keyring, \"id_token\", \"access_token\", \"refresh_token\", \"user_id\")\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=True)\n        algo_asset = DISPENSER_ASSETS[DispenserAssetName.ALGO]\n        httpx_mock.add_exception(\n            httpx.HTTPError(\"Unable to process limit request\"),\n            url=f\"{ApiConfig.BASE_URL}/fund/{algo_asset.asset_id}/limit\",\n            method=\"GET\",\n        )\n\n        # Act\n        result = invoke(\"dispenser limit\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_limit_command_not_authenticated(\n        self,\n        mocker: MockerFixture,\n    ) -> None:\n        # Arrange\n        mocker.patch(\"algokit.cli.dispenser.is_authenticated\", return_value=False)\n\n        # Act\n        result = invoke(\"dispenser limit\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n"
  },
  {
    "path": "tests/doctor/__init__.py",
    "content": ""
  },
  {
    "path": "tests/doctor/test_doctor.py",
    "content": "import typing\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport click\nimport pytest\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\nfrom approvaltests.scrubbers.scrubbers import Scrubber\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import TokenScrubber, combine_scrubbers, verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\nPARENT_DIRECTORY = Path(__file__).parent\n\nDOCKER_COMPOSE_VERSION_COMMAND = [\"docker\", \"compose\", \"version\", \"--format\", \"json\"]\n\n\nclass VersionInfoType(typing.NamedTuple):\n    major: int\n    minor: int\n    micro: int\n    releaselevel: str\n    serial: int\n\n\n@pytest.fixture\ndef _mock_doctor_dependencies(mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.cli.doctor.get_current_package_version\").return_value = \"1.2.3\"\n    mocker.patch(\"algokit.cli.doctor.get_latest_github_version\").return_value = \"1.2.3\"\n    # Mock datetime\n    mocker.patch(\"algokit.cli.doctor.dt\").datetime.now.side_effect = lambda _, tz=None: datetime(\n        1990, 12, 31, 10, 9, 8, tzinfo=tz\n    )\n    # Mock shutil\n    mocker.patch(\"algokit.core.doctor.which\").side_effect = mock_shutil_which\n    # Mock sys - Tuple[int, int, int, str, int]\n    sys_module = mocker.patch(\"algokit.cli.doctor.sys\")\n    sys_module.version = \"3.6.2\"\n    sys_module.prefix = \"/home/me/.local/pipx/venvs/algokit\"\n    # Mock enable binary mode to ignore outputting package information to\n    # simplify snapshot diffs - otherwise each new run may fail whenever main prod\n    # dependencies are updated\n    mocker.patch(\"algokit.cli.doctor.is_binary_mode\").return_value = True\n\n\n@pytest.fixture(autouse=True)\ndef _mock_happy_values(proc_mock: ProcMock) -> None:\n    proc_mock.set_output([\"winget\", \"--version\"], [\"v1.8.1911\"])\n    proc_mock.set_output([\"brew\", \"--version\"], [\"Homebrew 3.6.15\", \"Homebrew/homebrew-core 
(blah)\"])\n    proc_mock.set_output([\"docker\", \"--version\"], [\"Docker version 20.10.21, build baeda1f\"])\n    proc_mock.set_output(DOCKER_COMPOSE_VERSION_COMMAND, ['{\"version\": \"v2.12.2\"}'])\n    proc_mock.set_output([\"git\", \"--version\"], [\"git version 2.37.1 (Apple Git-137.1)\"])\n    proc_mock.set_output([\"python\", \"--version\"], [\"Python 3.10.0\"])\n    proc_mock.set_output([\"python3\", \"--version\"], [\"Python 3.11.0\"])\n    proc_mock.set_output([\"pipx\", \"--version\"], [\"1.1.0\"])\n    proc_mock.set_output([\"poetry\", \"--version\"], [\"blah blah\", \"\", \"Poetry (version 1.2.2)\"])\n    proc_mock.set_output([\"node\", \"--version\"], [\"v18.12.1\"])\n    proc_mock.set_output([\"npm\", \"--version\"], [\"8.19.2\"])\n    proc_mock.set_output([\"npm.cmd\", \"--version\"], [\"8.19.2\"])\n\n\ndef mock_shutil_which(python_command_name: str) -> str:\n    if python_command_name == \"python\":\n        return \"/usr/local/bin/python\"\n    if python_command_name == \"python3\":\n        return \"/usr/local/bin/python3\"\n    return \"\"\n\n\ndef make_output_scrubber(**extra_tokens: str) -> Scrubber:\n    default_tokens = {\"test_parent_directory\": str(PARENT_DIRECTORY)}\n    tokens = default_tokens | extra_tokens\n    return combine_scrubbers(\n        click.unstyle,\n        TokenScrubber(tokens=tokens),\n        TokenScrubber(tokens={\"test_parent_directory\": str(PARENT_DIRECTORY).replace(\"\\\\\", \"/\")}),\n        lambda t: t.replace(\"{test_parent_directory}\\\\\", \"{test_parent_directory}/\"),\n    )\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\ndef test_doctor_help() -> None:\n    result = invoke(\"doctor -h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_doctor_with_copy(mocker: MockerFixture) -> None:\n    # Mock pyclip\n    mocked_os = 
mocker.patch(\"algokit.cli.doctor.pyclip.copy\")\n    result = invoke(\"doctor -c\")\n\n    assert result.exit_code == 0\n    mocked_os.assert_called_once()\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\", \"mock_platform_system\")\ndef test_doctor_successful(request: pytest.FixtureRequest) -> None:\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber(), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_doctor_with_docker_compose_version_warning(proc_mock: ProcMock) -> None:\n    proc_mock.set_output(DOCKER_COMPOSE_VERSION_COMMAND, ['{\"version\": \"v2.1.3\"}'])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_doctor_with_docker_compose_version_gitpod(proc_mock: ProcMock) -> None:\n    proc_mock.set_output(DOCKER_COMPOSE_VERSION_COMMAND, ['{\"version\": \"v2.10.0-gitpod.0\"}'])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_doctor_with_docker_compose_version_unparseable(proc_mock: ProcMock) -> None:\n    proc_mock.set_output(DOCKER_COMPOSE_VERSION_COMMAND, ['{\"version\": \"TEAPOT\"}'])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\nALL_COMMANDS = [\n    [\"brew\", \"--version\"],\n    [\"docker\", \"--version\"],\n    DOCKER_COMPOSE_VERSION_COMMAND,\n    [\"git\", \"--version\"],\n    [\"python\", \"--version\"],\n    [\"python3\", 
\"--version\"],\n    [\"pipx\", \"--version\"],\n    [\"poetry\", \"--version\"],\n    [\"node\", \"--version\"],\n    [\"npm\", \"--version\"],\n    [\"npm.cmd\", \"--version\"],\n]\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\", \"mock_platform_system\")\ndef test_doctor_all_commands_not_found(request: pytest.FixtureRequest, proc_mock: ProcMock) -> None:\n    for cmd in ALL_COMMANDS:\n        proc_mock.should_fail_on(cmd[0])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber(), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\", \"mock_platform_system\")\ndef test_doctor_all_commands_bad_exit(request: pytest.FixtureRequest, proc_mock: ProcMock) -> None:\n    for cmd in ALL_COMMANDS:\n        proc_mock.should_bad_exit_on(cmd, output=[\"I AM A TEAPOT\"])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber(), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_doctor_with_weird_values_on_mac(proc_mock: ProcMock) -> None:\n    proc_mock.set_output([\"brew\", \"--version\"], [\"Homebrew 3.6.15-31-g82d89bb\"])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_unparseable_python_version(proc_mock: ProcMock) -> None:\n    proc_mock.set_output([\"python\", \"--version\"], [\"  \", \"1-2-3\", \"  abc  \"])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\", \"proc_mock\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef 
test_unexpected_exception_locating_executable(mocker: MockerFixture) -> None:\n    def which_throw(_cmd: str) -> None:\n        raise RuntimeError(\"OH NO\")\n\n    mocker.patch(\"algokit.core.doctor.which\").side_effect = which_throw\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_npm_permission_denied(proc_mock: ProcMock) -> None:\n    proc_mock.should_deny_on([\"npm\"])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_new_algokit_version_available(request: pytest.FixtureRequest, mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.cli.doctor.get_latest_github_version\").return_value = \"4.5.6\"\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber(), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"_mock_doctor_dependencies\")\n@pytest.mark.mock_platform_system(\"Windows\")\ndef test_doctor_with_weird_values_on_windows(proc_mock: ProcMock) -> None:\n    proc_mock.set_output([\"git\", \"--version\"], [\"git version 2.31.0.windows.1\"])\n    proc_mock.set_output([\"winget\"], [\"v1.8.1911\", \"Winget v1.8.1911\"])\n    proc_mock.should_fail_on([\"npm\"])\n    proc_mock.set_output([\"npm.cmd\", \"--version\"], [\" 16.17.0 \"])\n\n    result = invoke(\"doctor\")\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_doctor_no_mocking() -> None:\n    result = invoke(\"doctor\")\n    assert result.exception is None\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_all_commands_bad_exit[linux].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: I AM A TEAPOT\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: I AM A TEAPOT\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: I AM A TEAPOT\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: I AM A TEAPOT\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: I AM A TEAPOT\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: I AM A TEAPOT\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: I AM A TEAPOT\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: I AM A TEAPOT\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: I AM A TEAPOT\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Linux-other-system-info\ndocker: Command exited with code: -1\n  I AM A TEAPOT\ndocker compose: Command exited with code: -1\n  I AM A TEAPOT\ngit: Command exited with code: -1\n  I AM A TEAPOT\npython: Command exited with code: -1\n  I AM A TEAPOT\npython3: Command exited with code: -1\n  I AM A TEAPOT\npipx: Command exited with code: -1\n  I AM A TEAPOT\npoetry: Command exited with code: -1\n  I AM A TEAPOT\nnode: Command exited with code: -1\n  I AM A TEAPOT\nnpm: Command exited with code: -1\n  I AM A TEAPOT\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_all_commands_bad_exit[macOS].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: I AM A TEAPOT\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: I AM A TEAPOT\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: I AM A TEAPOT\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: I AM A TEAPOT\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: I AM A TEAPOT\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: I AM A TEAPOT\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: I AM A TEAPOT\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: I AM A TEAPOT\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: I AM A TEAPOT\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: I AM A TEAPOT\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: Command exited with code: -1\n  I AM A TEAPOT\ndocker compose: Command exited with code: -1\n  I AM A TEAPOT\ngit: Command exited with code: -1\n  I AM A TEAPOT\npython: Command exited with code: -1\n  I AM A TEAPOT\npython3: Command exited with code: -1\n  I AM A TEAPOT\npipx: Command exited with code: -1\n  I AM A TEAPOT\npoetry: Command exited with code: -1\n  I AM A TEAPOT\nnode: Command exited with code: -1\n  I AM A TEAPOT\nnpm: Command exited with code: -1\n  I AM A TEAPOT\nbrew: Command exited with code: -1\n  I AM A TEAPOT\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_all_commands_bad_exit[windows].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: I AM A TEAPOT\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: I AM A TEAPOT\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: I AM A TEAPOT\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: I AM A TEAPOT\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: I AM A TEAPOT\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: I AM A TEAPOT\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: I AM A TEAPOT\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: I AM A TEAPOT\nDEBUG: Running 'npm.cmd --version' in '{current_working_directory}'\nDEBUG: npm.cmd: I AM A TEAPOT\nDEBUG: Running 'winget --version' in '{current_working_directory}'\nDEBUG: winget: v1.8.1911\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Windows-other-system-info\ndocker: Command exited with code: -1\n  I AM A TEAPOT\ndocker compose: Command exited with code: -1\n  I AM A TEAPOT\ngit: Command exited with code: -1\n  I AM A TEAPOT\npython: Command exited with code: -1\n  I AM A TEAPOT\npython3: Command exited with code: -1\n  I AM A TEAPOT\npipx: Command exited with code: -1\n  I AM A TEAPOT\npoetry: Command exited with code: -1\n  I AM A TEAPOT\nnode: Command exited with code: -1\n  I AM A TEAPOT\nnpm: Command exited with code: -1\n  I AM A TEAPOT\nwinget: 1.8.1911\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_all_commands_not_found[linux].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: Command not found\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Linux-other-system-info\ndocker: Command not found!\n  `docker` required to run `algokit localnet` command; install via https://docker.io\ndocker compose: Command not found!\ngit: Command not found!\n  Git required to run `algokit init`; install via https://github.com/git-guides/install-git\npython: Command not found!\npython3: Command not found!\npipx: Command not found!\n  pipx is required if poetry is not installed in order to install it automatically;\n  install via https://pypa.github.io/pipx/\npoetry: Command not found!\n  Poetry is required for some Python-based templates;\n  install via `algokit project bootstrap` within project directory, or via:\n  https://python-poetry.org/docs/#installation\nnode: Command not found!\n  Node.js is required for some Node.js-based templates;\n  install via `algokit project bootstrap` within project directory, or via:\n  https://nodejs.dev/en/learn/how-to-install-nodejs/\nnpm: Command not found!\n\nIf you are experiencing a problem with AlgoKit, 
feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_all_commands_not_found[macOS].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: Command not found\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: Command not found!\n  `docker` required to run `algokit localnet` command; install via https://docker.io\ndocker compose: Command not found!\ngit: Command not found!\n  Git required to run `algokit init`; install via https://github.com/git-guides/install-git\npython: Command not found!\npython3: Command not found!\npipx: Command not found!\n  pipx is required if poetry is not installed in order to install it automatically;\n  install via https://pypa.github.io/pipx/\npoetry: Command not found!\n  Poetry is required for some Python-based templates;\n  install via `algokit project bootstrap` within project directory, or via:\n  https://python-poetry.org/docs/#installation\nnode: Command not found!\n  Node.js is required for some Node.js-based templates;\n  install via `algokit project bootstrap` within project directory, or via:\n  
https://nodejs.dev/en/learn/how-to-install-nodejs/\nnpm: Command not found!\nbrew: Command not found!\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_all_commands_not_found[windows].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'npm.cmd --version' in '{current_working_directory}'\nDEBUG: Command not found\nDEBUG: Running 'winget --version' in '{current_working_directory}'\nDEBUG: winget: v1.8.1911\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Windows-other-system-info\ndocker: Command not found!\n  `docker` required to run `algokit localnet` command; install via https://docker.io\ndocker compose: Command not found!\ngit: Command not found!\n  Git required to `run algokit init`; install via `winget install -e --id Git.Git` if using winget,\n  or via https://github.com/git-guides/install-git#install-git-on-windows\npython: Command not found!\npython3: Command not found!\npipx: Command not found!\n  pipx is required if poetry is not installed in order to install it automatically;\n  install via https://pypa.github.io/pipx/\npoetry: Command not found!\n  Poetry is required for some Python-based templates;\n  install via `algokit project bootstrap` within project directory, or via:\n  https://python-poetry.org/docs/#installation\nnode: Command not found!\n  Node.js is required for some Node.js-based templates;\n  install via `algokit 
project bootstrap` within project directory, or via:\n  https://nodejs.dev/en/learn/how-to-install-nodejs/\nnpm: Command not found!\nwinget: 1.8.1911\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_help.approved.txt",
    "content": "Usage: algokit doctor [OPTIONS]\n\n  Diagnose potential environment issues that may affect AlgoKit.\n\n  Will search the system for AlgoKit dependencies and show their versions, as\n  well as identifying any potential issues.\n\nOptions:\n  -c, --copy-to-clipboard  Copy the contents of the doctor message (in Markdown\n                           format) in your clipboard.\n  -h, --help               Show this message and exit.\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_successful[linux].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Linux-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_successful[macOS].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_successful[windows].approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm.cmd --version' in '{current_working_directory}'\nDEBUG: npm.cmd: 8.19.2\nDEBUG: Running 'winget --version' in '{current_working_directory}'\nDEBUG: winget: v1.8.1911\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Windows-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nwinget: 1.8.1911\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_with_copy.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_with_docker_compose_version_gitpod.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.10.0-gitpod.0\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.10.0-gitpod.0\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_with_docker_compose_version_unparseable.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"TEAPOT\"}\nDEBUG: Unexpected error parsing version: Unable to parse version number\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: {\"version\": \"TEAPOT\"}\n  Failed to parse version from: \"{\"version\": \"TEAPOT\"}\"\n  Error: Unable to parse version number\n  Unable to check against minimum version of 2.5.0\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run 
`algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_with_docker_compose_version_warning.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.1.3\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.1.3\n  Docker Compose 2.5.0 required to run `algokit localnet` command;\n  install via https://docker.io\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_with_weird_values_on_mac.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15-31-g82d89bb\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15-31-g82d89bb\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_doctor_with_weird_values_on_windows.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.31.0.windows.1\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm.cmd --version' in '{current_working_directory}'\nDEBUG: npm.cmd: 16.17.0\nDEBUG: Running 'winget --version' in '{current_working_directory}'\nDEBUG: winget: v1.8.1911\nDEBUG: winget: Winget v1.8.1911\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Windows-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.31.0.windows.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 16.17.0\nwinget: 1.8.1911\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_new_algokit_version_available.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3 (latest: 4.5.6)\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_npm_permission_denied.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: Permission denied running command\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: Permission denied attempting to run command\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_unexpected_exception_locating_executable.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: Python 3.10.0\nDEBUG: Failed to locate python: OH NO\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Failed to locate python3: OH NO\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 3.10.0f (location: unknown)\npython3: 3.11.0f (location: unknown)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/doctor/test_doctor.test_unparseable_python_version.approved.txt",
    "content": "DEBUG: Running 'docker --version' in '{current_working_directory}'\nDEBUG: docker: Docker version 20.10.21, build baeda1f\nDEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.12.2\"}\nDEBUG: Running 'git --version' in '{current_working_directory}'\nDEBUG: git: git version 2.37.1 (Apple Git-137.1)\nDEBUG: Running 'python --version' in '{current_working_directory}'\nDEBUG: python: \nDEBUG: python: 1-2-3\nDEBUG: python: abc\nDEBUG: Running 'python3 --version' in '{current_working_directory}'\nDEBUG: python3: Python 3.11.0\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: 1.1.0\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: blah blah\nDEBUG: poetry: \nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Running 'node --version' in '{current_working_directory}'\nDEBUG: node: v18.12.1\nDEBUG: Running 'npm --version' in '{current_working_directory}'\nDEBUG: npm: 8.19.2\nDEBUG: Running 'brew --version' in '{current_working_directory}'\nDEBUG: brew: Homebrew 3.6.15\nDEBUG: brew: Homebrew/homebrew-core (blah)\ntimestamp: 1990-12-31T10:09:08\nAlgoKit: 1.2.3\nAlgoKit Python: 3.6.2 (location: /home/me/.local/pipx/venvs/algokit)\nOS: Darwin-other-system-info\ndocker: 20.10.21\ndocker compose: 2.12.2\ngit: 2.37.1\npython: 1-2-3 (location: /usr/local/bin/python)\npython3: 3.11.0 (location: /usr/local/bin/python3)\npipx: 1.1.0\npoetry: 1.2.2\nnode: 18.12.1\nnpm: 8.19.2\nbrew: 3.6.15\n\nIf you are experiencing a problem with AlgoKit, feel free to submit an issue via:\nhttps://github.com/algorandfoundation/algokit-cli/issues/new\nPlease include this output, if you want to populate this message in your clipboard, run `algokit doctor -c`\n"
  },
  {
    "path": "tests/explore/__init__.py",
    "content": ""
  },
  {
    "path": "tests/explore/test_explore.py",
    "content": "import logging\n\nimport pytest\nfrom approvaltests.namer import NamerFactory\nfrom pytest_mock import MockerFixture\n\nfrom tests import get_combined_verify_output\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\n@pytest.mark.parametrize(\"command\", [\"\", \"localnet\", \"testnet\", \"mainnet\"])\ndef test_explore(command: str, mocker: MockerFixture) -> None:\n    launch_mock = mocker.patch(\"webbrowser.open\")\n    result = invoke(f\"explore {command}\")\n\n    assert result.exit_code == 0\n    verify(\n        get_combined_verify_output(result.output, \"launch args\", launch_mock.call_args),\n        options=NamerFactory.with_parameters(command or \"localnet\"),\n    )\n\n\ndef test_explore_wsl_exception(mocker: MockerFixture, caplog: pytest.LogCaptureFixture) -> None:\n    command = \"localnet\"\n    mocker.patch(\"algokit.cli.explore.is_wsl\", return_value=True)\n    mocker.patch(\"webbrowser.open\", return_value=False)\n\n    with caplog.at_level(logging.WARNING):\n        result = invoke(f\"explore {command}\")\n\n    assert result.exit_code == 0\n    assert any(\"Unable to open browser from WSL\" in message for message in caplog.messages)\n\n\ndef test_explore_webbrowser_exception(mocker: MockerFixture, caplog: pytest.LogCaptureFixture) -> None:\n    command = \"localnet\"\n    mocker.patch(\"algokit.cli.explore.is_wsl\", return_value=False)\n    mocker.patch(\"webbrowser.open\", side_effect=Exception(\"Webbrowser Exception\"))\n\n    with caplog.at_level(logging.WARNING):\n        result = invoke(f\"explore {command}\")\n\n    assert result.exit_code == 0\n    assert any(\"Failed to open browser. Please open this URL manually:\" in message for message in caplog.messages)\n"
  },
  {
    "path": "tests/explore/test_explore.test_explore.localnet.approved.txt",
    "content": "Opening localnet explorer in your default browser\nURL: https://explore.algokit.io/localnet\n----\nlaunch args:\n----\ncall('https://explore.algokit.io/localnet')\n"
  },
  {
    "path": "tests/explore/test_explore.test_explore.mainnet.approved.txt",
    "content": "Opening mainnet explorer in your default browser\nURL: https://explore.algokit.io/mainnet\n----\nlaunch args:\n----\ncall('https://explore.algokit.io/mainnet')\n"
  },
  {
    "path": "tests/explore/test_explore.test_explore.testnet.approved.txt",
    "content": "Opening testnet explorer in your default browser\nURL: https://explore.algokit.io/testnet\n----\nlaunch args:\n----\ncall('https://explore.algokit.io/testnet')\n"
  },
  {
    "path": "tests/generate/__init__.py",
    "content": ""
  },
  {
    "path": "tests/generate/app.arc32.json",
    "content": "{\n  \"hints\": {\n    \"hello(string)string\": {\n      \"call_config\": {\n        \"no_op\": \"CALL\"\n      }\n    },\n    \"hello_world_check(string)void\": {\n      \"call_config\": {\n        \"no_op\": \"CALL\"\n      }\n    }\n  },\n  \"source\": {\n    \"approval\": \"I3ByYWdtYSB2ZXJzaW9uIDgKaW50Y2Jsb2NrIDAgMQp0eG4gTnVtQXBwQXJncwppbnRjXzAgLy8gMAo9PQpibnogbWFpbl9sNgp0eG5hIEFwcGxpY2F0aW9uQXJncyAwCnB1c2hieXRlcyAweDAyYmVjZTExIC8vICJoZWxsbyhzdHJpbmcpc3RyaW5nIgo9PQpibnogbWFpbl9sNQp0eG5hIEFwcGxpY2F0aW9uQXJncyAwCnB1c2hieXRlcyAweGJmOWMxZWRmIC8vICJoZWxsb193b3JsZF9jaGVjayhzdHJpbmcpdm9pZCIKPT0KYm56IG1haW5fbDQKZXJyCm1haW5fbDQ6CnR4biBPbkNvbXBsZXRpb24KaW50Y18wIC8vIE5vT3AKPT0KdHhuIEFwcGxpY2F0aW9uSUQKaW50Y18wIC8vIDAKIT0KJiYKYXNzZXJ0CnR4bmEgQXBwbGljYXRpb25BcmdzIDEKY2FsbHN1YiBoZWxsb3dvcmxkY2hlY2tfMwppbnRjXzEgLy8gMQpyZXR1cm4KbWFpbl9sNToKdHhuIE9uQ29tcGxldGlvbgppbnRjXzAgLy8gTm9PcAo9PQp0eG4gQXBwbGljYXRpb25JRAppbnRjXzAgLy8gMAohPQomJgphc3NlcnQKdHhuYSBBcHBsaWNhdGlvbkFyZ3MgMQpjYWxsc3ViIGhlbGxvXzIKc3RvcmUgMApwdXNoYnl0ZXMgMHgxNTFmN2M3NSAvLyAweDE1MWY3Yzc1CmxvYWQgMApjb25jYXQKbG9nCmludGNfMSAvLyAxCnJldHVybgptYWluX2w2Ogp0eG4gT25Db21wbGV0aW9uCmludGNfMCAvLyBOb09wCj09CmJueiBtYWluX2wxMgp0eG4gT25Db21wbGV0aW9uCnB1c2hpbnQgNCAvLyBVcGRhdGVBcHBsaWNhdGlvbgo9PQpibnogbWFpbl9sMTEKdHhuIE9uQ29tcGxldGlvbgpwdXNoaW50IDUgLy8gRGVsZXRlQXBwbGljYXRpb24KPT0KYm56IG1haW5fbDEwCmVycgptYWluX2wxMDoKdHhuIEFwcGxpY2F0aW9uSUQKaW50Y18wIC8vIDAKIT0KYXNzZXJ0CmNhbGxzdWIgZGVsZXRlXzEKaW50Y18xIC8vIDEKcmV0dXJuCm1haW5fbDExOgp0eG4gQXBwbGljYXRpb25JRAppbnRjXzAgLy8gMAohPQphc3NlcnQKY2FsbHN1YiB1cGRhdGVfMAppbnRjXzEgLy8gMQpyZXR1cm4KbWFpbl9sMTI6CnR4biBBcHBsaWNhdGlvbklECmludGNfMCAvLyAwCj09CmFzc2VydAppbnRjXzEgLy8gMQpyZXR1cm4KCi8vIHVwZGF0ZQp1cGRhdGVfMDoKcHJvdG8gMCAwCnR4biBTZW5kZXIKZ2xvYmFsIENyZWF0b3JBZGRyZXNzCj09Ci8vIHVuYXV0aG9yaXplZAphc3NlcnQKcHVzaGludCBUTVBMX1VQREFUQUJMRSAvLyBUTVBMX1VQREFUQUJMRQovLyBDaGVjayBhcHAgaXMgdXBkYXRhYmxlCmFzc2VydApyZXRzdWIKCi8vIGRlbGV0ZQpkZWxldGVfMToKcHJvdG8gMCAwCnR4biBTZW5kZXIKZ2xvYmFsIENyZWF0b3JBZGRyZ
XNzCj09Ci8vIHVuYXV0aG9yaXplZAphc3NlcnQKcHVzaGludCBUTVBMX0RFTEVUQUJMRSAvLyBUTVBMX0RFTEVUQUJMRQovLyBDaGVjayBhcHAgaXMgZGVsZXRhYmxlCmFzc2VydApyZXRzdWIKCi8vIGhlbGxvCmhlbGxvXzI6CnByb3RvIDEgMQpwdXNoYnl0ZXMgMHggLy8gIiIKcHVzaGJ5dGVzIDB4NDg2NTZjNmM2ZjJjMjAgLy8gIkhlbGxvLCAiCmZyYW1lX2RpZyAtMQpleHRyYWN0IDIgMApjb25jYXQKZnJhbWVfYnVyeSAwCmZyYW1lX2RpZyAwCmxlbgppdG9iCmV4dHJhY3QgNiAwCmZyYW1lX2RpZyAwCmNvbmNhdApmcmFtZV9idXJ5IDAKcmV0c3ViCgovLyBoZWxsb193b3JsZF9jaGVjawpoZWxsb3dvcmxkY2hlY2tfMzoKcHJvdG8gMSAwCmZyYW1lX2RpZyAtMQpleHRyYWN0IDIgMApwdXNoYnl0ZXMgMHg1NzZmNzI2YzY0IC8vICJXb3JsZCIKPT0KYXNzZXJ0CnJldHN1Yg==\",\n    \"clear\": \"I3ByYWdtYSB2ZXJzaW9uIDgKcHVzaGludCAwIC8vIDAKcmV0dXJu\"\n  },\n  \"state\": {\n    \"global\": {\n      \"num_byte_slices\": 0,\n      \"num_uints\": 0\n    },\n    \"local\": {\n      \"num_byte_slices\": 0,\n      \"num_uints\": 0\n    }\n  },\n  \"schema\": {\n    \"global\": {\n      \"declared\": {},\n      \"reserved\": {}\n    },\n    \"local\": {\n      \"declared\": {},\n      \"reserved\": {}\n    }\n  },\n  \"contract\": {\n    \"name\": \"HelloWorldApp\",\n    \"methods\": [\n      {\n        \"name\": \"hello\",\n        \"args\": [\n          {\n            \"type\": \"string\",\n            \"name\": \"name\"\n          }\n        ],\n        \"returns\": {\n          \"type\": \"string\"\n        },\n        \"desc\": \"Returns Hello, {name}\"\n      },\n      {\n        \"name\": \"hello_world_check\",\n        \"args\": [\n          {\n            \"type\": \"string\",\n            \"name\": \"name\"\n          }\n        ],\n        \"returns\": {\n          \"type\": \"void\"\n        },\n        \"desc\": \"Asserts {name} is \\\"World\\\"\"\n      }\n    ],\n    \"networks\": {}\n  },\n  \"bare_call_config\": {\n    \"delete_application\": \"CALL\",\n    \"no_op\": \"CREATE\",\n    \"update_application\": \"CALL\"\n  }\n}"
  },
  {
    "path": "tests/generate/app.arc56.json",
    "content": "{\n  \"name\": \"HelloWorldApp\",\n  \"structs\": {},\n  \"methods\": [\n    {\n      \"name\": \"hello\",\n      \"args\": [\n        {\n          \"type\": \"string\",\n          \"name\": \"name\"\n        }\n      ],\n      \"returns\": {\n        \"type\": \"string\"\n      },\n      \"actions\": {\n        \"create\": [],\n        \"call\": [\n          \"NoOp\"\n        ]\n      },\n      \"readonly\": false,\n      \"events\": [],\n      \"recommendations\": {}\n    }\n  ],\n  \"arcs\": [\n    22,\n    28\n  ],\n  \"networks\": {},\n  \"state\": {\n    \"schema\": {\n      \"global\": {\n        \"ints\": 0,\n        \"bytes\": 0\n      },\n      \"local\": {\n        \"ints\": 0,\n        \"bytes\": 0\n      }\n    },\n    \"keys\": {\n      \"global\": {},\n      \"local\": {},\n      \"box\": {}\n    },\n    \"maps\": {\n      \"global\": {},\n      \"local\": {},\n      \"box\": {}\n    }\n  },\n  \"bareActions\": {\n    \"create\": [\n      \"NoOp\"\n    ],\n    \"call\": []\n  },\n  \"sourceInfo\": {\n    \"approval\": {\n      \"sourceInfo\": [\n        {\n          \"pc\": [\n            35\n          ],\n          \"errorMessage\": \"OnCompletion is not NoOp\"\n        },\n        {\n          \"pc\": [\n            75\n          ],\n          \"errorMessage\": \"can only call when creating\"\n        },\n        {\n          \"pc\": [\n            38\n          ],\n          \"errorMessage\": \"can only call when not creating\"\n        }\n      ],\n      \"pcOffsetMethod\": \"none\"\n    },\n    \"clear\": {\n      \"sourceInfo\": [],\n      \"pcOffsetMethod\": \"none\"\n    }\n  },\n  \"source\": {\n    \"approval\": 
\"I3ByYWdtYSB2ZXJzaW9uIDEwCgpzbWFydF9jb250cmFjdHMuaGVsbG9fd29ybGQuY29udHJhY3QuSGVsbG9Xb3JsZC5hcHByb3ZhbF9wcm9ncmFtOgogICAgaW50Y2Jsb2NrIDAgMQogICAgY2FsbHN1YiBfX3B1eWFfYXJjNF9yb3V0ZXJfXwogICAgcmV0dXJuCgoKLy8gc21hcnRfY29udHJhY3RzLmhlbGxvX3dvcmxkLmNvbnRyYWN0LkhlbGxvV29ybGQuX19wdXlhX2FyYzRfcm91dGVyX18oKSAtPiB1aW50NjQ6Cl9fcHV5YV9hcmM0X3JvdXRlcl9fOgogICAgLy8gc21hcnRfY29udHJhY3RzL2hlbGxvX3dvcmxkL2NvbnRyYWN0LnB5OjUKICAgIC8vIGNsYXNzIEhlbGxvV29ybGQoQVJDNENvbnRyYWN0KToKICAgIHByb3RvIDAgMQogICAgdHhuIE51bUFwcEFyZ3MKICAgIGJ6IF9fcHV5YV9hcmM0X3JvdXRlcl9fX2JhcmVfcm91dGluZ0A1CiAgICBwdXNoYnl0ZXMgMHgwMmJlY2UxMSAvLyBtZXRob2QgImhlbGxvKHN0cmluZylzdHJpbmciCiAgICB0eG5hIEFwcGxpY2F0aW9uQXJncyAwCiAgICBtYXRjaCBfX3B1eWFfYXJjNF9yb3V0ZXJfX19oZWxsb19yb3V0ZUAyCiAgICBpbnRjXzAgLy8gMAogICAgcmV0c3ViCgpfX3B1eWFfYXJjNF9yb3V0ZXJfX19oZWxsb19yb3V0ZUAyOgogICAgLy8gc21hcnRfY29udHJhY3RzL2hlbGxvX3dvcmxkL2NvbnRyYWN0LnB5OjYKICAgIC8vIEBhYmltZXRob2QoKQogICAgdHhuIE9uQ29tcGxldGlvbgogICAgIQogICAgYXNzZXJ0IC8vIE9uQ29tcGxldGlvbiBpcyBub3QgTm9PcAogICAgdHhuIEFwcGxpY2F0aW9uSUQKICAgIGFzc2VydCAvLyBjYW4gb25seSBjYWxsIHdoZW4gbm90IGNyZWF0aW5nCiAgICAvLyBzbWFydF9jb250cmFjdHMvaGVsbG9fd29ybGQvY29udHJhY3QucHk6NQogICAgLy8gY2xhc3MgSGVsbG9Xb3JsZChBUkM0Q29udHJhY3QpOgogICAgdHhuYSBBcHBsaWNhdGlvbkFyZ3MgMQogICAgZXh0cmFjdCAyIDAKICAgIC8vIHNtYXJ0X2NvbnRyYWN0cy9oZWxsb193b3JsZC9jb250cmFjdC5weTo2CiAgICAvLyBAYWJpbWV0aG9kKCkKICAgIGNhbGxzdWIgaGVsbG8KICAgIGR1cAogICAgbGVuCiAgICBpdG9iCiAgICBleHRyYWN0IDYgMgogICAgc3dhcAogICAgY29uY2F0CiAgICBwdXNoYnl0ZXMgMHgxNTFmN2M3NQogICAgc3dhcAogICAgY29uY2F0CiAgICBsb2cKICAgIGludGNfMSAvLyAxCiAgICByZXRzdWIKCl9fcHV5YV9hcmM0X3JvdXRlcl9fX2JhcmVfcm91dGluZ0A1OgogICAgLy8gc21hcnRfY29udHJhY3RzL2hlbGxvX3dvcmxkL2NvbnRyYWN0LnB5OjUKICAgIC8vIGNsYXNzIEhlbGxvV29ybGQoQVJDNENvbnRyYWN0KToKICAgIHR4biBPbkNvbXBsZXRpb24KICAgIGJueiBfX3B1eWFfYXJjNF9yb3V0ZXJfX19hZnRlcl9pZl9lbHNlQDkKICAgIHR4biBBcHBsaWNhdGlvbklECiAgICAhCiAgICBhc3NlcnQgLy8gY2FuIG9ubHkgY2FsbCB3aGVuIGNyZWF0aW5nCiAgICBpbnRjXzEgLy8gMQogICAgcmV0c3ViCgpfX3B1eWFfYXJjNF9yb3V0ZXJfX19hZnRlcl9pZl9lbH
NlQDk6CiAgICAvLyBzbWFydF9jb250cmFjdHMvaGVsbG9fd29ybGQvY29udHJhY3QucHk6NQogICAgLy8gY2xhc3MgSGVsbG9Xb3JsZChBUkM0Q29udHJhY3QpOgogICAgaW50Y18wIC8vIDAKICAgIHJldHN1YgoKCi8vIHNtYXJ0X2NvbnRyYWN0cy5oZWxsb193b3JsZC5jb250cmFjdC5IZWxsb1dvcmxkLmhlbGxvKG5hbWU6IGJ5dGVzKSAtPiBieXRlczoKaGVsbG86CiAgICAvLyBzbWFydF9jb250cmFjdHMvaGVsbG9fd29ybGQvY29udHJhY3QucHk6Ni03CiAgICAvLyBAYWJpbWV0aG9kKCkKICAgIC8vIGRlZiBoZWxsbyhzZWxmLCBuYW1lOiBTdHJpbmcpIC0+IFN0cmluZzoKICAgIHByb3RvIDEgMQogICAgLy8gc21hcnRfY29udHJhY3RzL2hlbGxvX3dvcmxkL2NvbnRyYWN0LnB5OjgKICAgIC8vIHJldHVybiAiSGVsbG8sICIgKyBuYW1lCiAgICBwdXNoYnl0ZXMgIkhlbGxvLCAiCiAgICBmcmFtZV9kaWcgLTEKICAgIGNvbmNhdAogICAgcmV0c3ViCg==\",\n    \"clear\": \"I3ByYWdtYSB2ZXJzaW9uIDEwCgpzbWFydF9jb250cmFjdHMuaGVsbG9fd29ybGQuY29udHJhY3QuSGVsbG9Xb3JsZC5jbGVhcl9zdGF0ZV9wcm9ncmFtOgogICAgcHVzaGludCAxIC8vIDEKICAgIHJldHVybgo=\"\n  },\n  \"events\": [],\n  \"templateVariables\": {}\n}"
  },
  {
    "path": "tests/generate/application.json",
    "content": "{\n  \"hints\": {\n    \"hello(string)string\": {\n      \"call_config\": {\n        \"no_op\": \"CALL\"\n      }\n    },\n    \"hello_world_check(string)void\": {\n      \"call_config\": {\n        \"no_op\": \"CALL\"\n      }\n    }\n  },\n  \"source\": {\n    \"approval\": \"I3ByYWdtYSB2ZXJzaW9uIDgKaW50Y2Jsb2NrIDAgMQp0eG4gTnVtQXBwQXJncwppbnRjXzAgLy8gMAo9PQpibnogbWFpbl9sNgp0eG5hIEFwcGxpY2F0aW9uQXJncyAwCnB1c2hieXRlcyAweDAyYmVjZTExIC8vICJoZWxsbyhzdHJpbmcpc3RyaW5nIgo9PQpibnogbWFpbl9sNQp0eG5hIEFwcGxpY2F0aW9uQXJncyAwCnB1c2hieXRlcyAweGJmOWMxZWRmIC8vICJoZWxsb193b3JsZF9jaGVjayhzdHJpbmcpdm9pZCIKPT0KYm56IG1haW5fbDQKZXJyCm1haW5fbDQ6CnR4biBPbkNvbXBsZXRpb24KaW50Y18wIC8vIE5vT3AKPT0KdHhuIEFwcGxpY2F0aW9uSUQKaW50Y18wIC8vIDAKIT0KJiYKYXNzZXJ0CnR4bmEgQXBwbGljYXRpb25BcmdzIDEKY2FsbHN1YiBoZWxsb3dvcmxkY2hlY2tfMwppbnRjXzEgLy8gMQpyZXR1cm4KbWFpbl9sNToKdHhuIE9uQ29tcGxldGlvbgppbnRjXzAgLy8gTm9PcAo9PQp0eG4gQXBwbGljYXRpb25JRAppbnRjXzAgLy8gMAohPQomJgphc3NlcnQKdHhuYSBBcHBsaWNhdGlvbkFyZ3MgMQpjYWxsc3ViIGhlbGxvXzIKc3RvcmUgMApwdXNoYnl0ZXMgMHgxNTFmN2M3NSAvLyAweDE1MWY3Yzc1CmxvYWQgMApjb25jYXQKbG9nCmludGNfMSAvLyAxCnJldHVybgptYWluX2w2Ogp0eG4gT25Db21wbGV0aW9uCmludGNfMCAvLyBOb09wCj09CmJueiBtYWluX2wxMgp0eG4gT25Db21wbGV0aW9uCnB1c2hpbnQgNCAvLyBVcGRhdGVBcHBsaWNhdGlvbgo9PQpibnogbWFpbl9sMTEKdHhuIE9uQ29tcGxldGlvbgpwdXNoaW50IDUgLy8gRGVsZXRlQXBwbGljYXRpb24KPT0KYm56IG1haW5fbDEwCmVycgptYWluX2wxMDoKdHhuIEFwcGxpY2F0aW9uSUQKaW50Y18wIC8vIDAKIT0KYXNzZXJ0CmNhbGxzdWIgZGVsZXRlXzEKaW50Y18xIC8vIDEKcmV0dXJuCm1haW5fbDExOgp0eG4gQXBwbGljYXRpb25JRAppbnRjXzAgLy8gMAohPQphc3NlcnQKY2FsbHN1YiB1cGRhdGVfMAppbnRjXzEgLy8gMQpyZXR1cm4KbWFpbl9sMTI6CnR4biBBcHBsaWNhdGlvbklECmludGNfMCAvLyAwCj09CmFzc2VydAppbnRjXzEgLy8gMQpyZXR1cm4KCi8vIHVwZGF0ZQp1cGRhdGVfMDoKcHJvdG8gMCAwCnR4biBTZW5kZXIKZ2xvYmFsIENyZWF0b3JBZGRyZXNzCj09Ci8vIHVuYXV0aG9yaXplZAphc3NlcnQKcHVzaGludCBUTVBMX1VQREFUQUJMRSAvLyBUTVBMX1VQREFUQUJMRQovLyBDaGVjayBhcHAgaXMgdXBkYXRhYmxlCmFzc2VydApyZXRzdWIKCi8vIGRlbGV0ZQpkZWxldGVfMToKcHJvdG8gMCAwCnR4biBTZW5kZXIKZ2xvYmFsIENyZWF0b3JBZGRyZ
XNzCj09Ci8vIHVuYXV0aG9yaXplZAphc3NlcnQKcHVzaGludCBUTVBMX0RFTEVUQUJMRSAvLyBUTVBMX0RFTEVUQUJMRQovLyBDaGVjayBhcHAgaXMgZGVsZXRhYmxlCmFzc2VydApyZXRzdWIKCi8vIGhlbGxvCmhlbGxvXzI6CnByb3RvIDEgMQpwdXNoYnl0ZXMgMHggLy8gIiIKcHVzaGJ5dGVzIDB4NDg2NTZjNmM2ZjJjMjAgLy8gIkhlbGxvLCAiCmZyYW1lX2RpZyAtMQpleHRyYWN0IDIgMApjb25jYXQKZnJhbWVfYnVyeSAwCmZyYW1lX2RpZyAwCmxlbgppdG9iCmV4dHJhY3QgNiAwCmZyYW1lX2RpZyAwCmNvbmNhdApmcmFtZV9idXJ5IDAKcmV0c3ViCgovLyBoZWxsb193b3JsZF9jaGVjawpoZWxsb3dvcmxkY2hlY2tfMzoKcHJvdG8gMSAwCmZyYW1lX2RpZyAtMQpleHRyYWN0IDIgMApwdXNoYnl0ZXMgMHg1NzZmNzI2YzY0IC8vICJXb3JsZCIKPT0KYXNzZXJ0CnJldHN1Yg==\",\n    \"clear\": \"I3ByYWdtYSB2ZXJzaW9uIDgKcHVzaGludCAwIC8vIDAKcmV0dXJu\"\n  },\n  \"state\": {\n    \"global\": {\n      \"num_byte_slices\": 0,\n      \"num_uints\": 0\n    },\n    \"local\": {\n      \"num_byte_slices\": 0,\n      \"num_uints\": 0\n    }\n  },\n  \"schema\": {\n    \"global\": {\n      \"declared\": {},\n      \"reserved\": {}\n    },\n    \"local\": {\n      \"declared\": {},\n      \"reserved\": {}\n    }\n  },\n  \"contract\": {\n    \"name\": \"HelloWorldApp\",\n    \"methods\": [\n      {\n        \"name\": \"hello\",\n        \"args\": [\n          {\n            \"type\": \"string\",\n            \"name\": \"name\"\n          }\n        ],\n        \"returns\": {\n          \"type\": \"string\"\n        },\n        \"desc\": \"Returns Hello, {name}\"\n      },\n      {\n        \"name\": \"hello_world_check\",\n        \"args\": [\n          {\n            \"type\": \"string\",\n            \"name\": \"name\"\n          }\n        ],\n        \"returns\": {\n          \"type\": \"void\"\n        },\n        \"desc\": \"Asserts {name} is \\\"World\\\"\"\n      }\n    ],\n    \"networks\": {}\n  },\n  \"bare_call_config\": {\n    \"delete_application\": \"CALL\",\n    \"no_op\": \"CREATE\",\n    \"update_application\": \"CALL\"\n  }\n}"
  },
  {
    "path": "tests/generate/test_generate_client.py",
    "content": "import shutil\nfrom collections.abc import Callable\nfrom pathlib import Path\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.namer import NamerFactory\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.typed_client_generation import (\n    PYTHON_PYPI_PACKAGE,\n    TYPESCRIPT_NPM_PACKAGE,\n    _snake_case,\n)\nfrom algokit.core.utils import is_windows\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\nfrom tests.utils.which_mock import WhichMock\n\nDirWithAppSpecFactory = Callable[[Path, str], Path]\n\n\ndef _normalize_output(output: str) -> str:\n    return output.replace(\"\\\\\", \"/\")\n\n\ndef _get_npx_command() -> str:\n    return \"npx\" if not is_windows() else \"npx.cmd\"\n\n\ndef _get_npm_command() -> str:\n    return \"npm\" if not is_windows() else \"npm.cmd\"\n\n\ndef _get_python_generate_command(version: str | None, application_json: Path, expected_output_path: Path) -> str:\n    return (\n        f\"pipx run --spec={PYTHON_PYPI_PACKAGE}{f'=={version}' if version is not None else ''} \"\n        f\"algokitgen-py -a {application_json} -o {expected_output_path}\"\n    )\n\n\ndef _get_typescript_generate_command(version: str | None, application_json: Path, expected_output_path: Path) -> str:\n    return (\n        f\"{_get_npx_command()} --yes {TYPESCRIPT_NPM_PACKAGE}{f'@{version}' if version is not None else 'latest'} \"\n        f\"generate -a {application_json} -o {expected_output_path}\"\n    )\n\n\n@pytest.fixture\ndef cwd(tmp_path_factory: TempPathFactory) -> Path:\n    return tmp_path_factory.mktemp(\"cwd\")\n\n\n@pytest.fixture\ndef dir_with_app_spec_factory() -> DirWithAppSpecFactory:\n    def factory(app_spec_dir: Path, app_spec_file_name: str) -> Path:\n        app_spec_example_path = Path(__file__).parent / app_spec_file_name\n        
app_spec_dir.mkdir(exist_ok=True, parents=True)\n        app_spec_path = app_spec_dir / app_spec_file_name\n        shutil.copy(app_spec_example_path, app_spec_path)\n        return app_spec_path\n\n    return factory\n\n\n@pytest.fixture\ndef application_json(cwd: Path, dir_with_app_spec_factory: DirWithAppSpecFactory) -> Path:\n    return dir_with_app_spec_factory(cwd, \"application.json\")\n\n\n@pytest.fixture\ndef arc32_json(cwd: Path, dir_with_app_spec_factory: DirWithAppSpecFactory) -> Path:\n    return dir_with_app_spec_factory(cwd, \"app.arc32.json\")\n\n\n@pytest.fixture\ndef arc56_json(cwd: Path, dir_with_app_spec_factory: DirWithAppSpecFactory) -> Path:\n    return dir_with_app_spec_factory(cwd, \"app.arc56.json\")\n\n\n@pytest.fixture(autouse=True)\ndef which_mock(mocker: MockerFixture) -> WhichMock:\n    which_mock = WhichMock()\n    which_mock.add(\"npx\")\n    which_mock.add(\"npm\")\n    which_mock.add(\"pipx\")\n    mocker.patch(\"algokit.core.typed_client_generation.shutil.which\").side_effect = which_mock.which\n    return which_mock\n\n\ndef test_generate_help() -> None:\n    result = invoke(\"generate -h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_generate_no_options(application_json: Path) -> None:\n    result = invoke(\"generate client .\", cwd=application_json.parent)\n    assert result.exit_code != 0\n    verify(result.output)\n\n\n@pytest.mark.parametrize(\n    (\"options\", \"expected_output_path\"),\n    [\n        (\"-o client.py\", \"client.py\"),\n        (\"--output {contract_name}.py\", \"hello_world_app.py\"),\n        (\"-l python\", \"hello_world_app_client.py\"),\n        (\"-o client.ts --language python\", \"client.ts\"),\n        (\"-o client.py --language python --version 1.1.2\", \"client.py\"),\n        (\"-l python -v 1.1.0\", \"hello_world_app_client.py\"),\n        (\"-o client.py -p --mode minimal\", \"client.py\"),\n    ],\n)\ndef test_generate_client_python(\n    proc_mock: 
ProcMock,\n    application_json: Path,\n    options: str,\n    expected_output_path: Path,\n    request: pytest.FixtureRequest,\n) -> None:\n    proc_mock.should_bad_exit_on([\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"])\n    proc_mock.should_bad_exit_on([\"pipx\", \"list\", \"--short\"])\n\n    result = invoke(f\"generate client {application_json.name} {options}\", cwd=application_json.parent)\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output),\n        namer=PyTestNamer(request),\n        options=NamerFactory.with_parameters(*options.split()),\n    )\n    version = options.split()[-1] if \"--version\" in options or \"-v\" in options else None\n    assert len(proc_mock.called) == 4  # noqa: PLR2004\n    assert \" \".join(proc_mock.called[3].command).startswith(\n        _get_python_generate_command(version, application_json, expected_output_path)\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_python_generator_is_installed_in_project(application_json: Path, proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\n        [\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"],\n        output=[f\"{PYTHON_PYPI_PACKAGE} 1.1.2 Algorand typed client Generator\", \"└── algokit-utils 2.2.1\"],\n    )\n\n    result = invoke(f\"generate client -o client.py -l python {application_json.name}\", cwd=application_json.parent)\n\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_python_generator_is_installed_globally(application_json: Path, proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on([\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"])\n    proc_mock.set_output(\n        [\"pipx\", \"list\", \"--short\"],\n        output=[\"algokit 1.13.0\", \"poetry 1.6.1\", f\"{PYTHON_PYPI_PACKAGE} 1.1.2\"],\n    )\n\n    result = invoke(f\"generate client -o client.py -l python {application_json.name}\", 
cwd=application_json.parent)\n\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_python_generator_version_is_not_installed_anywhere(application_json: Path, proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\n        [\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"],\n        output=[f\"{PYTHON_PYPI_PACKAGE} 1.1.2 Algorand typed client Generator\", \"└── algokit-utils 2.2.1\"],\n    )\n    proc_mock.set_output(\n        [\"pipx\", \"list\", \"--short\"],\n        output=[\"algokit 1.13.0\", \"poetry 1.6.1\", f\"{PYTHON_PYPI_PACKAGE} 1.1.2\"],\n    )\n\n    result = invoke(\n        f\"generate client --version 1.2.0 -o client.py -l python {application_json.name}\", cwd=application_json.parent\n    )\n\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_pipx_missing(application_json: Path, mocker: MockerFixture, proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on([\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"])\n    mocker.patch(\"algokit.core.utils.get_candidate_pipx_commands\", return_value=[])\n    result = invoke(f\"generate client -o client.py -l python {application_json.name}\", cwd=application_json.parent)\n\n    assert result.exit_code == 1\n    verify(_normalize_output(result.output))\n\n\n@pytest.mark.parametrize(\n    (\"options\", \"expected_output_path\"),\n    [\n        (\"-o client.py\", \"client.py\"),\n    ],\n)\ndef test_generate_client_python_arc32_filename(\n    proc_mock: ProcMock, arc32_json: Path, options: str, expected_output_path: Path\n) -> None:\n    proc_mock.should_bad_exit_on([\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"])\n    proc_mock.should_bad_exit_on([\"pipx\", \"list\", \"--short\"])\n\n    result = invoke(f\"generate client {options} {arc32_json.name}\", cwd=arc32_json.parent)\n\n    assert result.exit_code == 0\n    
verify(_normalize_output(result.output), options=NamerFactory.with_parameters(*options.split()))\n    assert len(proc_mock.called) == 4  # noqa: PLR2004\n    assert proc_mock.called[3].command == _get_python_generate_command(None, arc32_json, expected_output_path).split()\n\n\n@pytest.mark.parametrize(\n    (\"options\", \"expected_output_path\"),\n    [\n        (\"-o client.py\", \"client.py\"),\n    ],\n)\ndef test_generate_client_python_arc56_filename(\n    proc_mock: ProcMock,\n    arc56_json: Path,\n    options: str,\n    expected_output_path: Path,\n) -> None:\n    proc_mock.should_bad_exit_on([\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"])\n    proc_mock.should_bad_exit_on([\"pipx\", \"list\", \"--short\"])\n\n    result = invoke(f\"generate client {options} {arc56_json.name}\", cwd=arc56_json.parent)\n\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output), options=NamerFactory.with_parameters(*options.split()))\n    assert len(proc_mock.called) == 4  # noqa: PLR2004\n    assert proc_mock.called[3].command == _get_python_generate_command(None, arc56_json, expected_output_path).split()\n\n\n@pytest.mark.parametrize(\n    (\"options\", \"expected_output_path\"),\n    [\n        (\"-o client.py\", \"client.py\"),\n    ],\n)\ndef test_generate_client_python_multiple_app_specs_in_directory(\n    proc_mock: ProcMock,\n    arc56_json: Path,\n    arc32_json: Path,\n    application_json: Path,\n    options: str,\n    expected_output_path: Path,\n) -> None:\n    proc_mock.should_bad_exit_on([\"poetry\", \"show\", PYTHON_PYPI_PACKAGE, \"--tree\"])\n    proc_mock.should_bad_exit_on([\"pipx\", \"list\", \"--short\"])\n\n    result = invoke(f\"generate client {options} .\", cwd=arc56_json.parent)\n\n    # Confirm multiple app specs are in the input directory\n    assert arc32_json.parent == arc56_json.parent\n    assert application_json.parent == arc56_json.parent\n\n    assert result.exit_code == 0\n    
verify(_normalize_output(result.output), options=NamerFactory.with_parameters(*options.split()))\n    # only a single generate call is made for the arc56 app spec\n    assert len(proc_mock.called) == 4  # noqa: PLR2004\n    assert proc_mock.called[3].command == _get_python_generate_command(None, arc56_json, expected_output_path).split()\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\")\n@pytest.mark.parametrize(\n    (\"options\", \"expected_output_path\"),\n    [\n        (\"-o client.ts\", \"client.ts\"),\n        (\"--output {contract_name}.ts\", \"HelloWorldApp.ts\"),\n        (\"-l typescript\", \"HelloWorldAppClient.ts\"),\n        (\"-o client.py --language typescript\", \"client.py\"),\n        (\"-o client.ts --language typescript --version 3.0.0\", \"client.ts\"),\n        (\"-l typescript -v 2.6.0\", \"HelloWorldAppClient.ts\"),\n        (\"-o client.ts -pn --mode minimal\", \"client.ts\"),\n    ],\n)\ndef test_generate_client_typescript(\n    proc_mock: ProcMock,\n    application_json: Path,\n    options: str,\n    expected_output_path: Path,\n    request: pytest.FixtureRequest,\n) -> None:\n    npm_command = _get_npm_command()\n    proc_mock.should_bad_exit_on([npm_command, \"ls\"])\n    proc_mock.should_bad_exit_on([npm_command, \"ls\", \"--global\"])\n\n    result = invoke(f\"generate client {application_json.name} {options}\", cwd=application_json.parent)\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output),\n        namer=PyTestNamer(request),\n        options=NamerFactory.with_parameters(*options.split()),\n    )\n    version = options.split()[-1] if \"--version\" in options or \"-v\" in options else \"latest\"\n    assert len(proc_mock.called) == 3  # noqa: PLR2004\n    assert \" \".join(proc_mock.called[2].command).startswith(\n        _get_typescript_generate_command(version, application_json, expected_output_path)\n    )\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\")\ndef 
test_typescript_generator_is_installed_in_project(\n    application_json: Path, proc_mock: ProcMock, request: pytest.FixtureRequest\n) -> None:\n    proc_mock.set_output(\n        [_get_npm_command(), \"ls\"],\n        output=[\"/Users/user/my-project\", \"├── test@1.2.3\", f\"└── {TYPESCRIPT_NPM_PACKAGE}@1.1.2\"],\n    )\n\n    result = invoke(f\"generate client -o client.py -l typescript {application_json.name}\", cwd=application_json.parent)\n\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\")\ndef test_typescript_generator_is_installed_globally(\n    application_json: Path, proc_mock: ProcMock, request: pytest.FixtureRequest\n) -> None:\n    proc_mock.should_bad_exit_on([_get_npm_command(), \"ls\"])\n    proc_mock.set_output(\n        [_get_npm_command(), \"--global\", \"ls\"],\n        output=[\"/Users/user/.nvm/versions/node/v20.11.0/lib\", \"├── test@1.2.3\", f\"└── {TYPESCRIPT_NPM_PACKAGE}@1.1.2\"],\n    )\n\n    result = invoke(f\"generate client -o client.py -l typescript {application_json.name}\", cwd=application_json.parent)\n\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\")\ndef test_typescript_generator_version_is_not_installed_anywhere(\n    application_json: Path, proc_mock: ProcMock, request: pytest.FixtureRequest\n) -> None:\n    proc_mock.set_output(\n        [_get_npm_command(), \"ls\"],\n        output=[\"/Users/user/my-project\", \"├── test@1.2.3\", f\"└── {TYPESCRIPT_NPM_PACKAGE}@1.1.2\"],\n    )\n    proc_mock.set_output(\n        [_get_npm_command(), \"--global\", \"ls\"],\n        output=[\"/Users/user/.nvm/versions/node/v20.11.0/lib\", \"├── test@1.2.3\", f\"└── {TYPESCRIPT_NPM_PACKAGE}@1.1.2\"],\n    )\n\n    result = invoke(\n        f\"generate client --version 1.2.0 -o client.py -l typescript 
{application_json.name}\",\n        cwd=application_json.parent,\n    )\n\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_npx_missing(application_json: Path, which_mock: WhichMock) -> None:\n    which_mock.remove(\"npx\")\n    result = invoke(f\"generate client -o client.ts {application_json.name}\", cwd=application_json.parent)\n\n    assert result.exit_code == 1\n    verify(_normalize_output(result.output))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\")\ndef test_npx_failed(\n    proc_mock: ProcMock,\n    application_json: Path,\n    request: pytest.FixtureRequest,\n) -> None:\n    proc_mock.should_bad_exit_on(_get_typescript_generate_command(\"latest\", application_json, Path(\"client.ts\")))\n    result = invoke(f\"generate client -o client.ts {application_json.name}\", cwd=application_json.parent)\n\n    assert result.exit_code == -1\n    verify(\n        _normalize_output(result.output),\n        namer=PyTestNamer(request),\n    )\n\n\ndef test_generate_client_recursive(\n    proc_mock: ProcMock, cwd: Path, dir_with_app_spec_factory: DirWithAppSpecFactory\n) -> None:\n    dir_paths = [\n        cwd / \"dir1\",\n        cwd / \"dir2\",\n        cwd / \"dir2\" / \"sub_dir\",\n    ]\n    for dir_path in dir_paths:\n        dir_with_app_spec_factory(dir_path, \"application.json\")\n\n    result = invoke(\"generate client -o {app_spec_dir}/output.py .\", cwd=cwd)\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output))\n\n    for index, dir_path in enumerate(dir_paths):\n        output_path = dir_path / \"output.py\"\n        assert proc_mock.called[index].command[-1] == str(output_path)\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_generate_client_no_app_spec_found(cwd: Path) -> None:\n    result = invoke(\"generate client -o output.py .\", cwd=cwd)\n    assert result.exit_code == 1\n    
verify(_normalize_output(result.output))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_generate_client_output_path_is_dir(application_json: Path) -> None:\n    cwd = application_json.parent\n    (cwd / \"hello_world_app.py\").mkdir()\n\n    result = invoke(\"generate client -o {contract_name}.py .\", cwd=cwd)\n    assert result.exit_code == 0\n    verify(_normalize_output(result.output))\n\n\ndef test_snake_case() -> None:\n    assert _snake_case(\"SnakeCase\") == \"snake_case\"\n    assert _snake_case(\"snakeCase\") == \"snake_case\"\n    assert _snake_case(\"snake-case\") == \"snake_case\"\n    assert _snake_case(\"snake_case\") == \"snake_case\"\n    assert _snake_case(\"SNAKE_CASE\") == \"snake_case\"\n    assert _snake_case(\"Snake_Case\") == \"snake_case\"\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_no_app_spec_found.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nError: No app specs found\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_output_path_is_dir.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nERROR: Could not output to hello_world_app.py as it already exists and is a directory\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python[--output {contract_name}.py-hello_world_app.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to hello_world_app.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/application.json -o hello_world_app.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python[-l python -v 1.1.0-hello_world_app_client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to hello_world_app_client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator==1.1.0 algokitgen-py -a {current_working_directory}/application.json -o hello_world_app_client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python[-l python-hello_world_app_client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to hello_world_app_client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/application.json -o hello_world_app_client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python[-o client.py --language python --version 1.1.2-client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator==1.1.2 algokitgen-py -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python[-o client.py -p --mode minimal-client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/application.json -o client.py -p --mode minimal' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python[-o client.py-client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python[-o client.ts --language python-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python_arc32_filename.-o.client.py.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/app.arc32.json and writing to client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/app.arc32.json -o client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python_arc56_filename.-o.client.py.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/app.arc56.json and writing to client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/app.arc56.json -o client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_python_multiple_app_specs_in_directory.-o.client.py.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/app.arc56.json and writing to client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator algokitgen-py -a {current_working_directory}/app.arc56.json -o client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_recursive.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nGenerating Python client code for application specified in {current_working_directory}/dir1/application.json and writing to {current_working_directory}/dir1/output.py\nDEBUG: Running 'poetry run algokitgen-py -a {current_working_directory}/dir1/application.json -o {current_working_directory}/dir1/output.py' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nSTDOUT\nSTDERR\nGenerating Python client code for application specified in {current_working_directory}/dir2/application.json and writing to {current_working_directory}/dir2/output.py\nDEBUG: Running 'poetry run algokitgen-py -a {current_working_directory}/dir2/application.json -o {current_working_directory}/dir2/output.py' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nSTDOUT\nSTDERR\nGenerating Python client code for application specified in {current_working_directory}/dir2/sub_dir/application.json and writing to {current_working_directory}/dir2/sub_dir/output.py\nDEBUG: Running 'poetry run algokitgen-py -a {current_working_directory}/dir2/sub_dir/application.json -o {current_working_directory}/dir2/sub_dir/output.py' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[linux---output {contract_name}.ts-HelloWorldApp.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldApp.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o HelloWorldApp.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[linux--l typescript -v 2.6.0-HelloWorldAppClient.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldAppClient.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@2.6.0 generate -a {current_working_directory}/application.json -o HelloWorldAppClient.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[linux--l typescript-HelloWorldAppClient.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldAppClient.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o HelloWorldAppClient.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[linux--o client.py --language typescript-client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[linux--o client.ts --language typescript --version 3.0.0-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@3.0.0 generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[linux--o client.ts -pn --mode minimal-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts -pn --mode minimal' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[linux--o client.ts-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[macOS---output {contract_name}.ts-HelloWorldApp.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldApp.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o HelloWorldApp.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[macOS--l typescript -v 2.6.0-HelloWorldAppClient.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldAppClient.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@2.6.0 generate -a {current_working_directory}/application.json -o HelloWorldAppClient.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[macOS--l typescript-HelloWorldAppClient.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldAppClient.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o HelloWorldAppClient.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[macOS--o client.py --language typescript-client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[macOS--o client.ts --language typescript --version 3.0.0-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@3.0.0 generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[macOS--o client.ts -pn --mode minimal-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts -pn --mode minimal' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[macOS--o client.ts-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[windows---output {contract_name}.ts-HelloWorldApp.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldApp.ts\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o HelloWorldApp.ts' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[windows--l typescript -v 2.6.0-HelloWorldAppClient.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldAppClient.ts\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@2.6.0 generate -a {current_working_directory}/application.json -o HelloWorldAppClient.ts' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[windows--l typescript-HelloWorldAppClient.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to HelloWorldAppClient.ts\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o HelloWorldAppClient.ts' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[windows--o client.py --language typescript-client.py].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[windows--o client.ts --language typescript --version 3.0.0-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@3.0.0 generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[windows--o client.ts -pn --mode minimal-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts -pn --mode minimal' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_client_typescript[windows--o client.ts-client.ts].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_help.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client  Create a typed ApplicationClient from an ARC-32/56 application.json\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_generate_no_options.approved.txt",
    "content": "Error: One of --language or --output is required to determine the client language to generate\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_npx_failed[linux].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\nClient generation failed for {current_working_directory}/application.json.\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_npx_failed[macOS].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\nClient generation failed for {current_working_directory}/application.json.\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_npx_failed[windows].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.ts\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@latest generate -a {current_working_directory}/application.json -o client.ts' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\nClient generation failed for {current_working_directory}/application.json.\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_npx_missing.approved.txt",
    "content": "Error: Unable to find npx install so that the `@algorandfoundation/algokit-client-generator` can be run; please install npx via https://www.npmjs.com/package/npx and then try `algokit generate client ...` again.\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_pipx_missing.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nError: Unable to find pipx install so that the `algokit-client-generator` can be run; please install pipx via https://pypa.github.io/pipx/ and then try `algokit generate client ...` again.\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_python_generator_is_installed_globally.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: algokit 1.13.0\nDEBUG: pipx: poetry 1.6.1\nDEBUG: pipx: algokit-client-generator 1.1.2\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'algokitgen-py -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: algokitgen-py: STDOUT\nDEBUG: algokitgen-py: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_python_generator_is_installed_in_project.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: algokit-client-generator 1.1.2 Algorand typed client Generator\nDEBUG: poetry: └── algokit-utils 2.2.1\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'poetry run algokitgen-py -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_python_generator_version_is_not_installed_anywhere.approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'poetry show algokit-client-generator --tree' in '{current_working_directory}'\nDEBUG: poetry: algokit-client-generator 1.1.2 Algorand typed client Generator\nDEBUG: poetry: └── algokit-utils 2.2.1\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'pipx list --short' in '{current_working_directory}'\nDEBUG: pipx: algokit 1.13.0\nDEBUG: pipx: poetry 1.6.1\nDEBUG: pipx: algokit-client-generator 1.1.2\nDEBUG: No matching installed client generator found, run client generator via pipx\nGenerating Python client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'pipx run --spec=algokit-client-generator==1.2.0 algokitgen-py -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_is_installed_globally[linux].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/.nvm/versions/node/v20.11.0/lib\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx @algorandfoundation/algokit-client-generator generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_is_installed_globally[macOS].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: STDOUT\nDEBUG: npm: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/.nvm/versions/node/v20.11.0/lib\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx @algorandfoundation/algokit-client-generator generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_is_installed_globally[windows].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: STDOUT\nDEBUG: npm.cmd: STDERR\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: /Users/user/.nvm/versions/node/v20.11.0/lib\nDEBUG: npm.cmd: ├── test@1.2.3\nDEBUG: npm.cmd: └── @algorandfoundation/algokit-client-generator@1.1.2\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx.cmd @algorandfoundation/algokit-client-generator generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_is_installed_in_project[linux].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/my-project\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx @algorandfoundation/algokit-client-generator generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_is_installed_in_project[macOS].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/my-project\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx @algorandfoundation/algokit-client-generator generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_is_installed_in_project[windows].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: /Users/user/my-project\nDEBUG: npm.cmd: ├── test@1.2.3\nDEBUG: npm.cmd: └── @algorandfoundation/algokit-client-generator@1.1.2\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx.cmd @algorandfoundation/algokit-client-generator generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_version_is_not_installed_anywhere[linux].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/my-project\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/.nvm/versions/node/v20.11.0/lib\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@1.2.0 generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_version_is_not_installed_anywhere[macOS].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/my-project\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm: /Users/user/.nvm/versions/node/v20.11.0/lib\nDEBUG: npm: ├── test@1.2.3\nDEBUG: npm: └── @algorandfoundation/algokit-client-generator@1.1.2\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx --yes @algorandfoundation/algokit-client-generator@1.2.0 generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx: STDOUT\nDEBUG: npx: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_client.test_typescript_generator_version_is_not_installed_anywhere[windows].approved.txt",
    "content": "DEBUG: Searching for project installed client generator\nDEBUG: Running 'npm.cmd ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: /Users/user/my-project\nDEBUG: npm.cmd: ├── test@1.2.3\nDEBUG: npm.cmd: └── @algorandfoundation/algokit-client-generator@1.1.2\nDEBUG: Searching for globally installed client generator\nDEBUG: Running 'npm.cmd --global ls --no-unicode' in '{current_working_directory}'\nDEBUG: npm.cmd: /Users/user/.nvm/versions/node/v20.11.0/lib\nDEBUG: npm.cmd: ├── test@1.2.3\nDEBUG: npm.cmd: └── @algorandfoundation/algokit-client-generator@1.1.2\nDEBUG: No matching installed client generator found, run client generator via npx\nGenerating TypeScript client code for application specified in {current_working_directory}/application.json and writing to client.py\nDEBUG: Running 'npx.cmd --yes @algorandfoundation/algokit-client-generator@1.2.0 generate -a {current_working_directory}/application.json -o client.py' in '{current_working_directory}'\nDEBUG: npx.cmd: STDOUT\nDEBUG: npx.cmd: STDERR\nSTDOUT\nSTDERR\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.py",
    "content": "from collections.abc import Callable\nfrom pathlib import Path\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.conf import ALGOKIT_CONFIG\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.which_mock import WhichMock\n\nDirWithAppSpecFactory = Callable[[Path], Path]\n\n\n@pytest.fixture\ndef cwd_with_custom_folder(tmp_path_factory: TempPathFactory) -> tuple[Path, str]:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"smart_contract\").mkdir()\n    # Required for windows compatibility\n    return cwd, str((cwd / \"smart_contract\").absolute()).replace(\"\\\\\", r\"\\\\\")\n\n\n@pytest.fixture\ndef which_mock(mocker: MockerFixture) -> WhichMock:\n    which_mock = WhichMock()\n    which_mock.add(\"git\")\n    mocker.patch(\"algokit.cli.generate.shutil.which\").side_effect = which_mock.which\n    return which_mock\n\n\ndef test_generate_custom_generate_commands_no_toml(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\"generate\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_generate_custom_generate_commands_invalid_generic_generator(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    (cwd / ALGOKIT_CONFIG).write_text(\n        \"\"\"\n[generate]\ndescription = \"invalid\"\npath = \"invalid\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n\n    result = invoke(\"generate\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_generate_custom_generate_commands_valid_generator(\n    cwd_with_custom_folder: tuple[Path, str],\n) -> None:\n    cwd, smart_contract_path = cwd_with_custom_folder\n    (cwd / ALGOKIT_CONFIG).write_text(\n        f\"\"\"\n[generate.smart_contract]\ndescription = \"Generates a new smart contract\"\npath = 
\"{smart_contract_path}\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n\n    result = invoke(\"generate\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_generate_custom_generate_command_missing_git_valid_generator(\n    cwd_with_custom_folder: tuple[Path, str], which_mock: WhichMock\n) -> None:\n    which_mock.remove(\"git\")\n\n    cwd, smart_contract_path = cwd_with_custom_folder\n    (cwd / ALGOKIT_CONFIG).write_text(\n        f\"\"\"\n[generate.smart_contract]\ndescription = \"Generates a new smart contract\"\npath = \"{smart_contract_path}\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n\n    result = invoke(\"generate smart-contract\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(\n        result.output,\n    )\n\n\ndef test_generate_custom_generate_commands_valid_generator_run(\n    cwd_with_custom_folder: tuple[Path, str], mocker: MockerFixture\n) -> None:\n    cwd, smart_contract_path = cwd_with_custom_folder\n    (cwd / ALGOKIT_CONFIG).write_text(\n        f\"\"\"\n[generate.smart_contract]\ndescription = \"Generates a new smart contract\"\npath = \"{smart_contract_path}\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n    mock_copier_worker_cls = mocker.patch(\"copier._main.Worker\")\n    mock_copier_worker_cls.return_value.__enter__.return_value.src_path = str(cwd / \"smart_contract\")\n\n    result = invoke(\"generate smart-contract\", cwd=cwd, input=\"y\\n\")\n\n    assert result.exit_code == 0\n    assert mock_copier_worker_cls.call_args.kwargs[\"src_path\"] == str(cwd / \"smart_contract\")\n    verify(result.output)\n\n\ndef test_generate_custom_generate_commands_valid_generator_no_description(\n    cwd_with_custom_folder: tuple[Path, str],\n) -> None:\n    cwd, smart_contract_path = cwd_with_custom_folder\n    (cwd / ALGOKIT_CONFIG).write_text(\n        f\"\"\"\n[generate.smart_contract]\npath = \"{smart_contract_path}\"\n    \"\"\".strip(),\n        
encoding=\"utf-8\",\n    )\n\n    result = invoke(\"generate\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_generate_custom_generate_commands_valid_generator_invalid_path(\n    tmp_path_factory: TempPathFactory,\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(\n        \"\"\"\n[generate.smart_contract]\ndescription = \"Generates a new smart contract\"\npath = \"invalidpath\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n\n    result = invoke(\"generate\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_generate_custom_generate_commands_valid_generator_run_with_python_path(\n    dummy_algokit_template_with_python_task: dict[str, Path],\n) -> None:\n    cwd = dummy_algokit_template_with_python_task[\"cwd\"]\n    template_path = str(dummy_algokit_template_with_python_task[\"template_path\"]).replace(\"\\\\\", r\"\\\\\")\n    (cwd / ALGOKIT_CONFIG).write_text(\n        f\"\"\"\n[generate.smart_contract]\ndescription = \"Generates a new smart contract\"\npath = \"{template_path}\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n\n    result = invoke(\"generate smart-contract\", cwd=cwd, input=\"y\\n\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_command_missing_git_valid_generator.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nError: Git not found; please install git and add to path.\nSee https://github.com/git-guides/install-git for more information.\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_command_no_git_valid_generator.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nError: Git not found; please install git and add to path.\nSee https://github.com/git-guides/install-git for more information.\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_commands_invalid_generic_generator.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Invalid generator configuration key \"description\" of value \"invalid\", skipping\nDEBUG: Invalid generator configuration key \"path\" of value \"invalid\", skipping\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client  Create a typed ApplicationClient from an ARC-32/56 application.json\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_commands_no_toml.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client  Create a typed ApplicationClient from an ARC-32/56 application.json\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_commands_valid_generator.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client          Create a typed ApplicationClient from an ARC-32/56...\n  smart-contract  Generates a new smart contract\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_commands_valid_generator_invalid_path.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nWARNING: Path 'invalidpath' for generator 'smart_contract' does not exist, skipping\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client  Create a typed ApplicationClient from an ARC-32/56 application.json\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_commands_valid_generator_no_description.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsage: algokit generate [OPTIONS] COMMAND [ARGS]...\n\n  Generate code for an Algorand project.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  client          Create a typed ApplicationClient from an ARC-32/56...\n  smart-contract  Generator command description is not supplied.\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_commands_valid_generator_run.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nYou are about to run a generator. Please make sure it's from a trusted source (for example, official AlgoKit Templates). Do you want to proceed? [y/N]: y\nDEBUG: Running generator in {current_working_directory}/smart_contract\nGenerator {current_working_directory}/smart_contract executed successfully\n"
  },
  {
    "path": "tests/generate/test_generate_custom_generate_commands.test_generate_custom_generate_commands_valid_generator_run_with_python_path.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nYou are about to run a generator. Please make sure it's from a trusted source (for example, official AlgoKit Templates). Do you want to proceed? [y/N]: y\nDEBUG: Running generator in {current_working_directory}/dummy_template\nNo git tags found in template; using HEAD as ref\nGenerator {current_working_directory}/dummy_template executed successfully\n"
  },
  {
    "path": "tests/goal/__init__.py",
    "content": ""
  },
  {
    "path": "tests/goal/test_goal.py",
    "content": "import json\nfrom pathlib import Path\nfrom subprocess import CompletedProcess\n\nimport pytest\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.sandbox import (\n    ALGOD_HEALTH_URL,\n    INDEXER_HEALTH_URL,\n    get_algod_network_template,\n    get_config_json,\n    get_docker_compose_yml,\n)\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\nDUMMY_CONTRACT_TEAL = \"\"\"\\n#pragma version 8\\nint 1\\nreturn\\n\"\"\"\n\n\ndef _normalize_output(output: str) -> str:\n    return output.replace(\"\\\\\", \"/\").replace(\"docker\", \"{container_engine}\").replace(\"podman\", \"{container_engine}\")\n\n\n@pytest.fixture\ndef _health_success(httpx_mock: HTTPXMock) -> None:\n    httpx_mock.add_response(url=ALGOD_HEALTH_URL)\n    httpx_mock.add_response(url=INDEXER_HEALTH_URL)\n\n\n@pytest.fixture\ndef cwd(tmp_path_factory: pytest.TempPathFactory) -> Path:\n    return tmp_path_factory.mktemp(\"cwd\")\n\n\n@pytest.fixture\ndef mocked_goal_mount_path(cwd: Path, monkeypatch: pytest.MonkeyPatch) -> Path:\n    mocked_goal_mount = cwd / \"goal_mount\"\n    mocked_goal_mount.mkdir()\n    monkeypatch.setattr(\"algokit.cli.goal.get_volume_mount_path_local\", lambda directory_name: cwd / \"goal_mount\")  # noqa: ARG005\n    return mocked_goal_mount\n\n\n@pytest.fixture\ndef _setup_latest_dummy_compose(app_dir_mock: AppDirs) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(get_docker_compose_yml())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(get_config_json())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\").write_text(get_algod_network_template())\n\n\n@pytest.fixture\ndef _setup_input_files(cwd: Path, request: 
pytest.FixtureRequest) -> None:\n    files = request.param\n    for file in files:\n        if \"name\" in file:\n            if \"content\" in file:\n                (cwd / file[\"name\"]).write_text(file[\"content\"], encoding=\"utf-8\")\n            else:\n                (cwd / file[\"name\"]).touch()\n\n            assert (cwd / file[\"name\"]).exists()\n\n\n@pytest.fixture\ndef _mock_proc_with_running_localnet(proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\"docker compose ls --format json --filter name=algokit_sandbox*\", [json.dumps([])])\n\n\n@pytest.fixture\ndef _mock_proc_with_algod_running_state(proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\n        cmd=[\"docker\", \"compose\", \"ps\", \"algod\", \"--format\", \"json\"],\n        output=[json.dumps([{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}])],\n    )\n\n\ndef dump_file(cwd: Path) -> None:\n    (cwd / \"approval.compiled\").write_text(\n        \"\"\"\nI AM COMPILED!\n\"\"\",\n        encoding=\"utf-8\",\n    )\n\n\ndef dump_json_file(cwd: Path) -> None:\n    (cwd / \"balance_record.json\").write_text(\n        \"\"\"\nI AM COMPILED!\n\"\"\",\n        encoding=\"utf-8\",\n    )\n\n\ndef test_goal_help() -> None:\n    result = invoke(\"goal -h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\",\n    \"_setup_latest_dummy_compose\",\n    \"mocked_goal_mount_path\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\ndef test_goal_no_args(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"goal\")\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    
\"_mock_proc_with_algod_running_state\",\n)\ndef test_goal_console(mocker: MockerFixture, app_dir_mock: AppDirs) -> None:\n    mocker.patch(\"algokit.core.proc.subprocess_run\").return_value = CompletedProcess(\n        [\"docker\", \"exec\"], 0, \"STDOUT+STDERR\"\n    )\n\n    result = invoke(\"goal --console\")\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\"_setup_latest_dummy_compose\", \"_mock_proc_with_running_localnet\", \"_health_success\")\ndef test_goal_console_algod_not_created(app_dir_mock: AppDirs, proc_mock: ProcMock, mocker: MockerFixture) -> None:\n    proc_mock.set_output([\"docker\", \"compose\", \"ps\", \"algod\", \"--format\", \"json\"], output=[json.dumps([])])\n\n    mocker.patch(\"algokit.core.proc.subprocess_run\").return_value = CompletedProcess(\n        [\"docker\", \"exec\"], 0, \"STDOUT+STDERR\"\n    )\n\n    result = invoke(\"goal --console\")\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\ndef test_goal_console_failed(app_dir_mock: AppDirs, mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.proc.subprocess_run\").return_value = CompletedProcess(\n        [\"docker\", \"exec\"], 1, \"STDOUT+STDERR\"\n    )\n\n    result = invoke(\"goal --console\")\n\n    assert result.exit_code == 1\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\",\n    \"_setup_latest_dummy_compose\",\n    
\"mocked_goal_mount_path\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\ndef test_goal_simple_args(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"goal account list\")\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\",\n    \"_setup_latest_dummy_compose\",\n    \"mocked_goal_mount_path\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\ndef test_goal_complex_args(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"goal account export -a RKTAZY2ZLKUJBHDVVA3KKHEDK7PRVGIGOZAUUIZBNK2OEP6KQGEXKKUYUY\")\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\ndef test_goal_start_without_docker(proc_mock: ProcMock) -> None:\n    proc_mock.should_fail_on(\"docker version\")\n\n    result = invoke(\"goal\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_goal_start_without_docker_engine_running(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker version\")\n\n    result = invoke(\"goal\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\n    \"_setup_input_files\",\n    \"_setup_latest_dummy_compose\",\n    \"mocked_goal_mount_path\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\n@pytest.mark.parametrize(\"_setup_input_files\", [[{\"name\": \"transactions.txt\"}]], indirect=True)\ndef test_goal_simple_args_with_input_file(\n    proc_mock: ProcMock,\n    cwd: Path,\n    app_dir_mock: AppDirs,\n) -> None:\n    expected_arguments = [\n        \"docker\",\n        \"exec\",\n        \"--interactive\",\n        
\"--workdir\",\n        \"/root\",\n        \"algokit_sandbox_algod\",\n        \"goal\",\n        \"clerk\",\n        \"group\",\n    ]\n\n    proc_mock.set_output(expected_arguments, output=[\"File compiled\"])\n    result = invoke(\"goal clerk group transactions.txt\", cwd=cwd)\n\n    # Check if the path in command has changed in preprocess step\n    assert _normalize_output(proc_mock.called[3].command[9]) == \"/root/goal_mount/transactions.txt\"\n\n    # Check for the result status\n    assert result.exit_code == 0\n\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"mocked_goal_mount_path\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\ndef test_goal_simple_args_with_output_file(proc_mock: ProcMock, cwd: Path, app_dir_mock: AppDirs) -> None:\n    expected_arguments = [\n        \"docker\",\n        \"exec\",\n        \"--interactive\",\n        \"--workdir\",\n        \"/root\",\n        \"algokit_sandbox_algod\",\n        \"goal\",\n        \"account\",\n        \"dump\",\n    ]\n\n    proc_mock.set_output(\n        expected_arguments,\n        output=[\"File compiled\"],\n        side_effect=dump_json_file,\n        side_effect_args={\"cwd\": cwd},\n    )\n    result = invoke(\"goal account dump -o balance_record.json\")\n\n    # Check if the path in command has changed in preprocess step\n    assert _normalize_output(proc_mock.called[3].command[10]) == \"/root/goal_mount/balance_record.json\"\n\n    # Check for the result status\n    assert result.exit_code == 0\n\n    # Check if the output file is actually created and copied in cwd in postprocess step\n    assert (cwd / \"balance_record.json\").exists()\n\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), 
\"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"mocked_goal_mount_path\",\n    \"_setup_input_files\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\n@pytest.mark.parametrize(\n    \"_setup_input_files\", [[{\"name\": \"approval.teal\", \"content\": DUMMY_CONTRACT_TEAL}]], indirect=True\n)\ndef test_goal_simple_args_with_input_output_files(\n    proc_mock: ProcMock,\n    cwd: Path,\n    app_dir_mock: AppDirs,\n) -> None:\n    expected_arguments = [\n        \"docker\",\n        \"exec\",\n        \"--interactive\",\n        \"--workdir\",\n        \"/root\",\n        \"algokit_sandbox_algod\",\n        \"goal\",\n        \"clerk\",\n        \"compile\",\n    ]\n\n    proc_mock.set_output(\n        expected_arguments, output=[\"File compiled\"], side_effect=dump_file, side_effect_args={\"cwd\": cwd}\n    )\n\n    result = invoke(\"goal clerk compile approval.teal -o approval.compiled\", cwd=cwd)\n\n    # Check if the paths in command have changed in preprocess step\n    assert _normalize_output(proc_mock.called[3].command[9]) == \"/root/goal_mount/approval.teal\"\n    assert _normalize_output(proc_mock.called[3].command[11]) == \"/root/goal_mount/approval.compiled\"\n\n    # Check for the result status\n    assert result.exit_code == 0\n\n    # Check if the output file is created and copied in cwd in postprocess step\n    assert (cwd / \"approval.compiled\").exists()\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"mocked_goal_mount_path\",\n    \"_setup_input_files\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\n@pytest.mark.parametrize(\n    \"_setup_input_files\",\n    [\n        [\n            {\"name\": \"approval1.teal\", \"content\": 
DUMMY_CONTRACT_TEAL},\n            {\"name\": \"approval2.teal\", \"content\": DUMMY_CONTRACT_TEAL},\n        ]\n    ],\n    indirect=True,\n)\ndef test_goal_simple_args_with_multiple_input_output_files(\n    proc_mock: ProcMock,\n    cwd: Path,\n    app_dir_mock: AppDirs,\n) -> None:\n    expected_arguments = [\n        \"docker\",\n        \"exec\",\n        \"--interactive\",\n        \"--workdir\",\n        \"/root\",\n        \"algokit_sandbox_algod\",\n        \"goal\",\n        \"clerk\",\n        \"compile\",\n    ]\n\n    proc_mock.set_output(\n        expected_arguments, output=[\"File compiled\"], side_effect=dump_file, side_effect_args={\"cwd\": cwd}\n    )\n    result = invoke(\"goal clerk compile approval1.teal approval2.teal -o approval.compiled\", cwd=cwd)\n\n    # Check if the paths in command have changed in preprocess step\n    assert _normalize_output(proc_mock.called[3].command[9]) == \"/root/goal_mount/approval1.teal\"\n    assert _normalize_output(proc_mock.called[3].command[10]) == \"/root/goal_mount/approval2.teal\"\n    assert _normalize_output(proc_mock.called[3].command[12]) == \"/root/goal_mount/approval.compiled\"\n\n    # Check for the result\n    assert result.exit_code == 0\n\n    # Check if the output file is actually created and copied in cwd in postprocess step\n    assert (cwd / \"approval.compiled\").exists()\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\",\n    \"mocked_goal_mount_path\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\ndef test_goal_simple_args_without_file_error(\n    cwd: Path,\n    app_dir_mock: AppDirs,\n) -> None:\n    assert not (cwd / \"approval.teal\").exists()\n    result = invoke(\"goal clerk compile approval.teal -o approval.compiled\", cwd=cwd)\n\n    assert 
result.exit_code == 1\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"_setup_input_files\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\n@pytest.mark.parametrize(\n    \"_setup_input_files\", [[{\"name\": \"approval.teal\", \"content\": DUMMY_CONTRACT_TEAL}]], indirect=True\n)\ndef test_goal_postprocess_of_command_args(\n    proc_mock: ProcMock,\n    cwd: Path,\n    mocked_goal_mount_path: Path,\n) -> None:\n    # adding some dummy files to the mocked_goal_mount_path\n    (mocked_goal_mount_path / \"approval.group\").touch()\n    (mocked_goal_mount_path / \"approval.group.sig\").touch()\n    (mocked_goal_mount_path / \"approval.group.sig.out\").touch()\n\n    expected_arguments = [\n        \"docker\",\n        \"exec\",\n        \"--interactive\",\n        \"--workdir\",\n        \"/root\",\n        \"algokit_sandbox_algod\",\n        \"goal\",\n        \"clerk\",\n        \"compile\",\n    ]\n    proc_mock.set_output(\n        expected_arguments,\n        output=[\"File compiled\"],\n        side_effect=dump_file,\n        side_effect_args={\"cwd\": mocked_goal_mount_path},\n    )\n\n    result = invoke(\"goal clerk compile approval.teal -o approval.compiled\", cwd=cwd)\n    assert result.exit_code == 0\n\n    # check if the output files are no longer in the goal_mount_path\n    assert not (mocked_goal_mount_path / \"approval.compiled\").exists()\n\n    # check if the input/output file is in the cwd\n    assert (cwd / \"approval.compiled\").exists()\n    assert (cwd / \"approval.teal\").exists()\n\n    # check if the dummy files are still there\n    assert (mocked_goal_mount_path / \"approval.group\").exists()\n    assert (mocked_goal_mount_path / \"approval.group.sig\").exists()\n    assert (mocked_goal_mount_path / 
\"approval.group.sig.out\").exists()\n\n\n@pytest.mark.usefixtures(\n    \"_setup_input_files\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\n@pytest.mark.parametrize(\"_setup_input_files\", [[{\"name\": \"group.gtxn\", \"content\": \"\"}]], indirect=True)\ndef test_goal_postprocess_of_single_output_arg_resulting_in_multiple_output_files(\n    proc_mock: ProcMock,\n    cwd: Path,\n    mocked_goal_mount_path: Path,\n) -> None:\n    expected_arguments = [\n        \"docker\",\n        \"exec\",\n        \"--interactive\",\n        \"--workdir\",\n        \"/root\",\n        \"algokit_sandbox_algod\",\n        \"goal\",\n        \"clerk\",\n        \"split\",\n    ]\n\n    def dump_files(cwd: Path) -> None:\n        (cwd / \"group-0.txn\").touch()\n        (cwd / \"group-1.txn\").touch()\n\n    proc_mock.set_output(\n        expected_arguments,\n        output=[\"Wrote transaction\"],\n        side_effect=dump_files,\n        side_effect_args={\"cwd\": mocked_goal_mount_path},\n    )\n\n    result = invoke(\"goal clerk split -i group.gtxn -o group.txn\", cwd=cwd)\n    assert result.exit_code == 0\n\n    # check if the output files are no longer in the goal_mount_path\n    assert not (mocked_goal_mount_path / \"group-0.txn\").exists()\n    assert not (mocked_goal_mount_path / \"group-1.txn\").exists()\n\n    # check if the input/output file is in the cwd\n    assert (cwd / \"group.gtxn\").exists()\n    assert (cwd / \"group-0.txn\").exists()\n    assert (cwd / \"group-1.txn\").exists()\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_mock_proc_with_running_localnet\")\ndef test_goal_compose_outdated(\n    cwd: Path,\n    app_dir_mock: AppDirs,\n) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"Outdated\")\n    (app_dir_mock.app_config_dir / \"sandbox\" / 
\"algod_config.json\").write_text(\"Outdated\")\n\n    result = invoke(\"goal help\", cwd=cwd)\n\n    assert result.exit_code == 1\n\n    verify(_normalize_output(result.output))\n\n\n@pytest.mark.usefixtures(\n    \"_setup_latest_dummy_compose\",\n    \"mocked_goal_mount_path\",\n    \"_mock_proc_with_algod_running_state\",\n    \"_mock_proc_with_running_localnet\",\n)\ndef test_goal_simple_args_on_named_localnet(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"goal account list\")\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n\n\n@pytest.mark.usefixtures(\n    \"mocked_goal_mount_path\",\n    \"_setup_input_files\",\n    \"_setup_latest_dummy_compose\",\n    \"_mock_proc_with_running_localnet\",\n    \"_mock_proc_with_algod_running_state\",\n)\n@pytest.mark.parametrize(\n    \"_setup_input_files\", [[{\"name\": \"contract.approval.teal\", \"content\": DUMMY_CONTRACT_TEAL}]], indirect=True\n)\ndef test_goal_simple_args_with_input_output_files_with_dot_convention_name(\n    proc_mock: ProcMock,\n    cwd: Path,\n    app_dir_mock: AppDirs,\n) -> None:\n    expected_arguments = [\n        \"docker\",\n        \"exec\",\n        \"--interactive\",\n        \"--workdir\",\n        \"/root\",\n        \"algokit_sandbox_algod\",\n        \"goal\",\n        \"clerk\",\n        \"compile\",\n    ]\n\n    proc_mock.set_output(\n        expected_arguments, output=[\"File compiled\"], side_effect=dump_file, side_effect_args={\"cwd\": cwd}\n    )\n\n    result = invoke(\"goal clerk compile contract.approval.teal -o approval.compiled\", cwd=cwd)\n\n    # Check if the paths in command have changed in preprocess step\n    assert _normalize_output(proc_mock.called[3].command[9]) == \"/root/goal_mount/contract.approval.teal\"\n    assert _normalize_output(proc_mock.called[3].command[11]) == \"/root/goal_mount/approval.compiled\"\n\n   
 # Check for the result status\n    assert result.exit_code == 0\n\n    # Check if the output file is created and copied in cwd in postprocess step\n    assert (cwd / \"approval.compiled\").exists()\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_complex_args.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal account export -a RKTAZY2ZLKUJBHDVVA3KKHEDK7PRVGIGOZAUUIZBNK2OEP6KQGEXKKUYUY' in '{current_working_directory}'\n STDOUT\n STDERR\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_compose_outdated.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nError: LocalNet definition is out of date; please run `algokit localnet reset` first!\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_console.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nOpening Bash console on the algod node; execute `exit` to return to original console\nDEBUG: Running '{container_engine} exec -it -w /root algokit_sandbox_algod bash' in '{current_working_directory}'\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_console_algod_not_created.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: []\nLocalNet isn't running\nStarting AlgoKit LocalNet now...\nDEBUG: Running '{container_engine} compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\n{container_engine}: STDOUT\n{container_engine}: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\nOpening Bash console on the algod node; execute `exit` to return to original console\nDEBUG: Running '{container_engine} exec -it -w /root algokit_sandbox_algod bash' in '{current_working_directory}'\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_console_failed.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nOpening Bash console on the algod node; execute `exit` to return to original console\nDEBUG: Running '{container_engine} exec -it -w /root algokit_sandbox_algod bash' in '{current_working_directory}'\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_help.approved.txt",
    "content": "Usage: algokit goal [OPTIONS] [GOAL_ARGS]...\n\n  Run the Algorand goal CLI against the AlgoKit LocalNet.\n\n  Look at https://dev.algorand.co/algokit/algokit-cli/goal for more information.\n\nOptions:\n  --console      Open a Bash console so you can execute multiple goal commands\n                 and/or interact with a filesystem.\n  --interactive  Force running the goal command in interactive mode.\n  -h, --help     Show this message and exit.\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_no_args.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal' in '{current_working_directory}'\n STDOUT\n STDERR\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal account list' in '{current_working_directory}'\n STDOUT\n STDERR\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args_on_named_localnet.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal account list' in '{current_working_directory}'\n STDOUT\n STDERR\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args_with_input_file.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal clerk group /root/goal_mount/transactions.txt' in '{current_working_directory}'\n File compiled\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args_with_input_output_files.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal clerk compile /root/goal_mount/approval.teal -o /root/goal_mount/approval.compiled' in '{current_working_directory}'\n File compiled\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args_with_input_output_files_with_dot_convention_name.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal clerk compile /root/goal_mount/contract.approval.teal -o /root/goal_mount/approval.compiled' in '{current_working_directory}'\n File compiled\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args_with_multiple_input_output_files.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal clerk compile /root/goal_mount/approval1.teal /root/goal_mount/approval2.teal -o /root/goal_mount/approval.compiled' in '{current_working_directory}'\n File compiled\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args_with_output_file.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nDEBUG: Running '{container_engine} exec --interactive --workdir /root algokit_sandbox_algod goal account dump -o /root/goal_mount/balance_record.json' in '{current_working_directory}'\n File compiled\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_simple_args_without_file_error.approved.txt",
    "content": "DEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: []\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nERROR: approval.teal does not exist.\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_start_without_docker.approved.txt",
    "content": "DEBUG: Running 'docker version' in '{current_working_directory}'\nError: docker not found; please install docker and add to path.\nSee https://www.docker.com/get-started/ for more information.\n"
  },
  {
    "path": "tests/goal/test_goal.test_goal_start_without_docker_engine_running.approved.txt",
    "content": "DEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: docker engine isn't running; please start it.\n"
  },
  {
    "path": "tests/init/__init__.py",
    "content": ""
  },
  {
    "path": "tests/init/example/__init__.py",
    "content": ""
  },
  {
    "path": "tests/init/example/test_example.py",
    "content": "import shutil\nfrom pathlib import Path\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.which_mock import WhichMock\n\nMOCK_EXAMPLES = [\n    {\"id\": \"react-vite\", \"name\": \"React Vite\", \"type\": \"frontend\"},\n    {\"id\": \"python-smart-contract\", \"name\": \"Python Smart Contract\", \"type\": \"contract\"},\n]\n# Define constants relative to the mocked Path.home() which will be tmp_path\nMOCK_USER_DIR_NAME = \".algokit\"\nMOCK_TEMPLATES_DIR_NAME = \"templates\"\nMOCK_EXAMPLES_DIR_NAME = \"examples\"\nMOCK_EXAMPLES_CONFIG_NAME = \"examples.yml\"\n\n\ndef get_mock_user_dir(home_path: Path) -> Path:\n    return home_path / MOCK_USER_DIR_NAME\n\n\ndef get_mock_templates_dir(home_path: Path) -> Path:\n    return get_mock_user_dir(home_path) / MOCK_TEMPLATES_DIR_NAME\n\n\ndef get_mock_examples_dir(home_path: Path) -> Path:\n    return get_mock_templates_dir(home_path) / MOCK_EXAMPLES_DIR_NAME\n\n\ndef get_mock_examples_config_path(home_path: Path) -> Path:\n    return get_mock_examples_dir(home_path) / MOCK_EXAMPLES_CONFIG_NAME\n\n\n@pytest.fixture(autouse=True)\ndef _setup_mocks(mocker: MockerFixture, tmp_path: Path) -> MagicMock:\n    \"\"\"Sets up mocks for dependencies used by the example command.\"\"\"\n    mock_home = mocker.patch(\"pathlib.Path.home\", return_value=tmp_path)\n\n    # Define paths based on mocked home\n    mock_examples_dir = get_mock_examples_dir(mock_home())\n    mock_examples_config_path = get_mock_examples_config_path(mock_home())\n\n    # Ensure constants point to mocked locations relative to tmp_path\n    mocker.patch(\"algokit.core.init.ALGOKIT_USER_DIR\", MOCK_USER_DIR_NAME)\n    mocker.patch(\"algokit.cli.init.example.ALGOKIT_USER_DIR\", MOCK_USER_DIR_NAME)\n\n    mocker.patch(\"algokit.core.init.ALGOKIT_TEMPLATES_DIR\", MOCK_TEMPLATES_DIR_NAME)\n    
mocker.patch(\"algokit.cli.init.example.ALGOKIT_TEMPLATES_DIR\", MOCK_TEMPLATES_DIR_NAME)\n\n    # Mock _manage_templates_repository\n    mocker.patch(\"algokit.cli.init.example._manage_templates_repository\")\n\n    # Mock _load_algokit_examples to return mock data and check correct path is used\n    def _mock_load_examples(config_path: str) -> list[dict]:\n        assert Path(config_path) == mock_examples_config_path\n        return MOCK_EXAMPLES\n\n    mocker.patch(\"algokit.cli.init.example._load_algokit_examples\", side_effect=_mock_load_examples)\n\n    # Mock _open_ide\n    mocker.patch(\"algokit.cli.init.example._open_ide\")\n\n    # Mock shutil.copytree\n    mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n\n    # Mock git availability\n    which_mock = WhichMock()\n    which_mock.add(\"git\")\n    mocker.patch(\"algokit.cli.init.example.shutil.which\").side_effect = which_mock.which\n\n    # Mock ExampleSelector TUI App\n    mock_example_selector_instance = MagicMock()\n    # Configure default behavior (can be overridden per test)\n    mock_example_selector_instance.user_answers = {}\n    mock_example_selector_class = mocker.patch(\"algokit.cli.init.example.ExampleSelector\")\n    mock_example_selector_class.return_value = mock_example_selector_instance\n    # Mock the config path used by the TUI module itself\n    mocker.patch(\n        \"algokit.cli.tui.init.example_selector.examples_config_path\",\n        str(mock_examples_config_path.absolute()),\n    )\n\n    # Create mock source directories within the mocked ~/.algokit/templates/examples\n    for example in MOCK_EXAMPLES:\n        example_src_path = mock_examples_dir / example[\"id\"]\n        example_src_path.mkdir(parents=True, exist_ok=True)\n        (example_src_path / \"readme.md\").touch()  # Add a dummy file\n\n    # Return the mocked selector instance for potential customization in tests\n    return mock_example_selector_instance\n\n\n@pytest.fixture\ndef cwd(tmp_path: Path) -> 
Path:\n    \"\"\"Provides the temporary working directory for tests.\"\"\"\n    # Use a sub-directory within tmp_path to avoid conflicts with mocked home\n    test_cwd = tmp_path / \"test_cwd\"\n    test_cwd.mkdir()\n    return test_cwd\n\n\ndef test_example_command_help() -> None:\n    result = invoke(\"init example -h\")\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_example_command_with_valid_id(mocker: MockerFixture, cwd: Path) -> None:\n    example_id = \"react-vite\"\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n    home_path = Path.home()\n    expected_src = get_mock_examples_dir(home_path) / example_id\n    expected_dest = cwd / example_id\n\n    result = invoke([\"init\", \"example\", example_id], cwd=cwd)\n\n    assert result.exit_code == 0\n    assert f\"Created example {example_id}\" in result.output\n    mock_copytree.assert_called_once_with(expected_src, expected_dest)\n    mock_open_ide.assert_called_once_with(expected_dest)\n    verify(result.output)\n\n\ndef test_example_command_with_invalid_id(mocker: MockerFixture, cwd: Path) -> None:\n    example_id = \"nonexistent\"\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n\n    result = invoke([\"init\", \"example\", example_id], cwd=cwd)\n\n    assert result.exit_code == 0\n    assert f\"Example {example_id} not found\" in result.output\n    assert \"Available example ids:\" in result.output\n    for example in MOCK_EXAMPLES:\n        assert f\"  {example['id']}\" in result.output\n    mock_copytree.assert_not_called()\n    mock_open_ide.assert_not_called()\n    verify(result.output)\n\n\ndef test_example_command_with_valid_id_source_not_exist(mocker: MockerFixture, cwd: Path) -> None:\n    example_id = \"react-vite\"\n    home_path = Path.home()\n    
example_src_path = get_mock_examples_dir(home_path) / example_id\n\n    # Ensure the source directory doesn't exist for this test\n    if example_src_path.exists():\n        shutil.rmtree(example_src_path)\n\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n\n    result = invoke([\"init\", \"example\", example_id], cwd=cwd)\n\n    assert result.exit_code == 0  # Command exits cleanly after printing error\n    assert f\"Example {example_id} not found\" in result.output\n    # Should not list available IDs if the ID was valid but source dir missing\n    assert \"Available example ids:\" not in result.output\n    mock_copytree.assert_not_called()\n    mock_open_ide.assert_not_called()\n    verify(result.output)\n\n\ndef test_example_command_with_valid_id_target_exists(mocker: MockerFixture, cwd: Path) -> None:\n    example_id = \"react-vite\"\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n    home_path = Path.home()\n    expected_src = get_mock_examples_dir(home_path) / example_id\n    expected_dest = cwd / example_id\n\n    # Pre-create the target directory\n    expected_dest.mkdir()\n\n    # Make copytree raise error if target exists (default behavior)\n    mock_copytree.side_effect = FileExistsError(\"Target exists\")\n\n    result = invoke([\"init\", \"example\", example_id], cwd=cwd)\n\n    # Expecting failure because shutil.copytree fails if dest exists\n    assert result.exit_code == 1\n    assert isinstance(result.exception, FileExistsError)\n    mock_copytree.assert_called_once_with(expected_src, expected_dest)\n    mock_open_ide.assert_not_called()\n\n\ndef test_example_command_tui_select_valid(mocker: MockerFixture, cwd: Path) -> None:\n    selected_example_id = \"python-smart-contract\"\n\n    # Get the mock instance from the fixture\n    
mock_example_selector = mocker.patch(\"algokit.cli.init.example.ExampleSelector\").return_value\n    # Configure the mocked selector instance to return the selected ID\n    mock_example_selector.user_answers = {\"example_id\": selected_example_id}\n\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n    home_path = Path.home()\n    expected_src = get_mock_examples_dir(home_path) / selected_example_id\n    expected_dest = cwd / selected_example_id\n\n    result = invoke(\"init example\", cwd=cwd)\n\n    assert result.exit_code == 0\n    mock_example_selector.run.assert_called_once()\n    assert f\"Created example {selected_example_id}\" in result.output\n    mock_copytree.assert_called_once_with(expected_src, expected_dest)\n    mock_open_ide.assert_called_once_with(expected_dest)\n    verify(result.output)\n\n\ndef test_example_command_tui_select_nothing(mocker: MockerFixture, cwd: Path) -> None:\n    # Get the mock instance from the fixture\n    mock_example_selector = mocker.patch(\"algokit.cli.init.example.ExampleSelector\").return_value\n    # Ensure the mocked selector returns no selection\n    mock_example_selector.user_answers = {}\n\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n\n    result = invoke(\"init example\", cwd=cwd)\n\n    assert result.exit_code == 0  # Command exits cleanly\n    mock_example_selector.run.assert_called_once()\n    # Assert that no 'Created example' message appears and no copy/IDE open happened\n    assert \"Created example\" not in result.output\n    mock_copytree.assert_not_called()\n    mock_open_ide.assert_not_called()\n    verify(result.output)\n\n\ndef test_example_command_tui_select_valid_but_source_missing(mocker: MockerFixture, cwd: Path) -> None:\n    selected_example_id = \"python-smart-contract\"\n\n    # Get 
the mock instance from the fixture\n    mock_example_selector = mocker.patch(\"algokit.cli.init.example.ExampleSelector\").return_value\n    # Configure the mocked selector instance to return the selected ID\n    mock_example_selector.user_answers = {\"example_id\": selected_example_id}\n\n    home_path = Path.home()\n    example_src_path = get_mock_examples_dir(home_path) / selected_example_id\n    # Ensure the source directory doesn't exist for this test\n    if example_src_path.exists():\n        shutil.rmtree(example_src_path)\n\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n\n    result = invoke(\"init example\", cwd=cwd)\n\n    assert result.exit_code == 0  # Command exits cleanly after printing error\n    mock_example_selector.run.assert_called_once()\n    assert f\"Example {selected_example_id} not found\" in result.output\n    mock_copytree.assert_not_called()\n    mock_open_ide.assert_not_called()\n    verify(result.output)\n\n\ndef test_example_command_list_option(mocker: MockerFixture, cwd: Path) -> None:\n    \"\"\"Test that the --list option displays all available examples.\"\"\"\n\n    mock_copytree = mocker.patch(\"algokit.cli.init.example.shutil.copytree\")\n    mock_open_ide = mocker.patch(\"algokit.cli.init.example._open_ide\")\n\n    # Test with short flag\n    result = invoke([\"init\", \"example\", \"-l\"], cwd=cwd)\n\n    assert result.exit_code == 0\n    assert \"Available examples:\" in result.output\n    for example in MOCK_EXAMPLES:\n        assert f\"  {example['id']} - {example.get('name', '')}\" in result.output\n    mock_copytree.assert_not_called()\n    mock_open_ide.assert_not_called()\n    verify(result.output)\n\n    # Test with long flag\n    result = invoke([\"init\", \"example\", \"--list\"], cwd=cwd)\n\n    assert result.exit_code == 0\n    assert \"Available examples:\" in result.output\n    for example in 
MOCK_EXAMPLES:\n        assert f\"  {example['id']} - {example.get('name', '')}\" in result.output\n    mock_copytree.assert_not_called()\n    mock_open_ide.assert_not_called()\n    verify(result.output)\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_help.approved.txt",
    "content": "Usage: algokit init example [OPTIONS] [EXAMPLE_ID]\n\n  Initialize a new project from an example template.\n\n  Allows you to quickly create a new project by copying one of the official\n  AlgoKit example templates. If no example ID is provided, launches an\n  interactive selector to choose from available examples. The example will be\n  copied to a new directory in your current location.\n\nOptions:\n  -l, --list  List all available examples\n  -h, --help  Show this message and exit.\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_list_option.approved.txt",
    "content": "Available examples:\n  react-vite - React Vite\n  python-smart-contract - Python Smart Contract\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_tui_select_nothing.approved.txt",
    "content": "\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_tui_select_valid.approved.txt",
    "content": "Created example python-smart-contract\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_tui_select_valid_but_source_missing.approved.txt",
    "content": "Example python-smart-contract not found\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_with_invalid_id.approved.txt",
    "content": "Example nonexistent not found\nAvailable example ids:\n  react-vite\n  python-smart-contract\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_with_valid_id.approved.txt",
    "content": "Created example react-vite\n"
  },
  {
    "path": "tests/init/example/test_example.test_example_command_with_valid_id_source_not_exist.approved.txt",
    "content": "Example react-vite not found\n"
  },
  {
    "path": "tests/init/test_init.py",
    "content": "import json\nimport subprocess\nfrom collections.abc import Callable\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom pathlib import Path\n\nimport click\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.namer import NamerFactory\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\nfrom approvaltests.scrubbers.scrubbers import Scrubber\nfrom prompt_toolkit.input import PipeInput\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.init import append_project_to_vscode_workspace\nfrom tests.utils.approvals import TokenScrubber, combine_scrubbers, verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\nfrom tests.utils.which_mock import WhichMock\n\nPARENT_DIRECTORY = Path(__file__).parent\nGIT_BUNDLE_PATH = PARENT_DIRECTORY / \"copier-helloworld.bundle\"\n\n\ndef _remove_git_hints(output: str) -> str:\n    git_init_hint_prefix = \"DEBUG: git: hint:\"\n    lines = [line for line in output.splitlines() if not line.startswith(git_init_hint_prefix)]\n    return \"\\n\".join(lines)\n\n\ndef _remove_project_paths(output: str) -> str:\n    lines = [\n        \"DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\"\n        if \"DEBUG: Attempting to load project config from \" in line\n        else line\n        for line in output.splitlines()\n    ]\n\n    return \"\\n\".join(lines)\n\n\nclass MockPipeInput(str, Enum):\n    LEFT = \"\\x1b[D\"\n    RIGHT = \"\\x1b[C\"\n    UP = \"\\x1b[A\"\n    DOWN = \"\\x1b[B\"\n    ENTER = \"\\n\"\n\n\n@dataclass\nclass MockQuestionaryAnswer:\n    \"\"\"\n    Dummy class used to represent questionary answer with value indicating the question, and commands\n    being an array of emulated inputs required to be sent to the questionary to pick the desired answer.\n    \"\"\"\n\n    value: str\n    commands: list[MockPipeInput]\n\n\ndef make_output_scrubber(*extra_scrubbers: Callable[[str], 
str], **extra_tokens: str) -> Scrubber:\n    default_tokens = {\"test_parent_directory\": str(PARENT_DIRECTORY)}\n\n    tokens = default_tokens | extra_tokens\n    return combine_scrubbers(\n        *extra_scrubbers,\n        click.unstyle,\n        TokenScrubber(tokens=tokens),\n        TokenScrubber(tokens={\"test_parent_directory\": str(PARENT_DIRECTORY).replace(\"\\\\\", \"/\")}),\n        lambda t: t.replace(\"{test_parent_directory}\\\\\", \"{test_parent_directory}/\"),\n        _remove_project_paths,\n    )\n\n\n@pytest.fixture(autouse=True)\ndef which_mock(mocker: MockerFixture) -> WhichMock:\n    which_mock = WhichMock()\n    which_mock.add(\"git\")\n    mocker.patch(\"algokit.cli.init.command.shutil.which\").side_effect = which_mock.which\n    return which_mock\n\n\nclass ExtendedTemplateKey(str, Enum):\n    # Include all keys from TemplateKey and add new ones\n    BASE = \"base\"\n    PYTHON = \"python\"\n    TYPESCRIPT = \"typescript\"\n    TEALSCRIPT = \"tealscript\"\n    FULLSTACK = \"fullstack\"\n    REACT = \"react\"\n    PYTHON_WITH_VERSION = \"python_with_version\"\n    SIMPLE = \"simple\"\n\n\n# Define a fixture to monkeypatch TemplateKey with ExtendedTemplateKey\n@pytest.fixture(autouse=True)\ndef _set_mocked_template_keys(monkeypatch: pytest.MonkeyPatch) -> None:\n    monkeypatch.setattr(\"algokit.cli.init.command.TemplateKey\", ExtendedTemplateKey)\n\n\n@pytest.fixture(autouse=True)\ndef _set_blessed_templates(mocker: MockerFixture) -> None:\n    from algokit.cli.init import init_group\n    from algokit.cli.init.helpers import BlessedTemplateSource\n\n    blessed_templates = {\n        ExtendedTemplateKey.SIMPLE: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-base-template\",\n            description=\"Does nothing helpful. 
simple\",\n        ),\n        ExtendedTemplateKey.PYTHON_WITH_VERSION: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-python-template\",\n            commit=\"f97be2c0e3975adfaeb16ef07a2b4bd6ce2afcff\",\n            description=\"Provides a good starting point to build python smart contracts productively, but pinned.\",\n        ),\n        ExtendedTemplateKey.FULLSTACK: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-base-template\",\n            description=\"Does nothing helpful. fullstack\",\n        ),\n        ExtendedTemplateKey.PYTHON: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-python-template\",\n            description=\"Does nothing helpful. python\",\n        ),\n        ExtendedTemplateKey.REACT: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-base-template\",\n            description=\"Does nothing helpful. react\",\n        ),\n        ExtendedTemplateKey.BASE: BlessedTemplateSource(\n            url=\"gh:algorandfoundation/algokit-base-template\",\n            description=\"Does nothing helpful. 
base\",\n        ),\n    }\n\n    (template_param,) = (p for p in init_group.params if p.name == \"template_name\")\n    template_param.type = click.Choice(list(blessed_templates))\n\n    mocker.patch(\"algokit.cli.init.command._get_blessed_templates\").return_value = blessed_templates\n\n\n@pytest.fixture(autouse=True)\ndef _override_bootstrap(mocker: MockerFixture) -> None:\n    def bootstrap_mock(p: Path, *, ci_mode: bool, max_depth: int = 1) -> None:  # noqa: ARG001\n        click.echo(f\"Executed `algokit project bootstrap all` in {p}\")\n\n    mocker.patch(\"algokit.cli.init.command.bootstrap_any_including_subdirs\").side_effect = bootstrap_mock\n\n\ndef test_init_help() -> None:\n    result = invoke(\"init -h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_init_missing_git(which_mock: WhichMock) -> None:\n    which_mock.remove(\"git\")\n    result = invoke(\"init\")\n\n    assert result.exit_code != 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_invalid_name() -> None:\n    result = invoke(\"init --name invalid{name\")\n\n    assert result.exit_code != 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_no_interaction_required_no_git_no_network(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        f\"init --name myapp --no-git --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url \"\n        \"--answer project_name test --answer greeting hi --answer include_extra_file yes --bootstrap --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths == {\n        Path(\"myapp\"),\n        Path(\"myapp\") / \"test\",\n        Path(\"myapp\") / \"test\" / \"extra_file.txt\",\n        Path(\"myapp\") / \"test\" / \"helloworld.txt\",\n    }\n    verify(result.output, 
scrubber=make_output_scrubber())\n\n\ndef test_init_no_interaction_required_no_git_no_network_with_vscode(\n    tmp_path_factory: TempPathFactory,\n    proc_mock: ProcMock,\n    mock_questionary_input: PipeInput,\n    which_mock: WhichMock,\n    request: pytest.FixtureRequest,\n) -> None:\n    code_cmd = which_mock.add(\"code\")\n    proc_mock.set_output([code_cmd], [\"Launch project\"])\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    app_name = \"myapp\"\n    project_path = cwd / app_name\n    (project_path / \".vscode\").mkdir(parents=True)\n    mock_questionary_input.send_text(\"Y\")  # reuse existing directory\n\n    result = invoke(\n        f\"init --name {app_name} --no-git --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url \"\n        \"--answer project_name test --answer greeting hi --answer include_extra_file yes --bootstrap --no-workspace\",\n        cwd=cwd,\n    )\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber(), namer=PyTestNamer(request))\n\n\ndef test_init_no_interaction_required_no_git_no_network_with_vscode_and_readme(\n    tmp_path_factory: TempPathFactory, proc_mock: ProcMock, mock_questionary_input: PipeInput, which_mock: WhichMock\n) -> None:\n    code_cmd = which_mock.add(\"code\")\n    proc_mock.set_output([code_cmd], [\"Launch project\"])\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    app_name = \"myapp\"\n    project_path = cwd / app_name\n    (project_path / \".vscode\").mkdir(parents=True)\n    (project_path / \"README.txt\").touch()\n    mock_questionary_input.send_text(\"Y\")  # reuse existing directory\n\n    result = invoke(\n        f\"init --name {app_name} --no-git --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url \"\n        \"--answer project_name test --answer greeting hi --answer include_extra_file yes --bootstrap --no-workspace\",\n        cwd=cwd,\n    )\n    assert result.exit_code == 0\n    verify(result.output, 
scrubber=make_output_scrubber())\n\n\ndef test_init_no_interaction_required_no_git_no_network_with_no_ide(\n    tmp_path_factory: TempPathFactory,\n    proc_mock: ProcMock,\n    mock_questionary_input: PipeInput,\n    which_mock: WhichMock,\n) -> None:\n    code_cmd = which_mock.add(\"code\")\n    proc_mock.should_fail_on(code_cmd)\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    app_name = \"myapp\"\n    project_path = cwd / app_name\n\n    (project_path / \".vscode\").mkdir(parents=True)\n    mock_questionary_input.send_text(\"Y\")  # reuse existing directory\n\n    result = invoke(\n        f\"init --name myapp --no-git --template-url '{GIT_BUNDLE_PATH}' \"\n        \"--UNSAFE-SECURITY-accept-template-url \"\n        \"--answer project_name test --answer greeting hi --answer include_extra_file yes \"\n        \"--bootstrap --no-ide --no-workspace\",\n        cwd=cwd,\n    )\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_no_interaction_required_defaults_no_git_no_network(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        f\"init --name myapp --no-git --defaults \"\n        f\"--template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths == {\n        Path(\"myapp\"),\n        Path(\"myapp\") / \"myapp\",\n        Path(\"myapp\") / \"myapp\" / \"helloworld.txt\",\n    }\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_minimal_interaction_required_no_git_no_network_no_bootstrap(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    # Accept community template\n    mock_questionary_input.send_text(\"Y\")\n    result = invoke(\n        f\"init --name myapp 
--no-git --template-url '{GIT_BUNDLE_PATH}' --defaults --no-bootstrap --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths == {\n        Path(\"myapp\"),\n        Path(\"myapp\") / \"myapp\",\n        Path(\"myapp\") / \"myapp\" / \"helloworld.txt\",\n    }\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_minimal_interaction_required_yes_git_no_network(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    mock_questionary_input.send_text(\"Y\")\n    dir_name = \"myapp\"\n    result = invoke(\n        f\"init --name {dir_name} --git --template-url '{GIT_BUNDLE_PATH}' --defaults --no-workspace\",\n        cwd=cwd,\n        env={\n            \"GIT_AUTHOR_NAME\": \"GitHub Actions\",\n            \"GIT_COMMITTER_NAME\": \"GitHub Actions\",\n            \"GIT_AUTHOR_EMAIL\": \"no-reply@example.com\",\n            \"GIT_COMMITTER_EMAIL\": \"no-reply@example.com\",\n        },\n    )\n\n    assert result.exit_code == 0\n    created_dir = cwd / dir_name\n    assert created_dir.is_dir()\n    paths = {p.relative_to(created_dir) for p in created_dir.iterdir()}\n    assert paths == {Path(\".git\"), Path(\"myapp\")}\n    git_rev_list = subprocess.run(\n        [\"git\", \"rev-list\", \"--max-parents=0\", \"HEAD\"], cwd=created_dir, capture_output=True, text=True, check=False\n    )\n    assert git_rev_list.returncode == 0\n    git_initial_commit_hash = git_rev_list.stdout[:7]\n    verify(\n        result.output,\n        scrubber=make_output_scrubber(_remove_git_hints, git_initial_commit_hash=git_initial_commit_hash),\n    )\n\n\ndef test_init_do_not_use_existing_folder(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    (cwd / \"myapp\").mkdir()\n    
mock_questionary_input.send_text(\"N\")\n\n    result = invoke(\n        \"init --name myapp --no-git --defaults\"\n        f\" --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_use_existing_folder(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    (cwd / \"myapp\").mkdir()\n    mock_questionary_input.send_text(\"Y\")  # override\n\n    result = invoke(\n        \"init --name myapp --no-git --defaults\"\n        f\" --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_existing_filename_same_as_folder_name(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"myapp\").touch()\n\n    mock_questionary_input.send_text(\"Y\")  # override\n\n    result = invoke(\n        \"init --name myapp --no-git --defaults \"\n        f\"--template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_template_selection(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    mock_questionary_input.send_text(\"\\n\\n\\n\")\n    result = invoke(\n        \"init --name myapp --no-git --defaults --no-workspace\",\n        cwd=cwd,\n    )\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_invalid_template_url(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = 
tmp_path_factory.mktemp(\"cwd\")\n\n    mock_questionary_input.send_text(\"Y\")  # community warning\n    result = invoke(\n        \"init --name myapp --no-git --template-url https://www.google.com --defaults\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_project_name(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    project_name = \"FAKE_PROJECT\"\n    mock_questionary_input.send_text(project_name + \"\\n\")\n    mock_questionary_input.send_text(\"Y\")\n    result = invoke(\n        f\"init --no-git --defaults --template-url '{GIT_BUNDLE_PATH}' \"\n        f\"--UNSAFE-SECURITY-accept-template-url --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths == {\n        Path(project_name),\n        Path(project_name) / project_name,\n        Path(project_name) / project_name / \"helloworld.txt\",\n    }\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_bootstrap_yes(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    mock_questionary_input.send_text(\"Y\")\n    result = invoke(\n        f\"init -n myapp --no-git --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url\"\n        \" --answer greeting hi --answer include_extra_file yes --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_bootstrap_no(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    mock_questionary_input.send_text(\"N\")\n    result = invoke(\n        f\"init -n myapp --no-git --template-url '{GIT_BUNDLE_PATH}' 
--UNSAFE-SECURITY-accept-template-url\"\n        \" --answer greeting hi --answer include_extra_file yes --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_project_name_not_empty(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    project_name = \"FAKE_PROJECT\"\n    mock_questionary_input.send_text(\"\\n\")\n    mock_questionary_input.send_text(project_name + \"\\n\")\n    command = (\n        f\"init --no-git --template-url '{GIT_BUNDLE_PATH}' \"\n        \"--UNSAFE-SECURITY-accept-template-url --defaults --no-workspace\"\n    )\n    result = invoke(command, cwd=cwd)\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths == {\n        Path(project_name),\n        Path(project_name) / project_name,\n        Path(project_name) / project_name / \"helloworld.txt\",\n    }\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_project_name_reenter_folder_name(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    project_name = \"FAKE_PROJECT\"\n    (cwd / project_name).mkdir()\n\n    mock_questionary_input.send_text(project_name + \"\\n\")\n    mock_questionary_input.send_text(\"N\")\n    project_name_2 = \"FAKE_PROJECT_2\"\n    mock_questionary_input.send_text(project_name_2 + \"\\n\")\n    command = (\n        f\"init --no-git --template-url '{GIT_BUNDLE_PATH}' \"\n        \"--UNSAFE-SECURITY-accept-template-url --defaults --no-workspace\"\n    )\n    result = invoke(command, cwd=cwd)\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths == {\n        Path(project_name_2),\n        Path(project_name_2) / project_name_2,\n        Path(project_name_2) / project_name_2 
/ \"helloworld.txt\",\n        Path(project_name),\n    }\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_ask_about_git(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    mock_questionary_input.send_text(\"Y\")  # community one\n    mock_questionary_input.send_text(\"Y\")  # git\n    dir_name = \"myapp\"\n    result = invoke(\n        f\"init --name myapp --template-url '{GIT_BUNDLE_PATH}' --defaults --no-workspace\",\n        cwd=cwd,\n        env={\n            \"GIT_AUTHOR_NAME\": \"GitHub Actions\",\n            \"GIT_COMMITTER_NAME\": \"GitHub Actions\",\n            \"GIT_AUTHOR_EMAIL\": \"no-reply@example.com\",\n            \"GIT_COMMITTER_EMAIL\": \"no-reply@example.com\",\n        },\n    )\n\n    assert result.exit_code == 0\n    created_dir = cwd / dir_name\n    assert created_dir.is_dir()\n    paths = {p.relative_to(created_dir) for p in created_dir.iterdir()}\n    assert paths == {Path(\"myapp\"), Path(\".git\")}\n    git_rev_list = subprocess.run(\n        [\"git\", \"rev-list\", \"--max-parents=0\", \"HEAD\"], cwd=created_dir, capture_output=True, text=True, check=False\n    )\n    assert git_rev_list.returncode == 0\n    git_initial_commit_hash = git_rev_list.stdout[:7]\n    verify(\n        result.output,\n        scrubber=make_output_scrubber(_remove_git_hints, git_initial_commit_hash=git_initial_commit_hash),\n    )\n\n\ndef test_init_template_url_and_template_name(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    mock_questionary_input.send_text(\"Y\")  # community warning\n    result = invoke(\n        f\"init --name myapp --no-git --template simple --template-url '{GIT_BUNDLE_PATH}' --defaults\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, 
scrubber=make_output_scrubber())\n\n\n@pytest.mark.usefixtures(\"mock_questionary_input\")\ndef test_init_template_url_and_ref(tmp_path_factory: TempPathFactory, mocker: MockerFixture) -> None:\n    mock_copier_worker_cls = mocker.patch(\"copier._main.Worker\")\n    mock_copier_worker_cls.return_value.__enter__.return_value.template.url_expanded = \"URL\"\n    ref = \"abcdef123456\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    result = invoke(\n        \"init --name myapp --no-git --no-bootstrap \"\n        \"--template-url gh:algorandfoundation/algokit-python-template \"\n        f\"--template-url-ref {ref} \"\n        \"--UNSAFE-SECURITY-accept-template-url --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    assert mock_copier_worker_cls.call_args.kwargs[\"vcs_ref\"] == ref\n\n\ndef test_init_blessed_template_url_get_community_warning(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    mock_questionary_input.send_text(\"N\")  # community warning\n    result = invoke(\n        \"init --name myapp --no-git \"\n        \"--template-url gh:algorandfoundation/algokit-python-template --defaults \"\n        \"-a author_name None -a author_email None \",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_with_any_template_url_get_community_warning(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    mock_questionary_input.send_text(\"Y\")\n    result = invoke(\n        \"init --name myapp --no-git --no-bootstrap \"\n        \"--template-url gh:algorandfoundation/algokit-python-template --defaults --no-workspace \"\n        \"-a author_name None -a author_email None \",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in 
cwd.rglob(\"*\")}\n    assert paths.issuperset(\n        {\n            Path(\"myapp\"),\n            Path(\"myapp\") / \"README.md\",\n            Path(\"myapp\") / \"smart_contracts\",\n        }\n    )\n    verify(\n        result.output,\n        scrubber=make_output_scrubber(),\n    )\n\n\ndef test_init_with_any_template_url_get_community_warning_with_unsafe_tag(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    result = invoke(\n        \"init --name myapp --no-git --no-bootstrap \"\n        \"--template-url gh:algorandfoundation/algokit-python-template --defaults --no-workspace \"\n        \"-a author_name None -a author_email None --UNSAFE-SECURITY-accept-template-url\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths.issuperset(\n        {\n            Path(\"myapp\"),\n            Path(\"myapp\") / \"README.md\",\n            Path(\"myapp\") / \"smart_contracts\",\n        }\n    )\n    verify(\n        result.output,\n        scrubber=make_output_scrubber(),\n    )\n\n\ndef test_init_no_community_template(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    mock_questionary_input.send_text(\"N\")  # community warning\n    result = invoke(\n        f\"init --name myapp --no-git --template-url '{GIT_BUNDLE_PATH}' --defaults\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_input_template_url(tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    # Source for special keys https://github.com/tmbo/questionary/blob/master/tests/prompts/test_select.py\n    mock_questionary_input.send_text(\"\\x1b[A\")  # one up\n    mock_questionary_input.send_text(\"\\n\")  # enter\n\n    
mock_questionary_input.send_text(str(GIT_BUNDLE_PATH) + \"\\n\")  # name\n    result = invoke(\n        \"init --name myapp --no-git --defaults --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_with_official_template_name(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        \"init --name myapp --no-git --no-bootstrap --template python --defaults --no-workspace \"\n        \"-a author_name None -a author_email None \",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths.issuperset(\n        {\n            Path(\"myapp\"),\n            Path(\"myapp\") / \"README.md\",\n            Path(\"myapp\") / \"smart_contracts\",\n        }\n    )\n    verify(\n        result.output,\n        scrubber=make_output_scrubber(),\n    )\n\n\ndef test_init_with_official_template_name_and_hash(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        \"init --name myapp --no-git --template python_with_version\"\n        \" --defaults -a run_poetry_install False -a author_name None -a author_email None --no-workspace \",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths.issuperset(\n        {\n            Path(\"myapp\"),\n            Path(\"myapp\") / \"README.md\",\n            Path(\"myapp\") / \"smart_contracts\",\n        }\n    )\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_with_custom_env(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        (\n            \"init --name myapp --no-git --no-bootstrap --template python --defaults --no-workspace \"\n            \"-a 
author_name None -a author_email None \"\n            '-a algod_token \"abcdefghijklmnopqrstuvwxyz\" -a algod_server http://mylocalserver -a algod_port 1234 '\n            '-a indexer_token \"zyxwvutsrqponmlkjihgfedcba\" -a indexer_server http://myotherserver -a indexer_port 6789 '\n            \" -a run_poetry_install False\"\n        ),\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    paths = {p.relative_to(cwd) for p in cwd.rglob(\"*\")}\n    assert paths.issuperset(\n        {\n            Path(\"myapp\"),\n            Path(\"myapp\") / \"README.md\",\n            Path(\"myapp\") / \"smart_contracts\",\n        }\n    )\n\n    verify(\n        result.output,\n        scrubber=make_output_scrubber(),\n    )\n\n\ndef test_init_template_with_python_task_fails_on_missing_python(\n    mocker: MockerFixture, dummy_algokit_template_with_python_task: dict[str, Path]\n) -> None:\n    which_mock = WhichMock()\n    mocker.patch(\"algokit.core.utils.which\").side_effect = which_mock.which\n    mocker.patch(\"algokit.core.utils.get_base_python_path\", return_value=None)\n    which_mock.remove(\"python\")\n    which_mock.remove(\"python3\")\n\n    ref = \"HEAD\"\n    result = invoke(\n        [\n            \"init\",\n            \"--name\",\n            \"myapp\",\n            \"--no-git\",\n            \"--defaults\",\n            f\"--template-url={dummy_algokit_template_with_python_task['template_path']}\",\n            f\"--template-url-ref={ref}\",\n            \"--UNSAFE-SECURITY-accept-template-url\",\n            \"--no-workspace\",\n        ],\n        cwd=dummy_algokit_template_with_python_task[\"cwd\"],\n        input=\"y\\n\",\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_template_with_python_task_works(dummy_algokit_template_with_python_task: dict[str, Path]) -> None:\n    ref = \"HEAD\"\n    result = invoke(\n        [\n            \"init\",\n            \"--name\",\n  
          \"myapp\",\n            \"--no-git\",\n            \"--defaults\",\n            f\"--template-url={dummy_algokit_template_with_python_task['template_path']}\",\n            f\"--template-url-ref={ref}\",\n            \"--UNSAFE-SECURITY-accept-template-url\",\n            \"--no-workspace\",\n        ],\n        cwd=dummy_algokit_template_with_python_task[\"cwd\"],\n        input=\"y\\n\",\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\n@pytest.mark.parametrize(\n    (\"flow_steps\"),\n    [\n        [\n            MockQuestionaryAnswer(\"Smart Contract\", [MockPipeInput.ENTER, MockPipeInput.ENTER]),\n            None,  # no custom template URL\n        ],\n        [\n            MockQuestionaryAnswer(\"DApp Frontend\", [MockPipeInput.DOWN, MockPipeInput.ENTER]),\n            None,  # no custom template URL\n        ],\n        [\n            MockQuestionaryAnswer(\n                \"Full Stack\", [MockPipeInput.DOWN, MockPipeInput.DOWN, MockPipeInput.ENTER, MockPipeInput.ENTER]\n            ),\n            None,  # no custom template URL\n        ],\n        [\n            MockQuestionaryAnswer(\"Custom Template\", [MockPipeInput.UP, MockPipeInput.ENTER]),\n            \"gh:algorandfoundation/algokit-base-template\\n\",  # custom template URL\n        ],\n    ],\n)\ndef test_init_wizard_v2_flow(\n    flow_steps: list, tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    for step in flow_steps:\n        if isinstance(step, MockQuestionaryAnswer):\n            for command in step.commands:\n                mock_questionary_input.send_text(command.value)\n        elif isinstance(step, str):\n            mock_questionary_input.send_text(step)\n\n    # Act\n    result = invoke(\"init --defaults --no-git --name myapp --UNSAFE-SECURITY-accept-template-url\", cwd=cwd)\n\n    # Assert\n    project_type = 
flow_steps[0].value  # The first step always determines the project type\n    assert result.exit_code == 0\n    verify(\n        result.output,\n        options=NamerFactory.with_parameters(project_type),\n        scrubber=make_output_scrubber(),\n    )\n\n\ndef test_init_wizard_v2_workspace_nesting(\n    tmp_path_factory: TempPathFactory,\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    # Act\n    project_a_result = invoke(\n        \"init -t python --no-git --defaults --name myapp \"\n        \"--UNSAFE-SECURITY-accept-template-url -a preset_name 'production'\",\n        cwd=cwd,\n    )\n    project_b_result = invoke(\n        \"init -t python --no-git --defaults --name myapp2 \"\n        \"--UNSAFE-SECURITY-accept-template-url -a preset_name 'starter'\",\n        cwd=cwd / \"myapp\" / \"projects\",\n    )\n    project_c_result = invoke(\n        \"init -t python --no-git --defaults --name myapp3 \"\n        \"--UNSAFE-SECURITY-accept-template-url -a preset_name 'starter' --no-workspace\",\n        cwd=cwd / \"myapp\" / \"projects\",\n    )\n    project_d_result = invoke(\n        \"init -t python --no-git --defaults --name myapp4 \"\n        \"--UNSAFE-SECURITY-accept-template-url -a preset_name 'starter'\",\n        cwd=cwd / \"myapp\",\n    )\n\n    # Assert\n    assert project_a_result.exit_code == 0\n    assert project_b_result.exit_code == 1\n    assert project_c_result.exit_code == 0\n    assert project_d_result.exit_code == 0\n\n\ndef test_init_wizard_v2_github_folder_with_workspace(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    answer = MockQuestionaryAnswer(\"Smart Contract\", [MockPipeInput.ENTER, MockPipeInput.ENTER])\n    for command in answer.commands:\n        mock_questionary_input.send_text(command.value)\n\n    # Act\n    result = invoke(\n        \"init -t python --no-git --defaults --name myapp \"\n        
\"--UNSAFE-SECURITY-accept-template-url -a preset_name 'production'\",\n        cwd=cwd,\n    )\n\n    # Assert\n    cwd /= \"myapp\"\n    assert result.exit_code == 0\n    assert not cwd.joinpath(\"projects/myapp/.github\").exists()\n    assert cwd.joinpath(\".github\").exists()\n    assert cwd.glob(\".github/workflows/*.yaml\")\n\n\ndef test_init_wizard_v2_github_folder_with_workspace_partial(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    mock_questionary_input.send_text(\"y\\n\")  # Simulate workspace selection\n\n    github_workflow_path = cwd / \"myapp\" / \".github\" / \"workflows\"\n    github_workflow_path.mkdir(parents=True, exist_ok=True)\n    (github_workflow_path / \"cd.yaml\").touch()\n\n    # Act\n    result = invoke(\n        \"init -t python --no-git --defaults --name myapp \"\n        \"--UNSAFE-SECURITY-accept-template-url -a preset_name 'production'\",\n        input=\"y\\n\",\n        cwd=cwd,\n    )\n\n    # Assert\n    cwd /= \"myapp\"\n    assert result.exit_code == 0\n    assert not (cwd / \"projects/myapp/.github/workflows/cd.yaml\").exists()\n    assert (cwd / \".github/workflows/myapp-cd.yaml\").read_text() != \"\"\n    assert cwd.glob(\".github/workflows/*.yaml\")\n\n\ndef test_init_wizard_v2_github_folder_no_workspace(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    answer = MockQuestionaryAnswer(\"Smart Contract\", [MockPipeInput.ENTER, MockPipeInput.ENTER])\n    for command in answer.commands:\n        mock_questionary_input.send_text(command.value)\n\n    # Act\n    result = invoke(\n        \"init -t python --no-git --defaults --name myapp \"\n        \"--UNSAFE-SECURITY-accept-template-url -a preset_name 'production' --no-workspace\",\n        cwd=cwd,\n    )\n\n    # Assert\n    cwd /= \"myapp\"\n    assert result.exit_code == 
0\n    assert not cwd.joinpath(\"projects\").exists()\n    assert cwd.joinpath(\".github\").exists()\n    assert cwd.glob(\".github/workflows/*.yaml\")\n\n\n@pytest.mark.parametrize(\n    (\"workspace_content\", \"expected_path\", \"expect_warning\"),\n    [\n        # Scenario 1: Valid codespace, new project added\n        (\n            \"\"\"\n        {\n          \"folders\": [\n            {\n              \"path\": \".\",\n              \"name\": \"ROOT\"\n            },\n            {\n              \"path\": \"projects/myapp\"\n            }\n          ]\n        }\n        \"\"\",\n            \"projects/myapp2\",\n            False,\n        ),\n        # Scenario 2: No codespace, nothing happens\n        (None, None, False),\n        # Scenario 3: Invalid codespace, warning expected\n        (\"INVALID_JSON\", None, True),\n    ],\n)\ndef test_init_wizard_v2_append_to_vscode_workspace(\n    *,\n    which_mock: WhichMock,\n    proc_mock: ProcMock,\n    tmp_path_factory: TempPathFactory,\n    mock_questionary_input: PipeInput,\n    workspace_content: str,\n    expected_path: str,\n    expect_warning: bool,\n) -> None:\n    # Arrange\n    code_cmd = which_mock.add(\"code\")\n    proc_mock.set_output([code_cmd], [\"Launch project\"])\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    answer = MockQuestionaryAnswer(\"Smart Contract\", [MockPipeInput.ENTER, MockPipeInput.ENTER])\n    for command in answer.commands:\n        mock_questionary_input.send_text(command.value)\n\n    # Act\n    project_a_result = invoke(\n        \"init -t python --no-git --defaults --name myapp \"\n        \"--UNSAFE-SECURITY-accept-template-url -a preset_name 'production'\",\n        cwd=cwd,\n    )\n    if workspace_content is not None:\n        workspace_file = cwd / \"myapp\" / \"myapp.code-workspace\"\n        workspace_file.write_text(workspace_content)\n\n    project_b_result = invoke(\n        \"init -t python --no-git --defaults --name myapp2 \"\n        
\"--UNSAFE-SECURITY-accept-template-url -a preset_name 'starter'\",\n        cwd=cwd / \"myapp\",\n    )\n\n    # Assert\n    assert project_a_result.exit_code == 0\n    assert project_b_result.exit_code == 0\n    if expected_path and \"workspace_file\" in locals():\n        workspace_data = json.loads(workspace_file.read_text())\n        assert workspace_data[\"folders\"][-1][\"path\"] == expected_path\n    if expect_warning:\n        # This assumes the existence of a function `verify` to check for warnings in the output\n        verify(project_b_result.output)\n\n\n@pytest.mark.parametrize(\n    (\"initial_workspace\", \"project_path\", \"expected_workspace\", \"should_append\"),\n    [\n        # Test case 1: Different representations of root path\n        (\n            {\"folders\": [{\"path\": \"./\"}]},\n            \".\",\n            {\"folders\": [{\"path\": \"./\"}]},\n            False,\n        ),\n        # Test case 2: Normalized paths\n        (\n            {\"folders\": [{\"path\": \"projects/app1\"}]},\n            \"projects/app1\",\n            {\"folders\": [{\"path\": \"projects/app1\"}]},\n            False,\n        ),\n        # Test case 3: Different path separators\n        (\n            {\"folders\": [{\"path\": \"projects\\\\app1\"}]},\n            \"projects/app1\",\n            {\"folders\": [{\"path\": \"projects\\\\app1\"}]},\n            False,\n        ),\n        # Test case 4: Relative paths\n        (\n            {\"folders\": [{\"path\": \"./projects/app1\"}]},\n            \"projects/app1\",\n            {\"folders\": [{\"path\": \"./projects/app1\"}]},\n            False,\n        ),\n        # Test case 5: New unique path\n        (\n            {\"folders\": [{\"path\": \"projects/app1\"}]},\n            \"projects/app2\",\n            {\"folders\": [{\"path\": \"projects/app1\"}, {\"path\": \"projects/app2\"}]},\n            True,\n        ),\n        # Test case 6: Empty workspace\n        (\n            {\"folders\": 
[]},\n            \"projects/app1\",\n            {\"folders\": [{\"path\": \"projects/app1\"}]},\n            True,\n        ),\n        # Test case 7: Path with trailing slash\n        (\n            {\"folders\": [{\"path\": \"projects/app1/\"}]},\n            \"projects/app1\",\n            {\"folders\": [{\"path\": \"projects/app1/\"}]},\n            False,\n        ),\n    ],\n)\ndef test_append_to_workspace_path_normalization(\n    *,\n    tmp_path_factory: pytest.TempPathFactory,\n    initial_workspace: dict,\n    project_path: str,\n    expected_workspace: dict,\n    should_append: bool,\n    caplog: pytest.LogCaptureFixture,\n) -> None:\n    \"\"\"Test various path normalization scenarios when appending to workspace.\"\"\"\n\n    # Arrange\n    tmp_path = tmp_path_factory.mktemp(\"workspace\")\n    workspace_file = tmp_path / \"test.code-workspace\"\n    with workspace_file.open(mode=\"w\", encoding=\"utf-8\") as f:\n        json.dump(initial_workspace, f)\n\n    project_path_obj = tmp_path / project_path\n    project_path_obj.mkdir(parents=True, exist_ok=True)\n\n    # Act\n    append_project_to_vscode_workspace(project_path_obj, workspace_file)\n\n    # Assert\n    with workspace_file.open(mode=\"r\", encoding=\"utf-8\") as f:\n        actual_workspace = json.load(f)\n\n    assert actual_workspace == expected_workspace\n\n    # Check logging\n    debug_messages = [r.message for r in caplog.records if r.levelname == \"DEBUG\"]\n    if should_append:\n        assert any(\"Appended project\" in msg for msg in debug_messages)\n    else:\n        assert any(\"already in workspace\" in msg for msg in debug_messages)\n"
  },
  {
    "path": "tests/init/test_init.test_init_ask_about_git.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n? Continue anyway? (y/N)\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted 
`algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nDEBUG: Running 'git rev-parse --show-toplevel' in '{current_working_directory}/myapp'\nDEBUG: git: fatal: not a git repository (or any of the parent directories): .git\n? Would you like to initialise a git repository and perform an initial commit? (Y/n)\nDEBUG: Running 'git init' in '{current_working_directory}/myapp'\nDEBUG: git: Initialized empty Git repository in {current_working_directory}/myapp/.git/\nDEBUG: Running 'git checkout -b main' in '{current_working_directory}/myapp'\nDEBUG: git: Switched to a new branch 'main'\nDEBUG: Running 'git add --all' in '{current_working_directory}/myapp'\nDEBUG: Running 'git commit -m Project initialised with AlgoKit CLI using template: {test_parent_directory}/copier-helloworld.bundle' in '{current_working_directory}/myapp'\nDEBUG: git: [main (root-commit) {git_initial_commit_hash}] Project initialised with AlgoKit CLI using template: {test_parent_directory}/copier-helloworld.bundle\nDEBUG: git: 1 file changed, 1 insertion(+)\nDEBUG: git: create mode 100644 myapp/helloworld.txt\n🎉 Performed initial git commit successfully! 🎉\n"
  },
  {
    "path": "tests/init/test_init.test_init_blessed_template_url_get_community_warning.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n? Continue anyway? (y/N)\n🛑 Bailing out... 👋\n"
  },
  {
    "path": "tests/init/test_init.test_init_bootstrap_no.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Do you want to run `algokit project bootstrap` for this new project? This will install and configure dependencies allowing it to be run immediately. 
(Y/n)\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_bootstrap_yes.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Do you want to run `algokit project bootstrap` for this new project? This will install and configure dependencies allowing it to be run immediately. 
(Y/n)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_do_not_use_existing_folder.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\n🛑 Bailing out... 👋\n"
  },
  {
    "path": "tests/init/test_init.test_init_existing_filename_same_as_folder_name.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nERROR: A file with the same name already exists in the current directory. Please use a different name.\n🛑 Bailing out... 👋\n"
  },
  {
    "path": "tests/init/test_init.test_init_help.approved.txt",
    "content": "Usage: algokit init [OPTIONS] COMMAND [ARGS]...\n\n  Initializes a new project from a template, including prompting for template\n  specific questions to be used in template rendering.\n\n  Templates can be default templates shipped with AlgoKit, or custom templates\n  in public Git repositories.\n\n  Includes ability to initialise Git repository, run algokit project bootstrap\n  and automatically open Visual Studio Code.\n\n  This should be run in the parent directory that you want the project folder\n  created in.\n\n  By default, the `--workspace` flag creates projects within a workspace\n  structure or integrates them into an existing one, promoting organized\n  management of multiple projects. Alternatively, to disable this behavior use\n  the `--no-workspace` flag, which ensures the new project is created in a\n  standalone target directory. This is suitable for isolated projects or when\n  workspace integration is unnecessary.\n\nOptions:\n  -n, --name TEXT                 Name of the project / directory / repository\n                                  to create.\n  -t, --template [simple|python_with_version|fullstack|python|react|base]\n                                  Name of an official template to use. To choose\n                                  interactively, run this command with no\n                                  arguments.\n  --template-url URL              URL to a git repo with a custom project\n                                  template.\n  --template-url-ref URL          Specific tag, branch or commit to use on git\n                                  repo specified with --template-url. 
Defaults\n                                  to latest.\n  --UNSAFE-SECURITY-accept-template-url\n                                  Accept the specified template URL,\n                                  acknowledging the security implications of\n                                  arbitrary code execution trusting an\n                                  unofficial template.\n  --git / --no-git                Initialise git repository in directory after\n                                  creation.\n  --defaults                      Automatically choose default answers without\n                                  asking when creating this template.\n  --bootstrap / --no-bootstrap    Whether to run `algokit project bootstrap` to\n                                  install and configure the new project's\n                                  dependencies locally.\n  --ide / --no-ide                Whether to open an IDE for you if the IDE and\n                                  IDE config are detected. Supported IDEs: VS\n                                  Code.\n  --workspace / --no-workspace    Whether to prefer structuring standalone\n                                  projects as part of a workspace. An AlgoKit\n                                  workspace is a conventional project structure\n                                  that allows managing multiple standalone\n                                  projects in a monorepo.\n  -a, --answer <key> <value>      Answers key/value pairs to pass to the\n                                  template.\n  -h, --help                      Show this message and exit.\n\nCommands:\n  example  Initialize a new project from an example template.\n"
  },
  {
    "path": "tests/init/test_init.test_init_input_template_url.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Which of these options best describes the project you want to build?\nSmart Contracts 📜\nDApp Frontend 🖥️\nSmart Contracts & DApp Frontend 🎛️\nCustom Template 🛠️\nDEBUG: selected project_type = Custom Template 🛠️\nWARNING: \nCommunity templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n\nEnter a custom project URL, or leave blank and press enter to go back to official template selection.\nNote that you can use gh: as a shorthand for github.com and likewise gl: for gitlab.com\nValid examples:\n - gh:copier-org/copier\n - gl:copier-org/copier\n - git@github.com:copier-org/copier.git\n - git+https://mywebsiteisagitrepo.example.com/\n - /local/path/to/git/repo\n - /local/path/to/git/bundle/file.bundle\n - ~/path/to/git/repo\n - ~/path/to/git/repo.bundle\n\n? 
Custom template URL:\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_invalid_template_url.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nERROR: Couldn't parse repo URL https://www.google.com. Try prefixing it with git+ ?\n🛑 Bailing out... 👋\n"
  },
  {
    "path": "tests/init/test_init.test_init_minimal_interaction_required_no_git_no_network_no_bootstrap.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n? Continue anyway? (y/N)\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_minimal_interaction_required_yes_git_no_network.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n? Continue anyway? (y/N)\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted 
`algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nDEBUG: Running 'git rev-parse --show-toplevel' in '{current_working_directory}/myapp'\nDEBUG: git: fatal: not a git repository (or any of the parent directories): .git\nDEBUG: Running 'git init' in '{current_working_directory}/myapp'\nDEBUG: git: Initialized empty Git repository in {current_working_directory}/myapp/.git/\nDEBUG: Running 'git checkout -b main' in '{current_working_directory}/myapp'\nDEBUG: git: Switched to a new branch 'main'\nDEBUG: Running 'git add --all' in '{current_working_directory}/myapp'\nDEBUG: Running 'git commit -m Project initialised with AlgoKit CLI using template: {test_parent_directory}/copier-helloworld.bundle' in '{current_working_directory}/myapp'\nDEBUG: git: [main (root-commit) {git_initial_commit_hash}] Project initialised with AlgoKit CLI using template: {test_parent_directory}/copier-helloworld.bundle\nDEBUG: git: 1 file changed, 1 insertion(+)\nDEBUG: git: create mode 100644 myapp/helloworld.txt\n🎉 Performed initial git commit successfully! 🎉\n"
  },
  {
    "path": "tests/init/test_init.test_init_missing_git.approved.txt",
    "content": "Error: Git not found; please install git and add to path.\nSee https://github.com/git-guides/install-git for more information.\n"
  },
  {
    "path": "tests/init/test_init.test_init_no_community_template.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n? Continue anyway? (y/N)\n🛑 Bailing out... 👋\n"
  },
  {
    "path": "tests/init/test_init.test_init_no_interaction_required_defaults_no_git_no_network.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap 
all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_no_interaction_required_no_git_no_network.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap 
all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_no_interaction_required_no_git_no_network_with_no_ide.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to 
load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_no_interaction_required_no_git_no_network_with_vscode.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to 
load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nVSCode configuration detected in project directory, and 'code' command is available on path, attempting to launch VSCode\nDEBUG: Running '/bin/code {current_working_directory}/myapp' in '{current_working_directory}'\nDEBUG: /bin/code: Launch project\n"
  },
  {
    "path": "tests/init/test_init.test_init_no_interaction_required_no_git_no_network_with_vscode_and_readme.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to 
load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nVSCode configuration detected in project directory, and 'code' command is available on path, attempting to launch VSCode\nDEBUG: Running '/bin/code {current_working_directory}/myapp {current_working_directory}/myapp/README.txt' in '{current_working_directory}'\nDEBUG: /bin/code: Launch project\n"
  },
  {
    "path": "tests/init/test_init.test_init_project_name.approved.txt",
    "content": "WARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\n? Name of project / directory to create the project in:\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/FAKE_PROJECT\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/FAKE_PROJECT...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml 
file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/FAKE_PROJECT\n🙌 Project initialized at `FAKE_PROJECT`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_project_name_not_empty.approved.txt",
    "content": "WARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\n? Name of project / directory to create the project in:\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/FAKE_PROJECT\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/FAKE_PROJECT...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml 
file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/FAKE_PROJECT\n🙌 Project initialized at `FAKE_PROJECT`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_project_name_reenter_folder_name.approved.txt",
    "content": "WARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\n? Name of project / directory to create the project in:\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\n? Name of project / directory to create the project in:\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/FAKE_PROJECT_2\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/FAKE_PROJECT_2...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config 
from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/FAKE_PROJECT_2\n🙌 Project initialized at `FAKE_PROJECT_2`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_template_selection.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Which of these options best describes the project you want to build?\nSmart Contracts 📜\nDApp Frontend 🖥️\nSmart Contracts & DApp Frontend 🎛️\nCustom Template 🛠️\nDEBUG: selected project_type = Smart Contracts 📜\n? Which language would you like to use for the smart contract?\nPython 🐍\nTypeScript 📘\nDEBUG: selected language = ContractLanguage.PYTHON\nDEBUG: template source = gh:algorandfoundation/algokit-python-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nExecuted `algokit project bootstrap all` in 
{current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_template_url_and_template_name.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nError: Cannot specify both --template and --template-url\n"
  },
  {
    "path": "tests/init/test_init.test_init_template_with_python_task_fails_on_missing_python.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {current_working_directory}/dummy_template@HEAD\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {current_working_directory}/dummy_template\n"
  },
  {
    "path": "tests/init/test_init.test_init_template_with_python_task_works.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {current_working_directory}/dummy_template@HEAD\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {current_working_directory}/dummy_template\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project 
initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_use_existing_folder.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nNo git tags found in template; using HEAD as ref\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to 
load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init.test_init_with_any_template_url_get_community_warning.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n? Continue anyway? (y/N)\nDEBUG: template source = gh:algorandfoundation/algokit-python-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n🙌 Project initialized at `myapp`! 
For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_with_any_template_url_get_community_warning_with_unsafe_tag.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = gh:algorandfoundation/algokit-python-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n🙌 Project initialized at `myapp`! 
For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_with_custom_env.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: template source = gh:algorandfoundation/algokit-python-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_with_official_template_name.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: template source = gh:algorandfoundation/algokit-python-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_with_official_template_name_and_hash.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: template source = gh:algorandfoundation/algokit-python-template@f97be2c0e3975adfaeb16ef07a2b4bd6ce2afcff\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! 
For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_wizard_v2_append_to_vscode_workspace.approved.txt",
    "content": "DEBUG: template source = gh:algorandfoundation/algokit-python-template\nDEBUG: Attempting to load project config from {current_working_directory}/myapp2/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: project path = {current_working_directory}/myapp2\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Workspace structure detected! Moving the project to be instantiated into {current_working_directory}/projects\nStarting template copy and render at {current_working_directory}/projects/myapp2...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/projects/myapp2/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/projects/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/projects/myapp2/.algokit.toml\nExecuted `algokit project bootstrap all` in {current_working_directory}/projects/myapp2\n🙌 Project initialized at `myapp2`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nWARNING: Invalid JSON format in the workspace file {current_working_directory}/myapp.code-workspace. 
Expecting value: line 1 column 1 (char 0)\nVSCode configuration detected in project directory, and 'code' command is available on path, attempting to launch VSCode\nDEBUG: Running '/bin/code {current_working_directory}/myapp.code-workspace {current_working_directory}/projects/myapp2/README.md' in '{current_working_directory}'\nDEBUG: /bin/code: Launch project\n"
  },
  {
    "path": "tests/init/test_init.test_init_wizard_v2_flow.Custom Template.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Which of these options best describes the project you want to build?\nSmart Contracts 📜\nDApp Frontend 🖥️\nSmart Contracts & DApp Frontend 🎛️\nCustom Template 🛠️\nDEBUG: selected project_type = Custom Template 🛠️\nWARNING: \nCommunity templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\n\nEnter a custom project URL, or leave blank and press enter to go back to official template selection.\nNote that you can use gh: as a shorthand for github.com and likewise gl: for gitlab.com\nValid examples:\n - gh:copier-org/copier\n - gl:copier-org/copier\n - git@github.com:copier-org/copier.git\n - git+https://mywebsiteisagitrepo.example.com/\n - /local/path/to/git/repo\n - /local/path/to/git/bundle/file.bundle\n - ~/path/to/git/repo\n - ~/path/to/git/repo.bundle\n\n? Custom template URL:\nDEBUG: template source = gh:algorandfoundation/algokit-base-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Workspace structure is ready! 
The project is to be placed under {current_working_directory}/myapp/projects/myapp\nStarting template copy and render at {current_working_directory}/myapp/projects/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-base-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp/projects/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-base-template\nDEBUG: Project {current_working_directory}/myapp/projects/myapp is already in workspace {current_working_directory}/myapp/projects/myapp/myapp.code-workspace, not appending.\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_wizard_v2_flow.DApp Frontend.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Which of these options best describes the project you want to build?\nSmart Contracts 📜\nDApp Frontend 🖥️\nSmart Contracts & DApp Frontend 🎛️\nCustom Template 🛠️\nDEBUG: selected project_type = DApp Frontend 🖥️\nDEBUG: template source = gh:algorandfoundation/algokit-base-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Workspace structure is ready! The project is to be placed under {current_working_directory}/myapp/projects/myapp\nStarting template copy and render at {current_working_directory}/myapp/projects/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-base-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp/projects/myapp\n🙌 Project initialized at `myapp`! 
For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-base-template\nDEBUG: Project {current_working_directory}/myapp/projects/myapp is already in workspace {current_working_directory}/myapp/projects/myapp/myapp.code-workspace, not appending.\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_wizard_v2_flow.Full Stack.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Which of these options best describes the project you want to build?\nSmart Contracts 📜\nDApp Frontend 🖥️\nSmart Contracts & DApp Frontend 🎛️\nCustom Template 🛠️\nDEBUG: selected project_type = Smart Contracts & DApp Frontend 🎛️\n? Which language would you like to use for the smart contract?\nPython 🐍\nTypeScript 📘\nDEBUG: selected language = ContractLanguage.PYTHON\nDEBUG: template source = gh:algorandfoundation/algokit-base-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-base-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! 
For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-base-template\nDEBUG: Project {current_working_directory}/myapp is already in workspace {current_working_directory}/myapp/myapp.code-workspace, not appending.\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_wizard_v2_flow.Smart Contract.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n? Which of these options best describes the project you want to build?\nSmart Contracts 📜\nDApp Frontend 🖥️\nSmart Contracts & DApp Frontend 🎛️\nCustom Template 🛠️\nDEBUG: selected project_type = Smart Contracts 📜\n? Which language would you like to use for the smart contract?\nPython 🐍\nTypeScript 📘\nDEBUG: selected language = ContractLanguage.PYTHON\nDEBUG: template source = gh:algorandfoundation/algokit-python-template\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Workspace structure is ready! 
The project is to be placed under {current_working_directory}/myapp/projects/myapp\nStarting template copy and render at {current_working_directory}/myapp/projects/myapp...\nDEBUG: final clone URL = https://github.com/algorandfoundation/algokit-python-template.git\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nExecuted `algokit project bootstrap all` in {current_working_directory}/myapp/projects/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\nYour selected template comes from:\n➡️  https://github.com/algorandfoundation/algokit-python-template\nDEBUG: Appended project {current_working_directory}/myapp/projects/myapp to workspace {current_working_directory}/myapp/myapp.code-workspace.\nYour template includes a README.md file, you might want to review that as a next step.\n"
  },
  {
    "path": "tests/init/test_init.test_init_wizard_v2_github_folder_no_workspace.approved.txt",
    "content": ""
  },
  {
    "path": "tests/init/test_init.test_init_wizard_v2_github_folder_with_workspace.approved.txt",
    "content": ""
  },
  {
    "path": "tests/init/test_init.test_invalid_name.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nUsage: algokit init [OPTIONS] COMMAND [ARGS]...\nTry 'algokit init -h' for help.\n\nError: Invalid value for '--name' / '-n': Invalid directory name. Ensure it's a mix of letters, numbers, dashes, periods, and/or underscores, and not already used.\n"
  },
  {
    "path": "tests/init/test_init_with_bootstrap.py",
    "content": "from collections.abc import Callable\nfrom pathlib import Path\n\nimport click\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.scrubbers.scrubbers import Scrubber\nfrom prompt_toolkit.input import PipeInput\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_current_package_version\nfrom tests.utils.approvals import TokenScrubber, combine_scrubbers, verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\nPARENT_DIRECTORY = Path(__file__).parent\nGIT_BUNDLE_PATH = PARENT_DIRECTORY / \"copier-helloworld.bundle\"\n\n\ndef _remove_project_paths(output: str) -> str:\n    lines = [\n        \"DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\"\n        if \"DEBUG: Attempting to load project config from \" in line\n        else line\n        for line in output.splitlines()\n    ]\n\n    return \"\\n\".join(lines)\n\n\ndef make_output_scrubber(*extra_scrubbers: Callable[[str], str], **extra_tokens: str) -> Scrubber:\n    default_tokens = {\"test_parent_directory\": str(PARENT_DIRECTORY)}\n    tokens = default_tokens | extra_tokens\n    return combine_scrubbers(\n        *extra_scrubbers,\n        click.unstyle,\n        TokenScrubber(tokens=tokens),\n        TokenScrubber(tokens={\"test_parent_directory\": str(PARENT_DIRECTORY).replace(\"\\\\\", \"/\")}),\n        lambda t: t.replace(\"{test_parent_directory}\\\\\", \"{test_parent_directory}/\"),\n        _remove_project_paths,\n    )\n\n\ndef test_init_bootstrap_broken_poetry(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput, proc_mock: ProcMock, mocker: MockerFixture\n) -> None:\n    # Mock global preference to use Poetry for this test\n    mocker.patch(\"algokit.core.config_commands.py_package_manager.get_py_package_manager\", return_value=\"poetry\")\n    # Also mock the bootstrap function directly to ensure it uses Poetry\n    
mocker.patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"poetry\")\n\n    proc_mock.should_bad_exit_on(\"poetry --version\")\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    app_name = \"myapp\"\n    project_path = cwd / app_name\n    project_path.mkdir()\n    (project_path / \"poetry.toml\").touch()\n    mock_questionary_input.send_text(\"Y\")  # reuse existing directory\n\n    result = invoke(\n        f\"init -n {app_name} --no-git --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url\"\n        \" --answer greeting hi --answer include_extra_file yes --bootstrap --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber())\n\n\ndef test_init_bootstrap_version_fail(\n    tmp_path_factory: TempPathFactory,\n    mock_questionary_input: PipeInput,\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    app_name = \"myapp\"\n    project_path = cwd / app_name\n    project_path.mkdir()\n    (project_path / ALGOKIT_CONFIG).write_text('[algokit]\\nmin_version = \"999.99.99\"\\n')\n    mock_questionary_input.send_text(\"Y\")  # reuse existing directory\n\n    result = invoke(\n        f\"init -n {app_name} --no-git --template-url '{GIT_BUNDLE_PATH}' --UNSAFE-SECURITY-accept-template-url\"\n        \" --answer greeting hi --answer include_extra_file yes --bootstrap --no-workspace\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_output_scrubber(current_version=get_current_package_version()))\n"
  },
  {
    "path": "tests/init/test_init_with_bootstrap.test_init_bootstrap_broken_poetry.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to 
load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/myapp for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nERROR: Received an error while attempting bootstrap: poetry --version failed, please check your poetry install\nERROR: Bootstrap failed. Once any errors above are resolved, you can run `algokit project bootstrap` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/init/test_init_with_bootstrap.test_init_bootstrap_version_fail.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: Community templates have not been reviewed, and can execute arbitrary code.\nPlease inspect the template repository, and pay particular attention to the values of _tasks, _migrations and _jinja_extensions in copier.yml\nDEBUG: template source = {test_parent_directory}/copier-helloworld.bundle\nWARNING: Re-using existing directory, this is not recommended because if project generation fails, then we can't automatically cleanup.\n? Continue anyway? (y/N)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: project path = {current_working_directory}/myapp\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nStarting template copy and render at {current_working_directory}/myapp...\nDEBUG: final clone URL = {test_parent_directory}/copier-helloworld.bundle\nNo git tags found in template; using HEAD as ref\nTemplate render complete!\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nERROR: Received an error while attempting bootstrap: This 
template requires AlgoKit version 999.99.99 or higher, but you have AlgoKit version {current_version}. Please update AlgoKit.\nERROR: Bootstrap failed. Once any errors above are resolved, you can run `algokit project bootstrap` in {current_working_directory}/myapp\n🙌 Project initialized at `myapp`! For template specific next steps, consult the documentation of your selected template 🧐\n"
  },
  {
    "path": "tests/localnet/__init__.py",
    "content": ""
  },
  {
    "path": "tests/localnet/conftest.py",
    "content": "import json\n\nimport pytest\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.sandbox import ALGOD_HEALTH_URL, ALGORAND_IMAGE, INDEXER_HEALTH_URL, INDEXER_IMAGE\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.fixture(autouse=True)\ndef _algod_health_fast_timings(mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.sandbox.DEFAULT_WAIT_FOR_ALGOD\", 0.1)\n    mocker.patch(\"algokit.core.sandbox.DEFAULT_WAIT_FOR_INDEXER\", 0.1)\n    mocker.patch(\"algokit.core.sandbox.DEFAULT_HEALTH_TIMEOUT\", 0.1)\n\n\n@pytest.fixture\ndef _health_success(httpx_mock: HTTPXMock) -> None:\n    httpx_mock.add_response(url=ALGOD_HEALTH_URL)\n    httpx_mock.add_response(url=INDEXER_HEALTH_URL)\n\n\n@pytest.fixture\ndef _localnet_up_to_date(proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    arg = \"{{range .RepoDigests}}{{println .}}{{end}}\"\n\n    proc_mock.set_output(\n        [\"docker\", \"image\", \"inspect\", ALGORAND_IMAGE, \"--format\", arg],\n        [\"tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\n\"],\n    )\n\n    proc_mock.set_output(\n        [\"docker\", \"image\", \"inspect\", INDEXER_IMAGE, \"--format\", arg],\n        [\"tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\\n\"],\n    )\n\n    httpx_mock.add_response(\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest\",\n        json={\n            \"digest\": \"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\",\n        },\n    )\n\n    httpx_mock.add_response(\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest\",\n        json={\n            \"digest\": \"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\n        },\n    )\n\n\n@pytest.fixture\ndef _mock_proc_with_running_localnet(proc_mock: ProcMock, 
app_dir_mock: AppDirs) -> None:\n    compose_file_path = str(app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\")\n    proc_mock.set_output(\n        \"docker compose ls --format json --filter name=algokit_sandbox*\",\n        [json.dumps([{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": compose_file_path}])],\n    )\n"
  },
  {
    "path": "tests/localnet/test_localnet.py",
    "content": "from approvaltests import verify\n\nfrom tests.utils.click_invoker import invoke\n\n\ndef test_localnet_help() -> None:\n    result = invoke(\"localnet -h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n"
  },
  {
    "path": "tests/localnet/test_localnet.test_localnet_help.approved.txt",
    "content": "Usage: algokit localnet [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  codespace  Manage the AlgoKit LocalNet in GitHub Codespaces.\n  config     Configure the container engine for AlgoKit LocalNet.\n  console    Run the Algorand goal CLI against the AlgoKit LocalNet via a Bash\n             console so you can execute multiple goal commands and/or interact\n             with a filesystem.\n  explore    Explore the AlgoKit LocalNet using lora.\n  logs       See the output of the Docker containers.\n  reset      Reset the AlgoKit LocalNet.\n  start      Start the AlgoKit LocalNet.\n  status     Check the status of the AlgoKit LocalNet.\n  stop       Stop the AlgoKit LocalNet.\n"
  },
  {
    "path": "tests/localnet/test_localnet_codespace.py",
    "content": "import platform\nfrom subprocess import CompletedProcess\n\nimport pytest\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\ndef test_install_gh_already_installed(mocker: MockerFixture, proc_mock: ProcMock) -> None:\n    proc_mock.set_output([\"gh\", \"--version\"], [\"some version\"])\n    mocker.patch(\"algokit.cli.codespace.authenticate_with_github\", return_value=False)\n    result = invoke(\"localnet codespace\")\n    assert result.exit_code == 0\n\n\ndef test_install_gh_not_installed_failed_install(mocker: MockerFixture, proc_mock: ProcMock) -> None:\n    proc_mock.should_fail_on([\"gh\", \"--version\"])\n    mocker.patch(\"algokit.cli.codespace.authenticate_with_github\", return_value=False)\n    mocker.patch(\"algokit.core.codespace.install_github_cli_via_webi\", side_effect=RuntimeError(\"Failed to install gh\"))\n    mocker.patch(\"algokit.core.codespace.is_windows\", side_effect=RuntimeError(\"Failed to install gh\"))\n    result = invoke(\"localnet codespace\")\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.mock_platform_system(\"Windows\")\ndef test_install_gh_windows(\n    mocker: MockerFixture, proc_mock: ProcMock, httpx_mock: HTTPXMock, tmp_path_factory: pytest.TempPathFactory\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    dummy_script_path = cwd / \"webi_dummy_installer.ps1\"\n    dummy_script_path.touch()\n\n    proc_mock.should_fail_on(\n        [\"gh\", \"--version\"],\n    )\n    proc_mock.set_output(\n        [\"powershell\", \"-command\", \"(Get-Variable PSVersionTable -ValueOnly).PSVersion\"], [\"PowerShell 7.2.1\"]\n    )\n    proc_mock.set_output(\n        [\n            \"powershell\",\n            \"-File\",\n            str(dummy_script_path),\n        ],\n        [\"installed gh!\"],\n    )\n    
httpx_mock.add_response(url=\"https://webi.ms/gh\", text=\"\")\n    mocker.patch(\"algokit.cli.codespace.authenticate_with_github\", return_value=False)\n    temp_file_mock = mocker.MagicMock()\n    temp_file_mock.__enter__.return_value.name = str(dummy_script_path)\n    mocker.patch(\"tempfile.NamedTemporaryFile\", return_value=temp_file_mock)\n\n    result = invoke(\"localnet codespace\")\n    assert result.exit_code == 0\n\n    verify(result.output.replace(str(dummy_script_path), \"{dummy_script_path}\"))\n\n\n@pytest.mark.skipif(platform.system().lower() == \"windows\", reason=\"Test only runs on Unix systems\")\ndef test_install_gh_unix(\n    mocker: MockerFixture, proc_mock: ProcMock, httpx_mock: HTTPXMock, tmp_path_factory: pytest.TempPathFactory\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    dummy_script_path = cwd / \"webi_dummy_installer.sh\"\n    dummy_script_path.touch()\n    proc_mock.set_output([\"bash\", \"--version\"], [\"GNU bash, version 3.2.57(1)-release\"])\n    proc_mock.should_fail_on(\n        [\"gh\", \"--version\"],\n    )\n    proc_mock.set_output([\"bash\", str(dummy_script_path)], [\"installed gh!\"])\n    httpx_mock.add_response(url=\"https://webi.sh/gh\", text=\"\")\n    mocker.patch(\"algokit.cli.codespace.authenticate_with_github\", return_value=False)\n\n    temp_file_mock = mocker.MagicMock()\n    temp_file_mock.__enter__.return_value.name = str(cwd / \"webi_dummy_installer.sh\")\n    mocker.patch(\"tempfile.NamedTemporaryFile\", return_value=temp_file_mock)\n\n    result = invoke(\"localnet codespace\")\n    assert result.exit_code == 0\n    verify(result.output.replace(str(cwd), \"{cwd}\"))\n\n\ndef test_invalid_scope_auth(\n    mocker: MockerFixture, proc_mock: ProcMock, tmp_path_factory: pytest.TempPathFactory\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    dummy_script_path = cwd / \"webi_dummy_installer.sh\"\n    dummy_script_path.touch()\n    proc_mock.set_output(\n        [\"gh\", \"auth\", 
\"status\"],\n        [\n            \"\"\"\n  ✓ Logged in to github.com account aorumbayev (keyring)\n  - Active account: true\n  - Git operations protocol: https\n  - Token: gho_************************************\n  - Token scopes: 'read:org', 'repo', 'workflow'\n\"\"\"\n        ],\n    )\n    mocker.patch(\"algokit.core.proc.subprocess_run\").return_value = CompletedProcess(\n        args=[\"docker\", \"exec\"], returncode=0, stdout=\"logged in!\"\n    )\n    proc_mock.set_output(\n        [\n            \"gh\",\n            \"codespace\",\n            \"create\",\n            \"--repo\",\n            \"algorandfoundation/algokit-base-template\",\n            \"--display-name\",\n            \"sandbox\",\n            \"--machine\",\n            \"basicLinux32gb\",\n        ],\n        [],\n    )\n    proc_mock.set_output(\n        [\"gh\", \"codespace\", \"list\", \"--json\", \"displayName\", \"--json\", \"state\", \"--json\", \"name\"],\n        [\n            \"\"\"\n            [{\"displayName\":\"sandbox\",\"state\":\"Available\",\"name\":\"sandbox\"}]\n            \"\"\"\n        ],\n    )\n    proc_mock.set_output(\n        [\"gh\", \"codespace\", \"delete\", \"--codespace\", \"sandbox\", \"--force\"], [\"Deleted unused codespace\"]\n    )\n    mocker.patch(\"algokit.cli.codespace.forward_ports_for_codespace\", return_value=None)\n    mocker.patch(\"algokit.core.codespace.run_with_animation\")\n\n    result = invoke(\"localnet codespace -n sandbox --force\")\n    assert result.exit_code == 0\n    verify(result.output)\n"
  },
  {
    "path": "tests/localnet/test_localnet_codespace.test_install_gh_not_installed_failed_install.approved.txt",
    "content": "DEBUG: Running 'gh --version' in '{current_working_directory}'\nInstalling gh...\nERROR: Failed to automatically install gh cli: Failed to install gh\nERROR: Please install `gh cli` manually by following official documentation at https://cli.github.com/\n"
  },
  {
    "path": "tests/localnet/test_localnet_codespace.test_install_gh_unix.approved.txt",
    "content": "DEBUG: Running 'gh --version' in '{current_working_directory}'\nInstalling gh...\nHTTP Request: GET https://webi.sh/gh \"HTTP/1.1 200 OK\"\nDEBUG: Running 'bash --version' in '{current_working_directory}'\nDEBUG: bash: GNU bash, version 3.2.57(1)-release\nDEBUG: Running 'bash {cwd}/webi_dummy_installer.sh' in '{current_working_directory}'\nDEBUG: bash: installed gh!\ngh installed successfully!\nWARNING: Restart your terminal to activate the `gh` CLI and re-run `algokit localnet codespace` to get started...\n"
  },
  {
    "path": "tests/localnet/test_localnet_codespace.test_install_gh_windows.approved.txt",
    "content": "DEBUG: Running 'gh --version' in '{current_working_directory}'\nInstalling gh...\nHTTP Request: GET https://webi.ms/gh \"HTTP/1.1 200 OK\"\nDEBUG: Running 'powershell -command (Get-Variable PSVersionTable -ValueOnly).PSVersion' in '{current_working_directory}'\nDEBUG: powershell: PowerShell 7.2.1\nDEBUG: Running 'powershell -File {dummy_script_path}' in '{current_working_directory}'\nDEBUG: powershell: installed gh!\ngh installed successfully!\nWARNING: Restart your terminal to activate the `gh` CLI and re-run `algokit localnet codespace` to get started...\n"
  },
  {
    "path": "tests/localnet/test_localnet_codespace.test_invalid_scope_auth.approved.txt",
    "content": "DEBUG: Running 'gh --version' in '{current_working_directory}'\nDEBUG: gh: STDOUT\nDEBUG: gh: STDERR\nDEBUG: Running 'gh auth status' in '{current_working_directory}'\nDEBUG: gh: \nDEBUG: gh: ✓ Logged in to github.com account aorumbayev (keyring)\nDEBUG: gh: - Active account: true\nDEBUG: gh: - Git operations protocol: https\nDEBUG: gh: - Token: gho_************************************\nDEBUG: gh: - Token scopes: 'read:org', 'repo', 'workflow'\nERROR: Required 'codespace' scope is missing. Please ensure you have the 'codespace' scope by running `gh auth refresh-token -s codespace`.\nDEBUG: Running 'gh auth login -s codespace' in '{current_working_directory}'\nLogged in to GitHub Codespace\nDEBUG: Running 'gh codespace create --repo algorandfoundation/algokit-base-template --display-name sandbox --machine basicLinux32gb --idle-timeout 240m' in '{current_working_directory}'\nWaiting for codespace sandbox to be ready...\nDEBUG: Running 'gh codespace list --json displayName --json state --json name' in '{current_working_directory}'\nDEBUG: gh: \nDEBUG: gh: [{\"displayName\":\"sandbox\",\"state\":\"Available\",\"name\":\"sandbox\"}]\nDEBUG: gh: \nCodespace sandbox is now ready.\nWARNING: Keep the terminal open during the LocalNet session. Terminating the session will delete the codespace instance.\nLocalNet started in GitHub Codespace\nExiting...\nWARNING: Deleting the `sandbox` codespace...\nDEBUG: Running 'gh codespace delete --codespace sandbox --force' in '{current_working_directory}'\nDEBUG: gh: Deleted unused codespace\n"
  },
  {
    "path": "tests/localnet/test_localnet_console.py",
    "content": "import json\nfrom subprocess import CompletedProcess\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.sandbox import get_algod_network_template, get_config_json, get_docker_compose_yml\nfrom tests.goal.test_goal import _normalize_output\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\")\ndef test_goal_console(\n    mocker: MockerFixture, tmp_path_factory: pytest.TempPathFactory, app_dir_mock: AppDirs, proc_mock: ProcMock\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    mocked_goal_mount = cwd / \"goal_mount\"\n    mocked_goal_mount.mkdir()\n    mocker.patch(\"algokit.cli.goal.get_volume_mount_path_local\").return_value = mocked_goal_mount\n\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(get_docker_compose_yml())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(get_config_json())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\").write_text(get_algod_network_template())\n\n    mocker.patch(\"algokit.core.proc.subprocess_run\").return_value = CompletedProcess(\n        [\"docker\", \"exec\"], 0, \"STDOUT+STDERR\"\n    )\n    proc_mock.set_output(\n        cmd=[\"docker\", \"compose\", \"ps\", \"algod\", \"--format\", \"json\"],\n        output=[json.dumps([{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}])],\n    )\n\n    result = invoke(\"localnet console\")\n\n    assert result.exit_code == 0\n    verify(\n        _normalize_output(result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\"))\n    )\n"
  },
  {
    "path": "tests/localnet/test_localnet_console.test_goal_console.approved.txt",
    "content": "DEBUG: Running '{container_engine} compose version --format json' in '{current_working_directory}'\nDEBUG: {container_engine}: {\"version\": \"v2.5.0\"}\nDEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} version' in '{current_working_directory}'\nDEBUG: {container_engine}: STDOUT\nDEBUG: {container_engine}: STDERR\nDEBUG: Running '{container_engine} compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/{container_engine}-compose.yml\"}]\nDEBUG: Running '{container_engine} compose ps algod --format json' in '{app_config}/sandbox'\nDEBUG: {container_engine}: [{\"Name\": \"algokit_sandbox_algod\", \"State\": \"running\"}]\nOpening Bash console on the algod node; execute `exit` to return to original console\nDEBUG: Running '{container_engine} exec -it -w /root algokit_sandbox_algod bash' in '{current_working_directory}'\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.py",
    "content": "import json\n\nimport pytest\n\nfrom algokit.core.sandbox import get_algod_network_template, get_config_json, get_docker_compose_yml\nfrom tests import get_combined_verify_output\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_reset_without_existing_sandbox(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"localnet reset\")\n\n    assert result.exit_code == 0\n    verify(\n        get_combined_verify_output(\n            result.output.replace(\"\\\\\\\\\", \"\\\\\")\n            .replace(str(app_dir_mock.app_config_dir), \"{app_config}\")\n            .replace(\"\\\\\", \"/\"),\n            \"{app_config}/sandbox/docker-compose.yml\",\n            (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").read_text(),\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_reset_with_existing_sandbox_with_out_of_date_config(app_dir_mock: AppDirs) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"out of date config\")\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(\"out of date config\")\n\n    result = invoke(\"localnet reset\")\n\n    assert result.exit_code == 0\n    verify(\n        \"\\n\".join(\n            [\n                result.output.replace(\"\\\\\\\\\", \"\\\\\")\n                .replace(str(app_dir_mock.app_config_dir), \"{app_config}\")\n                .replace(\"\\\\\", \"/\"),\n                \"{app_config}/sandbox/docker-compose.yml\",\n                (app_dir_mock.app_config_dir / \"sandbox\" / 
\"docker-compose.yml\").read_text(),\n                \"{app_config}/sandbox/algod_config.json\",\n                (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").read_text(),\n            ]\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_reset_with_existing_sandbox_with_up_to_date_config(app_dir_mock: AppDirs) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(get_docker_compose_yml())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(get_config_json())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\").write_text(get_algod_network_template())\n\n    result = invoke(\"localnet reset\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\")\ndef test_localnet_reset_with_named_sandbox_config(proc_mock: ProcMock, app_dir_mock: AppDirs) -> None:\n    compose_file_path = str(app_dir_mock.app_config_dir / \"sandbox_test\" / \"docker-compose.yml\")\n    proc_mock.set_output(\n        \"docker compose ls --format json --filter name=algokit_sandbox*\",\n        [json.dumps([{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": compose_file_path}])],\n    )\n    (app_dir_mock.app_config_dir / \"sandbox_test\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox_test\" / \"docker-compose.yml\").write_text(\n        get_docker_compose_yml(name=\"algokit_sandbox_test\")\n    )\n    (app_dir_mock.app_config_dir / \"sandbox_test\" / \"algod_config.json\").write_text(get_config_json())\n    (app_dir_mock.app_config_dir / \"sandbox_test\" / 
\"algod_network_template.json\").write_text(\n        get_algod_network_template()\n    )\n\n    result = invoke(\"localnet reset\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\", \"_health_success\", \"_mock_proc_with_running_localnet\"\n)\ndef test_localnet_reset_with_existing_sandbox_with_up_to_date_config_with_pull(app_dir_mock: AppDirs) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(get_docker_compose_yml())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(get_config_json())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\").write_text(get_algod_network_template())\n\n    result = invoke(\"localnet reset --update\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_reset_without_docker(proc_mock: ProcMock) -> None:\n    proc_mock.should_fail_on(\"docker compose version\")\n\n    result = invoke(\"localnet reset\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_reset_without_docker_compose(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker compose version\")\n\n    result = invoke(\"localnet reset\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_proc_with_running_localnet\")\ndef 
test_localnet_reset_without_docker_engine_running(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker version\")\n\n    result = invoke(\"localnet reset\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_with_existing_sandbox_with_out_of_date_config.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nCleaning up the running AlgoKit LocalNet...\nDEBUG: Running 'docker compose down' in '{app_config}/sandbox'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nSyncing LocalNet configuration\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user 
interface.\n\n{app_config}/sandbox/docker-compose.yml\nname: \"algokit_sandbox\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_algod\"\n    image: algorand/algod:latest\n    ports:\n      - 4001:8080\n      - 4002:7833\n      - 9392:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"algokit_sandbox_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n\n{app_config}/sandbox/algod_config.json\n{ \"GossipFanout\": 1, \"EndpointAddress\": \"0.0.0.0:8080\", \"DNSBootstrapID\": \"\", \"IncomingConnectionsLimit\": 0, \"Archival\":true, 
\"isIndexerActive\":false, \"EnableDeveloperAPI\":true, \"EnablePrivateNetworkAccessHeader\":true}\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_with_existing_sandbox_with_up_to_date_config.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nCleaning up the running AlgoKit LocalNet...\nDEBUG: Running 'docker compose down' in '{app_config}/sandbox'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_with_existing_sandbox_with_up_to_date_config_with_pull.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nCleaning up the running AlgoKit LocalNet...\nDEBUG: Running 'docker compose down' in '{app_config}/sandbox'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nFetching any container updates from DockerHub...\nDEBUG: Running 'docker compose pull --ignore-pull-failures --quiet' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: Image version cache reset\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_with_named_sandbox_config.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox_test/docker-compose.yml\"}]\nCleaning up the running AlgoKit LocalNet...\nDEBUG: Running 'docker compose down' in '{app_config}/sandbox_test'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox_test'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_without_docker.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nError: Container engine not found; please install Docker or Podman and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_without_docker_compose.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine compose not found; please install Docker Compose or Podman Compose and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_without_docker_engine_running.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine isn't running; please start it.\n"
  },
  {
    "path": "tests/localnet/test_localnet_reset.test_localnet_reset_without_existing_sandbox.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Existing LocalNet not found; creating from scratch...\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n----\n{app_config}/sandbox/docker-compose.yml:\n----\nname: \"algokit_sandbox\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_algod\"\n    image: algorand/algod:latest\n    ports:\n      - 4001:8080\n      - 4002:7833\n      - 9392:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: 
/etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"algokit_sandbox_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.py",
    "content": "import json\n\nimport httpx\nimport pytest\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.sandbox import (\n    ALGOD_HEALTH_URL,\n    ALGORAND_IMAGE,\n    INDEXER_IMAGE,\n    get_algod_network_template,\n    get_config_json,\n    get_docker_compose_yml,\n)\nfrom tests import get_combined_verify_output\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.fixture\ndef _localnet_out_of_date(proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    arg = \"{{range .RepoDigests}}{{println .}}{{end}}\"\n    proc_mock.set_output(\n        [\"docker\", \"image\", \"inspect\", ALGORAND_IMAGE, \"--format\", arg],\n        [\"tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\\n\"],\n    )\n\n    proc_mock.set_output(\n        [\"docker\", \"image\", \"inspect\", INDEXER_IMAGE, \"--format\", arg],\n        [\"tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\n\"],\n    )\n\n    httpx_mock.add_response(\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest\",\n        json={\n            \"digest\": \"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\",\n        },\n    )\n\n    httpx_mock.add_response(\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest\",\n        json={\n            \"digest\": \"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\n        },\n    )\n\n\n@pytest.fixture\ndef _localnet_img_check_cmd_error(\n    proc_mock: ProcMock,\n    httpx_mock: HTTPXMock,\n) -> None:\n    arg = \"{{range .RepoDigests}}{{println .}}{{end}}\"\n    proc_mock.should_fail_on([\"docker\", \"image\", \"inspect\", ALGORAND_IMAGE, \"--format\", arg])\n    proc_mock.should_fail_on([\"docker\", \"image\", 
\"inspect\", INDEXER_IMAGE, \"--format\", arg])\n\n    httpx_mock.add_exception(\n        httpx.RemoteProtocolError(\"No response\"),\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest\",\n    )\n\n    httpx_mock.add_exception(\n        httpx.RemoteProtocolError(\"No response\"),\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest\",\n    )\n\n\n@pytest.mark.usefixtures(\"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        get_combined_verify_output(\n            result.output.replace(\"\\\\\\\\\", \"\\\\\")\n            .replace(str(app_dir_mock.app_config_dir), \"{app_config}\")\n            .replace(\"\\\\\", \"/\"),\n            \"{app_config}/sandbox/docker-compose.yml\",\n            (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").read_text(),\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\")\ndef test_localnet_start_with_name(app_dir_mock: AppDirs, proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\n        \"docker compose ls --format json --filter name=algokit_sandbox*\",\n        [\n            json.dumps(\n                [\n                    {\n                        \"Name\": \"algokit_sandbox_test\",\n                        \"Status\": \"running\",\n                        \"ConfigFiles\": str(app_dir_mock.app_config_dir / \"sandbox_test\" / \"docker-compose.yml\"),\n                    }\n                ]\n            )\n        ],\n    )\n    result = invoke(\"localnet start --name test\")\n\n    assert result.exit_code == 0\n    verify(\n        get_combined_verify_output(\n            result.output.replace(\"\\\\\\\\\", \"\\\\\")\n            .replace(str(app_dir_mock.app_config_dir), 
\"{app_config}\")\n            .replace(\"\\\\\", \"/\"),\n            \"{app_config}/sandbox_test/docker-compose.yml\",\n            (app_dir_mock.app_config_dir / \"sandbox_test\" / \"docker-compose.yml\").read_text(),\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_health_failure(app_dir_mock: AppDirs, httpx_mock: HTTPXMock) -> None:\n    httpx_mock.add_exception(httpx.RemoteProtocolError(\"No response\"), url=ALGOD_HEALTH_URL)\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        get_combined_verify_output(\n            result.output.replace(\"\\\\\\\\\", \"\\\\\")\n            .replace(str(app_dir_mock.app_config_dir), \"{app_config}\")\n            .replace(\"\\\\\", \"/\"),\n            \"{app_config}/sandbox/docker-compose.yml\",\n            (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").read_text(),\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_health_bad_status(app_dir_mock: AppDirs, httpx_mock: HTTPXMock) -> None:\n    httpx_mock.add_response(status_code=500, url=ALGOD_HEALTH_URL)\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        get_combined_verify_output(\n            result.output.replace(\"\\\\\\\\\", \"\\\\\")\n            .replace(str(app_dir_mock.app_config_dir), \"{app_config}\")\n            .replace(\"\\\\\", \"/\"),\n            \"{app_config}/sandbox/docker-compose.yml\",\n            (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").read_text(),\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_failure(app_dir_mock: AppDirs, proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker compose up\")\n\n    
result = invoke(\"localnet start\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_up_to_date_definition(app_dir_mock: AppDirs) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(get_docker_compose_yml())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(get_config_json())\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\").write_text(get_algod_network_template())\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_out_of_date_definition(app_dir_mock: AppDirs, mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.sandbox.ComposeSandbox.is_algod_dev_mode\", return_value=True)\n\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"out of date config\")\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(\"out of date config\")\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\").write_text(\"out of date config\")\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        \"\\n\".join(\n            [\n                result.output.replace(\"\\\\\\\\\", \"\\\\\")\n        
        .replace(str(app_dir_mock.app_config_dir), \"{app_config}\")\n                .replace(\"\\\\\", \"/\"),\n                \"{app_config}/sandbox/docker-compose.yml\",\n                (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").read_text(),\n                \"{app_config}/sandbox/algod_config.json\",\n                (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").read_text(),\n                \"{app_config}/sandbox/algod_network_template.json\",\n                (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\").read_text(),\n            ]\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_out_of_date_definition_and_missing_config(app_dir_mock: AppDirs, mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.sandbox.ComposeSandbox.is_algod_dev_mode\", return_value=True)\n\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"out of date config\")\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        \"\\n\".join(\n            [\n                result.output.replace(\"\\\\\\\\\", \"\\\\\")\n                .replace(str(app_dir_mock.app_config_dir), \"{app_config}\")\n                .replace(\"\\\\\", \"/\"),\n                \"{app_config}/sandbox/docker-compose.yml\",\n                (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").read_text(),\n            ]\n        )\n    )\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_without_docker(proc_mock: ProcMock) -> None:\n    proc_mock.should_fail_on(\"docker compose version\")\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 1\n    
verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_without_docker_compose(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker compose version\")\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_without_docker_engine_running(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker version\")\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_with_old_docker_compose_version(proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\"docker compose version --format json\", [json.dumps({\"version\": \"v2.2.1\"})])\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_with_unparseable_docker_compose_version(app_dir_mock: AppDirs, proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\"docker compose version --format json\", [json.dumps({\"version\": \"v2.5-dev123\"})])\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_with_gitpod_docker_compose_version(app_dir_mock: AppDirs, proc_mock: ProcMock) -> None:\n    proc_mock.set_output(\"docker compose version --format json\", [json.dumps({\"version\": 
\"v2.10.0-gitpod.0\"})])\n\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_out_of_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_out_date(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\n    \"proc_mock\", \"_health_success\", \"_localnet_img_check_cmd_error\", \"_mock_proc_with_running_localnet\"\n)\ndef test_localnet_img_check_cmd_error(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"localnet start\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_with_custom_config_dir(tmp_path_factory: pytest.TempPathFactory) -> None:\n    custom_config_dir = tmp_path_factory.mktemp(\"custom_config\")\n    config_dir = str(custom_config_dir.absolute()).replace(\"\\\\\", \"/\")\n    result = invoke(f\"localnet start --config-dir {config_dir}\")\n\n    assert result.exit_code == 0\n    assert custom_config_dir.exists()\n    assert (custom_config_dir / \"sandbox\").exists()\n    assert (custom_config_dir / \"sandbox\" / \"docker-compose.yml\").exists()\n    assert (custom_config_dir / \"sandbox\" / \"algod_network_template.json\").exists()\n    assert (custom_config_dir / \"sandbox\" / 
\"algod_config.json\").exists()\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_health_success\", \"_localnet_up_to_date\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_start_with_no_dev_mode(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"localnet start --no-dev\")\n\n    assert result.exit_code == 0\n    # Verify that DevMode is set to false in the algod_network_template.json\n    network_template = json.loads(\n        (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_network_template.json\")\n        .read_text()\n        .replace(\"NUM_ROUNDS\", '\"NUM_ROUNDS\"')\n    )\n    assert not network_template[\"Genesis\"][\"DevMode\"]\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_img_check_cmd_error.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: Failed to get local image versions: No such file or directory: docker\nDEBUG: Error checking image status: No response\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: Failed to get local image versions: No such file or directory: docker\nDEBUG: Error checking image status: No response\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n----\n{app_config}/sandbox/docker-compose.yml:\n----\nname: 
\"algokit_sandbox\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_algod\"\n    image: algorand/algod:latest\n    ports:\n      - 4001:8080\n      - 4002:7833\n      - 9392:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"algokit_sandbox_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_failure.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nError: Failed to start LocalNet\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_health_bad_status.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 500 Internal Server Error\"\nDEBUG: AlgoKit LocalNet health check returned 500, waiting\nWARNING: AlgoKit LocalNet failed to return a successful health check\n----\n{app_config}/sandbox/docker-compose.yml:\n----\nname: \"algokit_sandbox\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_algod\"\n    image: algorand/algod:latest\n    ports:\n    
  - 4001:8080\n      - 4002:7833\n      - 9392:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"algokit_sandbox_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_health_failure.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nDEBUG: AlgoKit LocalNet health request failed for algod\nWARNING: AlgoKit LocalNet failed to return a successful health check\n----\n{app_config}/sandbox/docker-compose.yml:\n----\nname: \"algokit_sandbox\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_algod\"\n    image: algorand/algod:latest\n    ports:\n      - 4001:8080\n      - 4002:7833\n      - 9392:9392\n    environment:\n      START_KMD: 1\n  
    KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"algokit_sandbox_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_out_date.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nWARNING: indexer has a new version available, run `algokit localnet reset --update` to get the latest version\nWARNING: algod has a new version available, run `algokit localnet reset --update` to get the latest version\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 
OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_out_of_date_definition.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nWARNING: LocalNet definition is out of date; please run `algokit localnet reset`\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n\n{app_config}/sandbox/docker-compose.yml\nout of date config\n{app_config}/sandbox/algod_config.json\nout of date 
config\n{app_config}/sandbox/algod_network_template.json\nout of date config\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_out_of_date_definition_and_missing_config.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nWARNING: LocalNet definition is out of date; please run `algokit localnet reset`\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n\n{app_config}/sandbox/docker-compose.yml\nout of date config\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_up_to_date_definition.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not require updating\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_with_custom_config_dir.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"test/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nDEBUG: HTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nDEBUG: HTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{custom_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nDEBUG: HTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n----\n{custom_config}/sandbox/docker-compose.yml:\n----\nname: \"algokit_sandbox\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_algod\"\n    image: 
algorand/algod:latest\n    ports:\n      - 4001:8080\n      - 4002:7833\n      - 4003:8081\n      - 9392:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n    healthcheck:\n      test: [\"CMD-SHELL\", \"pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB\"]\n      interval: 1s\n      timeout: 5s\n      retries: 10\n\n  indexer:\n    container_name: \"algokit_sandbox_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_with_gitpod_docker_compose_version.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.10.0-gitpod.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_with_name.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox_test\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox_test/docker-compose.yml\"}]\nDEBUG: The sandbox_test directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nThe named LocalNet configuration has been created in {app_config}/sandbox_test. \nYou can edit the configuration by changing those files. Running `algokit localnet reset` will ensure the configuration is applied\nA named LocalNet is running, update checks are disabled. 
If you wish to synchronize with the latest version, run `algokit localnet reset --update`\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox_test'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n----\n{app_config}/sandbox_test/docker-compose.yml:\n----\nname: \"algokit_sandbox_test\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_test_algod\"\n    image: algorand/algod:latest\n    ports:\n      - 4001:8080\n      - 4002:7833\n      - 9392:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_test_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_test_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    
environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"algokit_sandbox_test_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_with_no_dev_mode.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"test/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nDEBUG: HTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nDEBUG: HTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nRefreshed 'DevMode' flag to 'False'\nWould you like to restart 'LocalNet' to apply 'DevMode' flag set to 'False'? 
Otherwise, the next `algokit localnet reset` will restart with the new flag [Y/n]: \nCleaning up the running AlgoKit LocalNet...\nDEBUG: Running 'docker compose down' in '{app_config}/sandbox'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nDEBUG: HTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n----\n{app_config}/sandbox/algod_network_template.json:\n----\n{\n    \"Genesis\": {\n      \"NetworkName\": \"followermodenet\",\n      \"RewardsPoolBalance\": 0,\n      \"FirstPartKeyRound\": 0,\n      \"LastPartKeyRound\": NUM_ROUNDS,\n      \"Wallets\": [\n        {\n          \"Name\": \"Wallet1\",\n          \"Stake\": 40,\n          \"Online\": true\n        },\n        {\n          \"Name\": \"Wallet2\",\n          \"Stake\": 40,\n          \"Online\": true\n        },\n        {\n          \"Name\": \"Wallet3\",\n          \"Stake\": 20,\n          \"Online\": true\n        }\n      ],\n      \"DevMode\": false\n    },\n    \"Nodes\": [\n      {\n        \"Name\": \"data\",\n        \"IsRelay\": true,\n        \"Wallets\": [\n          {\n            \"Name\": \"Wallet1\",\n            \"ParticipationOnly\": false\n          },\n          {\n            \"Name\": \"Wallet2\",\n            \"ParticipationOnly\": false\n          },\n          {\n            \"Name\": \"Wallet3\",\n            \"ParticipationOnly\": false\n          }\n        ]\n      },\n      {\n        \"Name\": \"follower\",\n        \"IsRelay\": false,\n        \"ConfigJSONOverride\":\n        
\"{\\\"EnableFollowMode\\\":true,\\\"EndpointAddress\\\":\\\"0.0.0.0:8081\\\",\\\"MaxAcctLookback\\\":64,\\\"CatchupParallelBlocks\\\":64,\\\"CatchupBlockValidateMode\\\":3}\"\n      }\n    ]\n  }\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_with_old_docker_compose_version.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.2.1\"}\nError: Minimum compose version supported: v2.5.0, installed = v2.2.1\nPlease update your compose install\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_with_unparseable_docker_compose_version.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5-dev123\"}\nWARNING: Unable to extract compose version from output: \n{\"version\": \"v2.5-dev123\"}\nPlease ensure a minimum of compose v2.5.0 is used\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: LocalNet compose file does not exist yet; writing it out for the first time\nStarting AlgoKit LocalNet now...\nDEBUG: Running 'docker compose up --detach --quiet-pull --wait' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nDEBUG: AlgoKit LocalNet started, waiting for health check\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, algod is ready\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: AlgoKit LocalNet health check successful, indexer is 
ready\nStarted; execute `algokit explore` to explore LocalNet in a web user interface.\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_without_docker.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nError: Container engine not found; please install Docker or Podman and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_without_docker_compose.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine compose not found; please install Docker Compose or Podman Compose and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_start.test_localnet_start_without_docker_engine_running.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine isn't running; please start it.\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.py",
    "content": "import copy\nimport json\nfrom typing import TypedDict\n\nimport httpx\nimport pytest\nfrom pytest_httpx import HTTPXMock\n\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_successful(app_dir_mock: AppDirs, proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n\n    httpx_mock.add_response(\n        url=\"http://localhost:4001/v2/status\", json={\"last-round\": 1, \"time-since-last-round\": 15.3 * 1e9}\n    )\n    httpx_mock.add_response(\n        url=\"http://localhost:4001/versions\",\n        json={\n            \"genesis_id\": \"{genesis_id}\",\n            \"genesis_hash_b64\": \"{genesis_hash_b64}\",\n            \"build\": {\"major\": 1, \"minor\": 2, \"build_number\": 1},\n        },\n    )\n    httpx_mock.add_response(\n        url=\"http://localhost:8980/health\", json={\"round\": 1, \"errors\": [\"error\"], \"version\": \"v1.0\"}\n    )\n\n    proc_mock.set_output(\n        \"docker compose ps --format json\",\n        [json.dumps(compose_ps_output)],\n    )\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_http_error(app_dir_mock: AppDirs, proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / 
\"docker-compose.yml\").write_text(\"existing\")\n\n    httpx_mock.add_response(\n        url=\"http://localhost:4001/v2/status\", json={\"last-round\": 1, \"time-since-last-round\": 15.3 * 1e9}\n    )\n    httpx_mock.add_response(\n        url=\"http://localhost:4001/versions\",\n        json={\n            \"genesis_id\": \"{genesis_id}\",\n            \"genesis_hash_b64\": \"{genesis_hash_b64}\",\n            \"build\": {\"major\": 1, \"minor\": 2, \"build_number\": 1},\n        },\n    )\n    httpx_mock.add_exception(httpx.ReadTimeout(\"Unable to read within timeout\"))\n\n    proc_mock.set_output(\n        \"docker compose ps --format json\",\n        [json.dumps(compose_ps_output)],\n    )\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_unexpected_port(app_dir_mock: AppDirs, proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n\n    httpx_mock.add_response(\n        url=\"http://localhost:4001/v2/status\",\n        json={\"last-round\": 1, \"time-since-last-round\": 15.3 * 1e9},\n    )\n    httpx_mock.add_response(\n        url=\"http://localhost:4001/versions\",\n        json={\n            \"genesis_id\": \"{genesis_id}\",\n            \"genesis_hash_b64\": \"{genesis_hash_b64}\",\n            \"build\": {\"major\": 1, \"minor\": 2, \"build_number\": 1},\n        },\n    )\n\n    unexpected_port_compose_ps_output = copy.deepcopy(compose_ps_output)\n    # Change the indexer configuration to use a different port\n    unexpected_port_compose_ps_output[2][\"Publishers\"][0][\"TargetPort\"] = 1234\n    
unexpected_port_compose_ps_output[2][\"Publishers\"][0][\"PublishedPort\"] = 1234\n\n    proc_mock.set_output(\n        \"docker compose ps --format json\",\n        [json.dumps(unexpected_port_compose_ps_output)],\n    )\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_service_not_started(app_dir_mock: AppDirs, proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n\n    httpx_mock.add_response(\n        url=\"http://localhost:8980/health\", json={\"round\": 1, \"errors\": [\"error\"], \"version\": \"v1.0\"}\n    )\n\n    service_not_started_compose_ps_output = copy.deepcopy(compose_ps_output)\n    # Change the algod state to stopped\n    service_not_started_compose_ps_output[0][\"State\"] = \"stopped\"\n\n    proc_mock.set_output(\n        \"docker compose ps --format json\",\n        [json.dumps(service_not_started_compose_ps_output)],\n    )\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_docker_error(app_dir_mock: AppDirs, proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n\n    httpx_mock.add_response(\n        
url=\"http://localhost:4001/v2/status\", json={\"last-round\": 1, \"time-since-last-round\": 15.3 * 1e9}\n    )\n    httpx_mock.add_response(\n        url=\"http://localhost:4001/versions\",\n        json={\n            \"genesis_id\": \"{genesis_id}\",\n            \"genesis_hash_b64\": \"{genesis_hash_b64}\",\n            \"build\": {\"major\": 1, \"minor\": 2, \"build_number\": 1},\n        },\n    )\n\n    docker_error_compose_ps_output = copy.deepcopy(compose_ps_output)\n    # Remove indexer publisher to create an error state\n    docker_error_compose_ps_output[2][\"Publishers\"] = []\n\n    proc_mock.set_output(\n        \"docker compose ps --format json\",\n        [json.dumps(docker_error_compose_ps_output)],\n    )\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_missing_service(app_dir_mock: AppDirs, proc_mock: ProcMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n\n    # Change to keep algod and indexerdb\n    missing_service_compose_ps_output = [compose_ps_output[0].copy(), compose_ps_output[3].copy()]\n\n    proc_mock.set_output(\n        \"docker compose ps --format json\",\n        [json.dumps(missing_service_compose_ps_output)],\n    )\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_failure(app_dir_mock: AppDirs, 
proc_mock: ProcMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n    proc_mock.set_output(\"docker compose ps --format json\", output=[json.dumps([])])\n\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_mock_proc_with_running_localnet\", \"_localnet_up_to_date\")\ndef test_localnet_status_no_existing_definition(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_localnet_status_without_docker(proc_mock: ProcMock) -> None:\n    proc_mock.should_fail_on(\"docker compose version\")\n\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_localnet_status_without_docker_compose(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker compose version\")\n\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_localnet_status_without_docker_engine_running(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker version\")\n\n    result = invoke(\"localnet status\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\nclass DockerServicePublisher(TypedDict):\n    URL: str\n    TargetPort: int\n    PublishedPort: int\n    Protocol: str\n\n\nclass DockerServiceInfo(TypedDict):\n    ID: str\n    
Name: str\n    Image: str\n    Command: str\n    Project: str\n    Service: str\n    Created: int\n    State: str\n    Status: str\n    Health: str\n    ExitCode: int\n    Publishers: list[DockerServicePublisher]\n\n\ncompose_ps_output: list[DockerServiceInfo] = [\n    {\n        \"ID\": \"e900c9dfe5e4676ca7fb3ac38cbee366ca5429ae447222282b64c059f5727a47\",\n        \"Name\": \"algokit_algod\",\n        \"Image\": \"algorand/algod:latest\",\n        \"Command\": \"/node/run/run.sh\",\n        \"Project\": \"algokit_sandbox\",\n        \"Service\": \"algod\",\n        \"Created\": 1701664778,\n        \"State\": \"running\",\n        \"Status\": \"\",\n        \"Health\": \"\",\n        \"ExitCode\": 0,\n        \"Publishers\": [\n            {\"URL\": \"\", \"TargetPort\": 4160, \"PublishedPort\": 0, \"Protocol\": \"tcp\"},\n            {\"URL\": \"0.0.0.0\", \"TargetPort\": 7833, \"PublishedPort\": 4002, \"Protocol\": \"tcp\"},\n            {\"URL\": \"0.0.0.0\", \"TargetPort\": 8080, \"PublishedPort\": 4001, \"Protocol\": \"tcp\"},\n            {\"URL\": \"\", \"TargetPort\": 9100, \"PublishedPort\": 0, \"Protocol\": \"tcp\"},\n            {\"URL\": \"0.0.0.0\", \"TargetPort\": 9392, \"PublishedPort\": 9392, \"Protocol\": \"tcp\"},\n        ],\n    },\n    {\n        \"ID\": \"2ba986bf8539527dbc1f2c3e9d8f83e834099ffea30d31f341691b172748464f\",\n        \"Name\": \"algokit_conduit\",\n        \"Image\": \"algorandfoundation/conduit-localnet:latest\",\n        \"Command\": \"docker-entrypoint.sh\",\n        \"Project\": \"algokit_sandbox\",\n        \"Service\": \"conduit\",\n        \"Created\": 1701664778,\n        \"State\": \"running\",\n        \"Status\": \"\",\n        \"Health\": \"\",\n        \"ExitCode\": 0,\n        \"Publishers\": [],\n    },\n    {\n        \"ID\": \"fa5b36dddbd112eb8b52ccd4de7db47c55ad49124b0483896a23f6727335cb3d\",\n        \"Name\": \"algokit_sandbox-indexer-1\",\n        \"Image\": \"algorand/indexer:latest\",\n        \"Command\": 
\"docker-entrypoint.sh daemon --enable-all-parameters\",\n        \"Project\": \"algokit_sandbox\",\n        \"Service\": \"indexer\",\n        \"Created\": 1701664778,\n        \"State\": \"running\",\n        \"Status\": \"\",\n        \"Health\": \"\",\n        \"ExitCode\": 0,\n        \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 8980, \"PublishedPort\": 8980, \"Protocol\": \"tcp\"}],\n    },\n    {\n        \"ID\": \"f3a0bf6fe1e1fcbff96b88f39e30bcadab4c1792234c970d654b7a34fb71e1d7\",\n        \"Name\": \"algokit_postgres\",\n        \"Image\": \"postgres:13-alpine\",\n        \"Command\": \"docker-entrypoint.sh postgres\",\n        \"Project\": \"algokit_sandbox\",\n        \"Service\": \"indexer-db\",\n        \"Created\": 1701664778,\n        \"State\": \"running\",\n        \"Status\": \"\",\n        \"Health\": \"\",\n        \"ExitCode\": 0,\n        \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 5432, \"PublishedPort\": 5443, \"Protocol\": \"tcp\"}],\n    },\n]\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_docker_error.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: [{\"ID\": \"e900c9dfe5e4676ca7fb3ac38cbee366ca5429ae447222282b64c059f5727a47\", \"Name\": \"algokit_algod\", \"Image\": \"algorand/algod:latest\", \"Command\": \"/node/run/run.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"algod\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"\", \"TargetPort\": 4160, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 7833, \"PublishedPort\": 4002, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 8080, \"PublishedPort\": 4001, 
\"Protocol\": \"tcp\"}, {\"URL\": \"\", \"TargetPort\": 9100, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 9392, \"PublishedPort\": 9392, \"Protocol\": \"tcp\"}]}, {\"ID\": \"2ba986bf8539527dbc1f2c3e9d8f83e834099ffea30d31f341691b172748464f\", \"Name\": \"algokit_conduit\", \"Image\": \"algorandfoundation/conduit-localnet:latest\", \"Command\": \"docker-entrypoint.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"conduit\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": []}, {\"ID\": \"fa5b36dddbd112eb8b52ccd4de7db47c55ad49124b0483896a23f6727335cb3d\", \"Name\": \"algokit_sandbox-indexer-1\", \"Image\": \"algorand/indexer:latest\", \"Command\": \"docker-entrypoint.sh daemon --enable-all-parameters\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": []}, {\"ID\": \"f3a0bf6fe1e1fcbff96b88f39e30bcadab4c1792234c970d654b7a34fb71e1d7\", \"Name\": \"algokit_postgres\", \"Image\": \"postgres:13-alpine\", \"Command\": \"docker-entrypoint.sh postgres\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer-db\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 5432, \"PublishedPort\": 5443, \"Protocol\": \"tcp\"}]}]\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nHTTP Request: GET http://localhost:4001/versions \"HTTP/1.1 200 OK\"\n# algod status\nStatus: Running\nPort: 4001\nLast round: 1\nTime since last round: 15.3s\nGenesis ID: {genesis_id}\nGenesis hash: {genesis_hash_b64}\nVersion: 1.2.1\n# conduit status\nStatus: Running\n# indexer-db status\nStatus: Running\n# indexer status\nStatus: Error\nError: At least one container isn't running; execute `algokit localnet start` to start the LocalNet\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_failure.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: []\nError: LocalNet has not been initialized yet, please run 'algokit localnet start'\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_http_error.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: [{\"ID\": \"e900c9dfe5e4676ca7fb3ac38cbee366ca5429ae447222282b64c059f5727a47\", \"Name\": \"algokit_algod\", \"Image\": \"algorand/algod:latest\", \"Command\": \"/node/run/run.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"algod\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"\", \"TargetPort\": 4160, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 7833, \"PublishedPort\": 4002, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 8080, \"PublishedPort\": 4001, 
\"Protocol\": \"tcp\"}, {\"URL\": \"\", \"TargetPort\": 9100, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 9392, \"PublishedPort\": 9392, \"Protocol\": \"tcp\"}]}, {\"ID\": \"2ba986bf8539527dbc1f2c3e9d8f83e834099ffea30d31f341691b172748464f\", \"Name\": \"algokit_conduit\", \"Image\": \"algorandfoundation/conduit-localnet:latest\", \"Command\": \"docker-entrypoint.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"conduit\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": []}, {\"ID\": \"fa5b36dddbd112eb8b52ccd4de7db47c55ad49124b0483896a23f6727335cb3d\", \"Name\": \"algokit_sandbox-indexer-1\", \"Image\": \"algorand/indexer:latest\", \"Command\": \"docker-entrypoint.sh daemon --enable-all-parameters\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 8980, \"PublishedPort\": 8980, \"Protocol\": \"tcp\"}]}, {\"ID\": \"f3a0bf6fe1e1fcbff96b88f39e30bcadab4c1792234c970d654b7a34fb71e1d7\", \"Name\": \"algokit_postgres\", \"Image\": \"postgres:13-alpine\", \"Command\": \"docker-entrypoint.sh postgres\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer-db\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 5432, \"PublishedPort\": 5443, \"Protocol\": \"tcp\"}]}]\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nHTTP Request: GET http://localhost:4001/versions \"HTTP/1.1 200 OK\"\nDEBUG: Error checking indexer status: Unable to read within timeout\n# algod status\nStatus: Running\nPort: 4001\nLast round: 1\nTime since last round: 15.3s\nGenesis ID: {genesis_id}\nGenesis hash: {genesis_hash_b64}\nVersion: 1.2.1\n# conduit status\nStatus: Running\n# indexer-db 
status\nStatus: Running\n# indexer status\nStatus: Error\nError: At least one container isn't running; execute `algokit localnet start` to start the LocalNet\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_missing_service.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: [{\"ID\": \"e900c9dfe5e4676ca7fb3ac38cbee366ca5429ae447222282b64c059f5727a47\", \"Name\": \"algokit_algod\", \"Image\": \"algorand/algod:latest\", \"Command\": \"/node/run/run.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"algod\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"\", \"TargetPort\": 4160, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 7833, \"PublishedPort\": 4002, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 8080, \"PublishedPort\": 4001, 
\"Protocol\": \"tcp\"}, {\"URL\": \"\", \"TargetPort\": 9100, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 9392, \"PublishedPort\": 9392, \"Protocol\": \"tcp\"}]}, {\"ID\": \"f3a0bf6fe1e1fcbff96b88f39e30bcadab4c1792234c970d654b7a34fb71e1d7\", \"Name\": \"algokit_postgres\", \"Image\": \"postgres:13-alpine\", \"Command\": \"docker-entrypoint.sh postgres\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer-db\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 5432, \"PublishedPort\": 5443, \"Protocol\": \"tcp\"}]}]\nError: LocalNet has not been initialized yet, please run 'algokit localnet start'\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_no_existing_definition.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: LocalNet has not been initialized yet, please run 'algokit localnet start'\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_service_not_started.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: [{\"ID\": \"e900c9dfe5e4676ca7fb3ac38cbee366ca5429ae447222282b64c059f5727a47\", \"Name\": \"algokit_algod\", \"Image\": \"algorand/algod:latest\", \"Command\": \"/node/run/run.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"algod\", \"Created\": 1701664778, \"State\": \"stopped\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"\", \"TargetPort\": 4160, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 7833, \"PublishedPort\": 4002, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 8080, \"PublishedPort\": 4001, 
\"Protocol\": \"tcp\"}, {\"URL\": \"\", \"TargetPort\": 9100, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 9392, \"PublishedPort\": 9392, \"Protocol\": \"tcp\"}]}, {\"ID\": \"2ba986bf8539527dbc1f2c3e9d8f83e834099ffea30d31f341691b172748464f\", \"Name\": \"algokit_conduit\", \"Image\": \"algorandfoundation/conduit-localnet:latest\", \"Command\": \"docker-entrypoint.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"conduit\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": []}, {\"ID\": \"fa5b36dddbd112eb8b52ccd4de7db47c55ad49124b0483896a23f6727335cb3d\", \"Name\": \"algokit_sandbox-indexer-1\", \"Image\": \"algorand/indexer:latest\", \"Command\": \"docker-entrypoint.sh daemon --enable-all-parameters\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 8980, \"PublishedPort\": 8980, \"Protocol\": \"tcp\"}]}, {\"ID\": \"f3a0bf6fe1e1fcbff96b88f39e30bcadab4c1792234c970d654b7a34fb71e1d7\", \"Name\": \"algokit_postgres\", \"Image\": \"postgres:13-alpine\", \"Command\": \"docker-entrypoint.sh postgres\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer-db\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 5432, \"PublishedPort\": 5443, \"Protocol\": \"tcp\"}]}]\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: http://localhost:8980/health response: {'round': 1, 'errors': ['error'], 'version': 'v1.0'}\n# algod status\nStatus: Not running\n# conduit status\nStatus: Running\n# indexer-db status\nStatus: Running\n# indexer status\nStatus: Running\nPort: 8980\nLast round: 1\nVersion: v1.0\nError: At least one container isn't running; execute `algokit localnet 
start` to start the LocalNet\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_successful.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: [{\"ID\": \"e900c9dfe5e4676ca7fb3ac38cbee366ca5429ae447222282b64c059f5727a47\", \"Name\": \"algokit_algod\", \"Image\": \"algorand/algod:latest\", \"Command\": \"/node/run/run.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"algod\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"\", \"TargetPort\": 4160, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 7833, \"PublishedPort\": 4002, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 8080, \"PublishedPort\": 4001, 
\"Protocol\": \"tcp\"}, {\"URL\": \"\", \"TargetPort\": 9100, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 9392, \"PublishedPort\": 9392, \"Protocol\": \"tcp\"}]}, {\"ID\": \"2ba986bf8539527dbc1f2c3e9d8f83e834099ffea30d31f341691b172748464f\", \"Name\": \"algokit_conduit\", \"Image\": \"algorandfoundation/conduit-localnet:latest\", \"Command\": \"docker-entrypoint.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"conduit\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": []}, {\"ID\": \"fa5b36dddbd112eb8b52ccd4de7db47c55ad49124b0483896a23f6727335cb3d\", \"Name\": \"algokit_sandbox-indexer-1\", \"Image\": \"algorand/indexer:latest\", \"Command\": \"docker-entrypoint.sh daemon --enable-all-parameters\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 8980, \"PublishedPort\": 8980, \"Protocol\": \"tcp\"}]}, {\"ID\": \"f3a0bf6fe1e1fcbff96b88f39e30bcadab4c1792234c970d654b7a34fb71e1d7\", \"Name\": \"algokit_postgres\", \"Image\": \"postgres:13-alpine\", \"Command\": \"docker-entrypoint.sh postgres\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer-db\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 5432, \"PublishedPort\": 5443, \"Protocol\": \"tcp\"}]}]\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nHTTP Request: GET http://localhost:4001/versions \"HTTP/1.1 200 OK\"\nHTTP Request: GET http://localhost:8980/health \"HTTP/1.1 200 OK\"\nDEBUG: http://localhost:8980/health response: {'round': 1, 'errors': ['error'], 'version': 'v1.0'}\n# algod status\nStatus: Running\nPort: 4001\nLast round: 1\nTime since last round: 15.3s\nGenesis ID: 
{genesis_id}\nGenesis hash: {genesis_hash_b64}\nVersion: 1.2.1\n# conduit status\nStatus: Running\n# indexer-db status\nStatus: Running\n# indexer status\nStatus: Running\nPort: 8980\nLast round: 1\nVersion: v1.0\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_unexpected_port.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: Running 'docker image inspect algorand/indexer:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest \"HTTP/1.1 200 OK\"\nDEBUG: Running 'docker image inspect algorand/algod:latest --format {{range .RepoDigests}}{{println .}}{{end}}' in '{current_working_directory}'\nDEBUG: docker: tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nHTTP Request: GET https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest \"HTTP/1.1 200 OK\"\n# container engine\nName: docker (change with `algokit config container-engine`)\nDEBUG: Running 'docker compose ps --format json' in '{app_config}/sandbox'\nDEBUG: docker: [{\"ID\": \"e900c9dfe5e4676ca7fb3ac38cbee366ca5429ae447222282b64c059f5727a47\", \"Name\": \"algokit_algod\", \"Image\": \"algorand/algod:latest\", \"Command\": \"/node/run/run.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"algod\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"\", \"TargetPort\": 4160, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 7833, \"PublishedPort\": 4002, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 8080, \"PublishedPort\": 4001, 
\"Protocol\": \"tcp\"}, {\"URL\": \"\", \"TargetPort\": 9100, \"PublishedPort\": 0, \"Protocol\": \"tcp\"}, {\"URL\": \"0.0.0.0\", \"TargetPort\": 9392, \"PublishedPort\": 9392, \"Protocol\": \"tcp\"}]}, {\"ID\": \"2ba986bf8539527dbc1f2c3e9d8f83e834099ffea30d31f341691b172748464f\", \"Name\": \"algokit_conduit\", \"Image\": \"algorandfoundation/conduit-localnet:latest\", \"Command\": \"docker-entrypoint.sh\", \"Project\": \"algokit_sandbox\", \"Service\": \"conduit\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": []}, {\"ID\": \"fa5b36dddbd112eb8b52ccd4de7db47c55ad49124b0483896a23f6727335cb3d\", \"Name\": \"algokit_sandbox-indexer-1\", \"Image\": \"algorand/indexer:latest\", \"Command\": \"docker-entrypoint.sh daemon --enable-all-parameters\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 1234, \"PublishedPort\": 1234, \"Protocol\": \"tcp\"}]}, {\"ID\": \"f3a0bf6fe1e1fcbff96b88f39e30bcadab4c1792234c970d654b7a34fb71e1d7\", \"Name\": \"algokit_postgres\", \"Image\": \"postgres:13-alpine\", \"Command\": \"docker-entrypoint.sh postgres\", \"Project\": \"algokit_sandbox\", \"Service\": \"indexer-db\", \"Created\": 1701664778, \"State\": \"running\", \"Status\": \"\", \"Health\": \"\", \"ExitCode\": 0, \"Publishers\": [{\"URL\": \"0.0.0.0\", \"TargetPort\": 5432, \"PublishedPort\": 5443, \"Protocol\": \"tcp\"}]}]\nHTTP Request: GET http://localhost:4001/v2/status \"HTTP/1.1 200 OK\"\nHTTP Request: GET http://localhost:4001/versions \"HTTP/1.1 200 OK\"\n# algod status\nStatus: Running\nPort: 4001\nLast round: 1\nTime since last round: 15.3s\nGenesis ID: {genesis_id}\nGenesis hash: {genesis_hash_b64}\nVersion: 1.2.1\n# conduit status\nStatus: Running\n# indexer-db status\nStatus: Running\n# indexer status\nStatus: Error\nError: At 
least one container isn't running; execute `algokit localnet start` to start the LocalNet\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_without_docker.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nError: Container engine not found; please install Docker or Podman and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_without_docker_compose.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine compose not found; please install Docker Compose or Podman Compose and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_status.test_localnet_status_without_docker_engine_running.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine isn't running; please start it.\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.py",
    "content": "import json\n\nimport pytest\n\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_stop(app_dir_mock: AppDirs) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(\"existing\")\n\n    result = invoke(\"localnet stop\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\ndef test_localnet_stop_with_name(app_dir_mock: AppDirs, proc_mock: ProcMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox_test\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox_test\" / \"docker-compose.yml\").write_text(\"existing\")\n    (app_dir_mock.app_config_dir / \"sandbox_test\" / \"algod_config.json\").write_text(\"existing\")\n    proc_mock.set_output(\n        \"docker compose ls --format json --filter name=algokit_sandbox*\",\n        [\n            json.dumps(\n                [\n                    {\n                        \"Name\": \"algokit_sandbox_test\",\n                        \"Status\": \"running\",\n                        \"ConfigFiles\": str(app_dir_mock.app_config_dir / \"sandbox_test\" / \"docker-compose.yml\"),\n                    }\n                ]\n            )\n        ],\n    )\n    result = invoke(\"localnet stop\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    
)\n\n\n@pytest.mark.usefixtures(\"_mock_proc_with_running_localnet\")\ndef test_localnet_stop_failure(app_dir_mock: AppDirs, proc_mock: ProcMock) -> None:\n    (app_dir_mock.app_config_dir / \"sandbox\").mkdir()\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"docker-compose.yml\").write_text(\"existing\")\n    (app_dir_mock.app_config_dir / \"sandbox\" / \"algod_config.json\").write_text(\"existing\")\n    proc_mock.should_bad_exit_on(\"docker compose stop\")\n\n    result = invoke(\"localnet stop\")\n\n    assert result.exit_code == 1\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"proc_mock\", \"_mock_proc_with_running_localnet\")\ndef test_localnet_stop_no_existing_definition(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"localnet stop\")\n\n    assert result.exit_code == 0\n    verify(\n        result.output.replace(\"\\\\\\\\\", \"\\\\\").replace(str(app_dir_mock.app_config_dir), \"{app_config}\").replace(\"\\\\\", \"/\")\n    )\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_localnet_stop_without_docker(proc_mock: ProcMock) -> None:\n    proc_mock.should_fail_on(\"docker compose version\")\n\n    result = invoke(\"localnet stop\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_localnet_stop_without_docker_compose(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker compose version\")\n\n    result = invoke(\"localnet stop\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_localnet_stop_without_docker_engine_running(proc_mock: ProcMock) -> None:\n    proc_mock.should_bad_exit_on(\"docker version\")\n\n    result = invoke(\"localnet stop\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.test_localnet_stop.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nStopping AlgoKit LocalNet now...\nDEBUG: Running 'docker compose stop' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nLocalNet Stopped; execute `algokit localnet start` to start it again.\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.test_localnet_stop_failure.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nStopping AlgoKit LocalNet now...\nDEBUG: Running 'docker compose stop' in '{app_config}/sandbox'\ndocker: STDOUT\ndocker: STDERR\nError: Failed to stop LocalNet\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.test_localnet_stop_no_existing_definition.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox/docker-compose.yml\"}]\nDEBUG: The sandbox directory does not exist yet; creating it\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.test_localnet_stop_with_name.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nDEBUG: Running 'docker compose ls --format json --filter name=algokit_sandbox*' in '{current_working_directory}'\nDEBUG: docker: [{\"Name\": \"algokit_sandbox_test\", \"Status\": \"running\", \"ConfigFiles\": \"{app_config}/sandbox_test/docker-compose.yml\"}]\nStopping AlgoKit LocalNet now...\nDEBUG: Running 'docker compose stop' in '{app_config}/sandbox_test'\ndocker: STDOUT\ndocker: STDERR\nLocalNet Stopped; execute `algokit localnet start` to start it again.\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.test_localnet_stop_without_docker.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nError: Container engine not found; please install Docker or Podman and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.test_localnet_stop_without_docker_compose.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine compose not found; please install Docker Compose or Podman Compose and add to path.\n"
  },
  {
    "path": "tests/localnet/test_localnet_stop.test_localnet_stop_without_docker_engine_running.approved.txt",
    "content": "DEBUG: Running 'docker compose version --format json' in '{current_working_directory}'\nDEBUG: docker: {\"version\": \"v2.5.0\"}\nDEBUG: Running 'docker version' in '{current_working_directory}'\nDEBUG: docker: STDOUT\nDEBUG: docker: STDERR\nError: Container engine isn't running; please start it.\n"
  },
  {
    "path": "tests/localnet/test_sandbox.py",
    "content": "import json\nimport time\n\nimport pytest\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.sandbox import (\n    ALGORAND_IMAGE,\n    IMAGE_VERSION_CHECK_INTERVAL,\n    INDEXER_IMAGE,\n    ComposeSandbox,\n    _get_image_version_cache,\n    _get_image_version_cache_path,\n    _update_image_version_cache,\n    get_algod_network_template,\n    get_conduit_yaml,\n    get_config_json,\n    get_docker_compose_yml,\n)\nfrom tests.utils.approvals import verify\nfrom tests.utils.proc_mock import ProcMock\n\n\ndef test_get_config_json() -> None:\n    config_json = json.loads(get_config_json())\n    verify(json.dumps(config_json, indent=2))\n\n\ndef test_get_conduit_yaml() -> None:\n    conduit_yaml = get_conduit_yaml()\n    verify(conduit_yaml)\n\n\ndef test_get_docker_compose_yml() -> None:\n    docker_compose_yml = get_docker_compose_yml()\n    verify(docker_compose_yml)\n\n\ndef test_algod_network_template_json() -> None:\n    algod_network_template_json = get_algod_network_template()\n    verify(algod_network_template_json)\n\n\n@pytest.fixture\ndef _mock_image_check_responses(proc_mock: ProcMock, httpx_mock: HTTPXMock) -> None:\n    \"\"\"Mock the docker and HTTP responses needed for image version checks.\"\"\"\n    arg = \"{{range .RepoDigests}}{{println .}}{{end}}\"\n    proc_mock.set_output(\n        [\"docker\", \"image\", \"inspect\", ALGORAND_IMAGE, \"--format\", arg],\n        [\"tag@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\n\"],\n    )\n    proc_mock.set_output(\n        [\"docker\", \"image\", \"inspect\", INDEXER_IMAGE, \"--format\", arg],\n        [\"tag@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\\n\"],\n    )\n    httpx_mock.add_response(\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/indexer/tags/latest\",\n        json={\"digest\": 
\"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\"},\n    )\n    httpx_mock.add_response(\n        url=\"https://registry.hub.docker.com/v2/repositories/algorand/algod/tags/latest\",\n        json={\"digest\": \"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"},\n    )\n\n\n@pytest.mark.use_real_image_version_cache\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_image_check_responses\")\ndef test_check_docker_compose_for_new_image_versions_no_cache(proc_mock: ProcMock) -> None:\n    \"\"\"Should check versions when cache file doesn't exist.\"\"\"\n    cache_path = _get_image_version_cache_path()\n    if cache_path.exists():\n        cache_path.unlink()\n\n    sandbox = ComposeSandbox()\n    sandbox.check_docker_compose_for_new_image_versions()\n\n    assert cache_path.exists()\n    # Verify check was run\n    assert any(\"image\" in call.command and \"inspect\" in call.command for call in proc_mock.called)\n\n\n@pytest.mark.use_real_image_version_cache\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_check_docker_compose_for_new_image_versions_cache_fresh_shows_warnings(\n    proc_mock: ProcMock, caplog: pytest.LogCaptureFixture\n) -> None:\n    \"\"\"Should skip registry check but show warnings when cache indicates images are outdated.\"\"\"\n    _update_image_version_cache(indexer_outdated=True, algod_outdated=True)\n\n    sandbox = ComposeSandbox()\n    sandbox.check_docker_compose_for_new_image_versions()\n\n    # Verify registry check was skipped\n    assert not any(\"image\" in call.command and \"inspect\" in call.command for call in proc_mock.called)\n    # Verify warnings were shown based on cached state\n    assert \"indexer has a new version available\" in caplog.text\n    assert \"algod has a new version available\" in caplog.text\n\n\n@pytest.mark.use_real_image_version_cache\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef 
test_check_docker_compose_for_new_image_versions_cache_fresh_no_warnings_when_up_to_date(\n    proc_mock: ProcMock, caplog: pytest.LogCaptureFixture\n) -> None:\n    \"\"\"Should skip registry check and not show warnings when cache indicates images are up to date.\"\"\"\n    _update_image_version_cache(indexer_outdated=False, algod_outdated=False)\n\n    sandbox = ComposeSandbox()\n    sandbox.check_docker_compose_for_new_image_versions()\n\n    # Verify registry check was skipped\n    assert not any(\"image\" in call.command and \"inspect\" in call.command for call in proc_mock.called)\n    # Verify no warnings were shown\n    assert \"indexer has a new version available\" not in caplog.text\n    assert \"algod has a new version available\" not in caplog.text\n\n\n@pytest.mark.use_real_image_version_cache\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_image_check_responses\")\ndef test_check_docker_compose_for_new_image_versions_cache_expired(proc_mock: ProcMock, mocker: MockerFixture) -> None:\n    \"\"\"Should check versions when cache is expired.\"\"\"\n    _update_image_version_cache(indexer_outdated=False, algod_outdated=False)\n    # Mock time to be past the cache interval\n    mocker.patch(\"algokit.core.sandbox.time.time\", return_value=time.time() + IMAGE_VERSION_CHECK_INTERVAL + 1)\n\n    sandbox = ComposeSandbox()\n    sandbox.check_docker_compose_for_new_image_versions()\n\n    # Verify check was run\n    assert any(\"image\" in call.command and \"inspect\" in call.command for call in proc_mock.called)\n\n\n@pytest.mark.use_real_image_version_cache\n@pytest.mark.usefixtures(\"app_dir_mock\")\ndef test_get_image_version_cache_returns_stored_state() -> None:\n    \"\"\"Should return the stored state from the cache.\"\"\"\n    _update_image_version_cache(indexer_outdated=True, algod_outdated=False)\n\n    cached_state = _get_image_version_cache()\n\n    assert cached_state is not None\n    assert cached_state.indexer_outdated is True\n    assert 
cached_state.algod_outdated is False\n\n\n@pytest.mark.use_real_image_version_cache\n@pytest.mark.usefixtures(\"app_dir_mock\", \"_mock_image_check_responses\")\ndef test_check_docker_compose_for_new_image_versions_force_bypasses_cache(proc_mock: ProcMock) -> None:\n    \"\"\"Should check versions when force=True even if cache is fresh.\"\"\"\n    # Set up fresh cache indicating images are up to date\n    _update_image_version_cache(indexer_outdated=False, algod_outdated=False)\n\n    sandbox = ComposeSandbox()\n    sandbox.check_docker_compose_for_new_image_versions(force=True)\n\n    # Verify check was run despite fresh cache\n    assert any(\"image\" in call.command and \"inspect\" in call.command for call in proc_mock.called)\n"
  },
  {
    "path": "tests/localnet/test_sandbox.test_algod_network_template_json.approved.txt",
    "content": "{\n    \"Genesis\": {\n      \"NetworkName\": \"followermodenet\",\n      \"RewardsPoolBalance\": 0,\n      \"FirstPartKeyRound\": 0,\n      \"LastPartKeyRound\": NUM_ROUNDS,\n      \"Wallets\": [\n        {\n          \"Name\": \"Wallet1\",\n          \"Stake\": 40,\n          \"Online\": true\n        },\n        {\n          \"Name\": \"Wallet2\",\n          \"Stake\": 40,\n          \"Online\": true\n        },\n        {\n          \"Name\": \"Wallet3\",\n          \"Stake\": 20,\n          \"Online\": true\n        }\n      ],\n      \"DevMode\": true\n    },\n    \"Nodes\": [\n      {\n        \"Name\": \"data\",\n        \"IsRelay\": true,\n        \"Wallets\": [\n          {\n            \"Name\": \"Wallet1\",\n            \"ParticipationOnly\": false\n          },\n          {\n            \"Name\": \"Wallet2\",\n            \"ParticipationOnly\": false\n          },\n          {\n            \"Name\": \"Wallet3\",\n            \"ParticipationOnly\": false\n          }\n        ]\n      },\n      {\n        \"Name\": \"follower\",\n        \"IsRelay\": false,\n        \"ConfigJSONOverride\":\n        \"{\\\"EnableFollowMode\\\":true,\\\"EndpointAddress\\\":\\\"0.0.0.0:8081\\\",\\\"MaxAcctLookback\\\":64,\\\"CatchupParallelBlocks\\\":64,\\\"CatchupBlockValidateMode\\\":3}\"\n      }\n    ]\n  }\n"
  },
  {
    "path": "tests/localnet/test_sandbox.test_get_conduit_yaml.approved.txt",
    "content": "# Log verbosity: PANIC, FATAL, ERROR, WARN, INFO, DEBUG, TRACE\nlog-level: INFO\n\n# If no log file is provided logs are written to stdout.\n#log-file:\n\n# Number of retries to perform after a pipeline plugin error.\nretry-count: 10\n\n# Time duration to wait between retry attempts.\nretry-delay: \"5s\"\n\n# Optional filepath to use for pidfile.\n#pid-filepath: /path/to/pidfile\n\n# Whether or not to print the conduit banner on startup.\nhide-banner: false\n\n# When enabled prometheus metrics are available on '/metrics'\nmetrics:\n  mode: OFF\n  addr: \":9999\"\n  prefix: \"conduit\"\n\n# The importer is typically an algod follower node.\nimporter:\n  name: localnet_algod\n  config:\n    lead-node-url: \"http://algod:8080\"\n    follower-node-url: \"http://algod:8081\"\n    token: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n\n# Zero or more processors may be defined to manipulate what data\n# reaches the exporter.\nprocessors:\n\n# An exporter is defined to do something with the data.\nexporter:\n  name: postgresql\n  config:\n    # Pgsql connection string\n    # See https://github.com/jackc/pgconn for more details\n    connection-string: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb\"\n\n    # Maximum connection number for connection pool\n    # This means the total number of active queries that can be running\n    # concurrently can never be more than this\n    max-conn: 20\n"
  },
  {
    "path": "tests/localnet/test_sandbox.test_get_config_json.approved.txt",
    "content": "{\n  \"GossipFanout\": 1,\n  \"EndpointAddress\": \"0.0.0.0:8080\",\n  \"DNSBootstrapID\": \"\",\n  \"IncomingConnectionsLimit\": 0,\n  \"Archival\": true,\n  \"isIndexerActive\": false,\n  \"EnableDeveloperAPI\": true,\n  \"EnablePrivateNetworkAccessHeader\": true\n}\n"
  },
  {
    "path": "tests/localnet/test_sandbox.test_get_docker_compose_yml.approved.txt",
    "content": "name: \"algokit_sandbox\"\n\nservices:\n  algod:\n    container_name: \"algokit_sandbox_algod\"\n    image: algorand/algod:latest\n    ports:\n      - 4001:8080\n      - 4002:7833\n      - 9392:9392\n    environment:\n      START_KMD: 1\n      KMD_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      ADMIN_TOKEN: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n      GOSSIP_PORT: 10000\n    init: true\n    volumes:\n      - type: bind\n        source: ./algod_config.json\n        target: /etc/algorand/config.json\n      - type: bind\n        source: ./algod_network_template.json\n        target: /etc/algorand/template.json\n      - ./goal_mount:/root/goal_mount\n\n  conduit:\n    container_name: \"algokit_sandbox_conduit\"\n    image: algorandfoundation/conduit-localnet:latest\n    restart: unless-stopped\n    volumes:\n      - type: bind\n        source: ./conduit.yml\n        target: /etc/algorand/conduit.yml\n    depends_on:\n      - indexer-db\n      - algod\n\n  indexer-db:\n    container_name: \"algokit_sandbox_postgres\"\n    image: postgres:16-alpine\n    ports:\n      - 5443:5432\n    user: postgres\n    environment:\n      POSTGRES_USER: algorand\n      POSTGRES_PASSWORD: algorand\n      POSTGRES_DB: indexerdb\n\n  indexer:\n    container_name: \"algokit_sandbox_indexer\"\n    image: algorand/indexer:latest\n    ports:\n      - 8980:8980\n    restart: unless-stopped\n    command: daemon --enable-all-parameters\n    environment:\n      INDEXER_POSTGRES_CONNECTION_STRING: \"host=indexer-db port=5432 user=algorand password=algorand dbname=indexerdb sslmode=disable\"\n    depends_on:\n      - conduit\n"
  },
  {
    "path": "tests/portability/test_pyinstaller_binary.py",
    "content": "import logging\nimport subprocess\nimport sys\nimport time\nfrom os import environ\nfrom pathlib import Path\n\nimport pytest\n\nalgokit = \"algokit\"\nlogger = logging.getLogger(__name__)\npytestmark = pytest.mark.pyinstaller_binary_tests\n\n\ndef command_str_to_list(command: str) -> list[str]:\n    return command.split(\" \")\n\n\n@pytest.mark.parametrize(\n    (\"command\", \"exit_codes\"),\n    [\n        (command_str_to_list(\"algokit --help\"), [0]),\n        (command_str_to_list(\"algokit doctor\"), [0]),\n        (command_str_to_list(\"algokit task vanity-address PY\"), [0]),\n    ],\n)\ndef test_non_interactive_algokit_commands(\n    command: list[str], exit_codes: list[int], tmp_path_factory: pytest.TempPathFactory\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    # Create a 'playground' directory\n    if \"build\" in command:\n        cwd = cwd / \"playground\"\n        cwd.mkdir(exist_ok=True)\n\n    execution_result = subprocess.run(command, capture_output=True, text=True, check=False, cwd=cwd)\n    logger.info(f\"Command {command} returned {execution_result.stdout}\")\n\n    # Parts of doctor will fail in CI on macOS and windows on github actions since docker isn't available by default\n    if \"doctor\" in command and sys.platform in [\"darwin\", \"windows\", \"win32\"] and environ.get(\"CI\"):\n        exit_codes.append(1)\n\n    assert execution_result.returncode in exit_codes, f\"Command {command} failed with {execution_result.stderr}\"\n\n\ndef test_algokit_init_and_project_run(tmp_path_factory: pytest.TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    # Run algokit init\n    init_command = command_str_to_list(\"algokit init --name playground -t python --no-git --no-ide --defaults\")\n    init_result = subprocess.run(init_command, capture_output=True, text=True, check=False, cwd=cwd)\n    logger.info(f\"Command {init_command} returned {init_result.stdout}\")\n    assert init_result.returncode 
== 0, f\"Init command failed with {init_result.stderr}\"\n\n    # Run algokit project run build\n    build_cwd = cwd / \"playground\"\n    build_cwd.mkdir(exist_ok=True)\n    build_command = command_str_to_list(\"algokit -v project run build -- hello_world\")\n    build_result = subprocess.run(build_command, capture_output=True, text=True, check=False, cwd=build_cwd)\n    logger.info(f\"Command {build_command} returned {build_result.stdout}\")\n    assert build_result.returncode == 0, f\"Build command failed with {build_result.stderr}\"\n\n\ndef test_algokit_init_with_template_url(\n    dummy_algokit_template_with_python_task: dict[str, Path],\n) -> None:\n    # TODO: revisit to improve\n    # currently we are passing non default option --no-workspace to avoid creating a workspace since its a dummy\n    # template. To cover and test workspace creation on real templates, we need to find a way to have `algokit`\n    # available globally within the worker running the binary IF the template defined custom copier tasks that invoke\n    # global `algokit` executable as part of instantiation of child template (for example fullstack).\n    command = command_str_to_list(\n        \"init --name testproject \"\n        \"--UNSAFE-SECURITY-accept-template-url \"\n        f\"--template-url {dummy_algokit_template_with_python_task['template_path']} \"\n        \"--template-url-ref=HEAD --no-git --no-ide --defaults --no-workspace\"\n    )\n\n    process = subprocess.Popen(\n        [algokit, *command],\n        stdin=subprocess.PIPE,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.STDOUT,\n        text=True,\n        cwd=dummy_algokit_template_with_python_task[\"cwd\"],\n    )\n\n    full_output = \"\"\n    logger.info(f\"Running command: {' '.join([algokit, *command])}\")\n    while process.poll() is None and process.stdout and process.stdin:\n        output = process.stdout.readline()\n        full_output += output  # Accumulate the output\n        
logger.debug(output.strip())  # Log each line of stdout in real-time\n\n        if \"y/n\" in output.lower():  # adjust this as needed based on the exact prompt text\n            answer = \"y\\n\"\n            process.stdin.write(answer)\n            process.stdin.flush()\n\n        time.sleep(0.1)\n\n    # After the process ends, log the full stdout\n    logger.info(f\"Command init returned:\\n{full_output}\")\n    logger.error(process.stderr)\n    assert process.returncode == 0, f\"Command init failed with {process.stderr}\"\n"
  },
  {
    "path": "tests/project/__init__.py",
    "content": ""
  },
  {
    "path": "tests/project/bootstrap/__init__.py",
    "content": ""
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap.py",
    "content": "from tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef test_bootstrap_help() -> None:\n    result = invoke(\"project bootstrap -h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap.test_bootstrap_help.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nUsage: algokit project bootstrap [OPTIONS] COMMAND [ARGS]...\n\n  Expedited initial setup for any developer by installing and configuring\n  dependencies and other key development environment setup activities.\n\nOptions:\n  --force     Continue even if minimum AlgoKit version is not met\n  -h, --help  Show this message and exit.\n\nCommands:\n  all     Runs all bootstrap sub-commands in the current directory and immediate\n          sub directories.\n  env     Copies .env.template file to .env in the current working directory and\n          prompts for any unspecified values.\n  npm     Runs `npm install` in the current working directory to install Node.js\n          dependencies.\n  pnpm    Runs `pnpm install` in the current working directory to install\n          Node.js dependencies.\n  poetry  Installs Python Poetry (if not present) and runs `poetry install` in\n          the current working directory to install Python dependencies.\n  uv      Installs UV (if not present) and runs `uv sync` in the current working\n          directory to install Python dependencies.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.py",
    "content": "from pathlib import Path\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.conf import ALGOKIT_CONFIG, get_current_package_version\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef _setup_workspace(cwd: Path) -> None:\n    \"\"\"\n    Sets up the workspace configuration.\n    \"\"\"\n    algokit_config_path = cwd / \".algokit.toml\"\n    algokit_config_path.write_text(\n        \"\"\"\n        [project]\n        type = \"workspace\"\n        projects_root_dir = 'artifacts'\n        \"\"\"\n    )\n\n\ndef _setup_standalone_project(cwd: Path, project_name: str, project_type: str) -> None:\n    \"\"\"\n    Sets up a standalone project of a specified type within the workspace.\n    \"\"\"\n    project_dir = cwd / \"artifacts\" / project_name\n    project_dir.mkdir(parents=True)\n    project_config_path = project_dir / \".algokit.toml\"\n    project_config_path.write_text(\n        f\"\"\"\n        [project]\n        type = \"{project_type}\"\n        name = \"{project_name}\"\n        \"\"\"\n    )\n    (project_dir / \".env.template\").touch()\n    if project_type == \"contract\":\n        (project_dir / \"poetry.toml\").touch()\n    elif project_type == \"frontend\":\n        (project_dir / \"package.json\").touch()\n\n\ndef test_bootstrap_all_empty(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        \"project bootstrap all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_bootstrap_all_algokit_min_version(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    current_version = get_current_package_version()\n    (cwd / ALGOKIT_CONFIG).write_text('[algokit]\\nmin_version = \"999.99.99\"\\n')\n    result = invoke(\n     
   \"project bootstrap all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output.replace(current_version, \"{current_version}\"))\n\n\ndef test_bootstrap_all_algokit_min_version_ignore_error(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    current_version = get_current_package_version()\n    (cwd / ALGOKIT_CONFIG).write_text('[algokit]\\nmin_version = \"999.99.99\"\\n')\n    result = invoke(\n        \"project bootstrap --force all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output.replace(current_version, \"{current_version}\"))\n\n\ndef test_bootstrap_all_env(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \".env.template\").touch()\n\n    result = invoke(\n        \"project bootstrap all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_all_poetry(tmp_path_factory: TempPathFactory, mocker: MockerFixture) -> None:\n    # Mock global preference to use Poetry for this test\n    mocker.patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"poetry\")\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"poetry.toml\").touch()\n\n    result = invoke(\n        \"project bootstrap all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\", \"proc_mock\")\ndef test_bootstrap_all_npm(\n    tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest, mocker: MockerFixture\n) -> None:\n    # Mock global preference to use npm for this test\n    mocker.patch(\"algokit.core.project.bootstrap.get_js_package_manager\", return_value=\"npm\")\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"package.json\").touch()\n\n    result = invoke(\n        \"project bootstrap all 
--interactive\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_all_poetry_via_pyproject(tmp_path_factory: TempPathFactory, mocker: MockerFixture) -> None:\n    # Mock global preference to use Poetry for this test\n    mocker.patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"poetry\")\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"pyproject.toml\").write_text(\"[tool.poetry]\", encoding=\"utf-8\")\n\n    result = invoke(\n        \"project bootstrap all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_bootstrap_all_skip_dirs(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \".venv\").mkdir()\n    (cwd / \"__pycache__\").mkdir()\n    (cwd / \"node_modules\").mkdir()\n    (cwd / \"file.txt\").touch()\n    (cwd / \"empty_dir\").mkdir()\n    (cwd / \"boring_dir\").mkdir()\n    (cwd / \"boring_dir\" / \"file.txt\").touch()\n    (cwd / \"double_nested_dir\").mkdir()\n    (cwd / \"double_nested_dir\" / \"nest1\").mkdir()\n    (cwd / \"double_nested_dir\" / \"nest2\").mkdir()\n    (cwd / \"double_nested_dir\" / \"nest2\" / \"file.txt\").touch()\n\n    result = invoke(\n        \"project bootstrap all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_all_sub_dir(tmp_path_factory: TempPathFactory, mocker: MockerFixture) -> None:\n    # Mock global preference to use Poetry for this test\n    mocker.patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"poetry\")\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"empty_dir\").mkdir()\n    (cwd / \"live_dir\").mkdir()\n    (cwd / \"live_dir\" / \".env.template\").touch()\n    (cwd / \"live_dir\" / 
\"poetry.toml\").touch()\n\n    result = invoke(\n        \"project bootstrap all\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_all_projects_name_filter(tmp_path_factory: TempPathFactory, mocker: MockerFixture) -> None:\n    # Mock global preference to use Poetry for this test\n    mocker.patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"poetry\")\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    _setup_workspace(cwd)\n    _setup_standalone_project(cwd, \"project_1\", \"contract\")\n    result = invoke(\"project bootstrap all --project-name project_1\", cwd=cwd)\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_all_projects_name_filter_not_found(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    _setup_workspace(cwd)\n    _setup_standalone_project(cwd, \"project_1\", \"contract\")\n    result = invoke(\"project bootstrap all --project-name project_2\", cwd=cwd)\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_all_projects_type_filter(tmp_path_factory: TempPathFactory, mocker: MockerFixture) -> None:\n    # Mock global preferences for this test\n    mocker.patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"poetry\")\n    mocker.patch(\"algokit.core.project.bootstrap.get_js_package_manager\", return_value=\"npm\")\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    _setup_workspace(cwd)\n    _setup_standalone_project(cwd, \"project_1\", \"contract\")\n    _setup_standalone_project(cwd, \"project_2\", \"contract\")\n    _setup_standalone_project(cwd, \"project_3\", \"contract\")\n    _setup_standalone_project(cwd, \"project_4\", \"frontend\")\n\n    result = invoke(\"project bootstrap all --type frontend 
--interactive\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output.replace(\".cmd\", \"\"))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_all_projects_type_filter_not_found(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    _setup_workspace(cwd)\n    _setup_standalone_project(cwd, \"project_1\", \"contract\")\n    _setup_standalone_project(cwd, \"project_2\", \"contract\")\n    _setup_standalone_project(cwd, \"project_3\", \"contract\")\n\n    result = invoke(\"project bootstrap all --type frontend\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_algokit_min_version.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nError: This template requires AlgoKit version 999.99.99 or higher, but you have AlgoKit version {current_version}. Please update AlgoKit.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_algokit_min_version_ignore_error.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nWARNING: This template requires AlgoKit version 999.99.99 or higher, but you have AlgoKit version {current_version}. Please update AlgoKit.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_empty.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_env.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Running `algokit project bootstrap env`\nDEBUG: {current_working_directory}/.env doesn't exist yet\nDEBUG: {current_working_directory}/.env.template exists\nCopying {current_working_directory}/.env.template to {current_working_directory}/.env and prompting for empty values\nDEBUG: Skipping {current_working_directory}/.env\nDEBUG: Skipping {current_working_directory}/.env.template\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_npm[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap npm`\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\nDEBUG: Skipping {current_working_directory}/package.json\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_npm[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap npm`\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\nDEBUG: Skipping {current_working_directory}/package.json\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_npm[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap npm`\nInstalling npm dependencies\nDEBUG: Running 'npm.cmd install' in '{current_working_directory}'\nnpm.cmd: STDOUT\nnpm.cmd: STDERR\nDEBUG: Skipping {current_working_directory}/package.json\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_poetry.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Skipping {current_working_directory}/poetry.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_poetry_via_pyproject.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Skipping {current_working_directory}/pyproject.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_projects_filter.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/artifacts for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Checking {current_working_directory}/artifacts/project_1 for bootstrapping needs\nDEBUG: Running `algokit project bootstrap env`\nDEBUG: {current_working_directory}/artifacts/project_1/.env doesn't exist yet\nDEBUG: {current_working_directory}/artifacts/project_1/.env.template exists\nCopying {current_working_directory}/artifacts/project_1/.env.template to {current_working_directory}/artifacts/project_1/.env and prompting for empty values\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}/artifacts/project_1'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/poetry.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_projects_filter_not_found.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/artifacts for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/poetry.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_projects_name_filter.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/artifacts for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Checking {current_working_directory}/artifacts/project_1 for bootstrapping needs\nDEBUG: Running `algokit project bootstrap env`\nDEBUG: {current_working_directory}/artifacts/project_1/.env doesn't exist yet\nDEBUG: {current_working_directory}/artifacts/project_1/.env.template exists\nCopying {current_working_directory}/artifacts/project_1/.env.template to {current_working_directory}/artifacts/project_1/.env and prompting for empty values\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}/artifacts/project_1'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env.template\nDEBUG: Skipping 
{current_working_directory}/artifacts/project_1/poetry.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_projects_name_filter_not_found.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/artifacts for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/poetry.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_projects_type_filter.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/artifacts for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/poetry.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_2/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_2/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_2/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_2/poetry.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_3/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_3/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_3/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_3/poetry.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_4/.algokit.toml\nDEBUG: Checking {current_working_directory}/artifacts/project_4 for bootstrapping needs\nDEBUG: Running `algokit project bootstrap env`\nDEBUG: 
{current_working_directory}/artifacts/project_4/.env doesn't exist yet\nDEBUG: {current_working_directory}/artifacts/project_4/.env.template exists\nCopying {current_working_directory}/artifacts/project_4/.env.template to {current_working_directory}/artifacts/project_4/.env and prompting for empty values\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_4/.algokit.toml\nDEBUG: Running `algokit project bootstrap npm`\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}/artifacts/project_4'\nnpm: STDOUT\nnpm: STDERR\nDEBUG: Skipping {current_working_directory}/artifacts/project_4/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_4/.env\nDEBUG: Skipping {current_working_directory}/artifacts/project_4/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_4/package.json\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_projects_type_filter_not_found.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/artifacts for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_1/poetry.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_2/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_2/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_2/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_2/poetry.toml\nDEBUG: Attempting to load project config from {current_working_directory}/artifacts/project_3/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_3/.algokit.toml\nDEBUG: Skipping {current_working_directory}/artifacts/project_3/.env.template\nDEBUG: Skipping {current_working_directory}/artifacts/project_3/poetry.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_skip_dirs.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Skipping {current_working_directory}/.venv\nDEBUG: Skipping {current_working_directory}/__pycache__\nDEBUG: Attempting to load project config from {current_working_directory}/boring_dir/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/boring_dir for bootstrapping needs\nDEBUG: Skipping {current_working_directory}/boring_dir/file.txt\nDEBUG: Attempting to load project config from {current_working_directory}/double_nested_dir/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/double_nested_dir for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/double_nested_dir/nest1/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/double_nested_dir/nest1 for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/double_nested_dir/nest2/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/double_nested_dir/nest2 for bootstrapping needs\nDEBUG: Skipping {current_working_directory}/double_nested_dir/nest2/file.txt\nDEBUG: Attempting to load project config from {current_working_directory}/empty_dir/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/empty_dir 
for bootstrapping needs\nDEBUG: Skipping {current_working_directory}/file.txt\nDEBUG: Skipping {current_working_directory}/node_modules\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_all.test_bootstrap_all_sub_dir.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/empty_dir/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/empty_dir for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/live_dir/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory}/live_dir for bootstrapping needs\nDEBUG: Running `algokit project bootstrap env`\nDEBUG: {current_working_directory}/live_dir/.env doesn't exist yet\nDEBUG: {current_working_directory}/live_dir/.env.template exists\nCopying {current_working_directory}/live_dir/.env.template to {current_working_directory}/live_dir/.env and prompting for empty values\nDEBUG: Attempting to load project config from {current_working_directory}/live_dir/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}/live_dir'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Skipping {current_working_directory}/live_dir/.env\nDEBUG: Skipping {current_working_directory}/live_dir/.env.template\nDEBUG: Skipping 
{current_working_directory}/live_dir/poetry.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.py",
    "content": "import click\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.namer import NamerFactory\nfrom approvaltests.scrubbers.scrubbers import Scrubber\nfrom prompt_toolkit.input import PipeInput\n\nfrom tests import get_combined_verify_output\nfrom tests.utils.approvals import TokenScrubber, combine_scrubbers, verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef make_output_scrubber(**tokens: str) -> Scrubber:\n    return combine_scrubbers(click.unstyle, TokenScrubber(tokens=tokens))\n\n\ndef test_bootstrap_env_no_files(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        \"project bootstrap env\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_bootstrap_env_dotenv_exists(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \".env\").touch()\n    (cwd / \".env.template\").touch()\n\n    result = invoke(\n        \"project bootstrap env\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\n@pytest.mark.parametrize(\n    \"env_file_name\",\n    [\n        \".env.localnet.template\",\n        \".env.template\",\n        \".env.localnet\",\n        \".env\",\n    ],\n)\ndef test_bootstrap_network_prefixed_envs(env_file_name: str, tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / env_file_name).touch()\n    if not env_file_name.endswith(\".template\"):\n        (cwd / f\"{env_file_name}.template\").touch()\n\n    result = invoke(\n        \"project bootstrap env\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, options=NamerFactory.with_parameters(env_file_name))\n\n\ndef test_bootstrap_env_multiple_templates(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / 
\".env.template\").touch()\n    (cwd / \".env.localnet.template\").touch()\n    (cwd / \".env.testnet.template\").touch()\n\n    result = invoke(\n        \"project bootstrap env\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_bootstrap_env_dotenv_missing_template_exists(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \".env.template\").write_text(\"env_template_contents\")\n\n    result = invoke(\n        \"project bootstrap env\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(get_combined_verify_output(result.output, \".env\", (cwd / \".env\").read_text(\"utf-8\")))\n\n\n@pytest.mark.usefixtures(\"mock_questionary_input\")\ndef test_bootstrap_env_dotenv_with_values(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \".env.template\").write_text(\n        \"\"\"\nTOKEN_1=123\n# comment for token 2 - you should enter a valid value\n# another comment\nTOKEN_2_WITH_MULTI_LINES_COMMENT=test\nTOKEN_3=test value with spaces\n\nTOKEN_4_WITH_NO_EQUALS_SIGN\n# another comment\nTOKEN_5_SPECIAL_CHAR=*\n\"\"\"\n    )\n\n    result = invoke(\n        \"project bootstrap env\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(get_combined_verify_output(result.output, \".env\", (cwd / \".env\").read_text(\"utf-8\")))\n\n\n@pytest.mark.mock_platform_system(\"Darwin\")\ndef test_bootstrap_env_dotenv_different_prompt_scenarios(\n    tmp_path_factory: TempPathFactory, mock_questionary_input: PipeInput, monkeypatch: pytest.MonkeyPatch\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \".env.template\").write_text(\n        \"\"\"\nTOKEN_1=123\n\n# comment for token 2 - you should enter a valid value\n# another comment\nTOKEN_2_WITH_MULTI_LINES_COMMENT=\nTOKEN_3=test value\n\nTOKEN_4_WITH_SPACES =\nTOKEN_5_WITHOUT_COMMENT=\nTOKEN_WITH_NO_EQUALS_SIGN\n# another 
comment\nTOKEN_6_EMPTY_WITH_COMMENT=\nTOKEN_7_VALUE_WILL_BE_EMPTY=\nTOKEN_8 = value with spaces\nTOKEN_8_SPECIAL_CHAR=*\n\"\"\"\n    )\n    # remove ci flag from env (when running in github actions)\n    monkeypatch.delenv(\"CI\", raising=False)\n\n    # provide values for tokens\n    mock_questionary_input.send_text(\"test value for TOKEN_2_WITH_MULTI_LINES_COMMENT\")\n    mock_questionary_input.send_text(\"\\n\")  # enter\n    mock_questionary_input.send_text(\"test value for TOKEN_4_WITH_SPACES\")\n    mock_questionary_input.send_text(\"\\n\")  # enter\n    mock_questionary_input.send_text(\"test value for TOKEN_5_WITHOUT_COMMENT\")\n    mock_questionary_input.send_text(\"\\n\")  # enter\n    mock_questionary_input.send_text(\"test value for TOKEN_6_EMPTY_WITH_COMMENT\")\n    mock_questionary_input.send_text(\"\\n\")  # enter\n    mock_questionary_input.send_text(\"\")  # Empty value for TOKEN_7_VALUE_WILL_BE_EMPTY\n    mock_questionary_input.send_text(\"\\n\")  # enter\n\n    result = invoke(\n        \"project bootstrap env\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(\n        get_combined_verify_output(result.output, \".env\", (cwd / \".env\").read_text(\"utf-8\")),\n        scrubber=make_output_scrubber(),\n    )\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_env_dotenv_different_prompt_scenarios.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: {current_working_directory}/.env doesn't exist yet\nDEBUG: {current_working_directory}/.env.template exists\nCopying {current_working_directory}/.env.template to {current_working_directory}/.env and prompting for empty values\n# comment for token 2 - you should enter a valid value\n# another comment\n\n? Please provide a value for TOKEN_2_WITH_MULTI_LINES_COMMENT:\n\n? Please provide a value for TOKEN_4_WITH_SPACES:\n\n? Please provide a value for TOKEN_5_WITHOUT_COMMENT:\n# another comment\n\n? Please provide a value for TOKEN_6_EMPTY_WITH_COMMENT:\n\n? Please provide a value for TOKEN_7_VALUE_WILL_BE_EMPTY:\n----\n.env:\n----\n\nTOKEN_1=123\n\n# comment for token 2 - you should enter a valid value\n# another comment\nTOKEN_2_WITH_MULTI_LINES_COMMENT=test value for TOKEN_2_WITH_MULTI_LINES_COMMENT\nTOKEN_3=test value\n\nTOKEN_4_WITH_SPACES=test value for TOKEN_4_WITH_SPACES\nTOKEN_5_WITHOUT_COMMENT=test value for TOKEN_5_WITHOUT_COMMENT\nTOKEN_WITH_NO_EQUALS_SIGN\n# another comment\nTOKEN_6_EMPTY_WITH_COMMENT=test value for TOKEN_6_EMPTY_WITH_COMMENT\nTOKEN_7_VALUE_WILL_BE_EMPTY=\nTOKEN_8 = value with spaces\nTOKEN_8_SPECIAL_CHAR=*\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_env_dotenv_exists.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n.env already exists; skipping bootstrap of .env\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_env_dotenv_missing_template_exists.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: {current_working_directory}/.env doesn't exist yet\nDEBUG: {current_working_directory}/.env.template exists\nCopying {current_working_directory}/.env.template to {current_working_directory}/.env and prompting for empty values\n----\n.env:\n----\nenv_template_contents\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_env_dotenv_with_values.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: {current_working_directory}/.env doesn't exist yet\nDEBUG: {current_working_directory}/.env.template exists\nCopying {current_working_directory}/.env.template to {current_working_directory}/.env and prompting for empty values\n----\n.env:\n----\n\nTOKEN_1=123\n# comment for token 2 - you should enter a valid value\n# another comment\nTOKEN_2_WITH_MULTI_LINES_COMMENT=test\nTOKEN_3=test value with spaces\n\nTOKEN_4_WITH_NO_EQUALS_SIGN\n# another comment\nTOKEN_5_SPECIAL_CHAR=*\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_env_multiple_templates.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: {current_working_directory}/.env.localnet doesn't exist yet\nDEBUG: {current_working_directory}/.env.localnet.template exists\nCopying {current_working_directory}/.env.localnet.template to {current_working_directory}/.env.localnet and prompting for empty values\nDEBUG: {current_working_directory}/.env doesn't exist yet\nDEBUG: {current_working_directory}/.env.template exists\nCopying {current_working_directory}/.env.template to {current_working_directory}/.env and prompting for empty values\nDEBUG: {current_working_directory}/.env.testnet doesn't exist yet\nDEBUG: {current_working_directory}/.env.testnet.template exists\nCopying {current_working_directory}/.env.testnet.template to {current_working_directory}/.env.testnet and prompting for empty values\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_env_no_files.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nNo .env or .env.{network_name}.template files found; nothing to do here, skipping bootstrap.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_network_prefixed_env_dotenv_exists.approved.txt",
    "content": ""
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_network_prefixed_envs..env.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n.env already exists; skipping bootstrap of .env\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_network_prefixed_envs..env.localnet.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n.env.localnet already exists; skipping bootstrap of .env.localnet\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_network_prefixed_envs..env.localnet.template.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: {current_working_directory}/.env.localnet doesn't exist yet\nDEBUG: {current_working_directory}/.env.localnet.template exists\nCopying {current_working_directory}/.env.localnet.template to {current_working_directory}/.env.localnet and prompting for empty values\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_env.test_bootstrap_network_prefixed_envs..env.template.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: {current_working_directory}/.env doesn't exist yet\nDEBUG: {current_working_directory}/.env.template exists\nCopying {current_working_directory}/.env.template to {current_working_directory}/.env and prompting for empty values\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.py",
    "content": "import pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\ndef test_bootstrap_npm_without_npm(\n    proc_mock: ProcMock, tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest, mock_platform_system: str\n) -> None:\n    proc_mock.should_fail_on(f\"npm{'.cmd' if mock_platform_system == 'Windows' else ''} install\")\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"package.json\").touch()\n\n    result = invoke(\n        \"project bootstrap npm --no-ci\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    verify(result.output, namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\", \"proc_mock\")\ndef test_bootstrap_npm_without_package_file(tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    result = invoke(\n        \"project bootstrap npm --no-ci\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\")\ndef test_bootstrap_npm_without_npm_and_package_file(\n    proc_mock: ProcMock, tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest\n) -> None:\n    proc_mock.should_fail_on(\"npm install\")\n    proc_mock.should_fail_on(\"npm.cmd install\")\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\n        \"project bootstrap npm --no-ci\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\", \"proc_mock\")\ndef test_bootstrap_npm_happy_path(tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    
(cwd / \"package.json\").touch()\n\n    result = invoke(\n        \"project bootstrap npm --no-ci\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\", \"proc_mock\")\ndef test_bootstrap_npm_ci_mode_with_lock_file(\n    tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"package.json\").touch()\n    (cwd / \"package-lock.json\").touch()\n\n    result = invoke(\n        \"project bootstrap npm --ci\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\", \"proc_mock\")\ndef test_bootstrap_npm_ci_mode_without_lock_file(\n    tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"package.json\").touch()\n\n    result = invoke(\n        \"project bootstrap npm --ci\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1  # Should fail when no package-lock.json exists\n    verify(result.output, namer=PyTestNamer(request))\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_ci_mode_with_lock_file[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm ci' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_ci_mode_with_lock_file[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm ci' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_ci_mode_with_lock_file[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm.cmd ci' in '{current_working_directory}'\nnpm.cmd: STDOUT\nnpm.cmd: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_ci_mode_without_lock_file[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nError: Cannot run `npm ci` because `package-lock.json` is missing. Please run `npm install` instead and commit it to your source control.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_ci_mode_without_lock_file[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nError: Cannot run `npm ci` because `package-lock.json` is missing. Please run `npm install` instead and commit it to your source control.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_ci_mode_without_lock_file[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nError: Cannot run `npm ci` because `package-lock.json` is missing. Please run `npm install` instead and commit it to your source control.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_happy_path[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_happy_path[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_happy_path[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm.cmd install' in '{current_working_directory}'\nnpm.cmd: STDOUT\nnpm.cmd: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_npm[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nError: Failed to run `npm install` for {current_working_directory}/package.json. Is npm installed and available on PATH?\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_npm[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nError: Failed to run `npm install` for {current_working_directory}/package.json. Is npm installed and available on PATH?\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_npm[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling npm dependencies\nDEBUG: Running 'npm.cmd install' in '{current_working_directory}'\nError: Failed to run `npm.cmd install` for {current_working_directory}/package.json. Is npm installed and available on PATH?\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_npm_and_package_file[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n{current_working_directory}/package.json doesn't exist; nothing to do here, skipping bootstrap of npm\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_npm_and_package_file[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n{current_working_directory}/package.json doesn't exist; nothing to do here, skipping bootstrap of npm\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_npm_and_package_file[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n{current_working_directory}/package.json doesn't exist; nothing to do here, skipping bootstrap of npm\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_package_file[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n{current_working_directory}/package.json doesn't exist; nothing to do here, skipping bootstrap of npm\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_package_file[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n{current_working_directory}/package.json doesn't exist; nothing to do here, skipping bootstrap of npm\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_npm.test_bootstrap_npm_without_package_file[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n{current_working_directory}/package.json doesn't exist; nothing to do here, skipping bootstrap of npm\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.py",
    "content": "\"\"\"Focused tests for package manager selection logic in bootstrap.\nTests only the new configuration-driven behavior, not redundant scenarios.\n\"\"\"\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\", \"proc_mock\")\ndef test_bootstrap_respects_configured_package_managers(\n    tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest, mocker: MockerFixture\n) -> None:\n    \"\"\"Test that bootstrap respects user's configured package managers.\"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"pyproject.toml\").write_text('[project]\\nname = \"test\"\\nversion = \"0.1.0\"')\n    (cwd / \"package.json\").touch()\n    (cwd / \"pnpm-lock.yaml\").touch()  # Required for CI mode\n\n    # Mock user preferences\n    mocker.patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"uv\")\n    mocker.patch(\"algokit.core.project.bootstrap.get_js_package_manager\", return_value=\"pnpm\")\n\n    result = invoke(\"project bootstrap all\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_bootstrap_respects_configured_package_managers[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap uv`\nDEBUG: Running 'uv --version' in '{current_working_directory}'\nDEBUG: uv: STDOUT\nDEBUG: uv: STDERR\nInstalling Python dependencies and setting up Python virtual environment via UV\nDEBUG: Running 'uv sync' in '{current_working_directory}'\nuv: STDOUT\nuv: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap pnpm`\nInstalling pnpm dependencies\nDEBUG: Running 'pnpm install' in '{current_working_directory}'\npnpm: STDOUT\npnpm: STDERR\nDEBUG: Skipping {current_working_directory}/package.json\nDEBUG: Skipping {current_working_directory}/pnpm-lock.yaml\nDEBUG: Skipping {current_working_directory}/pyproject.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_bootstrap_respects_configured_package_managers[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap uv`\nDEBUG: Running 'uv --version' in '{current_working_directory}'\nDEBUG: uv: STDOUT\nDEBUG: uv: STDERR\nInstalling Python dependencies and setting up Python virtual environment via UV\nDEBUG: Running 'uv sync' in '{current_working_directory}'\nuv: STDOUT\nuv: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap pnpm`\nInstalling pnpm dependencies\nDEBUG: Running 'pnpm install' in '{current_working_directory}'\npnpm: STDOUT\npnpm: STDERR\nDEBUG: Skipping {current_working_directory}/package.json\nDEBUG: Skipping {current_working_directory}/pnpm-lock.yaml\nDEBUG: Skipping {current_working_directory}/pyproject.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_bootstrap_respects_configured_package_managers[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap uv`\nDEBUG: Running 'uv --version' in '{current_working_directory}'\nDEBUG: uv: STDOUT\nDEBUG: uv: STDERR\nInstalling Python dependencies and setting up Python virtual environment via UV\nDEBUG: Running 'uv sync' in '{current_working_directory}'\nuv: STDOUT\nuv: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running `algokit project bootstrap pnpm`\nInstalling pnpm dependencies\nDEBUG: Running 'pnpm.cmd install' in '{current_working_directory}'\npnpm.cmd: STDOUT\npnpm.cmd: STDERR\nDEBUG: Skipping {current_working_directory}/package.json\nDEBUG: Skipping {current_working_directory}/pnpm-lock.yaml\nDEBUG: Skipping {current_working_directory}/pyproject.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_interactive_prompt_fallback_with_preference_saving.approved.txt",
    "content": ""
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_project_override_takes_precedence_over_user_preference.approved.txt",
    "content": ""
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_project_override_takes_precedence_over_user_preference[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Using Python package manager from .algokit.toml: poetry\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Using JavaScript package manager from .algokit.toml: npm\nDEBUG: Running `algokit project bootstrap npm`\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/package-lock.json\nDEBUG: Skipping {current_working_directory}/package.json\nDEBUG: Skipping {current_working_directory}/pyproject.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_project_override_takes_precedence_over_user_preference[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Using Python package manager from .algokit.toml: poetry\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Using JavaScript package manager from .algokit.toml: npm\nDEBUG: Running `algokit project bootstrap npm`\nInstalling npm dependencies\nDEBUG: Running 'npm install' in '{current_working_directory}'\nnpm: STDOUT\nnpm: STDERR\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/package-lock.json\nDEBUG: Skipping {current_working_directory}/package.json\nDEBUG: Skipping {current_working_directory}/pyproject.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_project_override_takes_precedence_over_user_preference[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No 'min_version' specified in .algokit.toml file.\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Checking {current_working_directory} for bootstrapping needs\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Using Python package manager from .algokit.toml: poetry\nDEBUG: Running `algokit project bootstrap poetry`\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Using JavaScript package manager from .algokit.toml: npm\nDEBUG: Running `algokit project bootstrap npm`\nInstalling npm dependencies\nDEBUG: Running 'npm.cmd install' in '{current_working_directory}'\nnpm.cmd: STDOUT\nnpm.cmd: STDERR\nDEBUG: Skipping {current_working_directory}/.algokit.toml\nDEBUG: Skipping {current_working_directory}/package-lock.json\nDEBUG: Skipping {current_working_directory}/package.json\nDEBUG: Skipping {current_working_directory}/pyproject.toml\nFinished bootstrapping {current_working_directory}\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_package_manager_selection.test_smart_defaults_when_no_user_preference.approved.txt",
    "content": ""
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_pnpm.py",
    "content": "\"\"\"Essential tests for pnpm bootstrap functionality.\nFocuses on critical scenarios: success, failure, and CI mode validation.\n\"\"\"\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\n@pytest.mark.usefixtures(\"mock_platform_system\", \"proc_mock\")\ndef test_bootstrap_pnpm_happy_path(tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest) -> None:\n    \"\"\"Test successful pnpm bootstrap.\"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"package.json\").touch()\n\n    result = invoke(\"project bootstrap pnpm --no-ci\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\ndef test_bootstrap_pnpm_without_package_file(tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest) -> None:\n    \"\"\"Test pnpm bootstrap when package.json doesn't exist.\"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    result = invoke(\"project bootstrap pnpm --no-ci\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\ndef test_bootstrap_pnpm_ci_mode_without_lock_file(\n    tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest\n) -> None:\n    \"\"\"Test pnpm bootstrap in CI mode without lock file fails appropriately.\"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"package.json\").touch()\n\n    result = invoke(\"project bootstrap pnpm --ci\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(result.output, namer=PyTestNamer(request))\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_pnpm.test_bootstrap_pnpm_ci_mode_without_lock_file.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling pnpm dependencies\nError: Cannot run in CI mode because `pnpm-lock.yaml` is missing. Please run `pnpm install` to generate the lockfile and commit it to your source control.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_pnpm.test_bootstrap_pnpm_happy_path[linux].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling pnpm dependencies\nDEBUG: Running 'pnpm install' in '{current_working_directory}'\npnpm: STDOUT\npnpm: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_pnpm.test_bootstrap_pnpm_happy_path[macOS].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling pnpm dependencies\nDEBUG: Running 'pnpm install' in '{current_working_directory}'\npnpm: STDOUT\npnpm: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_pnpm.test_bootstrap_pnpm_happy_path[windows].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nInstalling pnpm dependencies\nDEBUG: Running 'pnpm.cmd install' in '{current_working_directory}'\npnpm.cmd: STDOUT\npnpm.cmd: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_pnpm.test_bootstrap_pnpm_without_package_file.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n{current_working_directory}/package.json doesn't exist; nothing to do here, skipping bootstrap of pnpm\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.py",
    "content": "import sys\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom _pytest.fixtures import FixtureRequest\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\nfrom prompt_toolkit.input import PipeInput\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.fixture(scope=\"module\")\ndef python_base_executable() -> str:\n    from algokit.core.utils import get_base_python_path\n\n    value = get_base_python_path()\n    if value is None:\n        pytest.fail(\"Python base detection failed, this should work (even in CI)\")\n    return value\n\n\n@pytest.fixture\ndef system_python_paths(request: FixtureRequest, mocker: MockerFixture) -> MagicMock:\n    python_names: list[str] = getattr(request, \"param\", [])\n\n    def which(cmd: str) -> str | None:\n        if cmd in python_names:\n            return f\"/bin/{cmd}\"\n        return None\n\n    mock = mocker.patch(\"algokit.core.utils.which\")\n    mock.side_effect = which\n    return mock\n\n\ndef test_base_python_path(python_base_executable: str) -> None:\n    \"\"\"When running in a venv (expected test mode), we should be able to resolve to base python.\n    Otherwise, they should be the same\"\"\"\n    assert (python_base_executable == sys.executable) == (sys.prefix == sys.base_prefix)\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_poetry_with_poetry() -> None:\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_bootstrap_poetry_without_poetry(proc_mock: ProcMock, mock_questionary_input: PipeInput) -> None:\n    proc_mock.should_fail_on(\"poetry --version\")\n    # Yes, install poetry\n    mock_questionary_input.send_text(\"Y\")\n\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef 
test_bootstrap_poetry_without_poetry_failed_install(proc_mock: ProcMock, mock_questionary_input: PipeInput) -> None:\n    proc_mock.should_fail_on(\"poetry --version\")\n    proc_mock.should_bad_exit_on(\"pipx install poetry\")\n    # Yes, install poetry\n    mock_questionary_input.send_text(\"Y\")\n\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_bootstrap_poetry_without_poetry_failed_poetry_path(\n    proc_mock: ProcMock, mock_questionary_input: PipeInput\n) -> None:\n    proc_mock.should_fail_on(\"poetry --version\")\n    proc_mock.should_fail_on(\"poetry install\")\n    # Yes, install poetry\n    mock_questionary_input.send_text(\"Y\")\n\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.parametrize(\n    \"system_python_paths\",\n    [\n        pytest.param([], id=\"no_system_pythons\"),\n        pytest.param([\"python\"], id=\"python_only\"),\n        pytest.param([\"python3\"], id=\"python3_only\"),\n        pytest.param([\"python\", \"python3\"], id=\"python_and_python3\"),\n    ],\n    indirect=[\"system_python_paths\"],\n)\n@pytest.mark.usefixtures(\"system_python_paths\")\ndef test_bootstrap_poetry_without_poetry_or_pipx_path(\n    request: FixtureRequest,\n    proc_mock: ProcMock,\n    python_base_executable: str,\n    mock_questionary_input: PipeInput,\n) -> None:\n    proc_mock.should_fail_on(\"poetry --version\")\n    proc_mock.should_fail_on(\"pipx --version\")\n    # Yes, install poetry\n    mock_questionary_input.send_text(\"Y\")\n\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 0\n    verify(result.output.replace(python_base_executable, \"{python_base_executable}\"), namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"system_python_paths\")\ndef test_bootstrap_poetry_without_poetry_or_pipx_path_failed_install(\n    proc_mock: ProcMock, 
python_base_executable: str, mock_questionary_input: PipeInput\n) -> None:\n    proc_mock.should_fail_on(\"poetry --version\")\n    proc_mock.should_fail_on(\"pipx --version\")\n    proc_mock.should_bad_exit_on(f\"{python_base_executable} -m pipx install poetry\")\n    # Yes, install poetry\n    mock_questionary_input.send_text(\"Y\")\n\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 1\n    verify(result.output.replace(python_base_executable, \"{python_base_executable}\"))\n\n\n@pytest.mark.usefixtures(\"system_python_paths\")\ndef test_bootstrap_poetry_without_poetry_or_pipx_path_failed_poetry_path(\n    proc_mock: ProcMock, python_base_executable: str, mock_questionary_input: PipeInput\n) -> None:\n    proc_mock.should_fail_on(\"poetry --version\")\n    proc_mock.should_fail_on(\"pipx --version\")\n    proc_mock.should_fail_on(\"poetry install\")\n    # Yes, install poetry\n    mock_questionary_input.send_text(\"Y\")\n\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 1\n    verify(result.output.replace(python_base_executable, \"{python_base_executable}\"))\n\n\n@pytest.mark.usefixtures(\"system_python_paths\")\ndef test_bootstrap_poetry_without_poetry_or_pipx_path_or_pipx_module(\n    proc_mock: ProcMock, python_base_executable: str, mock_questionary_input: PipeInput\n) -> None:\n    proc_mock.should_fail_on(\"poetry --version\")\n    proc_mock.should_fail_on(\"pipx --version\")\n    proc_mock.should_bad_exit_on(f\"{python_base_executable} -m pipx --version\")\n    # Yes, install poetry\n    mock_questionary_input.send_text(\"Y\")\n\n    result = invoke(\"project bootstrap poetry\")\n\n    assert result.exit_code == 1\n    verify(result.output.replace(python_base_executable, \"{python_base_executable}\"))\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_with_poetry.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Running 'pipx install poetry' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_failed_install.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Running 'pipx install poetry' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nError: Unable to install poetry via pipx; please install poetry manually via https://python-poetry.org/docs/ and try `algokit project bootstrap poetry` again.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_failed_poetry_path.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nDEBUG: Running 'pipx install poetry' in '{current_working_directory}'\nDEBUG: pipx: STDOUT\nDEBUG: pipx: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\nError: Unable to access Poetry on PATH after installing it via pipx; check pipx installations are on your path by running `pipx ensurepath` and try `algokit project bootstrap poetry` again.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_or_pipx_path[no_system_pythons].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Running '{python_base_executable} -m pipx --version' in '{current_working_directory}'\nDEBUG: {python_base_executable}: STDOUT\nDEBUG: {python_base_executable}: STDERR\nDEBUG: Running '{python_base_executable} -m pipx install poetry' in '{current_working_directory}'\nDEBUG: {python_base_executable}: STDOUT\nDEBUG: {python_base_executable}: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_or_pipx_path[python3_only].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Running '/bin/python3 -m pipx --version' in '{current_working_directory}'\nDEBUG: /bin/python3: STDOUT\nDEBUG: /bin/python3: STDERR\nDEBUG: Running '/bin/python3 -m pipx install poetry' in '{current_working_directory}'\nDEBUG: /bin/python3: STDOUT\nDEBUG: /bin/python3: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_or_pipx_path[python_and_python3].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Running '/bin/python3 -m pipx --version' in '{current_working_directory}'\nDEBUG: /bin/python3: STDOUT\nDEBUG: /bin/python3: STDERR\nDEBUG: Running '/bin/python3 -m pipx install poetry' in '{current_working_directory}'\nDEBUG: /bin/python3: STDOUT\nDEBUG: /bin/python3: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_or_pipx_path[python_only].approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Running '/bin/python -m pipx --version' in '{current_working_directory}'\nDEBUG: /bin/python: STDOUT\nDEBUG: /bin/python: STDERR\nDEBUG: Running '/bin/python -m pipx install poetry' in '{current_working_directory}'\nDEBUG: /bin/python: STDOUT\nDEBUG: /bin/python: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\npoetry: STDOUT\npoetry: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_or_pipx_path_failed_install.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Running '{python_base_executable} -m pipx --version' in '{current_working_directory}'\nDEBUG: {python_base_executable}: STDOUT\nDEBUG: {python_base_executable}: STDERR\nDEBUG: Running '{python_base_executable} -m pipx install poetry' in '{current_working_directory}'\nDEBUG: {python_base_executable}: STDOUT\nDEBUG: {python_base_executable}: STDERR\nError: Unable to install poetry via pipx; please install poetry manually via https://python-poetry.org/docs/ and try `algokit project bootstrap poetry` again.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_or_pipx_path_failed_poetry_path.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Running '{python_base_executable} -m pipx --version' in '{current_working_directory}'\nDEBUG: {python_base_executable}: STDOUT\nDEBUG: {python_base_executable}: STDERR\nDEBUG: Running '{python_base_executable} -m pipx install poetry' in '{current_working_directory}'\nDEBUG: {python_base_executable}: STDOUT\nDEBUG: {python_base_executable}: STDERR\nInstalling Python dependencies and setting up Python virtual environment via Poetry\nDEBUG: Running 'poetry install' in '{current_working_directory}'\nError: Unable to access Poetry on PATH after installing it via pipx; check pipx installations are on your path by running `pipx ensurepath` and try `algokit project bootstrap poetry` again.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_poetry.test_bootstrap_poetry_without_poetry_or_pipx_path_or_pipx_module.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nPoetry not found; attempting to install it...\n? We couldn't find `poetry`; can we install it for you via pipx so we can install Python dependencies? (Y/n)\nDEBUG: Running 'pipx --version' in '{current_working_directory}'\nDEBUG: Running '{python_base_executable} -m pipx --version' in '{current_working_directory}'\nDEBUG: {python_base_executable}: STDOUT\nDEBUG: {python_base_executable}: STDERR\nError: Unable to find pipx install so that poetry can be installed; please install pipx via https://pypa.github.io/pipx/ and then try `algokit project bootstrap poetry` again.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_translation.py",
    "content": "\"\"\"Tests for package manager command translation during bootstrap.\"\"\"\n\nimport tempfile\nfrom pathlib import Path\n\nimport pytest\n\nfrom algokit.core.project.bootstrap import (\n    _translate_package_manager_in_toml,\n    _translate_single_command,\n)\n\n\n@pytest.mark.parametrize(\n    (\"cmd\", \"js_manager\", \"py_manager\", \"expected\"),\n    [\n        # JavaScript translations - compatible commands\n        (\"npm run build\", \"pnpm\", None, \"pnpm run build\"),\n        (\"npm install\", \"pnpm\", None, \"pnpm install\"),\n        (\"npm test\", \"pnpm\", None, \"pnpm test\"),\n        (\"npm start\", \"pnpm\", None, \"pnpm start\"),\n        (\"pnpm run build\", \"npm\", None, \"npm run build\"),\n        (\"pnpm install\", \"npm\", None, \"npm install\"),\n        (\"pnpm test\", \"npm\", None, \"npm test\"),\n        (\"pnpm start\", \"npm\", None, \"npm start\"),\n        # JavaScript incompatible commands (no translation)\n        (\"npm exec jest\", \"pnpm\", None, \"npm exec jest\"),  # Different behavior\n        (\"npx create-react-app\", \"pnpm\", None, \"npx create-react-app\"),  # Different behavior\n        (\"npm fund\", \"pnpm\", None, \"npm fund\"),  # No pnpm equivalent\n        (\"pnpm exec jest\", \"npm\", None, \"pnpm exec jest\"),  # Different behavior\n        (\"pnpm dlx create-react-app\", \"npm\", None, \"pnpm dlx create-react-app\"),  # No npm equivalent\n        # Python translations - compatible commands\n        (\"poetry install\", None, \"uv\", \"uv sync\"),\n        (\"poetry install --verbose\", None, \"uv\", \"uv sync --verbose\"),\n        (\"poetry run pytest\", None, \"uv\", \"uv run pytest\"),\n        (\"poetry add requests\", None, \"uv\", \"uv add requests\"),\n        (\"poetry remove django\", None, \"uv\", \"uv remove django\"),\n        (\"poetry lock\", None, \"uv\", \"uv lock\"),\n        (\"uv sync\", None, \"poetry\", \"poetry install\"),\n        (\"uv run python app.py\", None, 
\"poetry\", \"poetry run python app.py\"),\n        (\"uv add numpy\", None, \"poetry\", \"poetry add numpy\"),\n        # Python incompatible commands (no translation)\n        (\"poetry show\", None, \"uv\", \"poetry show\"),  # No equivalent\n        (\"poetry config\", None, \"uv\", \"poetry config\"),  # No equivalent\n        (\"uv pip install\", None, \"poetry\", \"uv pip install\"),  # No equivalent\n        # No translation cases\n        (\"npm run build\", None, None, \"npm run build\"),\n        (\"echo hello\", \"pnpm\", \"uv\", \"echo hello\"),\n    ],\n)\ndef test_translate_single_command(cmd: str, js_manager: str | None, py_manager: str | None, expected: str) -> None:\n    \"\"\"Test command translation logic.\"\"\"\n    assert _translate_single_command(cmd, js_manager, py_manager) == expected\n\n\ndef test_translate_toml_file() -> None:\n    \"\"\"Test that .algokit.toml file is correctly translated.\"\"\"\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        project_dir = Path(tmp_dir)\n        toml_path = project_dir / \".algokit.toml\"\n\n        # Create initial .algokit.toml\n        toml_content = \"\"\"\n[project.run]\nbuild = { commands = [\"npm run build\", \"poetry run test\"] }\n\"\"\"\n        toml_path.write_text(toml_content)\n\n        # Translate\n        _translate_package_manager_in_toml(project_dir, \"pnpm\", \"uv\")\n\n        # Verify by checking the file content\n        result_content = toml_path.read_text()\n        assert '\"pnpm run build\"' in result_content\n        assert '\"uv run test\"' in result_content\n        assert '\"npm run build\"' not in result_content\n        assert '\"poetry run test\"' not in result_content\n\n\ndef test_translate_preserves_non_command_content() -> None:\n    \"\"\"Test that translation preserves other TOML content.\"\"\"\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        project_dir = Path(tmp_dir)\n        toml_path = project_dir / \".algokit.toml\"\n\n        # Create 
.algokit.toml with various content\n        original = \"\"\"\n[algokit]\nmin_version = \"1.0.0\"\n\n[project]\nname = \"test\"\ntype = \"backend\"\n\n[project.run]\ndev = { commands = [\"npm start\"], description = \"Start dev\" }\n\"\"\"\n        toml_path.write_text(original)\n\n        # Translate\n        _translate_package_manager_in_toml(project_dir, \"pnpm\", None)\n\n        # Verify structure is preserved\n        result_content = toml_path.read_text()\n        # Check that non-command content is preserved\n        assert 'min_version = \"1.0.0\"' in result_content\n        assert 'name = \"test\"' in result_content\n        assert 'type = \"backend\"' in result_content\n        assert 'description = \"Start dev\"' in result_content\n        # Check that command was translated\n        assert '\"pnpm start\"' in result_content\n        assert '\"npm start\"' not in result_content\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_uv.py",
    "content": "\"\"\"Essential tests for uv bootstrap functionality.\nFocuses on key scenarios: happy path, missing uv, and poetry migration.\n\"\"\"\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom approvaltests.pytest.py_test_namer import PyTestNamer\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_uv_happy_path(tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest) -> None:\n    \"\"\"Test successful uv bootstrap when uv is installed.\"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"pyproject.toml\").write_text('[project]\\nname = \"test\"\\nversion = \"0.1.0\"')\n\n    result = invoke(\"project bootstrap uv\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output, namer=PyTestNamer(request))\n\n\ndef test_bootstrap_uv_user_declines_install(\n    proc_mock: ProcMock, tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest, mocker: MockerFixture\n) -> None:\n    \"\"\"Test uv bootstrap when uv is not installed and user declines installation.\"\"\"\n    proc_mock.should_fail_on(\"uv --version\")\n    mocker.patch(\"algokit.core.questionary_extensions.prompt_confirm\", return_value=False)\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"pyproject.toml\").write_text('[project]\\nname = \"test\"\\nversion = \"0.1.0\"')\n\n    result = invoke(\"project bootstrap uv\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(result.output, namer=PyTestNamer(request))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_bootstrap_uv_poetry_project_migration_declined(\n    tmp_path_factory: TempPathFactory, request: pytest.FixtureRequest, mocker: MockerFixture\n) -> None:\n    \"\"\"Test uv bootstrap with poetry project when user declines migration.\"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n  
  (cwd / \"pyproject.toml\").write_text('[tool.poetry]\\nname = \"test\"\\nversion = \"0.1.0\"')\n\n    mocker.patch(\"algokit.core.questionary_extensions.prompt_confirm\", return_value=False)\n\n    result = invoke(\"project bootstrap uv\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(result.output, namer=PyTestNamer(request))\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_uv.test_bootstrap_uv_happy_path.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'uv --version' in '{current_working_directory}'\nDEBUG: uv: STDOUT\nDEBUG: uv: STDERR\nInstalling Python dependencies and setting up Python virtual environment via UV\nDEBUG: Running 'uv sync' in '{current_working_directory}'\nuv: STDOUT\nuv: STDERR\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_uv.test_bootstrap_uv_poetry_project_migration_declined.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'uv --version' in '{current_working_directory}'\nDEBUG: uv: STDOUT\nDEBUG: uv: STDERR\nError: This project is configured to use Poetry. Please use `algokit project bootstrap poetry`, set poetry as default package manager via `algokit config py-package-manager`, or modify your pyproject.toml to be compatible with UV.\n"
  },
  {
    "path": "tests/project/bootstrap/test_bootstrap_uv.test_bootstrap_uv_user_declines_install.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Running 'uv --version' in '{current_working_directory}'\nUV not found; attempting to install it...\nError: Unable to install uv; please install uv manually via https://github.com/astral-sh/uv and try `algokit project bootstrap uv` again.\n"
  },
  {
    "path": "tests/project/bootstrap/test_precedence_hierarchy.py",
    "content": "\"\"\"Unit tests for package manager precedence hierarchy logic.\"\"\"\n\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nfrom algokit.core.project.bootstrap import (\n    _determine_javascript_package_manager,\n    _determine_python_package_manager,\n)\n\n\ndef test_python_package_manager_precedence_hierarchy(tmp_path: Path) -> None:\n    \"\"\"Test that Python package manager selection follows correct precedence.\"\"\"\n    project_dir = tmp_path\n\n    # Test 1: Project override takes precedence over everything\n    with (\n        patch(\"algokit.core.project.bootstrap._get_py_package_manager_override\", return_value=\"uv\"),\n        patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"poetry\"),\n    ):\n        result = _determine_python_package_manager(project_dir)\n        assert result == \"uv\"\n\n    # Test 2: User preference takes precedence over smart defaults\n    (project_dir / \"poetry.toml\").write_text(\"\")  # Would suggest poetry\n    with (\n        patch(\"algokit.core.project.bootstrap._get_py_package_manager_override\", return_value=None),\n        patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=\"uv\"),\n    ):\n        result = _determine_python_package_manager(project_dir)\n        assert result == \"uv\"\n\n    # Test 3: Smart defaults when no user preference\n    with (\n        patch(\"algokit.core.project.bootstrap._get_py_package_manager_override\", return_value=None),\n        patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=None),\n    ):\n        result = _determine_python_package_manager(project_dir)\n        assert result == \"poetry\"  # Should detect poetry.toml\n\n\ndef test_javascript_package_manager_precedence_hierarchy(tmp_path: Path) -> None:\n    \"\"\"Test that JavaScript package manager selection follows correct precedence.\"\"\"\n    project_dir = tmp_path\n\n    # Test 1: Project override takes 
precedence over everything\n    with (\n        patch(\"algokit.core.project.bootstrap._get_js_package_manager_override\", return_value=\"npm\"),\n        patch(\"algokit.core.project.bootstrap.get_js_package_manager\", return_value=\"pnpm\"),\n    ):\n        result = _determine_javascript_package_manager(project_dir)\n        assert result == \"npm\"\n\n    # Test 2: User preference takes precedence over smart defaults\n    (project_dir / \"pnpm-lock.yaml\").write_text(\"\")  # Would suggest pnpm\n    with (\n        patch(\"algokit.core.project.bootstrap._get_js_package_manager_override\", return_value=None),\n        patch(\"algokit.core.project.bootstrap.get_js_package_manager\", return_value=\"npm\"),\n    ):\n        result = _determine_javascript_package_manager(project_dir)\n        assert result == \"npm\"\n\n    # Test 3: Smart defaults when no user preference\n    with (\n        patch(\"algokit.core.project.bootstrap._get_js_package_manager_override\", return_value=None),\n        patch(\"algokit.core.project.bootstrap.get_js_package_manager\", return_value=None),\n    ):\n        result = _determine_javascript_package_manager(project_dir)\n        assert result == \"pnpm\"  # Should detect pnpm-lock.yaml\n\n\ndef test_interactive_prompt_saves_preference(tmp_path: Path) -> None:\n    \"\"\"Test that interactive prompt saves the user's choice.\"\"\"\n    project_dir = tmp_path\n\n    with (\n        patch(\"algokit.core.project.bootstrap._get_py_package_manager_override\", return_value=None),\n        patch(\"algokit.core.project.bootstrap.get_py_package_manager\", return_value=None),\n        patch(\"questionary.select\") as mock_select,\n        patch(\"algokit.core.project.bootstrap.save_py_package_manager\") as mock_save,\n    ):\n        mock_select.return_value.ask.return_value = \"uv\"\n\n        result = _determine_python_package_manager(project_dir)\n\n        assert result == \"uv\"\n        mock_save.assert_called_once_with(\"uv\")\n"
  },
  {
    "path": "tests/project/deploy/__init__.py",
    "content": ""
  },
  {
    "path": "tests/project/deploy/test_deploy.py",
    "content": "import json\nimport sys\nfrom pathlib import Path\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom algosdk.account import generate_account\nfrom algosdk.mnemonic import from_private_key\nfrom approvaltests.namer import NamerFactory\nfrom pytest_mock import MockerFixture\n\nfrom algokit.cli.common.utils import sanitize_extra_args\nfrom algokit.core.conf import ALGOKIT_CONFIG\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\nfrom tests.utils.which_mock import WhichMock\n\nPYTHON_EXECUTABLE = sys.executable\n# need to use an escaped python executable path in config files for windows\nPYTHON_EXECUTABLE_ESCAPED = PYTHON_EXECUTABLE.replace(\"\\\\\", \"\\\\\\\\\")\n# note: spaces around the string inside print are important,\n# we need to test the usage of shlex.split vs str.split, to handle\n# splitting inside quotes properly\nTEST_PYTHON_COMMAND = \"print(' test_command_invocation ')\"\n\n\n@pytest.fixture(autouse=True)\ndef which_mock(mocker: MockerFixture) -> WhichMock:\n    which_mock = WhichMock()\n    mocker.patch(\"algokit.core.utils.shutil.which\").side_effect = which_mock.which\n    return which_mock\n\n\ndef test_algokit_config_empty_array(tmp_path_factory: TempPathFactory) -> None:\n    empty_array_config = \"\"\"\n[project.deploy]\ncommand = []\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(empty_array_config, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n    result = invoke([\"project\", \"deploy\"], cwd=cwd)\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_algokit_config_invalid_syntax(tmp_path_factory: TempPathFactory) -> None:\n    invalid_config = \"\"\"\n{\"dummy\": \"json\"}\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / 
ALGOKIT_CONFIG).write_text(invalid_config, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n    result = invoke([\"project\", \"deploy\"], cwd=cwd)\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_algokit_config_name_overrides(\n    tmp_path_factory: TempPathFactory, proc_mock: ProcMock, which_mock: WhichMock\n) -> None:\n    config_with_override = \"\"\"\n[project.deploy]\ncommand = \"command_a\"\n\n[project.deploy.localnet]\ncommand = \"command_b\"\n\n[project.deploy.testnet]\ncommand = \"command_c\"\n    \"\"\".strip()\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_override, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n    (cwd / \".env.localnet\").touch()\n    (cwd / \".env.testnet\").touch()\n\n    resolved_cmd = which_mock.add(\"command_c\")\n    proc_mock.set_output([resolved_cmd], [\"picked testnet\"])\n\n    result = invoke([\"project\", \"deploy\", \"testnet\"], cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_algokit_config_name_no_base(\n    tmp_path_factory: TempPathFactory, proc_mock: ProcMock, which_mock: WhichMock\n) -> None:\n    config_with_override = \"\"\"\n[project.deploy.localnet]\ncommand = \"command_a\"\n\n[project.deploy.testnet]\ncommand = \"command_b\"\n    \"\"\".strip()\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_override, encoding=\"utf-8\")\n    (cwd / \".env.localnet\").touch()\n    (cwd / \".env.testnet\").touch()\n\n    cmd = which_mock.add(\"command_a\")\n    proc_mock.set_output([cmd], [\"picked localnet\"])\n\n    result = invoke([\"project\", \"deploy\", \"localnet\"], cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_command_invocation_and_command_splitting(tmp_path: Path) -> None:\n    config_data = \"\"\"\n[project.deploy]\ncommand = [\"not\", \"used\"]\n    \"\"\".strip()\n    (tmp_path / 
ALGOKIT_CONFIG).write_text(config_data, encoding=\"utf-8\")\n    result = invoke(\n        [\n            \"project\",\n            \"deploy\",\n            \"--command\",\n            f'{PYTHON_EXECUTABLE} -c \"{TEST_PYTHON_COMMAND}\"',\n        ],\n        cwd=tmp_path,\n    )\n    assert result.exit_code == 0\n    verify(result.output.replace(PYTHON_EXECUTABLE, \"<sys.executable>\"))\n\n\ndef test_command_splitting_from_config(tmp_path: Path) -> None:\n    config_data = rf\"\"\"\n[project.deploy]\ncommand = \"{PYTHON_EXECUTABLE_ESCAPED} -c \\\"{TEST_PYTHON_COMMAND}\\\"\"\n    \"\"\".strip()\n    (tmp_path / ALGOKIT_CONFIG).write_text(config_data, encoding=\"utf-8\")\n    result = invoke([\"project\", \"deploy\"], cwd=tmp_path)\n    assert result.exit_code == 0\n    verify(result.output.replace(PYTHON_EXECUTABLE, \"<sys.executable>\"))\n\n\ndef test_command_without_splitting_from_config(tmp_path: Path) -> None:\n    config_data = rf\"\"\"\n[project.deploy]\ncommand = [\"{PYTHON_EXECUTABLE_ESCAPED}\", \"-c\", \"{TEST_PYTHON_COMMAND}\"]\n    \"\"\".strip()\n    (tmp_path / ALGOKIT_CONFIG).write_text(config_data, encoding=\"utf-8\")\n    result = invoke([\"project\", \"deploy\"], cwd=tmp_path)\n    assert result.exit_code == 0\n    verify(result.output.replace(PYTHON_EXECUTABLE, \"<sys.executable>\"))\n\n\n@pytest.mark.usefixtures(\"proc_mock\")\ndef test_command_not_found_and_no_config(tmp_path: Path) -> None:\n    cmd = \"gm\"\n    result = invoke([\"project\", \"deploy\", \"--command\", cmd], cwd=tmp_path)\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_command_not_executable(proc_mock: ProcMock, tmp_path: Path, which_mock: WhichMock) -> None:\n    cmd = \"gm\"\n    cmd_resolved = which_mock.add(cmd)\n    proc_mock.should_deny_on([cmd_resolved])\n    result = invoke([\"project\", \"deploy\", \"--command\", cmd], cwd=tmp_path)\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_command_bad_exit_code(proc_mock: 
ProcMock, tmp_path: Path, which_mock: WhichMock) -> None:\n    cmd = \"gm\"\n    cmd_resolved = which_mock.add(cmd)\n    proc_mock.should_bad_exit_on([cmd_resolved], output=[\"it is not morning\"])\n    result = invoke([\"project\", \"deploy\", \"--command\", cmd], cwd=tmp_path)\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_algokit_env_name_missing(tmp_path_factory: TempPathFactory, which_mock: WhichMock) -> None:\n    config_with_override = \"\"\"\n[project.deploy.customnet]\ncommand = \"command_a\"\n    \"\"\".strip()\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_override, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n\n    which_mock.add(\"command_a\")\n    result = invoke([\"project\", \"deploy\", \"customnet\"], cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_algokit_env_and_name_correct_set(\n    tmp_path_factory: TempPathFactory, proc_mock: ProcMock, monkeypatch: pytest.MonkeyPatch, which_mock: WhichMock\n) -> None:\n    env_config = \"\"\"\nENV_A=GENERIC_ENV_A\nENV_B=GENERIC_ENV_B\nENV_C=GENERIC_ENV_C\n    \"\"\".strip()\n\n    env_name_config = \"\"\"\nENV_A=LOCALNET_ENV_A\nENV_B=LOCALNET_ENV_B\n    \"\"\".strip()\n\n    monkeypatch.setenv(\"ENV_A\", \"ENVIRON_ENV_A\")\n\n    config_with_deploy_name = \"\"\"\n[project.deploy]\ncommand = \"command_a\"\n\n[project.deploy.localnet]\ncommand = \"command_b\"\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_deploy_name, encoding=\"utf-8\")\n    (cwd / \".env\").write_text(env_config, encoding=\"utf-8\")\n    (cwd / \".env.localnet\").write_text(env_name_config, encoding=\"utf-8\")\n\n    cmd_resolved = which_mock.add(\"command_b\")\n    proc_mock.set_output([cmd_resolved], [\"picked localnet\"])\n\n    result = invoke([\"project\", \"deploy\", \"localnet\"], cwd=cwd)\n\n    assert proc_mock.called[1].env\n    passed_env_vars = 
proc_mock.called[1].env\n\n    assert passed_env_vars[\"ENV_A\"] == \"ENVIRON_ENV_A\"  # os.environ is highest loading priority\n    assert passed_env_vars[\"ENV_B\"] == \"LOCALNET_ENV_B\"  # then .env.{name}\n    assert passed_env_vars[\"ENV_C\"] == \"GENERIC_ENV_C\"  # lastly .env\n\n    verify(result.output)\n\n\ndef test_algokit_deploy_only_base_deploy_config(\n    tmp_path_factory: TempPathFactory, proc_mock: ProcMock, which_mock: WhichMock\n) -> None:\n    config_with_only_base_deploy = \"\"\"\n[project.deploy]\ncommand = \"command_a\"\n    \"\"\".strip()\n\n    env_config = \"\"\"\nENV_A=GENERIC_ENV_A\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_only_base_deploy, encoding=\"utf-8\")\n    (cwd / \".env\").write_text(env_config, encoding=\"utf-8\")\n\n    cmd_resolved = which_mock.add(\"command_a\")\n    proc_mock.set_output([cmd_resolved], [\"picked base deploy command\"])\n\n    result = invoke([\"project\", \"deploy\"], cwd=cwd)\n\n    assert result.exit_code == 0\n    assert proc_mock.called[1].env\n    passed_env_vars = proc_mock.called[1].env\n\n    assert passed_env_vars[\"ENV_A\"] == \"GENERIC_ENV_A\"\n\n    verify(result.output)\n\n\ndef test_ci_flag_interactivity_mode_via_env(\n    tmp_path_factory: TempPathFactory,\n    mocker: MockerFixture,\n    monkeypatch: pytest.MonkeyPatch,\n    proc_mock: ProcMock,\n    which_mock: WhichMock,\n) -> None:\n    monkeypatch.setenv(\"CI\", \"true\")\n\n    mock_prompt = mocker.patch(\"click.prompt\")\n\n    config_with_only_base_deploy = \"\"\"\n[project.deploy]\ncommand = \"command_a\"\nenvironment_secrets = [\n    \"DEPLOYER_MNEMONIC\"\n]\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_only_base_deploy, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n\n    cmd_resolved = which_mock.add(\"command_a\")\n    proc_mock.set_output([cmd_resolved], [\"picked base deploy 
command\"])\n\n    result = invoke([\"project\", \"deploy\"], cwd=cwd)\n\n    mock_prompt.assert_not_called()\n    assert result.exit_code != 0\n\n    verify(result.output)\n\n\ndef test_ci_flag_interactivity_mode_via_cli(\n    tmp_path_factory: TempPathFactory,\n    mocker: MockerFixture,\n    proc_mock: ProcMock,\n    which_mock: WhichMock,\n) -> None:\n    mock_prompt = mocker.patch(\"click.prompt\")\n\n    config_with_only_base_deploy = \"\"\"\n[project.deploy]\ncommand = \"command_a\"\nenvironment_secrets = [\n    \"DEPLOYER_MNEMONIC\"\n]\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_only_base_deploy, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n\n    cmd_resolved = which_mock.add(\"command_a\")\n    proc_mock.set_output([cmd_resolved], [\"picked base deploy command\"])\n\n    result = invoke([\"project\", \"deploy\", \"--ci\"], cwd=cwd)\n\n    mock_prompt.assert_not_called()\n    assert result.exit_code != 0\n\n    verify(result.output)\n\n\n# environment_secrets set\ndef test_secrets_prompting_via_stdin(\n    tmp_path_factory: TempPathFactory,\n    mocker: MockerFixture,\n    proc_mock: ProcMock,\n    monkeypatch: pytest.MonkeyPatch,\n    which_mock: WhichMock,\n) -> None:\n    # ensure Github Actions CI env var is not overriding behavior\n    monkeypatch.delenv(\"CI\", raising=False)\n\n    # mock click.prompt\n    mock_prompt = mocker.patch(\"click.prompt\", return_value=\"secret_value\")\n    config_with_only_base_deploy = \"\"\"\n[project.deploy]\ncommand = \"command_a\"\nenvironment_secrets = [\n    \"DEPLOYER_MNEMONIC\"\n]\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_only_base_deploy, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n    cmd_resolved = which_mock.add(\"command_a\")\n    proc_mock.set_output([cmd_resolved], [\"picked base deploy command\"])\n\n    result = invoke([\"project\", \"deploy\"], 
cwd=cwd)\n    mock_prompt.assert_called_once()  # ensure called\n    assert result.exit_code == 0  # ensure success\n\n    # assert that entered value is passed to proc run\n    assert proc_mock.called[1].env\n    called_env = proc_mock.called[1].env\n    assert \"DEPLOYER_MNEMONIC\" in called_env\n    assert called_env[\"DEPLOYER_MNEMONIC\"] == \"secret_value\"\n\n    verify(result.output)\n\n\ndef test_deploy_custom_project_dir(\n    tmp_path_factory: TempPathFactory, proc_mock: ProcMock, which_mock: WhichMock\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    custom_folder = cwd / \"custom_folder\"\n\n    custom_folder.mkdir()\n    (custom_folder / ALGOKIT_CONFIG).write_text(\n        \"\"\"\n[project.deploy]\ncommand = \"command_a\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n    (custom_folder / \".env.testnet\").touch()\n    cmd_resolved = which_mock.add(\"command_a\")\n    proc_mock.set_output([cmd_resolved], [\"picked base deploy command\"])\n\n    input_answers = [\"N\"]\n\n    # Below is needed for escaping the backslash in the path on Windows\n    # Works on Linux as well since \\\\ doesn't exist in the path in such cases\n    path = str(custom_folder.absolute()).replace(\"\\\\\", r\"\\\\\")\n    result = invoke(f\"project deploy testnet --path={path}\", cwd=cwd, input=\"\\n\".join(input_answers))\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_deploy_shutil_command_not_found(tmp_path_factory: TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    (cwd / ALGOKIT_CONFIG).write_text(\n        \"\"\"\n[project.deploy]\ncommand = \"command_a\"\n    \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n    (cwd / \".env\").touch()\n\n    result = invoke([\"project\", \"deploy\"], cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\n@pytest.mark.parametrize(\n    (\"alias\", \"env_var_name\"),\n    [\n        (\"deployer\", \"DEPLOYER_MNEMONIC\"),\n        
(\"dispenser\", \"DISPENSER_MNEMONIC\"),\n    ],\n)\ndef test_deploy_dispenser_alias(\n    alias: str,\n    env_var_name: str,\n    tmp_path_factory: TempPathFactory,\n    proc_mock: ProcMock,\n    monkeypatch: pytest.MonkeyPatch,\n    mock_keyring: dict[str, str],\n    which_mock: WhichMock,\n) -> None:\n    env_config = f\"\"\"\n{env_var_name}=GENERIC_ENV_A\n    \"\"\".strip()\n\n    monkeypatch.setenv(env_var_name, \"GENERIC_ENV_A\")\n\n    config_with_deploy_name = f\"\"\"\n[project.deploy]\ncommand = \"command_a\"\nenvironment_secrets = [\n    \"{env_var_name}\"\n]\n    \"\"\".strip()\n\n    dummy_account_pk, dummy_account_addr = generate_account()  # type: ignore[no-untyped-call]\n    mock_keyring[alias] = json.dumps({\"alias\": alias, \"address\": dummy_account_addr, \"private_key\": dummy_account_pk})\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias])\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_deploy_name, encoding=\"utf-8\")\n    (cwd / \".env\").write_text(env_config, encoding=\"utf-8\")\n    which_mock.add(\"command_a\")\n    result = invoke([\"project\", \"deploy\", f\"--{alias}\", alias], cwd=cwd)\n\n    assert proc_mock.called[1].env\n    passed_env_vars = proc_mock.called[1].env\n\n    assert passed_env_vars[env_var_name] == from_private_key(dummy_account_pk)  # type: ignore[no-untyped-call]\n\n    verify(result.output, options=NamerFactory.with_parameters(alias))\n\n\ndef test_deploy_with_extra_args(tmp_path_factory: TempPathFactory, proc_mock: ProcMock, which_mock: WhichMock) -> None:\n    config_with_deploy = \"\"\"\n[project.deploy]\ncommand = \"command_a\"\n    \"\"\".strip()\n\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / ALGOKIT_CONFIG).write_text(config_with_deploy, encoding=\"utf-8\")\n    (cwd / \".env\").touch()\n\n    cmd_resolved = which_mock.add(\"command_a\")\n    proc_mock.set_output([cmd_resolved], [\"command executed\"])\n\n    extra_args = 
[\"--arg1 value1 --arg2 value2\"]\n    result = invoke([\"project\", \"deploy\", \"--\", *extra_args], cwd=cwd)\n\n    assert result.exit_code == 0\n    assert proc_mock.called[1].command == [cmd_resolved, *sanitize_extra_args(extra_args)]\n    verify(result.output)\n\n\ndef test_deploy_with_extra_args_and_custom_command(\n    tmp_path_factory: TempPathFactory, proc_mock: ProcMock, which_mock: WhichMock\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \".env\").touch()\n\n    custom_command = \"custom_command\"\n    cmd_resolved = which_mock.add(custom_command)\n    proc_mock.set_output([cmd_resolved], [\"custom command executed\"])\n\n    extra_args = [\"--custom-arg1 custom-value1 --custom-arg2 custom-value2\"]\n    result = invoke([\"project\", \"deploy\", \"localnet\", \"--command\", custom_command, \"--\", *extra_args], cwd=cwd)\n\n    assert result.exit_code == 0\n    assert proc_mock.called[1].command == [cmd_resolved, *sanitize_extra_args(extra_args)]\n    verify(result.output)\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_algokit_config_empty_array.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nError: No generic deploy command specified in '.algokit.toml' file.\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_algokit_config_invalid_syntax.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Error parsing .algokit.toml file: Invalid statement (at line 1, column 1)\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Error parsing .algokit.toml file: Invalid statement (at line 1, column 1)\nError: No generic deploy command specified in '.algokit.toml' file.\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_algokit_config_name_no_base.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nDEBUG: Using default environment config for algod and indexer for network localnet\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_a' in '{current_working_directory}'\n/bin/command_a: picked localnet\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_algokit_config_name_overrides.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_c\nLoading deployment environment variables...\nDEBUG: Using default environment config for algod and indexer for network testnet\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_c' in '{current_working_directory}'\n/bin/command_c: picked testnet\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_algokit_deploy_only_base_deploy_config.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_a' in '{current_working_directory}'\n/bin/command_a: picked base deploy command\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_algokit_env_and_name_correct_set.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_b\nLoading deployment environment variables...\nDEBUG: Using default environment config for algod and indexer for network localnet\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_b' in '{current_working_directory}'\n/bin/command_b: picked localnet\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_algokit_env_name_missing.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nError: No such file: {current_working_directory}/.env.customnet\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_ci_flag_interactivity_mode_via_cli.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nError: Error: missing DEPLOYER_MNEMONIC environment variable\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_ci_flag_interactivity_mode_via_env.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nError: Error: missing DEPLOYER_MNEMONIC environment variable\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_command_bad_exit_code.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nUsing deploy command: /bin/gm\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/gm' in '{current_working_directory}'\n/bin/gm: it is not morning\nError: Deployment command exited with error code = -1\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_command_invocation_and_command_splitting.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: <sys.executable> -c print(' test_command_invocation ')\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '<sys.executable> -c print(' test_command_invocation ')' in '{current_working_directory}'\n<sys.executable>: test_command_invocation\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_command_not_executable.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nUsing deploy command: /bin/gm\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/gm' in '{current_working_directory}'\nError: Failed to execute deploy command '/bin/gm', permission denied\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_command_not_found_and_no_config.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nError: Failed to resolve command path, 'gm' wasn't found\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_command_splitting_from_config.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: <sys.executable> -c print(' test_command_invocation ')\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '<sys.executable> -c print(' test_command_invocation ')' in '{current_working_directory}'\n<sys.executable>: test_command_invocation\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_command_without_splitting_from_config.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: <sys.executable> -c print(' test_command_invocation ')\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '<sys.executable> -c print(' test_command_invocation ')' in '{current_working_directory}'\n<sys.executable>: test_command_invocation\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_deploy_custom_project_dir.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Deploying from project directory: {current_working_directory}/custom_folder\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/custom_folder/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nDEBUG: Using default environment config for algod and indexer for network testnet\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_a' in '{current_working_directory}/custom_folder'\n/bin/command_a: picked base deploy command\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_deploy_dispenser_alias.deployer.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nDEBUG: Loaded deployer alias mnemonic as DEPLOYER_MNEMONIC environment variable\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_a' in '{current_working_directory}'\n/bin/command_a: STDOUT\n/bin/command_a: STDERR\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_deploy_dispenser_alias.dispenser.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nDEBUG: Loaded dispenser alias mnemonic as DISPENSER_MNEMONIC environment variable\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_a' in '{current_working_directory}'\n/bin/command_a: STDOUT\n/bin/command_a: STDERR\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_deploy_shutil_command_not_found.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nError: Failed to resolve command path, 'command_a' wasn't found\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_deploy_windows_command_not_found.approved.txt",
    "content": "DEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nError: Failed to parse command 'command_a': Command not found: command_a\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_deploy_with_extra_args.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a --arg1 value1 --arg2 value2\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_a --arg1 value1 --arg2 value2' in '{current_working_directory}'\n/bin/command_a: command executed\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_deploy_with_extra_args_and_custom_command.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nUsing deploy command: /bin/custom_command --custom-arg1 custom-value1 --custom-arg2 custom-value2\nLoading deployment environment variables...\nDEBUG: Using default environment config for algod and indexer for network localnet\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/custom_command --custom-arg1 custom-value1 --custom-arg2 custom-value2' in '{current_working_directory}'\n/bin/custom_command: custom command executed\n"
  },
  {
    "path": "tests/project/deploy/test_deploy.test_secrets_prompting_via_stdin.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: STDOUT\nDEBUG: poetry: STDERR\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: Deploying from project directory: {current_working_directory}\nDEBUG: Loading deploy command from project config\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nUsing deploy command: /bin/command_a\nLoading deployment environment variables...\nDeploying smart contracts from AlgoKit compliant repository 🚀\nDEBUG: Running '/bin/command_a' in '{current_working_directory}'\n/bin/command_a: picked base deploy command\n"
  },
  {
    "path": "tests/project/link/application.json",
    "content": "{\n    \"hints\": {\n        \"hello(string)string\": {\n            \"call_config\": {\n                \"no_op\": \"CALL\"\n            }\n        },\n        \"hello_world_check(string)void\": {\n            \"call_config\": {\n                \"no_op\": \"CALL\"\n            }\n        }\n    },\n    \"source\": {\n        \"approval\": \"I3ByYWdtYSB2ZXJzaW9uIDgKaW50Y2Jsb2NrIDAgMQp0eG4gTnVtQXBwQXJncwppbnRjXzAgLy8gMAo9PQpibnogbWFpbl9sNgp0eG5hIEFwcGxpY2F0aW9uQXJncyAwCnB1c2hieXRlcyAweDAyYmVjZTExIC8vICJoZWxsbyhzdHJpbmcpc3RyaW5nIgo9PQpibnogbWFpbl9sNQp0eG5hIEFwcGxpY2F0aW9uQXJncyAwCnB1c2hieXRlcyAweGJmOWMxZWRmIC8vICJoZWxsb193b3JsZF9jaGVjayhzdHJpbmcpdm9pZCIKPT0KYm56IG1haW5fbDQKZXJyCm1haW5fbDQ6CnR4biBPbkNvbXBsZXRpb24KaW50Y18wIC8vIE5vT3AKPT0KdHhuIEFwcGxpY2F0aW9uSUQKaW50Y18wIC8vIDAKIT0KJiYKYXNzZXJ0CnR4bmEgQXBwbGljYXRpb25BcmdzIDEKY2FsbHN1YiBoZWxsb3dvcmxkY2hlY2tfMwppbnRjXzEgLy8gMQpyZXR1cm4KbWFpbl9sNToKdHhuIE9uQ29tcGxldGlvbgppbnRjXzAgLy8gTm9PcAo9PQp0eG4gQXBwbGljYXRpb25JRAppbnRjXzAgLy8gMAohPQomJgphc3NlcnQKdHhuYSBBcHBsaWNhdGlvbkFyZ3MgMQpjYWxsc3ViIGhlbGxvXzIKc3RvcmUgMApwdXNoYnl0ZXMgMHgxNTFmN2M3NSAvLyAweDE1MWY3Yzc1CmxvYWQgMApjb25jYXQKbG9nCmludGNfMSAvLyAxCnJldHVybgptYWluX2w2Ogp0eG4gT25Db21wbGV0aW9uCmludGNfMCAvLyBOb09wCj09CmJueiBtYWluX2wxMgp0eG4gT25Db21wbGV0aW9uCnB1c2hpbnQgNCAvLyBVcGRhdGVBcHBsaWNhdGlvbgo9PQpibnogbWFpbl9sMTEKdHhuIE9uQ29tcGxldGlvbgpwdXNoaW50IDUgLy8gRGVsZXRlQXBwbGljYXRpb24KPT0KYm56IG1haW5fbDEwCmVycgptYWluX2wxMDoKdHhuIEFwcGxpY2F0aW9uSUQKaW50Y18wIC8vIDAKIT0KYXNzZXJ0CmNhbGxzdWIgZGVsZXRlXzEKaW50Y18xIC8vIDEKcmV0dXJuCm1haW5fbDExOgp0eG4gQXBwbGljYXRpb25JRAppbnRjXzAgLy8gMAohPQphc3NlcnQKY2FsbHN1YiB1cGRhdGVfMAppbnRjXzEgLy8gMQpyZXR1cm4KbWFpbl9sMTI6CnR4biBBcHBsaWNhdGlvbklECmludGNfMCAvLyAwCj09CmFzc2VydAppbnRjXzEgLy8gMQpyZXR1cm4KCi8vIHVwZGF0ZQp1cGRhdGVfMDoKcHJvdG8gMCAwCnR4biBTZW5kZXIKZ2xvYmFsIENyZWF0b3JBZGRyZXNzCj09Ci8vIHVuYXV0aG9yaXplZAphc3NlcnQKcHVzaGludCBUTVBMX1VQREFUQUJMRSAvLyBUTVBMX1VQREFUQUJMRQovLyBDaGVjayBhcHAgaXMgdXBkYXRhYmxlCmFzc2VydApyZXRzdWIKCi8vIGRlbGV0ZQp
kZWxldGVfMToKcHJvdG8gMCAwCnR4biBTZW5kZXIKZ2xvYmFsIENyZWF0b3JBZGRyZXNzCj09Ci8vIHVuYXV0aG9yaXplZAphc3NlcnQKcHVzaGludCBUTVBMX0RFTEVUQUJMRSAvLyBUTVBMX0RFTEVUQUJMRQovLyBDaGVjayBhcHAgaXMgZGVsZXRhYmxlCmFzc2VydApyZXRzdWIKCi8vIGhlbGxvCmhlbGxvXzI6CnByb3RvIDEgMQpwdXNoYnl0ZXMgMHggLy8gIiIKcHVzaGJ5dGVzIDB4NDg2NTZjNmM2ZjJjMjAgLy8gIkhlbGxvLCAiCmZyYW1lX2RpZyAtMQpleHRyYWN0IDIgMApjb25jYXQKZnJhbWVfYnVyeSAwCmZyYW1lX2RpZyAwCmxlbgppdG9iCmV4dHJhY3QgNiAwCmZyYW1lX2RpZyAwCmNvbmNhdApmcmFtZV9idXJ5IDAKcmV0c3ViCgovLyBoZWxsb193b3JsZF9jaGVjawpoZWxsb3dvcmxkY2hlY2tfMzoKcHJvdG8gMSAwCmZyYW1lX2RpZyAtMQpleHRyYWN0IDIgMApwdXNoYnl0ZXMgMHg1NzZmNzI2YzY0IC8vICJXb3JsZCIKPT0KYXNzZXJ0CnJldHN1Yg==\",\n        \"clear\": \"I3ByYWdtYSB2ZXJzaW9uIDgKcHVzaGludCAwIC8vIDAKcmV0dXJu\"\n    },\n    \"state\": {\n        \"global\": {\n            \"num_byte_slices\": 0,\n            \"num_uints\": 0\n        },\n        \"local\": {\n            \"num_byte_slices\": 0,\n            \"num_uints\": 0\n        }\n    },\n    \"schema\": {\n        \"global\": {\n            \"declared\": {},\n            \"reserved\": {}\n        },\n        \"local\": {\n            \"declared\": {},\n            \"reserved\": {}\n        }\n    },\n    \"contract\": {\n        \"name\": \"HelloWorldApp\",\n        \"methods\": [\n            {\n                \"name\": \"hello\",\n                \"args\": [\n                    {\n                        \"type\": \"string\",\n                        \"name\": \"name\"\n                    }\n                ],\n                \"returns\": {\n                    \"type\": \"string\"\n                },\n                \"desc\": \"Returns Hello, {name}\"\n            },\n            {\n                \"name\": \"hello_world_check\",\n                \"args\": [\n                    {\n                        \"type\": \"string\",\n                        \"name\": \"name\"\n                    }\n                ],\n                \"returns\": {\n                    \"type\": 
\"void\"\n                },\n                \"desc\": \"Asserts {name} is \\\"World\\\"\"\n            }\n        ],\n        \"networks\": {}\n    },\n    \"bare_call_config\": {\n        \"delete_application\": \"CALL\",\n        \"no_op\": \"CREATE\",\n        \"update_application\": \"CALL\"\n    }\n}"
  },
  {
    "path": "tests/project/link/test_link.py",
    "content": "import shutil\nfrom pathlib import Path\nfrom unittest.mock import MagicMock, Mock\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.typed_client_generation import AppSpecsNotFoundError\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\nfrom tests.utils.which_mock import WhichMock\n\n\n@pytest.fixture\ndef which_mock(mocker: MockerFixture) -> WhichMock:\n    \"\"\"\n    Fixture to mock 'shutil.which' with predefined responses.\n    \"\"\"\n    which_mock = WhichMock()\n    which_mock.add(\"npx\")\n    mocker.patch(\"algokit.core.utils.shutil.which\").side_effect = which_mock.which\n    return which_mock\n\n\n@pytest.fixture(autouse=True)\ndef client_generator_mock(mocker: MockerFixture) -> MagicMock:\n    \"\"\"\n    Fixture to mock 'shutil.which' with predefined responses.\n    \"\"\"\n\n    client_gen_mock = MagicMock()\n    mocker.patch(\"src.algokit.cli.generate.ClientGenerator.create_for_language\", return_value=client_gen_mock)\n    client_gen_mock.generate_all.return_value = None\n    return client_gen_mock\n\n\ndef _format_output(output: str, replacements: list[tuple[str, str]]) -> str:\n    \"\"\"\n    Modifies the output by replacing specified strings based on provided replacements.\n    Each replacement is a tuple where the first element is the target string to find,\n    and the second element is the string to replace it with. 
This function also ensures\n    that lines starting with \"DEBUG\" are fully removed from the output.\n    \"\"\"\n    for old, new in replacements:\n        output = output.replace(old, new)\n    output = output.replace(\"\\\\\", \"/\")\n    return \"\\n\".join([line for line in output.split(\"\\n\") if not line.startswith(\"DEBUG\")])\n\n\ndef _create_project_config(\n    project_dir: Path,\n    project_type: str,\n    project_name: str,\n    command: str,\n    description: str,\n    with_app_spec: bool = False,  # noqa: FBT001, FBT002\n) -> None:\n    \"\"\"\n    Generates .algokit.toml configuration file in project directory.\n    \"\"\"\n    project_config = f\"\"\"\n[project]\ntype = '{project_type}'\nname = '{project_name}'\nartifacts = 'dist'\n\n[project.run]\nhello = {{ commands = ['{command}'], description = '{description}' }}\n    \"\"\".strip()\n    (project_dir / \".algokit.toml\").write_text(project_config, encoding=\"utf-8\")\n\n    if project_type == \"contract\":\n        (project_dir / \"dist\").mkdir()\n        if with_app_spec:\n            app_spec_example_path = Path(__file__).parent / \"application.json\"\n            shutil.copy(app_spec_example_path, project_dir / \"dist\" / \"application.json\")\n\n\ndef _create_workspace_project(\n    *,\n    workspace_dir: Path,\n    projects: list[dict[str, str]],\n    mock_command: bool = False,\n    which_mock: WhichMock | None = None,\n    proc_mock: ProcMock | None = None,\n    custom_project_order: list[str] | None = None,\n    with_app_spec: bool = True,\n) -> None:\n    \"\"\"\n    Sets up a workspace and its subprojects.\n    \"\"\"\n    workspace_dir.mkdir()\n    custom_project_order = custom_project_order if custom_project_order else [\"contract_project\", \"frontend_project\"]\n    (workspace_dir / \".algokit.toml\").write_text(\n        f\"\"\"\n[project]\ntype = 'workspace'\nprojects_root_path = 'projects'\n\n[project.run]\nhello = {custom_project_order}\n        \"\"\".strip(),\n        
encoding=\"utf-8\",\n    )\n    (workspace_dir / \"projects\").mkdir()\n    for project in projects:\n        project_dir = workspace_dir / \"projects\" / project[\"dir\"]\n        project_dir.mkdir()\n        if mock_command and proc_mock and which_mock:\n            resolved_mocked_cmd = which_mock.add(project[\"command\"])\n            proc_mock.set_output([resolved_mocked_cmd], [\"picked \" + project[\"command\"]])\n\n        _create_project_config(\n            project_dir,\n            project[\"type\"],\n            project[\"name\"],\n            project[\"command\"],\n            project[\"description\"],\n            with_app_spec=with_app_spec,\n        )\n\n\ndef _cwd_with_workspace(\n    tmp_path_factory: TempPathFactory,\n    which_mock: WhichMock,\n    proc_mock: ProcMock,\n    num_projects: int = 1,\n    with_app_spec: bool = True,  # noqa: FBT002, FBT001\n) -> Path:\n    \"\"\"\n    Generates a workspace with specified number of projects.\n    \"\"\"\n\n    def _generate_projects(num: int) -> list[dict[str, str]]:\n        return [\n            {\n                \"dir\": f\"project{i + 1}\",\n                \"type\": \"frontend\" if i == 0 else \"contract\",\n                \"name\": f\"contract_project_{i + 1}\",\n                \"command\": f\"command_{chr(97 + i)}\",\n                \"description\": \"Prints hello\",\n            }\n            for i in range(num)\n        ]\n\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = _generate_projects(num_projects)\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n        mock_command=True,\n        which_mock=which_mock,\n        proc_mock=proc_mock,\n        with_app_spec=with_app_spec,\n    )\n\n    return cwd\n\n\ndef test_link_command_by_name_success(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock, client_generator_mock: MagicMock\n) -> None:\n    \"\"\"\n    Verifies 'project link' 
command success for a specific project name.\n    \"\"\"\n    cwd_with_workspace = _cwd_with_workspace(tmp_path_factory, which_mock, proc_mock, num_projects=5)\n    result = invoke(\"project link --project-name contract_project_3\", cwd=cwd_with_workspace / \"projects\" / \"project1\")\n\n    assert result.exit_code == 0\n    client_generator_mock.generate_all.assert_called_once()\n    verify(_format_output(result.output, [(str(cwd_with_workspace), \"<cwd>\")]))\n\n\ndef test_link_command_all_success(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock, client_generator_mock: MagicMock\n) -> None:\n    \"\"\"\n    Confirms 'project link' command links all projects successfully.\n    \"\"\"\n    contract_projects_count = 4\n    frontend_projects_count = 1\n    cwd_with_workspace = _cwd_with_workspace(\n        tmp_path_factory, which_mock, proc_mock, num_projects=contract_projects_count + frontend_projects_count\n    )\n    result = invoke(\"project link --all\", cwd=cwd_with_workspace / \"projects\" / \"project1\")\n\n    assert result.exit_code == 0\n    assert client_generator_mock.generate_all.call_count == contract_projects_count\n\n    verify(_format_output(result.output, [(str(cwd_with_workspace), \"<cwd>\")]))\n\n\ndef test_link_command_multiple_names_success(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock, client_generator_mock: MagicMock\n) -> None:\n    \"\"\"\n    Ensures 'project link' command success for multiple specified project names.\n    \"\"\"\n    projects_count = 5\n    cwd_with_workspace = _cwd_with_workspace(tmp_path_factory, which_mock, proc_mock, num_projects=projects_count)\n    result = invoke(\n        \"project link --project-name contract_project_3 --project-name contract_project_5\",\n        cwd=cwd_with_workspace / \"projects\" / \"project1\",\n    )\n\n    assert result.exit_code == 0\n\n    expected_call_count = 2\n    assert 
client_generator_mock.generate_all.call_count == expected_call_count\n    verify(_format_output(result.output, [(str(cwd_with_workspace), \"<cwd>\")]))\n\n\ndef test_link_command_multiple_names_no_specs_success(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock, client_generator_mock: MagicMock\n) -> None:\n    \"\"\"\n    Ensures 'project link' command success for multiple specified project names.\n    \"\"\"\n    cwd_with_workspace = _cwd_with_workspace(\n        tmp_path_factory, which_mock, proc_mock, num_projects=5, with_app_spec=False\n    )\n    client_generator_mock.generate_all.side_effect = Mock(side_effect=AppSpecsNotFoundError())\n\n    result = invoke(\n        \"project link --project-name contract_project_3 --project-name contract_project_5\",\n        cwd=cwd_with_workspace / \"projects\" / \"project1\",\n    )\n\n    assert result.exit_code == 0\n    assert client_generator_mock.generate_all.call_count == 2  # noqa: PLR2004\n\n    verify(_format_output(result.output, [(str(cwd_with_workspace), \"<cwd>\")]))\n\n\ndef test_link_command_name_not_found(\n    tmp_path_factory: TempPathFactory,\n    which_mock: WhichMock,\n    proc_mock: ProcMock,\n) -> None:\n    \"\"\"\n    Ensures 'project link' command success for project that does not exist.\n    \"\"\"\n    cwd_with_workspace = _cwd_with_workspace(tmp_path_factory, which_mock, proc_mock, num_projects=5)\n    result = invoke(\n        \"project link --project-name contract_project_13\",\n        cwd=cwd_with_workspace / \"projects\" / \"project1\",\n    )\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output, [(str(cwd_with_workspace), \"<cwd>\")]))\n\n\ndef test_link_command_empty_folder(\n    tmp_path_factory: TempPathFactory,\n) -> None:\n    \"\"\"\n    Ensures 'project link' command success for empty folder.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    result = invoke(\"project link --all\", cwd=cwd)\n\n    assert 
result.exit_code == 0\n    verify(_format_output(result.output, [(str(cwd), \"<cwd>\")]))\n"
  },
  {
    "path": "tests/project/link/test_link.test_link_command_all_success.approved.txt",
    "content": "1/4: Finished processing contract_project_2\n2/4: Finished processing contract_project_3\n3/4: Finished processing contract_project_4\n4/4: Finished processing contract_project_5\n"
  },
  {
    "path": "tests/project/link/test_link.test_link_command_by_name_success.approved.txt",
    "content": "1/1: Finished processing contract_project_3\n"
  },
  {
    "path": "tests/project/link/test_link.test_link_command_empty_folder.approved.txt",
    "content": "WARNING: No .algokit.toml config found. Skipping...\n"
  },
  {
    "path": "tests/project/link/test_link.test_link_command_multiple_names_no_specs_success.approved.txt",
    "content": "WARNING: No application.json | *.arc32.json | *.arc56.json files found in <cwd>/projects/project3. Skipping...\n1/2: Finished processing contract_project_3\nWARNING: No application.json | *.arc32.json | *.arc56.json files found in <cwd>/projects/project5. Skipping...\n2/2: Finished processing contract_project_5\n"
  },
  {
    "path": "tests/project/link/test_link.test_link_command_multiple_names_success.approved.txt",
    "content": "1/2: Finished processing contract_project_3\n2/2: Finished processing contract_project_5\n"
  },
  {
    "path": "tests/project/link/test_link.test_link_command_name_not_found.approved.txt",
    "content": "WARNING: No contract_project_13 found. Skipping...\n"
  },
  {
    "path": "tests/project/link/test_link.test_link_runtime_error.approved.txt",
    "content": "ERROR: Couldn't parse contract name from /private/var/folders/t6/57q65mk543l7xw6_bdgx1bmc0000gn/T/pytest-of-aorumbayev/pytest-353/cwd5/algokit_project/projects/project3/dist/application.json\nError: Couldn't parse contract name from /private/var/folders/t6/57q65mk543l7xw6_bdgx1bmc0000gn/T/pytest-of-aorumbayev/pytest-353/cwd5/algokit_project/projects/project3/dist/application.json\n"
  },
  {
    "path": "tests/project/link/test_link.test_list_command_from_workspace_success.approved.txt",
    "content": "✅ 1/1: Exported typed clients from contract_project_3 typed clients to dist\n"
  },
  {
    "path": "tests/project/list/test_list.py",
    "content": "from pathlib import Path\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\nfrom tests.utils.which_mock import WhichMock\n\n\n@pytest.fixture\ndef which_mock(mocker: MockerFixture) -> WhichMock:\n    which_mock = WhichMock()\n    mocker.patch(\"algokit.core.utils.shutil.which\").side_effect = which_mock.which\n    return which_mock\n\n\ndef _format_output(output: str, replacements: list[tuple[str, str]], remove_debug: bool = True) -> str:  # noqa: FBT002, FBT001\n    \"\"\"\n    Modifies the output by replacing specified strings based on provided replacements.\n    Each replacement is a tuple where the first element is the target string to find,\n    and the second element is the string to replace it with. This function also ensures\n    that lines starting with \"DEBUG\" are fully removed from the output.\n    \"\"\"\n    for old, new in replacements:\n        output = output.replace(old, new)\n    return \"\\n\".join([line for line in output.split(\"\\n\") if not (remove_debug and line.startswith(\"DEBUG\"))]).replace(\n        \"\\\\\", r\"/\"\n    )\n\n\ndef _create_project_config(\n    project_dir: Path, project_type: str, project_name: str, command: str, description: str\n) -> None:\n    \"\"\"\n    Creates a .algokit.toml configuration file in the specified project directory.\n\n    Args:\n        project_dir (Path): The directory of the project.\n        project_type (str): The type of the project.\n        project_name (str): The name of the project.\n        command (str): The command associated with the project.\n        description (str): A description of the project.\n    \"\"\"\n    project_config = f\"\"\"\n[project]\ntype = '{project_type}'\nname = '{project_name}'\n\n[project.run]\nhello = {{ commands = ['{command}'], description = '{description}' }}\n 
   \"\"\".strip()\n    (project_dir / \".algokit.toml\").write_text(project_config, encoding=\"utf-8\")\n\n\ndef _create_workspace_project(\n    *,\n    workspace_dir: Path,\n    projects: list[dict[str, str]],\n    mock_command: bool = False,\n    which_mock: WhichMock | None = None,\n    proc_mock: ProcMock | None = None,\n    custom_project_order: list[str] | None = None,\n) -> None:\n    \"\"\"\n    Creates a workspace project and its subprojects within the specified directory.\n\n    Args:\n        workspace_dir (Path): The directory of the workspace.\n        projects (list[dict[str, str]]): A list of dictionaries, each representing a project with\n        keys for directory, type, name, command, and description.\n        mock_command (bool, optional): Indicates whether to mock the command. Defaults to False.\n        which_mock (WhichMock | None, optional): The mock object for the 'which' command. Defaults to None.\n        proc_mock (ProcMock | None, optional): The mock object for the process execution. 
Defaults to None.\n        custom_project_order (list[str] | None, optional): Specifies a custom order for project execution.\n        Defaults to None.\n    \"\"\"\n    workspace_dir.mkdir()\n    custom_project_order = custom_project_order if custom_project_order else [\"contract_project\", \"frontend_project\"]\n    (workspace_dir / \".algokit.toml\").write_text(\n        f\"\"\"\n[project]\ntype = 'workspace'\nprojects_root_path = 'projects'\n\n[project.run]\nhello = {custom_project_order}\n        \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n    (workspace_dir / \"projects\").mkdir()\n    for project in projects:\n        project_dir = workspace_dir / \"projects\" / project[\"dir\"]\n        project_dir.mkdir()\n        if mock_command and proc_mock and which_mock:\n            resolved_mocked_cmd = which_mock.add(project[\"command\"])\n            proc_mock.set_output([resolved_mocked_cmd], [\"picked \" + project[\"command\"]])\n\n        _create_project_config(\n            project_dir, project[\"type\"], project[\"name\"], project[\"command\"], project[\"description\"]\n        )\n\n\ndef _cwd_with_workspace(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock, num_projects: int = 1\n) -> Path:\n    \"\"\"\n    Creates a workspace with a specified number of standalone projects, each with a single command.\n    Projects are generated in a loop based on the number specified.\n    \"\"\"\n\n    def _generate_projects(num: int) -> list[dict[str, str]]:\n        return [\n            {\n                \"dir\": f\"project{i + 1}\",\n                \"type\": \"contract\",\n                \"name\": f\"contract_project_{i + 1}\",\n                \"command\": f\"command_{chr(97 + i)}\",\n                \"description\": \"Prints hello\",\n            }\n            for i in range(num)\n        ]\n\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = _generate_projects(num_projects)\n    
_create_workspace_project(\n        workspace_dir=cwd, projects=projects, mock_command=True, which_mock=which_mock, proc_mock=proc_mock\n    )\n\n    return cwd\n\n\ndef test_list_command_from_workspace_success(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock\n) -> None:\n    \"\"\"\n    Test to ensure the 'project list' command executes successfully within a workspace containing multiple projects.\n\n    This test simulates a workspace environment with 20 projects and verifies that the\n    command lists all projects without errors.\n\n    Args:\n        tmp_path_factory (TempPathFactory): A fixture to create temporary directories.\n        which_mock (WhichMock): A mock for the 'which' command.\n        proc_mock (ProcMock): A mock for process execution.\n    \"\"\"\n    cwd_with_workspace = _cwd_with_workspace(tmp_path_factory, which_mock, proc_mock, num_projects=20)\n    result = invoke(f\"project list {cwd_with_workspace}\".split(), cwd=cwd_with_workspace)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output, [(str(cwd_with_workspace), \"<cwd>\")]))\n\n\ndef test_list_command_from_empty_folder(\n    tmp_path_factory: TempPathFactory,\n) -> None:\n    \"\"\"\n    Test to verify that the 'project list' command executes successfully in an empty directory.\n\n    This test ensures that executing the command in a directory without any projects or workspace\n      configuration does not result in errors.\n\n    Args:\n        tmp_path_factory (TempPathFactory): A fixture to create temporary directories.\n    \"\"\"\n    empty_cwd = tmp_path_factory.mktemp(\"cwd\")\n    result = invoke(f\"project list {empty_cwd}\".split(), cwd=empty_cwd)\n\n    assert result.exit_code == 0\n    verify(\n        _format_output(\n            result.output,\n            [(str(empty_cwd.parent), \"<cwd>\"), (str(empty_cwd.parent.parent), \"<cwd>\")],\n            remove_debug=False,\n        )\n    )\n\n\ndef 
test_list_command_no_args(\n    tmp_path_factory: TempPathFactory,\n) -> None:\n    \"\"\"\n    Test to ensure the 'project list' command executes successfully without specifying a directory.\n\n    This test checks that the command can be executed in an empty directory without passing any\n    arguments, and it completes without errors.\n\n    Args:\n        tmp_path_factory (TempPathFactory): A fixture to create temporary directories.\n    \"\"\"\n    empty_cwd = tmp_path_factory.mktemp(\"cwd\")\n    result = invoke(\"project list\", cwd=empty_cwd)\n\n    assert result.exit_code == 0\n    verify(\n        _format_output(\n            result.output,\n            [(str(empty_cwd.parent), \"<cwd>\"), (str(empty_cwd.parent.parent), \"<cwd>\")],\n            remove_debug=False,\n        )\n    )\n"
  },
  {
    "path": "tests/project/list/test_list.test_list_command_from_empty_folder.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from <cwd>/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from <cwd>/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: No AlgoKit workspace found. Check [project.type] definition at .algokit.toml\n"
  },
  {
    "path": "tests/project/list/test_list.test_list_command_from_workspace_success.approved.txt",
    "content": "workspace: {current_working_directory} 📁\n  - contract_project_1 ({current_working_directory}/projects/project1) 📜\n  - contract_project_2 ({current_working_directory}/projects/project2) 📜\n  - contract_project_3 ({current_working_directory}/projects/project3) 📜\n  - contract_project_4 ({current_working_directory}/projects/project4) 📜\n  - contract_project_5 ({current_working_directory}/projects/project5) 📜\n  - contract_project_6 ({current_working_directory}/projects/project6) 📜\n  - contract_project_7 ({current_working_directory}/projects/project7) 📜\n  - contract_project_8 ({current_working_directory}/projects/project8) 📜\n  - contract_project_9 ({current_working_directory}/projects/project9) 📜\n  - contract_project_10 ({current_working_directory}/projects/project10) 📜\n  - contract_project_11 ({current_working_directory}/projects/project11) 📜\n  - contract_project_12 ({current_working_directory}/projects/project12) 📜\n  - contract_project_13 ({current_working_directory}/projects/project13) 📜\n  - contract_project_14 ({current_working_directory}/projects/project14) 📜\n  - contract_project_15 ({current_working_directory}/projects/project15) 📜\n  - contract_project_16 ({current_working_directory}/projects/project16) 📜\n  - contract_project_17 ({current_working_directory}/projects/project17) 📜\n  - contract_project_18 ({current_working_directory}/projects/project18) 📜\n  - contract_project_19 ({current_working_directory}/projects/project19) 📜\n  - contract_project_20 ({current_working_directory}/projects/project20) 📜\n"
  },
  {
    "path": "tests/project/list/test_list.test_list_command_no_args.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from <cwd>/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nDEBUG: Attempting to load project config from <cwd>/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\nWARNING: No AlgoKit workspace found. Check [project.type] definition at .algokit.toml\n"
  },
  {
    "path": "tests/project/list/test_list.test_list_command_verbose_from_workspace_success.approved.txt",
    "content": "contract_project_1: ({current_working_directory}/projects/project1) 📜\ncontract_project_2: ({current_working_directory}/projects/project2) 📜\ncontract_project_3: ({current_working_directory}/projects/project3) 📜\ncontract_project_4: ({current_working_directory}/projects/project4) 📜\ncontract_project_5: ({current_working_directory}/projects/project5) 📜\ncontract_project_6: ({current_working_directory}/projects/project6) 📜\ncontract_project_7: ({current_working_directory}/projects/project7) 📜\ncontract_project_8: ({current_working_directory}/projects/project8) 📜\ncontract_project_9: ({current_working_directory}/projects/project9) 📜\ncontract_project_10: ({current_working_directory}/projects/project10) 📜\ncontract_project_11: ({current_working_directory}/projects/project11) 📜\ncontract_project_12: ({current_working_directory}/projects/project12) 📜\ncontract_project_13: ({current_working_directory}/projects/project13) 📜\ncontract_project_14: ({current_working_directory}/projects/project14) 📜\ncontract_project_15: ({current_working_directory}/projects/project15) 📜\ncontract_project_16: ({current_working_directory}/projects/project16) 📜\ncontract_project_17: ({current_working_directory}/projects/project17) 📜\ncontract_project_18: ({current_working_directory}/projects/project18) 📜\ncontract_project_19: ({current_working_directory}/projects/project19) 📜\ncontract_project_20: ({current_working_directory}/projects/project20) 📜\n"
  },
  {
    "path": "tests/project/list/test_list.test_run_command_from_workspace_success.approved.txt",
    "content": "DEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nℹ️  project1\nℹ️  project2\nℹ️  project3\nℹ️  project4\nℹ️  project5\nℹ️  project6\nℹ️  project7\nℹ️  project8\nℹ️  project9\nℹ️  project10\nℹ️  project11\nℹ️  project12\nℹ️  project13\nℹ️  project14\nℹ️  project15\nℹ️  project16\nℹ️  project17\nℹ️  project18\nℹ️  project19\nℹ️  project20\n"
  },
  {
    "path": "tests/project/run/__init__.py",
    "content": ""
  },
  {
    "path": "tests/project/run/test_run.py",
    "content": "import sys\nfrom collections.abc import Callable\nfrom pathlib import Path\n\nimport pytest\nfrom _pytest.tmpdir import TempPathFactory\nfrom pytest_mock import MockerFixture\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\nfrom tests.utils.which_mock import WhichMock\n\nDirWithAppSpecFactory = Callable[[Path], Path]\n\nPYTHON_EXECUTABLE = sys.executable\n# Escaping the python executable path for use in config files on Windows platforms\nPYTHON_EXECUTABLE_ESCAPED = PYTHON_EXECUTABLE.replace(\"\\\\\", \"\\\\\\\\\")\n\n\ndef _format_output(output: str) -> str:\n    \"\"\"\n    Strips lines from the output that start with the specified string.\n\n    Args:\n        output (str): The output string to process.\n        start_str (str): The string that identifies the start of lines to be stripped.\n\n    Returns:\n        str: The processed output with specified lines stripped.\n    \"\"\"\n    output = \"\\n\".join(\n        [line for line in output.split(\"\\n\") if not line.startswith(\"DEBUG\") and line.strip() != \"raise Exception()\"]\n    )\n    return output.replace(PYTHON_EXECUTABLE_ESCAPED, \"<sys.executable>\").replace(\"\\\\\", r\"\\\\\")\n\n\n@pytest.fixture(autouse=True)\ndef _disable_animation(mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.utils.animate\", return_value=None)\n\n\n@pytest.fixture\ndef which_mock(mocker: MockerFixture) -> WhichMock:\n    which_mock = WhichMock()\n    mocker.patch(\"algokit.core.utils.shutil.which\").side_effect = which_mock.which\n    return which_mock\n\n\ndef _create_project_config(\n    project_dir: Path, project_type: str, project_name: str, command: str, description: str\n) -> None:\n    \"\"\"\n    Creates a .algokit.toml configuration file in the specified project directory.\n\n    Args:\n        project_dir (Path): The directory of the project.\n        project_type (str): The type of the project.\n 
       project_name (str): The name of the project.\n        command (str): The command associated with the project.\n        description (str): A description of the project.\n    \"\"\"\n    project_config = f\"\"\"\n[project]\ntype = '{project_type}'\nname = '{project_name}'\n\n[project.run]\nhello = {{ commands = ['{command}'], description = '{description}' }}\n    \"\"\".strip()\n    (project_dir / \".algokit.toml\").write_text(project_config, encoding=\"utf-8\")\n\n\ndef _create_workspace_project(\n    *,\n    workspace_dir: Path,\n    projects: list[dict[str, str]],\n    mock_command: bool = False,\n    which_mock: WhichMock | None = None,\n    proc_mock: ProcMock | None = None,\n    custom_project_order: list[str] | None = None,\n) -> None:\n    \"\"\"\n    Creates a workspace project and its subprojects within the specified directory.\n\n    Args:\n        workspace_dir (Path): The directory of the workspace.\n        projects (list[dict[str, str]]): A list of dictionaries, each representing a project with\n        keys for directory, type, name, command, and description.\n        mock_command (bool, optional): Indicates whether to mock the command. Defaults to False.\n        which_mock (WhichMock | None, optional): The mock object for the 'which' command. Defaults to None.\n        proc_mock (ProcMock | None, optional): The mock object for the process execution. 
Defaults to None.\n        custom_project_order (list[str] | None, optional): Specifies a custom order for project execution.\n        Defaults to None.\n    \"\"\"\n    workspace_dir.mkdir()\n    custom_project_order = custom_project_order if custom_project_order else [\"contract_project\", \"frontend_project\"]\n    (workspace_dir / \".algokit.toml\").write_text(\n        f\"\"\"\n[project]\ntype = 'workspace'\nprojects_root_path = 'projects'\n\n[project.run]\nhello = {custom_project_order}\n        \"\"\".strip(),\n        encoding=\"utf-8\",\n    )\n    (workspace_dir / \"projects\").mkdir()\n    for project in projects:\n        project_dir = workspace_dir / \"projects\" / project[\"dir\"]\n        project_dir.mkdir()\n        if mock_command and proc_mock and which_mock:\n            resolved_mocked_cmd = which_mock.add(project[\"command\"])\n            proc_mock.set_output([resolved_mocked_cmd], [\"picked \" + project[\"command\"]])\n\n        _create_project_config(\n            project_dir, project[\"type\"], project[\"name\"], project[\"command\"], project[\"description\"]\n        )\n\n\n@pytest.fixture\ndef cwd_with_workspace_sequential(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock\n) -> Path:\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = [\n        {\n            \"dir\": \"project1\",\n            \"type\": \"contract\",\n            \"name\": \"contract_project\",\n            \"command\": \"command_a\",\n            \"description\": \"Prints hello\",\n        },\n        {\n            \"dir\": \"project2\",\n            \"type\": \"frontend\",\n            \"name\": \"frontend_project\",\n            \"command\": \"command_b\",\n            \"description\": \"Prints hello\",\n        },\n    ]\n    _create_workspace_project(\n        workspace_dir=cwd, projects=projects, mock_command=True, which_mock=which_mock, proc_mock=proc_mock\n    )\n\n    return 
cwd\n\n\n@pytest.fixture\ndef cwd_with_workspace(tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock) -> Path:\n    \"\"\"\n    Creates a standalone project with a single command.\n    Single project is specified due to the fact that these are run concurrently,\n    hence output stability is not guaranteed\n    \"\"\"\n\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = [\n        {\n            \"dir\": \"project1\",\n            \"type\": \"contract\",\n            \"name\": \"contract_project\",\n            \"command\": \"command_a\",\n            \"description\": \"Prints hello\",\n        },\n    ]\n    _create_workspace_project(\n        workspace_dir=cwd, projects=projects, mock_command=True, which_mock=which_mock, proc_mock=proc_mock\n    )\n\n    return cwd\n\n\n@pytest.fixture\ndef cwd_with_standalone(tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock) -> Path:\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    cwd.mkdir()\n\n    which_mock.add(\"command_a\")\n    proc_mock.set_output([\"command_a\"], [\"picked command_a\"])\n    _create_project_config(cwd, \"contract\", \"contract_project\", \"command_a\", \"Prints hello contracts\")\n\n    return cwd\n\n\ndef test_run_command_from_workspace_success(\n    cwd_with_workspace: Path,\n) -> None:\n    \"\"\"\n    Verifies successful command execution within a workspace project.\n\n    Args:\n        cwd_with_workspace (Path): The path to the workspace directory.\n    \"\"\"\n    result = invoke(\"project run hello\", cwd=cwd_with_workspace)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_workspace_sequential_success(cwd_with_workspace_sequential: Path) -> None:\n    \"\"\"\n    Verifies successful sequential command execution within a workspace project.\n\n    Args:\n        cwd_with_workspace_sequential (Path): The path to the workspace 
directory.\n    \"\"\"\n    result = invoke(\"project run hello\", cwd=cwd_with_workspace_sequential)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_standalone(cwd_with_standalone: Path) -> None:\n    \"\"\"\n    Verifies successful command execution within a standalone project.\n\n    Args:\n        cwd_with_standalone (Path): The path to the standalone project directory.\n    \"\"\"\n    result = invoke(\"project run hello\", cwd=cwd_with_standalone)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_workspace_filtered(cwd_with_workspace_sequential: Path) -> None:\n    \"\"\"\n    Verifies successful command execution within a workspace project with filtering by project name.\n\n    Args:\n        cwd_with_workspace_sequential (Path): The path to the workspace directory.\n    \"\"\"\n    result = invoke(\"project run hello --project-name 'contract_project'\", cwd=cwd_with_workspace_sequential)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n\n\ndef test_list_all_commands_in_workspace(cwd_with_workspace_sequential: Path) -> None:\n    \"\"\"\n    Lists all commands available within a workspace project.\n\n    Args:\n        cwd_with_workspace_sequential (Path): The path to the workspace directory.\n    \"\"\"\n    result = invoke(\"project run hello --list\", cwd=cwd_with_workspace_sequential)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_workspace_filtered_no_project(cwd_with_workspace_sequential: Path) -> None:\n    \"\"\"\n    Verifies command execution within a workspace project when the specified project does not exist.\n\n    Args:\n        cwd_with_workspace_sequential (Path): The path to the workspace directory.\n    \"\"\"\n    result = invoke(\"project run hello --project-name contract_project2\", cwd=cwd_with_workspace_sequential)\n\n    
assert result.exit_code == 0\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_workspace_resolution_error(\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    \"\"\"\n    Verifies the behavior when a command resolution error occurs within a workspace project.\n\n    Args:\n        tmp_path_factory (pytest.TempPathFactory): Pytest fixture to create temporary directories.\n    \"\"\"\n\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = [\n        {\n            \"dir\": \"project2\",\n            \"type\": \"frontend\",\n            \"name\": \"frontend_project\",\n            \"command\": \"failthiscommand\",\n            \"description\": \"Prints hello\",\n        },\n    ]\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n    )\n\n    result = invoke(\"project run hello\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_workspace_execution_error(\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    \"\"\"\n    Verifies the behavior when a command execution error occurs within a workspace project.\n\n    Args:\n        tmp_path_factory (pytest.TempPathFactory): Pytest fixture to create temporary directories.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = [\n        {\n            \"dir\": \"project2\",\n            \"type\": \"frontend\",\n            \"name\": \"frontend_project\",\n            \"command\": PYTHON_EXECUTABLE_ESCAPED + ' -c \"raise Exception()\"',\n            \"description\": \"Prints hello\",\n        },\n    ]\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n    )\n\n    result = invoke(\"project run hello\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_standalone_resolution_error(\n    
tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    \"\"\"\n    Verifies the behavior when a command resolution error occurs within a standalone project.\n\n    Args:\n        tmp_path_factory (pytest.TempPathFactory): Pytest fixture to create temporary directories.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = [\n        {\n            \"dir\": \"project2\",\n            \"type\": \"frontend\",\n            \"name\": \"frontend_project\",\n            \"command\": \"failthiscommand\",\n            \"description\": \"Prints hello\",\n        },\n    ]\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n    )\n\n    result = invoke(\"project run hello\", cwd=cwd / \"projects\" / \"project2\")\n\n    assert result.exit_code == 1\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_standalone_execution_error(tmp_path_factory: pytest.TempPathFactory) -> None:\n    \"\"\"\n    Verifies the behavior when a command execution error occurs within a standalone project.\n\n    Args:\n        tmp_path_factory (pytest.TempPathFactory): Pytest fixture to create temporary directories.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    cwd.mkdir()\n    _create_project_config(\n        cwd,\n        \"contract\",\n        \"contract_project\",\n        PYTHON_EXECUTABLE_ESCAPED + ' -c \"raise Exception()\"',\n        \"Prints hello contracts\",\n    )\n\n    result = invoke(\"project run hello\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(_format_output(result.output))\n\n\ndef test_run_command_from_workspace_partially_sequential(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock\n) -> None:\n    \"\"\"\n    Verifies successful execution of commands in a partially sequential order within a workspace project.\n\n    Args:\n        tmp_path_factory (TempPathFactory): Pytest fixture to create 
temporary directories.\n        which_mock (WhichMock): Mock object for the 'which' command.\n        proc_mock (ProcMock): Mock object for process execution.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = []\n    for i in range(1, 6):\n        projects.append(\n            {\n                \"dir\": f\"project{i}\",\n                \"type\": \"contract\",\n                \"name\": f\"contract_project_{i}\",\n                \"command\": f\"hello{i}\",\n                \"description\": \"Prints hello\",\n            }\n        )\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n        mock_command=True,\n        which_mock=which_mock,\n        proc_mock=proc_mock,\n        custom_project_order=[\"contract_project_1\", \"contract_project_4\"],\n    )\n\n    result = invoke(\"project run hello\", cwd=cwd)\n    assert result.exit_code == 0\n    order_of_execution = [line for line in result.output.split(\"\\n\") if line.startswith(\"✅\")]\n    assert \"contract_project_1\" in order_of_execution[0]\n    assert \"contract_project_4\" in order_of_execution[1]\n\n\ndef test_run_command_from_standalone_pass_env(\n    tmp_path_factory: TempPathFactory,\n) -> None:\n    \"\"\"\n    Verifies successful command execution within a standalone project with environment variables passed.\n\n    Args:\n        tmp_path_factory (TempPathFactory): Pytest fixture to create temporary directories.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    cwd.mkdir()\n    (cwd / \"print_env.py\").write_text('import os; print(os.environ.get(\"HELLO\"))')\n\n    _create_project_config(\n        cwd,\n        \"contract\",\n        \"contract_project\",\n        PYTHON_EXECUTABLE_ESCAPED + \" print_env.py\",\n        \"Prints hello contracts\",\n    )\n    result = invoke(\"project run hello\", cwd=cwd, env={\"HELLO\": \"Hello World from env variable!\"})\n\n    assert 
result.exit_code == 0\n    verify(_format_output(result.output))\n\n\ndef test_run_command_help_works_without_path_resolution(\n    tmp_path_factory: TempPathFactory,\n    which_mock: WhichMock,\n    proc_mock: ProcMock,\n) -> None:\n    \"\"\"\n    Verifies that the help command works without path resolution.\n    \"\"\"\n\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = []\n    for i in range(1, 6):\n        projects.append(\n            {\n                \"dir\": f\"project{i}\",\n                \"type\": \"contract\",\n                \"name\": f\"contract_project_{i}\",\n                \"command\": f\"hello{i}\",\n                \"description\": \"Prints hello\",\n            }\n        )\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n        mock_command=False,\n        which_mock=which_mock,\n        proc_mock=proc_mock,\n        custom_project_order=[\"contract_project_1\", \"contract_project_4\"],\n    )\n\n    result = invoke(\"project run --help\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n\n    assert invoke(\"project run hello\", cwd=cwd).exit_code == 1\n\n\ndef test_run_command_from_workspace_with_sequential_flag(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = []\n    for i in range(1, 6):\n        projects.append(\n            {\n                \"dir\": f\"project{i}\",\n                \"type\": \"contract\",\n                \"name\": f\"contract_project_{i}\",\n                \"command\": f\"hello{i}\",\n                \"description\": \"Prints hello\",\n            }\n        )\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n        mock_command=True,\n        which_mock=which_mock,\n        proc_mock=proc_mock,\n    )\n\n    result = invoke(\"project 
run hello --sequential\", cwd=cwd)\n    assert result.exit_code == 0\n    order_of_execution = [line for line in result.output.split(\"\\n\") if line.startswith(\"✅\")]\n    for i in range(5):\n        assert f\"contract_project_{i + 1}\" in order_of_execution[i]\n\n\ndef test_run_command_from_workspace_with_order_and_sequential_flag(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = []\n    for i in range(1, 6):\n        projects.append(\n            {\n                \"dir\": f\"project{i}\",\n                \"type\": \"contract\",\n                \"name\": f\"contract_project_{i}\",\n                \"command\": f\"hello{i}\",\n                \"description\": \"Prints hello\",\n            }\n        )\n    _create_workspace_project(\n        workspace_dir=cwd,\n        projects=projects,\n        mock_command=True,\n        which_mock=which_mock,\n        proc_mock=proc_mock,\n        custom_project_order=[\"contract_project_4\"],\n    )\n\n    result = invoke(\"project run hello --sequential\", cwd=cwd)\n    assert result.exit_code == 0\n    order_of_execution = [line for line in result.output.split(\"\\n\") if line.startswith(\"✅\")]\n    assert \"contract_project_4\" in order_of_execution[0]\n\n\ndef test_run_command_from_standalone_with_extra_args(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock\n) -> None:\n    \"\"\"\n    Verifies successful command execution within a standalone project with extra arguments.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    cwd.mkdir()\n\n    which_mock.add(\"echo\")\n    proc_mock.set_output([\"echo\", \"Hello\", \"extra\", \"args\"], [\"Hello extra args\"])\n    _create_project_config(cwd, \"contract\", \"contract_project\", \"echo Hello\", \"Prints hello with extra args\")\n\n    result = invoke(\"project run hello -- extra 
args\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n    assert \"Hello extra args\" in result.output\n\n\ndef test_run_command_from_workspace_with_extra_args(\n    tmp_path_factory: TempPathFactory, which_mock: WhichMock, proc_mock: ProcMock\n) -> None:\n    \"\"\"\n    Verifies successful command execution within a workspace project with extra arguments.\n    \"\"\"\n    cwd = tmp_path_factory.mktemp(\"cwd\") / \"algokit_project\"\n    projects = [\n        {\n            \"dir\": \"project1\",\n            \"type\": \"contract\",\n            \"name\": \"contract_project\",\n            \"command\": \"echo Hello\",\n            \"description\": \"Prints hello with extra args\",\n        },\n    ]\n    _create_workspace_project(\n        workspace_dir=cwd, projects=projects, mock_command=True, which_mock=which_mock, proc_mock=proc_mock\n    )\n\n    which_mock.add(\"echo\")\n    proc_mock.set_output([\"echo\", \"Hello\", \"extra\", \"args\"], [\"Hello extra args\"])\n\n    result = invoke(\"project run hello -- extra args\", cwd=cwd)\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n    assert \"Hello extra args\" in result.output\n\n\ndef test_run_command_from_workspace_with_extra_args_and_project_filter(cwd_with_workspace_sequential: Path) -> None:\n    \"\"\"\n    Verifies successful command execution within a workspace project with extra arguments and project filtering.\n    \"\"\"\n    result = invoke(\n        \"project run hello --project-name 'contract_project' -- extra args\", cwd=cwd_with_workspace_sequential\n    )\n\n    assert result.exit_code == 0\n    verify(_format_output(result.output))\n    assert \"frontend_project\" not in result.output\n"
  },
  {
    "path": "tests/project/run/test_run.test_list_all_commands_in_workspace.approved.txt",
    "content": "ℹ️  Project: contract_project, Command name: hello, Command(s): command_a\nℹ️  Project: frontend_project, Command name: hello, Command(s): command_b\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_standalone.approved.txt",
    "content": "Running `hello` command in {current_working_directory}...\nCommand Executed: 'command_a'\nOutput: STDOUT\nSTDERR\n✅ contract_project: 'command_a' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_standalone_execution_error.approved.txt",
    "content": "Running `hello` command in {current_working_directory}...\nERROR: \n····················· project run 'hello' command output: ······················\nTraceback (most recent call last):\n  File \"<string>\", line 1, in <module>\nException\n\nError: 'hello' failed executing '<sys.executable> -c raise Exception()' with exit code = 1\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_standalone_pass_env.approved.txt",
    "content": "Running `hello` command in {current_working_directory}...\nCommand Executed: '<sys.executable> print_env.py'\nOutput: Hello World from env variable!\n\n✅ contract_project: '<sys.executable> print_env.py' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_standalone_resolution_error.approved.txt",
    "content": "Running `hello` command in {current_working_directory}...\nERROR: 'hello' failed executing: 'failthiscommand'\nError: Failed to resolve command path, 'failthiscommand' wasn't found\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_standalone_with_extra_args.approved.txt",
    "content": "Running `hello` command in {current_working_directory}...\nCommand Executed: 'echo Hello'\nOutput: STDOUT\nSTDERR\nExtra Args: 'extra args'\n✅ contract_project: 'echo Hello' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_execution_error.approved.txt",
    "content": "Running commands sequentially.\n⏳ frontend_project: 'hello' command in progress...\nERROR: \n····················· project run 'hello' command output: ······················\nTraceback (most recent call last):\n  File \"<string>\", line 1, in <module>\nException\n\nERROR: ❌ frontend_project: 'hello' failed executing '<sys.executable> -c raise Exception()' with exit code = 1\nError: failed to execute 'hello' command in 'frontend_project'\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_filtered.approved.txt",
    "content": "Running commands sequentially.\n⏳ contract_project: 'hello' command in progress...\n✅ contract_project: 'command_a' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_filtered_no_project.approved.txt",
    "content": "Running commands sequentially.\nWARNING: Missing projects: contract_project2. Proceeding with available ones.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_resolution_error.approved.txt",
    "content": "Running commands sequentially.\n⏳ frontend_project: 'hello' command in progress...\nERROR: 'hello' failed executing: 'failthiscommand'\nERROR: ❌ frontend_project: Failed to resolve command path, 'failthiscommand' wasn't found\nError: failed to execute 'hello' command in 'frontend_project'\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_sequential_success.approved.txt",
    "content": "Running commands sequentially.\n⏳ contract_project: 'hello' command in progress...\n✅ contract_project: 'command_a' executed successfully.\n⏳ frontend_project: 'hello' command in progress...\n✅ frontend_project: 'command_b' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_success.approved.txt",
    "content": "Running commands sequentially.\n⏳ contract_project: 'hello' command in progress...\n✅ contract_project: 'command_a' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_with_extra_args.approved.txt",
    "content": "Running commands sequentially.\n⏳ contract_project: 'hello' command in progress...\n✅ contract_project: 'echo Hello extra args' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_from_workspace_with_extra_args_and_project_filter.approved.txt",
    "content": "Running commands sequentially.\n⏳ contract_project: 'hello' command in progress...\n✅ contract_project: 'command_a extra args' executed successfully.\n"
  },
  {
    "path": "tests/project/run/test_run.test_run_command_help_works_without_path_resolution.approved.txt",
    "content": "Usage: algokit project run [OPTIONS] COMMAND [ARGS]...\n\n  Define custom commands and manage their execution in you projects.\n\nOptions:\n  -h, --help  Show this message and exit.\n\nCommands:\n  hello  Run all \"hello\" commands in the workspace project.\n"
  },
  {
    "path": "tests/tasks/TestAddAlias.test_wallet_add_account_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nDEBUG: `test_alias` does not exist\nDEBUG: Failed to get alias keys from keyring\nAlias 'test_alias' added successfully.\n"
  },
  {
    "path": "tests/tasks/TestAddAlias.test_wallet_add_address_successful.approved.txt",
    "content": "DEBUG: `test_alias` does not exist\nDEBUG: Failed to get alias keys from keyring\nAlias 'test_alias' added successfully.\n"
  },
  {
    "path": "tests/tasks/TestAddAlias.test_wallet_add_alias_exists.approved.txt",
    "content": "Alias 'test_alias' already exists. Overwrite? (y, n) [n]: y\nDEBUG: Failed to get alias keys from keyring\nAlias 'test_alias' added successfully.\n"
  },
  {
    "path": "tests/tasks/TestAddAlias.test_wallet_add_alias_generic_error.approved.txt",
    "content": "DEBUG: `test_alias` does not exist\nError: Failed to add alias\n"
  },
  {
    "path": "tests/tasks/TestAddAlias.test_wallet_add_alias_limit_error.approved.txt",
    "content": "DEBUG: `test_alias` does not exist\nDEBUG: Failed to add alias to keyring\nError: Reached the max of 50 aliases.\n"
  },
  {
    "path": "tests/tasks/TestAddAlias.test_wallet_add_alias_mnemonic_differs.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nWarning: Address from the mnemonic doesn't match the provided address. It won't work unless the account has been rekeyed.\nDEBUG: `test_alias` does not exist\nDEBUG: Failed to get alias keys from keyring\nAlias 'test_alias' added successfully.\n"
  },
  {
    "path": "tests/tasks/TestAddAlias.test_wallet_add_invalid_address.approved.txt",
    "content": "Error: `invalid_address` is an invalid account address\n"
  },
  {
    "path": "tests/tasks/TestGetAlias.test_wallet_get_alias_not_found.approved.txt",
    "content": "DEBUG: `test_alias` does not exist\nError: Alias `test_alias` does not exist.\n"
  },
  {
    "path": "tests/tasks/TestGetAlias.test_wallet_get_alias_successful.approved.txt",
    "content": "Address for alias `test_alias`: LAAX2VEIRZKS33PUIJOASGEO2V57TBJTXSRY4WTFPAA3OD72FUTKFMGMSM\n"
  },
  {
    "path": "tests/tasks/TestIpfsLogin.test_ipfs_login_exists.approved.txt",
    "content": "WARNING: You are already logged in!\n"
  },
  {
    "path": "tests/tasks/TestIpfsLogin.test_ipfs_login_successful.approved.txt",
    "content": "Follow the instructions on https://docs.pinata.cloud/docs/getting-started to create an account and obtain a JWT.\nEnter pinata JWT: \nRepeat for confirmation: \nLogin successful\n"
  },
  {
    "path": "tests/tasks/TestIpfsLogout.test_ipfs_logout.approved.txt",
    "content": "Logout successful\n"
  },
  {
    "path": "tests/tasks/TestIpfsUpload.test_ipfs_upload_http_error.approved.txt",
    "content": "HTTP Request: POST https://api.pinata.cloud/pinning/pinFileToIPFS \"HTTP/1.1 500 Internal Server Error\"\nDEBUG: Pinata error: 500. {\"ok\":false,\"cid\":\"test\"}\nError: PinataInternalServerError('Pinata error: 500')\n"
  },
  {
    "path": "tests/tasks/TestIpfsUpload.test_ipfs_upload_successful.approved.txt",
    "content": "HTTP Request: POST https://api.pinata.cloud/pinning/pinFileToIPFS \"HTTP/1.1 200 OK\"\nFile uploaded successfully!\n CID: test\n"
  },
  {
    "path": "tests/tasks/TestListAliases.test_wallet_list_aliases_not_found.approved.txt",
    "content": "DEBUG: Failed to get alias keys from keyring\nYou don't have any aliases stored yet. Create one using `algokit task wallet add`.\n"
  },
  {
    "path": "tests/tasks/TestListAliases.test_wallet_list_aliases_successful.approved.txt",
    "content": "[\n  {\n    \"alias\": \"test_alias_1\",\n    \"address\": \"test_address_1\",\n    \"has_private_key\": false\n  },\n  {\n    \"alias\": \"test_alias_2\",\n    \"address\": \"test_address_2\",\n    \"has_private_key\": true\n  }\n]\n"
  },
  {
    "path": "tests/tasks/TestRemoveAlias.test_wallet_remove_alias_generic_error.approved.txt",
    "content": "DEBUG: `test_alias` does not exist\nError: Alias `test_alias` does not exist.\n"
  },
  {
    "path": "tests/tasks/TestRemoveAlias.test_wallet_remove_alias_not_found.approved.txt",
    "content": "DEBUG: `test_alias` does not exist\nError: Alias `test_alias` does not exist.\n"
  },
  {
    "path": "tests/tasks/TestRemoveAlias.test_wallet_remove_alias_successful.approved.txt",
    "content": "🚨 This is a destructive action that will remove the `test_alias` alias. Are you sure? (y, n) [n]: y\nAlias `test_alias` removed successfully.\n"
  },
  {
    "path": "tests/tasks/TestResetAliases.test_wallet_reset_aliases_generic_error.approved.txt",
    "content": "🚨 This is a destructive action that will clear all aliases. Are you sure? (y, n) [n]: y\nError: Failed to remove alias test_alias_1\n"
  },
  {
    "path": "tests/tasks/TestResetAliases.test_wallet_reset_aliases_not_found.approved.txt",
    "content": "DEBUG: Failed to get alias keys from keyring\nWarning: No aliases available to reset.\n"
  },
  {
    "path": "tests/tasks/TestResetAliases.test_wallet_reset_aliases_successful.approved.txt",
    "content": "🚨 This is a destructive action that will clear all aliases. Are you sure? (y, n) [n]: y\nAll aliases have been cleared.\n"
  },
  {
    "path": "tests/tasks/__init__.py",
    "content": ""
  },
  {
    "path": "tests/tasks/conftest.py",
    "content": "from algokit_utils import SigningAccount\nfrom algosdk import transaction\n\nDUMMY_SUGGESTED_PARAMS = transaction.SuggestedParams(  # type: ignore[no-untyped-call]\n    fee=0,\n    first=33652328,\n    last=33653328,\n    gen=\"testnet-v1.0\",\n    gh=\"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n    min_fee=1000,\n    flat_fee=True,\n    consensus_version=\"https://github.com/algorandfoundation/specs/tree/abd3d4823c6f77349fc04c3af7b1e99fe4df699f\",\n)\nDUMMY_ACCOUNT = SigningAccount(\n    private_key=\"iLsfFiRDwi0ijFdvdyO1PGkYxooOanbJSgpJ4pPKjKZluk70pvuPX4dYD1Jir85uZP+AImM/8SBmdPRpBSTFAg==\",\n    address=\"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n)\nDUMMY_TEAL_FILE_CONTENT = \"\"\"\n#pragma version 8\nintcblock 0 1\nbytecblock 0x\ntxn NumAppArgs\nintc_0 // 0\n==\nbnz main_l4\ntxna ApplicationArgs 0\npushbytes 0x02bece11 // \"hello(string)string\"\n==\nbnz main_l3\nerr\nmain_l3:\ntxn OnCompletion\nintc_0 // NoOp\n==\ntxn ApplicationID\nintc_0 // 0\n!=\n&&\nassert\ncallsub hellocaster_3\nintc_1 // 1\nreturn\nmain_l4:\ntxn OnCompletion\nintc_0 // NoOp\n==\nbnz main_l10\ntxn OnCompletion\npushint 4 // UpdateApplication\n==\nbnz main_l9\ntxn OnCompletion\npushint 5 // DeleteApplication\n==\nbnz main_l8\nerr\nmain_l8:\ntxn ApplicationID\nintc_0 // 0\n!=\nassert\ncallsub delete_1\nintc_1 // 1\nreturn\nmain_l9:\ntxn ApplicationID\nintc_0 // 0\n!=\nassert\ncallsub update_0\nintc_1 // 1\nreturn\nmain_l10:\ntxn ApplicationID\nintc_0 // 0\n==\nassert\nintc_1 // 1\nreturn\n\n// update\nupdate_0:\nproto 0 0\ntxn Sender\nglobal CreatorAddress\n==\n// unauthorized\nassert\nintc_0 // 0\nreturn\n\n// delete\ndelete_1:\nproto 0 0\ntxn Sender\nglobal CreatorAddress\n==\n// unauthorized\nassert\nintc_0 // 0\n// Check app is deletable\nassert\nretsub\n\n// hello\nhello_2:\nproto 1 1\nbytec_0 // \"\"\npushbytes 0x48656c6c6f2c20 // \"Hello, \"\nframe_dig -1\nextract 2 0\nconcat\nframe_bury 0\nframe_dig 0\nlen\nitob\nextract 6 0\nframe_dig 
0\nconcat\nframe_bury 0\nretsub\n\n// hello_caster\nhellocaster_3:\nproto 0 0\nbytec_0 // \"\"\ndup\ntxna ApplicationArgs 1\nframe_bury 1\nframe_dig 1\ncallsub hello_2\nframe_bury 0\npushbytes 0x151f7c75 // 0x151f7c75\nframe_dig 0\nconcat\nlog\nretsub\n\"\"\"\n"
  },
  {
    "path": "tests/tasks/test_analyze.py",
    "content": "import re\nfrom collections.abc import Generator\nfrom pathlib import Path\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom algokit.cli.tasks.analyze import has_template_vars\nfrom tests.tasks.conftest import DUMMY_TEAL_FILE_CONTENT\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\nfrom tests.utils.proc_mock import ProcMock\n\n\ndef _format_snapshot(output: str, targets: list[str], replacement: str = \"dummy\") -> str:\n    from algokit.core.utils import get_base_python_path\n\n    python_base_path = get_base_python_path()\n    if python_base_path is None:\n        pytest.fail(\"Python base detection failed, this should work (even in CI)\")\n\n    output = output.replace(python_base_path, \"python_base_path\")\n\n    for target in targets:\n        output = output.replace(target, replacement)\n\n    # If output contains more than one new line trim them to have at most one whitespace in between\n\n    output = re.sub(r\"^(pipx:|DEBUG: pipx:).*\", \"\", output, flags=re.MULTILINE)\n    return re.sub(r\"\\n\\s*\\n\", \"\\n\\n\", output)\n\n\ndef _normalize_path(path: Path) -> str:\n    return str(path.absolute()).replace(\"\\\\\", r\"\\\\\")\n\n\n@pytest.fixture(autouse=True)\ndef _disable_animation(mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.utils.animate\", return_value=None)\n\n\n@pytest.fixture(autouse=True)\ndef cwd(tmp_path_factory: pytest.TempPathFactory) -> Generator[Path, None, None]:\n    cwd = tmp_path_factory.mktemp(\"cwd\", numbered=True)\n\n    with (\n        patch(\"algokit.core.tasks.analyze.TEALER_REPORTS_ROOT\", return_value=cwd),\n        patch(\"algokit.core.tasks.analyze.TEALER_SNAPSHOTS_ROOT\", return_value=cwd),\n        patch(\"algokit.core.tasks.analyze.TEALER_DOT_FILES_ROOT\", return_value=cwd),\n    ):\n        yield cwd\n\n\n@pytest.fixture\ndef generate_report_filename_mock() -> Generator[MagicMock, None, 
None]:\n    with patch(\"algokit.cli.tasks.analyze.generate_report_filename\", return_value=\"dummy_report.json\") as mock:\n        yield mock\n\n\n@pytest.mark.usefixtures(\"generate_report_filename_mock\")\ndef test_analyze_single_file(\n    cwd: Path,\n) -> None:\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.write_text(DUMMY_TEAL_FILE_CONTENT)\n    result = invoke(f\"task analyze {_normalize_path(teal_file)} --output {_normalize_path(cwd)}\", input=\"y\\n\", cwd=cwd)\n\n    assert result.exit_code == 1\n    result.output = _format_snapshot(\n        result.output,\n        [\n            str(cwd),\n        ],\n    )\n    verify(result.output)\n\n\ndef test_analyze_multiple_files(\n    cwd: Path,\n    generate_report_filename_mock: MagicMock,\n) -> None:\n    generate_report_filename_mock.side_effect = [f\"dummy_{i}.teal\" for i in range(5)]\n    teal_folder = cwd / \"dummy_contracts\"\n    teal_folder.mkdir()\n    for i in range(5):\n        teal_file = teal_folder / f\"dummy_{i}.teal\"\n        teal_file.write_text(DUMMY_TEAL_FILE_CONTENT)\n    result = invoke(\n        f\"task analyze {_normalize_path(teal_folder)} --output {_normalize_path(cwd)}\", input=\"y\\n\", cwd=cwd\n    )\n\n    assert result.exit_code == 1\n    for i in range(5):\n        result.output = result.output.replace(str(teal_folder / f\"dummy_{i}.teal\"), f\"dummy_contracts/dummy_{i}.teal\")\n    result.output = _format_snapshot(\n        result.output,\n        [\n            str(cwd),\n        ],\n    )\n    verify(result.output)\n\n\ndef test_analyze_multiple_files_recursive(\n    cwd: Path,\n    generate_report_filename_mock: MagicMock,\n) -> None:\n    teal_root_folder = cwd / \"dummy_contracts\"\n    generate_report_filename_mock.side_effect = [teal_root_folder / f\"subfolder_{i}/dummy.teal\" for i in range(5)]\n\n    for i in range(5):\n        teal_folder = teal_root_folder / f\"subfolder_{i}\"\n        teal_folder.mkdir(parents=True)\n        teal_file = teal_folder / 
\"dummy.teal\"\n        teal_file.write_text(DUMMY_TEAL_FILE_CONTENT)\n    result = invoke(\n        f\"task analyze {_normalize_path(teal_root_folder)} --recursive --output {_normalize_path(cwd)}\",\n        input=\"y\\n\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 1\n    for i in range(5):\n        result.output = re.sub(r\"^File: .*\", f\"File:  {i}_dummy.teal\", result.output, flags=re.MULTILINE)\n    result.output = _format_snapshot(\n        result.output,\n        [str(cwd)],\n        \"dummy_file.teal\",\n    )\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"generate_report_filename_mock\")\ndef test_exclude_vulnerabilities(\n    cwd: Path,\n) -> None:\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.write_text(DUMMY_TEAL_FILE_CONTENT)\n    result = invoke(\n        f\"task analyze {_normalize_path(teal_file)} --exclude is-deletable \"\n        f\"--exclude rekey-to --exclude missing-fee-check --output {_normalize_path(cwd)}\",\n        input=\"y\\n\",\n        cwd=cwd,\n    )\n\n    assert result.exit_code == 0\n    result.output = _format_snapshot(result.output, [str(cwd)])\n    verify(result.output)\n\n\ndef test_analyze_skipping_tmpl_vars(\n    cwd: Path,\n) -> None:\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.write_text(\n        DUMMY_TEAL_FILE_CONTENT.replace(\"pushint 4 // UpdateApplication\", \"pushint TMPL_VAR // UpdateApplication\")\n    )\n    result = invoke(f\"task analyze {_normalize_path(teal_file)}\", input=\"y\\n\", cwd=cwd)\n\n    assert result.exit_code == 0\n    result.output = _format_snapshot(result.output, [str(cwd)])\n    verify(result.output)\n\n\ndef test_analyze_commented_tmpl_vars(\n    cwd: Path,\n) -> None:\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.write_text(\n        DUMMY_TEAL_FILE_CONTENT.replace(\"pushint 4 // UpdateApplication\", \"pushint TMPL_VAR // UpdateApplication\")\n    )\n    assert has_template_vars(teal_file)\n\n    
teal_file.write_text(DUMMY_TEAL_FILE_CONTENT.replace(\"pushint 4 // UpdateApplication\", \"pushint 4 // TMPL_VAR\"))\n    assert not has_template_vars(teal_file)\n\n\ndef test_analyze_abort_disclaimer(\n    cwd: Path,\n) -> None:\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.touch()\n    result = invoke(f\"task analyze {_normalize_path(teal_file)} --output {_normalize_path(cwd)}\", input=\"n\\n\", cwd=cwd)\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_analyze_error_in_tealer(\n    cwd: Path,\n    mocker: MockerFixture,\n) -> None:\n    mocker.patch(\"algokit.cli.tasks.analyze.run_tealer\", side_effect=Exception(\"dummy\"))\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.touch()\n    result = invoke(f\"task analyze {_normalize_path(teal_file)} --output {_normalize_path(cwd)}\", input=\"y\\n\", cwd=cwd)\n\n    assert result.exit_code == 1\n    result.output = _format_snapshot(result.output, [str(cwd)])\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"generate_report_filename_mock\")\ndef test_analyze_diff_flag(\n    cwd: Path,\n) -> None:\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.write_text(DUMMY_TEAL_FILE_CONTENT)\n    result = invoke(f\"task analyze {_normalize_path(teal_file)} --output {_normalize_path(cwd)}\", input=\"y\\n\", cwd=cwd)\n    assert result.exit_code == 1\n\n    teal_file.write_text(\"\\n#pragma version 8\\nint 1\\nreturn\\n\")\n    result = invoke(\n        f\"task analyze {_normalize_path(teal_file)} --diff --output {_normalize_path(cwd)}\", input=\"y\\n\", cwd=cwd\n    )\n    assert result.exit_code == 1\n    result.output = _format_snapshot(result.output, [str(cwd)])\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"generate_report_filename_mock\")\ndef test_analyze_diff_flag_missing_old_report(\n    cwd: Path,\n) -> None:\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.write_text(DUMMY_TEAL_FILE_CONTENT)\n    result = invoke(\n        f\"task analyze 
{_normalize_path(teal_file)} --diff --output {_normalize_path(cwd)}\", input=\"y\\n\", cwd=cwd\n    )\n    assert result.exit_code == 1\n    result.output = _format_snapshot(result.output, [str(cwd)])\n    verify(result.output)\n\n\ndef test_analyze_error_no_pipx(cwd: Path, mocker: MockerFixture, proc_mock: ProcMock) -> None:\n    proc_mock.should_fail_on(\"tealer --version\")\n    mocker.patch(\"algokit.core.utils.get_candidate_pipx_commands\", return_value=[])\n\n    teal_file = cwd / \"dummy.teal\"\n    teal_file.touch()\n    result = invoke(f\"task analyze {_normalize_path(teal_file)}\", input=\"y\\n\", cwd=cwd)\n\n    assert result.exit_code == 1\n    result.output = _format_snapshot(result.output, [str(cwd)])\n    verify(result.output)\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_abort_disclaimer.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: n\nAborted!\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_diff_flag.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_report.json detect --contracts {current_working_directory}/dummy.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_report.json\nERROR: Diff detected in {current_working_directory}/dummy.teal! Please check the content of the snapshot report {current_working_directory}/dummy_report.json against the latest received report at {current_working_directory}/dummy_report.received.json.\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_diff_flag_missing_old_report.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nUnable to provide the diff since {current_working_directory}/dummy.teal report is missing. Please run the task without the --diff flag first.\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_error_in_tealer.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nAn error occurred while analyzing {current_working_directory}/dummy.teal. Please make sure the files supplied are valid TEAL code before trying again.\nAborted!\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_error_no_pipx.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: No such file or directory: tealer\nTealer not found; attempting to install it...\nError: Unable to find pipx install so that `tealer` static analyzer can be installed; please install pipx via https://pypa.github.io/pipx/ and then try `algokit task analyze ...` again.\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_multiple_files.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_0.teal detect --contracts {current_working_directory}/dummy_contracts/dummy_0.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/dummy_0.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_0.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_1.teal detect --contracts {current_working_directory}/dummy_contracts/dummy_1.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/dummy_1.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_1.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_2.teal detect --contracts {current_working_directory}/dummy_contracts/dummy_2.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/dummy_2.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_2.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_3.teal detect --contracts {current_working_directory}/dummy_contracts/dummy_3.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/dummy_3.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_3.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_4.teal detect 
--contracts {current_working_directory}/dummy_contracts/dummy_4.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/dummy_4.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_4.teal\n\nFile: dummy_0.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile: dummy_1.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile: dummy_2.teal\n\nDetector: is-deletable\nImpact: High\nDetails: 
https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile: dummy_3.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile: dummy_4.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths 
(#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nTotal issues:\nHigh: 15\nFinished analyzing 5 files.\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_multiple_files_recursive.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_contracts/subfolder_0/dummy.teal detect --contracts {current_working_directory}/dummy_contracts/subfolder_0/dummy.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/subfolder_0/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_contracts/subfolder_0/dummy.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_contracts/subfolder_1/dummy.teal detect --contracts {current_working_directory}/dummy_contracts/subfolder_1/dummy.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/subfolder_1/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_contracts/subfolder_1/dummy.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_contracts/subfolder_2/dummy.teal detect --contracts {current_working_directory}/dummy_contracts/subfolder_2/dummy.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/subfolder_2/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_contracts/subfolder_2/dummy.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_contracts/subfolder_3/dummy.teal detect --contracts {current_working_directory}/dummy_contracts/subfolder_3/dummy.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading 
contract from file: \"{current_working_directory}/dummy_contracts/subfolder_3/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_contracts/subfolder_3/dummy.teal\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_contracts/subfolder_4/dummy.teal detect --contracts {current_working_directory}/dummy_contracts/subfolder_4/dummy.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy_contracts/subfolder_4/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_contracts/subfolder_4/dummy.teal\n\nFile:  4_dummy.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile:  4_dummy.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: 
High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile:  4_dummy.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile:  4_dummy.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nFile:  4_dummy.teal\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths 
(#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nTotal issues:\nHigh: 15\nFinished analyzing 5 files.\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_single_file.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_report.json detect --contracts {current_working_directory}/dummy.teal' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_report.json\n\nFile: dummy_report.json\n\nDetector: is-deletable\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#deletable-application\nExecution Paths (#Lines):\n2-8->26-30->31-34->35-38->40-45->76-86->46-47\n\nDetector: missing-fee-check\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#missing-fee-field-validation\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nDetector: rekey-to\nImpact: High\nDetails: https://github.com/crytic/tealer/wiki/Detector-Documentation#rekeyable-logicsig\nExecution Paths (#Lines):\n2-8->9-12->14-23->107-114->89-104->115-120->24-25,\n2-8->26-30->31-34->35-38->40-45->76-86->46-47,\n2-8->26-30->56-62\n\nTotal issues:\nHigh: 3\nFinished analyzing 1 files.\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_analyze_skipping_tmpl_vars.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nWarning: Skipping {current_working_directory}/dummy.teal due to template variables. Substitute them before scanning.\n"
  },
  {
    "path": "tests/tasks/test_analyze.test_exclude_vulnerabilities.approved.txt",
    "content": "DEBUG: Running 'tealer --version' in '{current_working_directory}'\nDEBUG: tealer: 0.1.2\nWarning: This task uses `tealer` to suggest improvements for your TEAL programs, but remember to always test your smart contracts code, follow modern software engineering practices and use the guidelines for smart contract development. This should not be used as a substitute for an actual audit. Do you understand? [Y/n]: y\nDEBUG: Running 'tealer --json {current_working_directory}/dummy_report.json detect --contracts {current_working_directory}/dummy.teal --exclude is-deletable, missing-fee-check, rekey-to' in '{current_working_directory}'\nDEBUG: tealer: Reading contract from file: \"{current_working_directory}/dummy.teal\"\nDEBUG: tealer: json output is written to {current_working_directory}/dummy_report.json\n"
  },
  {
    "path": "tests/tasks/test_asset.py",
    "content": "import json\n\nfrom algokit_utils import BulkAssetOptInOutResult\nfrom algosdk import account, mnemonic\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef _generate_account() -> tuple[str, str]:\n    pk, addr = account.generate_account()  # type: ignore[no-untyped-call]\n    return pk, addr\n\n\ndef _get_mnemonic_from_private_key(private_key: str) -> str:\n    return str(mnemonic.from_private_key(private_key))  # type: ignore[no-untyped-call]\n\n\ndef test_opt_in_no_args() -> None:\n    result = invoke(\"task opt-in\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_opt_in_invalid_network() -> None:\n    _, addr = _generate_account()\n    asset_id = 123\n    result = invoke(f\"task opt-in {addr} {asset_id}  --network invalid-network\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_opt_in_to_assets_from_account_address_successful(mocker: MockerFixture) -> None:\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.asset.bulk_opt_in.return_value = [\n        BulkAssetOptInOutResult(asset_id=123, transaction_id=\"dummy_txn_id\")\n    ]\n    algorand_mock = mocker.patch(\"algokit.cli.tasks.assets.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.assets.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.assets.validate_account_balance_to_opt_in\")\n    dummy_account_pk, dummy_account_address = _generate_account()\n    asset_id = 123\n    result = invoke(\n        f\"task opt-in -a {dummy_account_address} {asset_id} --network localnet\",\n        input=_get_mnemonic_from_private_key(dummy_account_pk),\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_opt_in_of_assets_from_account_alias_successful(mocker: MockerFixture, mock_keyring: dict[str, 
str]) -> None:\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.asset.bulk_opt_in.return_value = [\n        BulkAssetOptInOutResult(asset_id=123, transaction_id=\"dummy_txn_id\")\n    ]\n    algorand_mock = mocker.patch(\"algokit.cli.tasks.assets.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.assets.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.assets.validate_account_balance_to_opt_in\")\n    dummy_account_pk, dummy_account_address = _generate_account()\n\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n            \"address\": dummy_account_address,\n            \"private_key\": dummy_account_pk,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n\n    result = invoke(\n        f\"task opt-in -a {alias_name} {123} --network localnet\",\n        input=_get_mnemonic_from_private_key(dummy_account_pk),\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_opt_in_to_assets_from_account_address_failed(mocker: MockerFixture) -> None:\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.asset.bulk_opt_in.side_effect = Exception(\"dummy error\")\n    algorand_mock = mocker.patch(\"algokit.cli.tasks.assets.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.assets.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.assets.validate_account_balance_to_opt_in\")\n    dummy_account_pk, dummy_account_address = _generate_account()\n    asset_id = 123\n    result = invoke(\n        f\"task opt-in -a {dummy_account_address} {asset_id} --network localnet\",\n        input=_get_mnemonic_from_private_key(dummy_account_pk),\n    )\n\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_opt_out_no_args() -> None:\n    result = invoke(\"task opt-out\")\n\n    assert 
result.exit_code != 0\n    verify(result.output)\n\n\ndef test_opt_out_invalid_network() -> None:\n    _, addr = _generate_account()\n    asset_id = 123\n    result = invoke(f\"task opt-out {asset_id} {addr}  --network invalid-network\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_opt_out_of_assets_from_account_address_successful(mocker: MockerFixture) -> None:\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.asset.bulk_opt_out.return_value = [\n        BulkAssetOptInOutResult(asset_id=123, transaction_id=\"dummy_txn_id\")\n    ]\n    algorand_mock = mocker.patch(\"algokit.cli.tasks.assets.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.assets.validate_address\")\n    dummy_account_pk, dummy_account_address = _generate_account()\n    asset_id = 123\n    result = invoke(\n        f\"task opt-out -a {dummy_account_address} {asset_id} --network localnet\",\n        input=_get_mnemonic_from_private_key(dummy_account_pk),\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_opt_out_of_all_assets_from_account_address_successful(mocker: MockerFixture) -> None:\n    dummy_account_info = {\"assets\": [{\"asset-id\": 1, \"amount\": 0}]}\n    mocker.patch(\"algokit.cli.tasks.assets.get_account_info\", return_value=dummy_account_info)\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.asset.bulk_opt_out.return_value = [\n        BulkAssetOptInOutResult(asset_id=123, transaction_id=\"dummy_txn_id\")\n    ]\n    algorand_mock = mocker.patch(\"algokit.cli.tasks.assets.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.assets.validate_address\")\n    dummy_account_pk, dummy_account_address = _generate_account()\n    result = invoke(\n        f\"task opt-out -a {dummy_account_address} --network localnet --all\",\n        input=_get_mnemonic_from_private_key(dummy_account_pk),\n    )\n\n    assert 
result.exit_code == 0\n    verify(result.output)\n\n\ndef test_opt_out_of_assets_from_account_alias_successful(mocker: MockerFixture, mock_keyring: dict[str, str]) -> None:\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.asset.bulk_opt_out.return_value = [\n        BulkAssetOptInOutResult(asset_id=123, transaction_id=\"dummy_txn_id\")\n    ]\n    algorand_mock = mocker.patch(\"algokit.cli.tasks.assets.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.assets.validate_address\")\n    dummy_account_pk, dummy_account_address = _generate_account()\n\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n            \"address\": dummy_account_address,\n            \"private_key\": dummy_account_pk,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n\n    result = invoke(\n        f\"task opt-out -a {alias_name} 123 --network localnet\",\n        input=_get_mnemonic_from_private_key(dummy_account_pk),\n    )\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_opt_out_assets_from_account_address_failed(mocker: MockerFixture) -> None:\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.asset.bulk_opt_out.side_effect = Exception(\"dummy error\")\n    algorand_mock = mocker.patch(\"algokit.cli.tasks.assets.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.assets.validate_address\")\n    dummy_account_pk, dummy_account_address = _generate_account()\n    asset_id = 123\n    result = invoke(\n        f\"task opt-out -a {dummy_account_address} {asset_id} --network localnet\",\n        input=_get_mnemonic_from_private_key(dummy_account_pk),\n    )\n\n    assert result.exit_code == 1\n    verify(result.output)\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_in_invalid_network.approved.txt",
    "content": "Usage: algokit task opt-in [OPTIONS] ASSET_IDS...\nTry 'algokit task opt-in -h' for help.\n\nError: Invalid value for '-n' / '--network': 'invalid-network' is not one of 'localnet', 'testnet', 'mainnet'.\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_in_no_args.approved.txt",
    "content": "Usage: algokit task opt-in [OPTIONS] ASSET_IDS...\nTry 'algokit task opt-in -h' for help.\n\nError: Missing argument 'ASSET_IDS...'.\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_in_of_assets_from_account_alias_successful.approved.txt",
    "content": "Performing opt-in. This may take a few seconds...\nSuccessfully performed opt-in.\nCheck opt-in status for asset 123 at: https://explore.algokit.io/localnet/asset/dummy_txn_id\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_in_to_assets_from_account_address_failed.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nPerforming opt-in. This may take a few seconds...\nDEBUG: dummy error\nError: Failed to perform opt-in\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_in_to_assets_from_account_address_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nPerforming opt-in. This may take a few seconds...\nSuccessfully performed opt-in.\nCheck opt-in status for asset 123 at: https://explore.algokit.io/localnet/asset/dummy_txn_id\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_out_assets_from_account_address_failed.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nPerforming opt-out. This may take a few seconds...\nDEBUG: dummy error\nError: Failed to perform opt-out.\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_out_invalid_network.approved.txt",
    "content": "Usage: algokit task opt-out [OPTIONS] [ASSET_IDS]...\nTry 'algokit task opt-out -h' for help.\n\nError: Invalid value for '-n' / '--network': 'invalid-network' is not one of 'localnet', 'testnet', 'mainnet'.\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_out_no_args.approved.txt",
    "content": "Usage: algokit task opt-out [OPTIONS] [ASSET_IDS]...\nTry 'algokit task opt-out -h' for help.\n\nError: Missing option '--account' / '-a'.\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_out_of_all_assets_from_account_address_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nPerforming opt-out. This may take a few seconds...\nSuccessfully performed opt-out.\nCheck opt-in status for asset 123 at: https://explore.algokit.io/localnet/transaction/dummy_txn_id\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_out_of_assets_from_account_address_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nPerforming opt-out. This may take a few seconds...\nSuccessfully performed opt-out.\nCheck opt-in status for asset 123 at: https://explore.algokit.io/localnet/transaction/dummy_txn_id\n"
  },
  {
    "path": "tests/tasks/test_asset.test_opt_out_of_assets_from_account_alias_successful.approved.txt",
    "content": "Performing opt-out. This may take a few seconds...\nSuccessfully performed opt-out.\nCheck opt-in status for asset 123 at: https://explore.algokit.io/localnet/transaction/dummy_txn_id\n"
  },
  {
    "path": "tests/tasks/test_ipfs.py",
    "content": "import pytest\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.tasks.ipfs import ALGOKIT_PINATA_TOKEN_KEY\nfrom tests.utils.approvals import TokenScrubber, verify\nfrom tests.utils.click_invoker import invoke\n\nscrubber = TokenScrubber({})\n\n\n@pytest.fixture(autouse=True)\ndef _disable_animation(mocker: MockerFixture) -> None:\n    mocker.patch(\"algokit.core.utils.animate\", return_value=None)\n\n\nclass TestIpfsLogin:\n    def test_ipfs_login_exists(self, mock_keyring: dict[str, str]) -> None:\n        mock_keyring[ALGOKIT_PINATA_TOKEN_KEY] = \"test\"\n\n        result = invoke(\"task ipfs login\", input=\"test\\ntest\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_ipfs_login_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        mock_keyring[ALGOKIT_PINATA_TOKEN_KEY] = None\n        result = invoke(\"task ipfs login\", input=\"test\\ntest\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n        assert mock_keyring[ALGOKIT_PINATA_TOKEN_KEY] == \"test\"\n\n\nclass TestIpfsLogout:\n    def test_ipfs_logout(self, mock_keyring: dict[str, str | None]) -> None:\n        mock_keyring[ALGOKIT_PINATA_TOKEN_KEY] = \"test\"\n        result = invoke(\"task ipfs logout\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n        assert mock_keyring.get(ALGOKIT_PINATA_TOKEN_KEY) is None\n\n\nclass TestIpfsUpload:\n    def test_ipfs_upload_successful(\n        self, tmp_path_factory: pytest.TempPathFactory, httpx_mock: HTTPXMock, mock_keyring: dict[str, str | None]\n    ) -> None:\n        mock_keyring[ALGOKIT_PINATA_TOKEN_KEY] = \"test\"\n        cwd = tmp_path_factory.mktemp(\"cwd\")\n        (cwd / \"dummy.txt\").write_text(\"dummy text to upload\")\n\n        httpx_mock.add_response(status_code=200, json={\"ok\": True, \"IpfsHash\": \"test\"})\n       
 result = invoke(\"task ipfs upload --file dummy.txt\", cwd=cwd)\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output, scrubber=scrubber)\n\n    def test_ipfs_not_logged_in(\n        self, tmp_path_factory: pytest.TempPathFactory, mock_keyring: dict[str, str | None]\n    ) -> None:\n        mock_keyring[ALGOKIT_PINATA_TOKEN_KEY] = None\n        cwd = tmp_path_factory.mktemp(\"cwd\")\n        (cwd / \"dummy.txt\").write_text(\"dummy text to upload\")\n\n        result = invoke(\"task ipfs upload --file dummy.txt\", cwd=cwd)\n\n        # Assert\n        assert result.exit_code == 1\n        assert \"You are not logged in\" in result.output\n\n    def test_ipfs_upload_http_error(\n        self,\n        tmp_path_factory: pytest.TempPathFactory,\n        httpx_mock: HTTPXMock,\n        mock_keyring: dict[str, str | None],\n    ) -> None:\n        mock_keyring[ALGOKIT_PINATA_TOKEN_KEY] = \"test\"\n        cwd = tmp_path_factory.mktemp(\"cwd\")\n        (cwd / \"dummy.txt\").write_text(\"dummy text to upload\")\n\n        httpx_mock.add_response(status_code=500, json={\"ok\": False, \"cid\": \"test\"})\n        result = invoke(\"task ipfs upload --file dummy.txt\", cwd=cwd)\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output, scrubber=scrubber)\n"
  },
  {
    "path": "tests/tasks/test_mint.py",
    "content": "import json\nimport re\nfrom pathlib import Path\n\nimport click\nimport pytest\nfrom algosdk.mnemonic import from_private_key\nfrom approvaltests.namer import NamerFactory\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.cli.tasks.mint import _get_and_validate_asset_name, _get_and_validate_decimals\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.tasks.conftest import DUMMY_ACCOUNT, DUMMY_SUGGESTED_PARAMS\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\n@pytest.mark.parametrize((\"account_type\", \"mutability\"), [(\"alias\", \"mutable\"), (\"address\", \"immutable\")])\n@pytest.mark.parametrize(\"network\", [\"localnet\", \"testnet\", \"mainnet\"])\ndef test_mint_token_successful(\n    *,\n    mocker: MockerFixture,\n    tmp_path_factory: pytest.TempPathFactory,\n    mock_keyring: dict[str, str | int],\n    account_type: str,\n    mutability: str,\n    network: str,\n) -> None:\n    # Arrange\n    is_mutable = mutability == \"mutable\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    account = \"\"\n    prompt_input = None\n    if account_type == \"address\":\n        account = DUMMY_ACCOUNT.address\n        prompt_input = from_private_key(DUMMY_ACCOUNT.private_key)  # type: ignore[no-untyped-call]\n    else:\n        account = \"my_alias\"\n        mock_keyring[account] = json.dumps(\n            {\n                \"alias\": account,\n                \"address\": DUMMY_ACCOUNT.address,\n                \"private_key\": DUMMY_ACCOUNT.private_key,\n            }\n        )\n        mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([account])\n    (cwd / \"image.png\").touch()\n\n    mocker.patch(\n        \"algokit.core.tasks.mint.mint.upload_to_pinata\",\n        side_effect=[\n            \"bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\",\n            
\"bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\",\n        ],\n    )\n    mocker.patch(\"algokit.core.tasks.mint.mint.wait_for_confirmation\", return_value={\"asset-index\": 123})\n    mocker.patch(\n        \"algokit.cli.tasks.mint.get_pinata_jwt\",\n        return_value=\"dummy_key\",\n    )\n    mocker.patch(\n        \"algokit.cli.tasks.mint.validate_balance\",\n    )\n    algod_mock = mocker.MagicMock()\n    algod_mock.send_transaction.return_value = \"dummy_tx_id\"\n    algod_mock.suggested_params.return_value = DUMMY_SUGGESTED_PARAMS\n    mocker.patch(\"algokit.cli.tasks.mint.load_algod_client\", return_value=algod_mock)\n\n    # Act\n    result = invoke(\n        f\"\"\"task mint --creator {account} --name test --unit tst --total 1 --decimals 0\n        --image image.png -n {network} --{\"mutable\" if is_mutable else \"immutable\"} --nft\"\"\",\n        input=prompt_input,\n        cwd=cwd,\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    if is_mutable:\n        # Reserve value must be set since its a mutable asset\n        assert re.search(r'\"reserve\": \".{58}\"', result.output) is not None, (\n            \"Reserve key not found or addr length is not 58\"\n        )\n    else:\n        assert re.search(r'\"reserve\": \"\"', result.output) is not None, \"Reserve key must be empty\"\n    verify(result.output, options=NamerFactory.with_parameters(account_type, is_mutable, network))\n\n\n@pytest.mark.parametrize(\"decimals\", [\"decimals_given_params\", \"no_decimals_given\"])\ndef test_mint_token_successful_on_decimals(\n    *,\n    mocker: MockerFixture,\n    tmp_path_factory: pytest.TempPathFactory,\n    mock_keyring: dict[str, str | int],\n    decimals: str,\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    if decimals == \"no_decimals_given\":\n        include_decimals_argument = False\n        prompt_input = \"2\"\n    elif decimals == \"decimals_given_params\":\n        include_decimals_argument = 
True\n        prompt_input = None\n\n    account = \"my_alias\"\n    mock_keyring[account] = json.dumps(\n        {\n            \"alias\": account,\n            \"address\": DUMMY_ACCOUNT.address,\n            \"private_key\": DUMMY_ACCOUNT.private_key,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([account])\n\n    (cwd / \"image.png\").touch()\n\n    mocker.patch(\n        \"algokit.core.tasks.mint.mint.upload_to_pinata\",\n        side_effect=[\n            \"bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\",\n            \"bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\",\n        ],\n    )\n    mocker.patch(\"algokit.core.tasks.mint.mint.wait_for_confirmation\", return_value={\"asset-index\": 123})\n    mocker.patch(\n        \"algokit.cli.tasks.mint.get_pinata_jwt\",\n        return_value=\"dummy_key\",\n    )\n    mocker.patch(\n        \"algokit.cli.tasks.mint.validate_balance\",\n    )\n    algod_mock = mocker.MagicMock()\n    algod_mock.send_transaction.return_value = \"dummy_tx_id\"\n    algod_mock.suggested_params.return_value = DUMMY_SUGGESTED_PARAMS\n    mocker.patch(\"algokit.cli.tasks.mint.load_algod_client\", return_value=algod_mock)\n\n    # Act\n    result = invoke(\n        f\"\"\"task mint --creator {account} --name test --unit tst --total 100\n        {\"--decimals 2 \" if include_decimals_argument else \"\"}\n        --image image.png -n localnet --mutable --nft\"\"\",\n        input=prompt_input,\n        cwd=cwd,\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output, options=NamerFactory.with_parameters(decimals))\n\n\ndef test_mint_token_pure_fractional_nft_ft_validation(\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    # Arrange\n    network = \"localnet\"\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    account = DUMMY_ACCOUNT.address\n    prompt_input = from_private_key(DUMMY_ACCOUNT.private_key)  # type: ignore[no-untyped-call]\n   
 (cwd / \"image.png\").touch()\n\n    # Act\n    nft_result = invoke(\n        f\"\"\"task mint --creator {account} --name test --unit tst --total 222 --decimals 12\n        --image image.png -n {network} --mutable --nft\"\"\",\n        input=prompt_input,\n        cwd=cwd,\n    )\n\n    # Assert\n    assert nft_result.exit_code == 1\n\n\ndef test_mint_token_pinata_error(\n    mocker: MockerFixture,\n    httpx_mock: HTTPXMock,\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    account = \"\"\n    account = DUMMY_ACCOUNT.address\n    prompt_input = from_private_key(DUMMY_ACCOUNT.private_key)  # type: ignore[no-untyped-call]\n    (cwd / \"image.png\").touch()\n    httpx_mock.add_response(status_code=403, json={\"ok\": False})\n\n    mocker.patch(\n        \"algokit.cli.tasks.mint.get_pinata_jwt\",\n        return_value=\"dummy_key\",\n    )\n    mocker.patch(\n        \"algokit.cli.tasks.mint.validate_balance\",\n    )\n    algod_mock = mocker.MagicMock()\n    mocker.patch(\"algokit.cli.tasks.mint.load_algod_client\", return_value=algod_mock)\n\n    # Act\n    result = invoke(\n        f\"\"\"task mint --creator {account} --name test --unit tst --total 1 --decimals 0\n        --image image.png -n localnet --mutable --nft\"\"\",\n        input=prompt_input,\n        cwd=cwd,\n    )\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_mint_token_no_pinata_jwt_error(\n    mocker: MockerFixture,\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    account = \"\"\n    account = DUMMY_ACCOUNT.address\n    prompt_input = from_private_key(DUMMY_ACCOUNT.private_key)  # type: ignore[no-untyped-call]\n    (cwd / \"image.png\").touch()\n\n    mocker.patch(\n        \"algokit.cli.tasks.mint.get_pinata_jwt\",\n        return_value=None,\n    )\n    # Act\n    result = invoke(\n        f\"\"\"task mint 
--creator {account} --name test --unit tst --total 1 --decimals 0\n        --image image.png -n localnet --mutable --nft\"\"\",\n        cwd=cwd,\n        input=prompt_input,\n    )\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_mint_token_acfg_token_metadata_mismatch_on_name(\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"metadata.json\").write_text(\n        \"\"\"\n        {\n        \"name\": \"test2\",\n        \"decimals\": 2,\n        \"description\": \"Stars\",\n        \"properties\": {\n            \"author\": \"Al\",\n            \"traits\": {\n            \"position\": \"center\",\n            \"colors\": 4\n            }\n        }\n        }\n        \"\"\"\n    )\n    context = click.Context(click.Command(\"mint\"))\n    context.params[\"token_metadata_path\"] = Path(cwd / \"metadata.json\")\n    param = click.Option([\"--name\"])\n    name = \"test\"\n\n    with pytest.raises(\n        click.BadParameter, match=\"Token name in metadata JSON must match CLI argument providing token name!\"\n    ):\n        _get_and_validate_asset_name(context, param, name)\n\n\ndef test_mint_token_acfg_token_metadata_mismatch_on_decimals(\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"metadata.json\").write_text(\n        \"\"\"\n        {\n        \"name\": \"test2\",\n        \"decimals\": 2,\n        \"description\": \"Stars\",\n        \"properties\": {\n            \"author\": \"Al\",\n            \"traits\": {\n            \"position\": \"center\",\n            \"colors\": 4\n            }\n        }\n        }\n        \"\"\"\n    )\n    context = click.Context(click.Command(\"mint\"))\n    context.params[\"token_metadata_path\"] = Path(cwd / \"metadata.json\")\n    param = click.Option([\"--decimals\"])\n    decimals = 0\n\n    with pytest.raises(\n        click.BadParameter, 
match=\"The value for decimals in the metadata JSON must match the decimals argument\"\n    ):\n        _get_and_validate_decimals(context, param, decimals)\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_acfg_token_metadata_mismatch.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nDEBUG: Token name in metadata JSON must match CLI argument providing token name!\nError: Failed to mint the asset!\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_generic_error.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_no_pinata_jwt_error.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nError: You are not logged in! Please login using `algokit task ipfs login`.\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_pinata_error.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nUploading image to pinata...\nHTTP Request: POST https://api.pinata.cloud/pinning/pinFileToIPFS \"HTTP/1.1 403 Forbidden\"\nDEBUG: Pinata error: 403. {\"ok\":false}\nError: PinataForbiddenError('Pinata error: 403')\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful.address.False.localnet.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nUploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"ipfs://bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe#arc3\",\n    \"manager\": \"\",\n    \"reserve\": \"\",\n    \"total\": 1,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 0,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/localnet/asset/123\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful.address.False.mainnet.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nUploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"ipfs://bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe#arc3\",\n    \"manager\": \"\",\n    \"reserve\": \"\",\n    \"total\": 1,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 0,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/mainnet/asset/123\nCheck transaction status at: https://explore.algokit.io/mainnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful.address.False.testnet.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nUploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"ipfs://bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe#arc3\",\n    \"manager\": \"\",\n    \"reserve\": \"\",\n    \"total\": 1,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 0,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/testnet/asset/123\nCheck transaction status at: https://explore.algokit.io/testnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful.alias.True.localnet.approved.txt",
    "content": "Uploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"template-ipfs://{ipfscid:1:raw:reserve:sha2-256}#arc3\",\n    \"manager\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"reserve\": \"WNQLRZXLXINUJI6X5EOBU5K5FJ6VOERJ4SVGZTMZGLHA25KSLVY55WGVYQ\",\n    \"total\": 1,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 0,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/localnet/asset/123\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful.alias.True.mainnet.approved.txt",
    "content": "Uploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"template-ipfs://{ipfscid:1:raw:reserve:sha2-256}#arc3\",\n    \"manager\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"reserve\": \"WNQLRZXLXINUJI6X5EOBU5K5FJ6VOERJ4SVGZTMZGLHA25KSLVY55WGVYQ\",\n    \"total\": 1,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 0,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/mainnet/asset/123\nCheck transaction status at: https://explore.algokit.io/mainnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful.alias.True.testnet.approved.txt",
    "content": "Uploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"template-ipfs://{ipfscid:1:raw:reserve:sha2-256}#arc3\",\n    \"manager\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"reserve\": \"WNQLRZXLXINUJI6X5EOBU5K5FJ6VOERJ4SVGZTMZGLHA25KSLVY55WGVYQ\",\n    \"total\": 1,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 0,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/testnet/asset/123\nCheck transaction status at: https://explore.algokit.io/testnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful_on_decimals.decimals_given_params.approved.txt",
    "content": "Uploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"template-ipfs://{ipfscid:1:raw:reserve:sha2-256}#arc3\",\n    \"manager\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"reserve\": \"WNQLRZXLXINUJI6X5EOBU5K5FJ6VOERJ4SVGZTMZGLHA25KSLVY55WGVYQ\",\n    \"total\": 100,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 2,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/localnet/asset/123\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_mint.test_mint_token_successful_on_decimals.no_decimals_given.approved.txt",
    "content": "Provide the asset decimals [0]: 2\nUploading image to pinata...\nImage uploaded to pinata: ipfs://bafkreifax6dswcxk4us2am3jxhd3swxew32oreaxzol7dnnqzhieepqg2y\nUploading metadata to pinata...\nMetadata uploaded to pinata: bafkreiftmc4on252dnckhv7jdqnhkxjkpvlrekpevjwm3gjszygxkus5oe\nDEBUG: Asset config params: {\n    \"sender\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"unit_name\": \"tst\",\n    \"asset_name\": \"test\",\n    \"url\": \"template-ipfs://{ipfscid:1:raw:reserve:sha2-256}#arc3\",\n    \"manager\": \"MW5E55FG7OHV7B2YB5JGFL6ONZSP7ABCMM77CIDGOT2GSBJEYUBOF3UYKA\",\n    \"reserve\": \"WNQLRZXLXINUJI6X5EOBU5K5FJ6VOERJ4SVGZTMZGLHA25KSLVY55WGVYQ\",\n    \"total\": 100,\n    \"freeze\": \"\",\n    \"clawback\": \"\",\n    \"note\": \"\",\n    \"decimals\": 2,\n    \"default_frozen\": false,\n    \"lease\": \"\",\n    \"rekey_to\": \"\",\n    \"strict_empty_address_check\": false\n}\n\nSuccessfully minted the asset!\nBrowse your asset at: https://explore.algokit.io/localnet/asset/123\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_nfd_lookup.py",
    "content": "import algosdk\nfrom pytest_httpx import HTTPXMock\n\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef test_nfd_lookup_by_domain_success(httpx_mock: HTTPXMock) -> None:\n    # Arrange\n    httpx_mock.add_response(\n        url=\"https://api.nf.domains/nfd/dummy.algo?view=brief&poll=false\",\n        json={\n            \"name\": \"dummy.algo\",\n            \"owner\": \"A\" * 58,\n            \"depositAccount\": \"A\" * 58,\n            \"properties\": {},\n        },\n    )\n\n    # Act\n    result = invoke(\"task nfd-lookup dummy.algo\")\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_nfd_lookup_by_address_success(httpx_mock: HTTPXMock) -> None:\n    # Arrange\n    _, dummy_wallet = algosdk.account.generate_account()  # type: ignore[no-untyped-call]\n    httpx_mock.add_response(\n        url=f\"https://api.nf.domains/nfd/lookup?address={dummy_wallet}&view=thumbnail&allowUnverified=false\",\n        json={\n            dummy_wallet: {\n                \"appID\": 222222222,\n                \"state\": \"owned\",\n                \"timeChanged\": \"2022-02-02\",\n                \"depositAccount\": \"A\" * 58,\n                \"name\": \"dummy.algo\",\n                \"owner\": \"A\" * 58,\n                \"properties\": {},\n                \"caAlgo\": [\"A\" * 58],\n            }\n        },\n    )\n\n    # Act\n    result = invoke(f\"task nfd-lookup {dummy_wallet}\")\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output.replace(dummy_wallet, \"A\" * 58))\n\n\ndef test_nfd_lookup_error(httpx_mock: HTTPXMock) -> None:\n    # Arrange\n    httpx_mock.add_response(\n        url=\"https://api.nf.domains/nfd/dummy.algo?view=brief&poll=false\",\n        status_code=400,\n        json={\"message\": \"Invalid request\"},\n    )\n\n    # Act\n    result = invoke(\"task nfd-lookup dummy.algo\")\n\n    # Assert\n    assert result.exit_code == 1\n  
  assert \"Invalid request\" in result.output\n\n\ndef test_nfd_lookup_invalid_input() -> None:\n    # Act\n    result = invoke(\"task nfd-lookup dummy\")\n\n    # Assert\n    assert result.exit_code == 1\n    assert \"Invalid input. Must be either a valid NFD domain or an Algorand address.\" in result.output\n"
  },
  {
    "path": "tests/tasks/test_nfd_lookup.test_nfd_lookup_by_address_success.approved.txt",
    "content": "HTTP Request: GET https://api.nf.domains/nfd/lookup?address=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&view=thumbnail&allowUnverified=false \"HTTP/1.1 200 OK\"\ndummy.algo\n"
  },
  {
    "path": "tests/tasks/test_nfd_lookup.test_nfd_lookup_by_domain_success.approved.txt",
    "content": "HTTP Request: GET https://api.nf.domains/nfd/dummy.algo?view=brief&poll=false \"HTTP/1.1 200 OK\"\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.py",
    "content": "import json\n\nimport click\nimport pytest\nfrom algosdk import encoding, transaction\nfrom pytest_mock import MockerFixture\n\nfrom tests.tasks.conftest import DUMMY_ACCOUNT, DUMMY_SUGGESTED_PARAMS\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef _generate_dummy_signed_txn(*, amount: int = 1, encode: bool = False) -> transaction.SignedTransaction | str:\n    unsigned_txn = transaction.PaymentTxn(  # type: ignore[no-untyped-call]\n        DUMMY_ACCOUNT.address, DUMMY_SUGGESTED_PARAMS, DUMMY_ACCOUNT.address, amt=amount\n    )\n    txn = unsigned_txn.sign(DUMMY_ACCOUNT.private_key)  # type: ignore[no-untyped-call]\n\n    if encode:\n        return str(encoding.msgpack_encode(txn))  # type: ignore[no-untyped-call]\n\n    return txn  # type: ignore[no-any-return]\n\n\ndef _generate_dummy_signed_txn_group() -> list[transaction.SignedTransaction]:\n    txns = [\n        transaction.PaymentTxn(DUMMY_ACCOUNT.address, DUMMY_SUGGESTED_PARAMS, DUMMY_ACCOUNT.address, amt=i)  # type: ignore[no-untyped-call]\n        for i in range(3)\n    ]\n    txns[0].fee = 3000\n\n    gid = transaction.calculate_group_id(txns)  # type: ignore[no-untyped-call]\n    signed_txns = []\n    for txn in txns:\n        txn.group = gid\n        signed_txns.append(txn.sign(DUMMY_ACCOUNT.private_key))  # type: ignore[no-untyped-call]\n\n    return signed_txns\n\n\ndef test_send_atomic_txn_group_successful(tmp_path_factory: pytest.TempPathFactory, mocker: MockerFixture) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    txns = _generate_dummy_signed_txn_group()\n    transaction.write_to_file(txns, str(cwd / \"dummy.txns\"))  # type: ignore[no-untyped-call]\n\n    algod_mock = mocker.MagicMock()\n    algod_mock.send_transactions.return_value = \"dummy_tx_id\"\n    mocker.patch(\"algokit.cli.tasks.send_transaction.load_algod_client\", return_value=algod_mock)\n\n    # Act\n    result = invoke(\"task send --file 
dummy.txns\", input=\"y\", cwd=cwd)\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_send_from_transaction_successful(mocker: MockerFixture) -> None:\n    # Arrange\n    algod_mock = mocker.MagicMock()\n    algod_mock.send_transaction.return_value = \"dummy_tx_id\"\n    mocker.patch(\"algokit.cli.tasks.send_transaction.load_algod_client\", return_value=algod_mock)\n\n    # Act\n    result = invoke(f\"task send --transaction {_generate_dummy_signed_txn(encode=True)}\")\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_send_from_file_successful(\n    mocker: MockerFixture,\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n\n    transaction.write_to_file(  # type: ignore[no-untyped-call]\n        [_generate_dummy_signed_txn(amount=i) for i in range(20)],\n        str(cwd / \"dummy.txns\"),\n    )\n\n    algod_mock = mocker.MagicMock()\n    algod_mock.send_transaction.side_effect = [f\"dummy_tx_id_{i}\" for i in range(20)]\n    mocker.patch(\"algokit.cli.tasks.send_transaction.load_algod_client\", return_value=algod_mock)\n\n    # Act\n    result = invoke(\"task send --file dummy.txns\", cwd=cwd)\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_send_from_piped_input_successful(\n    mocker: MockerFixture,\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    # Arrange\n    tmp_path_factory.mktemp(\"cwd\")\n\n    ## Below simulates stdout from algokit sign transaction\n    txns = [{\"content\": _generate_dummy_signed_txn(amount=i, encode=True), \"transaction_id\": str(i)} for i in range(20)]\n\n    algod_mock = mocker.MagicMock()\n    algod_mock.send_transaction.side_effect = [f\"dummy_tx_id_{i}\" for i in range(20)]\n    mocker.patch(\"algokit.cli.tasks.send_transaction.load_algod_client\", return_value=algod_mock)\n    
mocker.patch(\"algokit.cli.tasks.send_transaction.stdin_has_content\", return_value=True)\n\n    # Act\n    result = invoke(\"task send \", input=json.dumps(txns))\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_mutually_exclusive_options() -> None:\n    # Act\n    result = invoke(\n        \"task send --file dummy.txns --transaction dummy.txn\",\n    )\n\n    # Assert\n    assert result.exit_code == click.exceptions.UsageError.exit_code\n    verify(result.output)\n\n\ndef test_file_decoding_no_txn_error(tmp_path_factory: pytest.TempPathFactory) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"dummy.txns\").touch()\n\n    # Act\n    result = invoke(\n        \"task send --file dummy.txns\",\n        cwd=cwd,\n    )\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_decoding_error(tmp_path_factory: pytest.TempPathFactory) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"dummy.txns\").write_text(\"dummy\")\n\n    # Act\n    result = invoke(\n        \"task send --file dummy.txns\",\n        cwd=cwd,\n    )\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output)\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.test_decoding_error.approved.txt",
    "content": "DEBUG: argument of type 'int' is not iterable\nError: Failed to decode transaction! If you are intending to send multiple transactions use `--file` instead.\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.test_file_decoding_no_txn_error.approved.txt",
    "content": "Error: No valid transactions found!\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.test_mutually_exclusive_options.approved.txt",
    "content": "Error: Illegal usage: 'file' is mutually exclusive with transaction.\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.test_send_atomic_txn_group_successful.approved.txt",
    "content": "Transaction group successfully sent with txid: dummy_tx_id\nCheck transaction group status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.test_send_from_file_successful.approved.txt",
    "content": "\nSending transaction 1/20\nTransaction successfully sent with txid: dummy_tx_id_0\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_0\n\nSending transaction 2/20\nTransaction successfully sent with txid: dummy_tx_id_1\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_1\n\nSending transaction 3/20\nTransaction successfully sent with txid: dummy_tx_id_2\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_2\n\nSending transaction 4/20\nTransaction successfully sent with txid: dummy_tx_id_3\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_3\n\nSending transaction 5/20\nTransaction successfully sent with txid: dummy_tx_id_4\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_4\n\nSending transaction 6/20\nTransaction successfully sent with txid: dummy_tx_id_5\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_5\n\nSending transaction 7/20\nTransaction successfully sent with txid: dummy_tx_id_6\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_6\n\nSending transaction 8/20\nTransaction successfully sent with txid: dummy_tx_id_7\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_7\n\nSending transaction 9/20\nTransaction successfully sent with txid: dummy_tx_id_8\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_8\n\nSending transaction 10/20\nTransaction successfully sent with txid: dummy_tx_id_9\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_9\n\nSending transaction 11/20\nTransaction successfully sent with txid: dummy_tx_id_10\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_10\n\nSending transaction 12/20\nTransaction 
successfully sent with txid: dummy_tx_id_11\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_11\n\nSending transaction 13/20\nTransaction successfully sent with txid: dummy_tx_id_12\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_12\n\nSending transaction 14/20\nTransaction successfully sent with txid: dummy_tx_id_13\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_13\n\nSending transaction 15/20\nTransaction successfully sent with txid: dummy_tx_id_14\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_14\n\nSending transaction 16/20\nTransaction successfully sent with txid: dummy_tx_id_15\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_15\n\nSending transaction 17/20\nTransaction successfully sent with txid: dummy_tx_id_16\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_16\n\nSending transaction 18/20\nTransaction successfully sent with txid: dummy_tx_id_17\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_17\n\nSending transaction 19/20\nTransaction successfully sent with txid: dummy_tx_id_18\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_18\n\nSending transaction 20/20\nTransaction successfully sent with txid: dummy_tx_id_19\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_19\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.test_send_from_piped_input_successful.approved.txt",
    "content": "\nSending transaction 1/20\nTransaction successfully sent with txid: dummy_tx_id_0\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_0\n\nSending transaction 2/20\nTransaction successfully sent with txid: dummy_tx_id_1\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_1\n\nSending transaction 3/20\nTransaction successfully sent with txid: dummy_tx_id_2\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_2\n\nSending transaction 4/20\nTransaction successfully sent with txid: dummy_tx_id_3\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_3\n\nSending transaction 5/20\nTransaction successfully sent with txid: dummy_tx_id_4\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_4\n\nSending transaction 6/20\nTransaction successfully sent with txid: dummy_tx_id_5\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_5\n\nSending transaction 7/20\nTransaction successfully sent with txid: dummy_tx_id_6\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_6\n\nSending transaction 8/20\nTransaction successfully sent with txid: dummy_tx_id_7\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_7\n\nSending transaction 9/20\nTransaction successfully sent with txid: dummy_tx_id_8\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_8\n\nSending transaction 10/20\nTransaction successfully sent with txid: dummy_tx_id_9\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_9\n\nSending transaction 11/20\nTransaction successfully sent with txid: dummy_tx_id_10\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_10\n\nSending transaction 12/20\nTransaction 
successfully sent with txid: dummy_tx_id_11\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_11\n\nSending transaction 13/20\nTransaction successfully sent with txid: dummy_tx_id_12\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_12\n\nSending transaction 14/20\nTransaction successfully sent with txid: dummy_tx_id_13\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_13\n\nSending transaction 15/20\nTransaction successfully sent with txid: dummy_tx_id_14\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_14\n\nSending transaction 16/20\nTransaction successfully sent with txid: dummy_tx_id_15\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_15\n\nSending transaction 17/20\nTransaction successfully sent with txid: dummy_tx_id_16\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_16\n\nSending transaction 18/20\nTransaction successfully sent with txid: dummy_tx_id_17\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_17\n\nSending transaction 19/20\nTransaction successfully sent with txid: dummy_tx_id_18\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_18\n\nSending transaction 20/20\nTransaction successfully sent with txid: dummy_tx_id_19\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id_19\n"
  },
  {
    "path": "tests/tasks/test_send_transaction.test_send_from_transaction_successful.approved.txt",
    "content": "\nSending transaction 1/1\nTransaction successfully sent with txid: dummy_tx_id\nCheck transaction status at: https://explore.algokit.io/localnet/transaction/dummy_tx_id\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.py",
    "content": "import json\n\nimport click\nimport pytest\nfrom algosdk import encoding, mnemonic, transaction\n\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.tasks.conftest import DUMMY_ACCOUNT, DUMMY_SUGGESTED_PARAMS\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef _generate_dummy_txn(sender: str, amount: int = 1) -> transaction.PaymentTxn:\n    return transaction.PaymentTxn(sender, DUMMY_SUGGESTED_PARAMS, sender, amt=amount)  # type: ignore[no-untyped-call]\n\n\ndef _get_mnemonic_from_private_key(private_key: str) -> str:\n    return str(mnemonic.from_private_key(private_key))  # type: ignore[no-untyped-call]\n\n\ndef test_sign_atomic_txn_group_successful(\n    tmp_path_factory: pytest.TempPathFactory, mock_keyring: dict[str, str]\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n            \"address\": DUMMY_ACCOUNT.address,\n            \"private_key\": DUMMY_ACCOUNT.private_key,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n    txn_a = _generate_dummy_txn(DUMMY_ACCOUNT.address)\n    txn_b = _generate_dummy_txn(DUMMY_ACCOUNT.address)\n    gid = transaction.calculate_group_id([txn_a, txn_b])  # type: ignore[no-untyped-call]\n    txn_a.group = gid\n    txn_b.group = gid\n    transaction.write_to_file([txn_a, txn_b], str(cwd / \"dummy.txns\"))  # type: ignore[no-untyped-call]\n\n    # Act\n    result = invoke(f\"task sign -a {alias_name} --file dummy.txns\", input=\"y\", cwd=cwd)\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_sign_from_stdin_with_alias_successful(mock_keyring: dict[str, str]) -> None:\n    # Arrange\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n 
           \"address\": DUMMY_ACCOUNT.address,\n            \"private_key\": DUMMY_ACCOUNT.private_key,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n    dummy_txn = _generate_dummy_txn(DUMMY_ACCOUNT.address)\n\n    # Act\n    txn = encoding.msgpack_encode({\"txn\": dummy_txn.dictify()})  # type: ignore[no-untyped-call]\n    result = invoke(f\"task sign -a {alias_name} --transaction {txn}\", input=\"y\")\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_sign_from_stdin_with_address_successful() -> None:\n    # Arrange\n    dummy_txn = _generate_dummy_txn(DUMMY_ACCOUNT.address)\n\n    # Act\n    txn = encoding.msgpack_encode({\"txn\": dummy_txn.dictify()})  # type: ignore[no-untyped-call]\n    result = invoke(\n        f\"task sign -a {DUMMY_ACCOUNT.address} --transaction {txn}\",\n        input=f\"{_get_mnemonic_from_private_key(DUMMY_ACCOUNT.private_key)}\\ny\",\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_sign_many_from_file_with_alias_successful(\n    tmp_path_factory: pytest.TempPathFactory, mock_keyring: dict[str, str]\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n            \"address\": DUMMY_ACCOUNT.address,\n            \"private_key\": DUMMY_ACCOUNT.private_key,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n    _generate_dummy_txn(DUMMY_ACCOUNT.address)\n    transaction.write_to_file(  # type: ignore[no-untyped-call]\n        [\n            _generate_dummy_txn(DUMMY_ACCOUNT.address, 1),\n            _generate_dummy_txn(DUMMY_ACCOUNT.address, 2),\n            _generate_dummy_txn(DUMMY_ACCOUNT.address, 3),\n        ],\n        str(cwd / \"dummy.txns\"),\n    )\n\n    # Act\n    result = invoke(f\"task sign -a 
{alias_name} --file dummy.txns\", input=\"y\", cwd=cwd)\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_sign_many_from_file_with_address_successful(\n    tmp_path_factory: pytest.TempPathFactory,\n) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    _generate_dummy_txn(DUMMY_ACCOUNT.address)\n    transaction.write_to_file(  # type: ignore[no-untyped-call]\n        [\n            _generate_dummy_txn(DUMMY_ACCOUNT.address, 1),\n            _generate_dummy_txn(DUMMY_ACCOUNT.address, 2),\n            _generate_dummy_txn(DUMMY_ACCOUNT.address, 3),\n        ],\n        str(cwd / \"dummy.txns\"),\n    )\n\n    # Act\n    result = invoke(\n        f\"task sign -a {DUMMY_ACCOUNT.address} --file dummy.txns\",\n        input=f\"{_get_mnemonic_from_private_key(DUMMY_ACCOUNT.private_key)}\\ny\",\n        cwd=cwd,\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_mutually_exclusive_options() -> None:\n    # Arrange\n    _generate_dummy_txn(DUMMY_ACCOUNT.address)\n\n    # Act\n    result = invoke(\n        f\"task sign -a {DUMMY_ACCOUNT.address} --file dummy.txns --transaction dummy.txn\",\n        input=f\"{_get_mnemonic_from_private_key(DUMMY_ACCOUNT.private_key)}\\ny\",\n    )\n\n    # Assert\n    assert result.exit_code == click.exceptions.UsageError.exit_code\n    verify(result.output)\n\n\ndef test_file_decoding_errors(tmp_path_factory: pytest.TempPathFactory) -> None:\n    # Arrange\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    (cwd / \"dummy.txns\").touch()\n\n    # Act\n    result = invoke(\n        f\"task sign -a {DUMMY_ACCOUNT.address} --file dummy.txns\",\n        input=f\"{_get_mnemonic_from_private_key(DUMMY_ACCOUNT.private_key)}\\ny\",\n        cwd=cwd,\n    )\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_transaction_decoding_errors() -> None:\n    # Act\n    result = invoke(\n        f\"task sign -a 
{DUMMY_ACCOUNT.address} --transaction dummy\",\n        input=f\"{_get_mnemonic_from_private_key(DUMMY_ACCOUNT.private_key)}\\ny\",\n    )\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output)\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_file_decoding_errors.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nError: No valid transactions found!\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_mutually_exclusive_options.approved.txt",
    "content": "Error: Illegal usage: 'file' is mutually exclusive with transaction.\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_sign_atomic_txn_group_successful.approved.txt",
    "content": "[\n  {\n    \"transaction_id\": \"RUU4DMAYJ5TLIEZ3ZT3PWTDPUOHCGOFY26JAXMOQXE52J2W6D7UA\",\n    \"content\": {\n      \"amt\": 1,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"grp\": \"Vyhyt7HcQWkkBxcIR4zP+GkJLUAmN2H2D5Hx03fAVvo=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  },\n  {\n    \"transaction_id\": \"RUU4DMAYJ5TLIEZ3ZT3PWTDPUOHCGOFY26JAXMOQXE52J2W6D7UA\",\n    \"content\": {\n      \"amt\": 1,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"grp\": \"Vyhyt7HcQWkkBxcIR4zP+GkJLUAmN2H2D5Hx03fAVvo=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  }\n]\nWould you like to proceed with signing the above? 
(y, n) [n]: y\n[\n  {\n    \"transaction_id\": \"RUU4DMAYJ5TLIEZ3ZT3PWTDPUOHCGOFY26JAXMOQXE52J2W6D7UA\",\n    \"content\": \"gqNzaWfEQMSdIjL5dbUk+/lVCi0AWs84ikMH0gEErgYxuNHpY/dYkKIvY9XlhiszYNMhk2XlkvTCkrrWGOWmblX3Af43tg+jdHhuiaNhbXQBomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKjZ3JwxCBXKHK3sdxBaSQHFwhHjM/4aQktQCY3YfYPkfHTd8BW+qJsds4CAYJQo3JjdsQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKjc25kxCBluk70pvuPX4dYD1Jir85uZP+AImM/8SBmdPRpBSTFAqR0eXBlo3BheQ==\"\n  },\n  {\n    \"transaction_id\": \"RUU4DMAYJ5TLIEZ3ZT3PWTDPUOHCGOFY26JAXMOQXE52J2W6D7UA\",\n    \"content\": \"gqNzaWfEQMSdIjL5dbUk+/lVCi0AWs84ikMH0gEErgYxuNHpY/dYkKIvY9XlhiszYNMhk2XlkvTCkrrWGOWmblX3Af43tg+jdHhuiaNhbXQBomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKjZ3JwxCBXKHK3sdxBaSQHFwhHjM/4aQktQCY3YfYPkfHTd8BW+qJsds4CAYJQo3JjdsQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKjc25kxCBluk70pvuPX4dYD1Jir85uZP+AImM/8SBmdPRpBSTFAqR0eXBlo3BheQ==\"\n  }\n]\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_sign_from_stdin_with_address_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \n[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": {\n      \"amt\": 1,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  }\n]\nWould you like to proceed with signing the above? (y, n) [n]: y\n[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": \"gqNzaWfEQHHuPbhkAADyq8eKU/NiDCuJ+cnW9MrHT3iAAEPm+okmoio/rmgctA7QUpqqd4eF5aYlUcgz9EbU+uUXS1rFOg2jdHhuiKNhbXQBomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  }\n]\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_sign_from_stdin_with_alias_successful.approved.txt",
    "content": "[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": {\n      \"amt\": 1,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  }\n]\nWould you like to proceed with signing the above? (y, n) [n]: y\n[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": \"gqNzaWfEQHHuPbhkAADyq8eKU/NiDCuJ+cnW9MrHT3iAAEPm+okmoio/rmgctA7QUpqqd4eF5aYlUcgz9EbU+uUXS1rFOg2jdHhuiKNhbXQBomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  }\n]\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_sign_many_from_file_with_address_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \n[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": {\n      \"amt\": 1,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  },\n  {\n    \"transaction_id\": \"MLS7IUJQVK7GABTCOLTG5QE7NWITX74D6YC3QZLN6Y5SZF23WZBQ\",\n    \"content\": {\n      \"amt\": 2,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  },\n  {\n    \"transaction_id\": \"NEI3KU7ALQ6PFOSTYEN3LSJX2CUDEESDH2NH2XUJGYHNR3UVB3OQ\",\n    \"content\": {\n      \"amt\": 3,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  }\n]\nWould you like to proceed with signing the above? 
(y, n) [n]: y\n[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": \"gqNzaWfEQHHuPbhkAADyq8eKU/NiDCuJ+cnW9MrHT3iAAEPm+okmoio/rmgctA7QUpqqd4eF5aYlUcgz9EbU+uUXS1rFOg2jdHhuiKNhbXQBomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  },\n  {\n    \"transaction_id\": \"MLS7IUJQVK7GABTCOLTG5QE7NWITX74D6YC3QZLN6Y5SZF23WZBQ\",\n    \"content\": \"gqNzaWfEQMDbNg81bX8hvW+MHye1pJhtDxWvk/3Oec8lC8QZ5i0pn7LNnXTEniYzhECzjwxL11ENlTh6w66M2jl1f4YeZwijdHhuiKNhbXQComZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  },\n  {\n    \"transaction_id\": \"NEI3KU7ALQ6PFOSTYEN3LSJX2CUDEESDH2NH2XUJGYHNR3UVB3OQ\",\n    \"content\": \"gqNzaWfEQJPaDlnXKBkMacMv6SNJmDlRf2dBzVBZwhOelc/Q+uriONVXzP0pqmZqqCua/fo3DxH9tbKyCDcSYVmn9MsaLgqjdHhuiKNhbXQDomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  }\n]\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_sign_many_from_file_with_alias_successful.approved.txt",
    "content": "[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": {\n      \"amt\": 1,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  },\n  {\n    \"transaction_id\": \"MLS7IUJQVK7GABTCOLTG5QE7NWITX74D6YC3QZLN6Y5SZF23WZBQ\",\n    \"content\": {\n      \"amt\": 2,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  },\n  {\n    \"transaction_id\": \"NEI3KU7ALQ6PFOSTYEN3LSJX2CUDEESDH2NH2XUJGYHNR3UVB3OQ\",\n    \"content\": {\n      \"amt\": 3,\n      \"fv\": 33652328,\n      \"gen\": \"testnet-v1.0\",\n      \"gh\": \"SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI=\",\n      \"lv\": 33653328,\n      \"rcv\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"snd\": \"ZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQI=\",\n      \"type\": \"pay\"\n    }\n  }\n]\nWould you like to proceed with signing the above? 
(y, n) [n]: y\n[\n  {\n    \"transaction_id\": \"YGSOMX5QSASQALR5V4MH47L4GEHFJZVGSUETJAWUFM2I6PJAQL4Q\",\n    \"content\": \"gqNzaWfEQHHuPbhkAADyq8eKU/NiDCuJ+cnW9MrHT3iAAEPm+okmoio/rmgctA7QUpqqd4eF5aYlUcgz9EbU+uUXS1rFOg2jdHhuiKNhbXQBomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  },\n  {\n    \"transaction_id\": \"MLS7IUJQVK7GABTCOLTG5QE7NWITX74D6YC3QZLN6Y5SZF23WZBQ\",\n    \"content\": \"gqNzaWfEQMDbNg81bX8hvW+MHye1pJhtDxWvk/3Oec8lC8QZ5i0pn7LNnXTEniYzhECzjwxL11ENlTh6w66M2jl1f4YeZwijdHhuiKNhbXQComZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  },\n  {\n    \"transaction_id\": \"NEI3KU7ALQ6PFOSTYEN3LSJX2CUDEESDH2NH2XUJGYHNR3UVB3OQ\",\n    \"content\": \"gqNzaWfEQJPaDlnXKBkMacMv6SNJmDlRf2dBzVBZwhOelc/Q+uriONVXzP0pqmZqqCua/fo3DxH9tbKyCDcSYVmn9MsaLgqjdHhuiKNhbXQDomZ2zgIBfmijZ2VurHRlc3RuZXQtdjEuMKJnaMQgSGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAgGCUKNyY3bEIGW6TvSm+49fh1gPUmKvzm5k/4AiYz/xIGZ09GkFJMUCo3NuZMQgZbpO9Kb7j1+HWA9SYq/ObmT/gCJjP/EgZnT0aQUkxQKkdHlwZaNwYXk=\"\n  }\n]\n"
  },
  {
    "path": "tests/tasks/test_sign_transaction.test_transaction_decoding_errors.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nDEBUG: Invalid base64-encoded string: number of data characters (5) cannot be 1 more than a multiple of 4\nError: Failed to decode transaction! If you are intending to sign multiple transactions use `--file` instead.\n"
  },
  {
    "path": "tests/tasks/test_transfer.py",
    "content": "import json\n\nimport pytest\nfrom algokit_utils import SendAtomicTransactionComposerResults\nfrom algosdk import account, mnemonic\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\nclass TransactionMock:\n    def get_txid(self) -> str:\n        return \"dummy_txid\"\n\n\ndef _generate_account() -> tuple[str, str]:\n    pk, addr = account.generate_account()  # type: ignore[no-untyped-call]\n    return pk, addr\n\n\ndef _get_mnemonic_from_private_key(private_key: str) -> str:\n    return str(mnemonic.from_private_key(private_key))  # type: ignore[no-untyped-call]\n\n\ndef test_transfer_no_args() -> None:\n    result = invoke(\"task transfer\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"mock_keyring\")\ndef test_transfer_invalid_sender_account() -> None:\n    # Arrange\n    dummy_receiver = _generate_account()[1]\n\n    # Act\n    result = invoke(f\"task transfer -s invalid-address -r {dummy_receiver} -a 1\")\n\n    # Assert\n    assert result.exit_code != 0\n    verify(result.output)\n\n\n@pytest.mark.usefixtures(\"mock_keyring\")\ndef test_transfer_invalid_receiver_account() -> None:\n    # Arrange\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {dummy_sender_address} -r invalid-address -a 1\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_transfer_no_amount() -> None:\n    # Arrange\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {dummy_sender_address} -r {dummy_receiver_address}\",\n        
input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_transfer_algo_from_address_successful(mocker: MockerFixture) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    composer_mock = mocker.MagicMock()\n    composer_mock.add_payment.return_value = composer_mock\n    composer_mock.send.return_value = SendAtomicTransactionComposerResults(\n        group_id=\"dummy_group_id\",\n        confirmations=[],\n        tx_ids=[\"dummy_txid\"],\n        transactions=[],\n        returns=[],\n    )\n    algorand_mock.new_group.return_value = composer_mock\n    mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {dummy_sender_address} -r {dummy_receiver_address} -a 1\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_transfer_algo_from_alias_successful(mocker: MockerFixture, mock_keyring: dict[str, str]) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    composer_mock = mocker.MagicMock()\n    composer_mock.add_payment.return_value = composer_mock\n    composer_mock.send.return_value = SendAtomicTransactionComposerResults(\n        group_id=\"dummy_group_id\",\n        confirmations=[],\n        tx_ids=[\"dummy_txid\"],\n        transactions=[],\n        returns=[],\n    )\n    algorand_mock.new_group.return_value = composer_mock\n    mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    
mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n            \"address\": dummy_sender_address,\n            \"private_key\": dummy_sender_pk,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {alias_name} -r {dummy_receiver_address} -a 1\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_transfer_asset_from_address_successful(mocker: MockerFixture) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    composer_mock = mocker.MagicMock()\n    composer_mock.add_asset_transfer.return_value = composer_mock\n    composer_mock.send.return_value = SendAtomicTransactionComposerResults(\n        group_id=\"dummy_group_id\",\n        confirmations=[],\n        tx_ids=[\"dummy_txid\"],\n        transactions=[],\n        returns=[],\n    )\n    algorand_mock.new_group.return_value = composer_mock\n    mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {dummy_sender_address} -r {dummy_receiver_address} -a 1 --id 1234\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    
verify(result.output)\n\n\ndef test_transfer_asset_from_address_to_alias_successful(mocker: MockerFixture, mock_keyring: dict[str, str]) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    composer_mock = mocker.MagicMock()\n    composer_mock.add_asset_transfer.return_value = composer_mock\n    composer_mock.send.return_value = SendAtomicTransactionComposerResults(\n        group_id=\"dummy_group_id\",\n        confirmations=[],\n        tx_ids=[\"dummy_txid\"],\n        transactions=[],\n        returns=[],\n    )\n    algorand_mock.new_group.return_value = composer_mock\n    mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    _generate_account()[1]\n\n    dummy_receiver_alias = \"dummy_receiver_alias\"\n    mock_keyring[dummy_receiver_alias] = json.dumps(\n        {\n            \"alias\": dummy_receiver_alias,\n            \"address\": dummy_sender_address,\n            \"private_key\": None,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([dummy_receiver_alias])\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {dummy_sender_address} -r {dummy_receiver_alias} -a 1 --id 1234\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_transfer_asset_from_alias_successful(mocker: MockerFixture, mock_keyring: dict[str, str]) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    composer_mock = mocker.MagicMock()\n    composer_mock.add_asset_transfer.return_value = composer_mock\n    composer_mock.send.return_value = SendAtomicTransactionComposerResults(\n        group_id=\"dummy_group_id\",\n        confirmations=[],\n        
tx_ids=[\"dummy_txid\"],\n        transactions=[],\n        returns=[],\n    )\n    algorand_mock.new_group.return_value = composer_mock\n    mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n            \"address\": dummy_sender_address,\n            \"private_key\": dummy_sender_pk,\n        }\n    )\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {alias_name} -r {dummy_receiver_address} -a 1 --id 1234\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_transfer_failed(mocker: MockerFixture, mock_keyring: dict[str, str]) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    algorand_mock.new_group.return_value = mocker.MagicMock(\n        add_payment=mocker.MagicMock(side_effect=Exception(\"dummy error\"))\n    )\n    mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    alias_name = \"dummy_alias\"\n    mock_keyring[alias_name] = json.dumps(\n        {\n            \"alias\": alias_name,\n            \"address\": dummy_sender_address,\n            \"private_key\": dummy_sender_pk,\n        }\n    )\n    
mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {alias_name} -r {dummy_receiver_address} -a 1\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 1\n    verify(result.output)\n\n\ndef test_transfer_on_testnet(mocker: MockerFixture) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    composer_mock = mocker.MagicMock()\n    composer_mock.add_payment.return_value = composer_mock\n    composer_mock.send.return_value = SendAtomicTransactionComposerResults(\n        group_id=\"dummy_group_id\",\n        confirmations=[],\n        tx_ids=[\"dummy_txid\"],\n        transactions=[],\n        returns=[],\n    )\n    algorand_mock.new_group.return_value = composer_mock\n    mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {dummy_sender_address} -r {dummy_receiver_address} -a 1 -n testnet\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_transfer_on_mainnet(mocker: MockerFixture) -> None:\n    # Arrange\n    algorand_mock = mocker.MagicMock()\n    composer_mock = mocker.MagicMock()\n    composer_mock.add_payment.return_value = composer_mock\n    composer_mock.send.return_value = SendAtomicTransactionComposerResults(\n        group_id=\"dummy_group_id\",\n        confirmations=[],\n        tx_ids=[\"dummy_txid\"],\n        transactions=[],\n        returns=[],\n    )\n    algorand_mock.new_group.return_value = composer_mock\n   
 mocker.patch(\"algokit.cli.tasks.transfer.get_algorand_client_for_network\", return_value=algorand_mock)\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_address\")\n    mocker.patch(\"algokit.cli.tasks.transfer.validate_balance\")\n    dummy_sender_pk, dummy_sender_address = _generate_account()\n    dummy_receiver_address = _generate_account()[1]\n\n    # Act\n    result = invoke(\n        f\"task transfer -s {dummy_sender_address} -r {dummy_receiver_address} -a 1 -n mainnet\",\n        input=_get_mnemonic_from_private_key(dummy_sender_pk),\n    )\n\n    # Assert\n    assert result.exit_code == 0\n    verify(result.output)\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_algo_from_address_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nSuccessfully performed transfer. See details at https://explore.algokit.io/localnet/transaction/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_algo_from_alias_successful.approved.txt",
    "content": "Successfully performed transfer. See details at https://explore.algokit.io/localnet/transaction/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_algo_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nSuccessfully performed transfer. See details at https://testnet.algoexplorer.io/tx/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_asset_from_address_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nSuccessfully performed transfer. See details at https://explore.algokit.io/localnet/transaction/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_asset_from_address_to_alias_successful.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nSuccessfully performed transfer. See details at https://explore.algokit.io/localnet/transaction/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_asset_from_alias_successful.approved.txt",
    "content": "Successfully performed transfer. See details at https://explore.algokit.io/localnet/transaction/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_failed.approved.txt",
    "content": "DEBUG: dummy error\nError: Failed to perform transfer\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_invalid_receiver_account.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nDEBUG: `invalid-address` does not exist\nError: Alias `invalid-address` alias does not exist.\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_invalid_sender_accoount.approved.txt",
    "content": ""
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_invalid_sender_account.approved.txt",
    "content": "DEBUG: `invalid-address` does not exist\nError: Alias `invalid-address` alias does not exist.\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_no_amount.approved.txt",
    "content": "Usage: algokit task transfer [OPTIONS]\nTry 'algokit task transfer -h' for help.\n\nError: Missing option '--amount' / '-a'.\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_no_args.approved.txt",
    "content": "Usage: algokit task transfer [OPTIONS]\nTry 'algokit task transfer -h' for help.\n\nError: Missing option '--sender' / '-s'.\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_no_option.approved.txt",
    "content": ""
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_on_mainnet.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nSuccessfully performed transfer. See details at https://explore.algokit.io/mainnet/transaction/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_transfer.test_transfer_on_testnet.approved.txt",
    "content": "Enter the mnemonic phrase (25 words separated by whitespace): \nSuccessfully performed transfer. See details at https://explore.algokit.io/testnet/transaction/dummy_txid\n"
  },
  {
    "path": "tests/tasks/test_vanity_address.py",
    "content": "import json\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\ndef test_vanity_address_no_options() -> None:\n    result = invoke(\"task vanity-address\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_vanity_address_invalid_keyword() -> None:\n    result = invoke(\"task vanity-address test\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_vanity_address_invalid_input_on_file() -> None:\n    result = invoke(\"task vanity-address TEST -o file\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_vanity_address_invalid_input_on_alias() -> None:\n    result = invoke(\"task vanity-address TEST -o alias\")\n\n    assert result.exit_code != 0\n    verify(result.output)\n\n\ndef test_vanity_address_on_default() -> None:\n    result = invoke(\"task vanity-address A\")\n\n    assert result.exit_code == 0\n    match = re.search(r\"'address': '([^']+)'\", result.output)\n    if match:\n        address = match.group(1)\n        assert address.startswith(\"A\")\n\n\ndef test_vanity_address_on_anywhere_match() -> None:\n    result = invoke(\"task vanity-address A -m anywhere\")\n\n    assert result.exit_code == 0\n    match = re.search(r\"'address': '([^']+)'\", result.output)\n    if match:\n        address = match.group(1)\n        assert \"A\" in address\n\n\ndef test_vanity_address_on_file(tmp_path_factory: pytest.TempPathFactory) -> None:\n    cwd = tmp_path_factory.mktemp(\"cwd\")\n    output_file_path = Path(cwd) / \"output.txt\"\n\n    path = str(output_file_path.absolute()).replace(\"\\\\\", r\"\\\\\")\n    result = invoke(f\"task vanity-address A -o file --file-path {path}\")\n\n    assert result.exit_code == 0\n    assert output_file_path.exists()\n    output = output_file_path.read_text()\n\n    # 
Ensure output address starts with A\n    output_match = re.search(r'\\\"address\\\": \"([^\"]+)\"', output)\n\n    if output_match:\n        address = output_match.group(1)\n        assert address.startswith(\"A\")\n\n\ndef test_vanity_address_on_alias(mock_keyring: dict[str, str]) -> None:\n    alias = \"test_alias\"\n\n    result = invoke(f\"task vanity-address A -o alias --alias {alias}\")\n\n    assert result.exit_code == 0\n    assert json.loads(mock_keyring[alias])[\"alias\"] == alias\n    assert json.loads(mock_keyring[alias])[\"address\"].startswith(\"A\")\n\n\ndef test_vanity_address_on_existing_alias(mock_keyring: dict[str, str]) -> None:\n    alias = \"test_alias\"\n    mock_keyring[alias] = json.dumps({\"alias\": alias, \"address\": \"B\", \"private_key\": None})\n    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias])\n\n    result = invoke(f\"task vanity-address A -o alias --alias {alias}\", input=\"y\")\n\n    assert result.exit_code == 0\n    assert json.loads(mock_keyring[alias])[\"alias\"] == alias\n    assert json.loads(mock_keyring[alias])[\"address\"].startswith(\"A\")\n"
  },
  {
    "path": "tests/tasks/test_vanity_address.test_vanity_address_invalid_input_on_alias.approved.txt",
    "content": "Error: Please provide an alias using the '--alias' option when the output is set to 'alias'.\n"
  },
  {
    "path": "tests/tasks/test_vanity_address.test_vanity_address_invalid_input_on_file.approved.txt",
    "content": "Error: Please provide an output filename using the '--file-path' option when the output is set to 'file'.\n"
  },
  {
    "path": "tests/tasks/test_vanity_address.test_vanity_address_invalid_keyword.approved.txt",
    "content": "Error: Invalid KEYWORD. Allowed: uppercase letters A-Z and numbers 2-7.\n"
  },
  {
    "path": "tests/tasks/test_vanity_address.test_vanity_address_no_options.approved.txt",
    "content": "Usage: algokit task vanity-address [OPTIONS] KEYWORD\nTry 'algokit task vanity-address -h' for help.\n\nError: Missing argument 'KEYWORD'.\n"
  },
  {
    "path": "tests/tasks/test_wallet.py",
    "content": "import json\n\nimport algosdk\nimport pytest\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.tasks.wallet import WALLET_ALIASES_KEYRING_USERNAME\nfrom tests.utils.approvals import verify\nfrom tests.utils.click_invoker import invoke\n\n\nclass TestAddAlias:\n    def test_wallet_add_address_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n\n        # Act\n        result = invoke(f\"task wallet add {alias_name} -a {address}\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert json.loads(str(mock_keyring[alias_name])) == {\n            \"alias\": alias_name,\n            \"address\": address,\n            \"private_key\": None,\n        }\n\n        verify(result.output)\n\n    def test_wallet_add_account_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        pk, address = algosdk.account.generate_account()  # type: ignore[no-untyped-call]\n        mnemonic = algosdk.mnemonic.from_private_key(pk)  # type: ignore[no-untyped-call]\n\n        # Act\n        result = invoke(f\"task wallet add {alias_name} -a {address} -m\", input=f\"{mnemonic}\\n\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert json.loads(str(mock_keyring[alias_name])) == {\"alias\": alias_name, \"address\": address, \"private_key\": pk}\n\n        verify(result.output)\n\n    def test_wallet_add_invalid_address(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        address = \"invalid_address\"\n\n        # Act\n        result = invoke(f\"task wallet add {alias_name} -a {address}\")\n\n        # Assert\n        assert result.exit_code == 1\n        assert alias_name not in mock_keyring\n\n        verify(result.output)\n\n    def 
test_wallet_add_alias_exists(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        bob_alias = \"test_alias\"\n        bob_address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n        mock_keyring[bob_alias] = json.dumps({\"alias\": bob_alias, \"address\": bob_address, \"private_key\": None})\n        alice_address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n\n        # Act\n        result = invoke(f\"task wallet add {bob_alias} -a {alice_address}\", input=\"y\\n\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert json.loads(str(mock_keyring[bob_alias])) == {\n            \"alias\": bob_alias,\n            \"address\": alice_address,\n            \"private_key\": None,\n        }\n\n        verify(result.output)\n\n    def test_wallet_add_alias_mnemonic_differs(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n        pk = algosdk.account.generate_account()[0]  # type: ignore[no-untyped-call]\n        mnemonic = algosdk.mnemonic.from_private_key(pk)  # type: ignore[no-untyped-call]\n\n        # Act\n        result = invoke(f\"task wallet add {alias_name} -a {address} -m\", input=f\"{mnemonic}\\n\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert json.loads(str(mock_keyring[alias_name])) == {\"alias\": alias_name, \"address\": address, \"private_key\": pk}\n\n        verify(result.output)\n\n    def test_wallet_add_alias_limit_error(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        dummy_aliases = []\n        for i in range(50):\n            alias_name = f\"test_alias_{i}\"\n            dummy_aliases.append(alias_name)\n            address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n            mock_keyring[alias_name] = 
json.dumps({\"alias\": alias_name, \"address\": address, \"private_key\": None})\n        mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps(dummy_aliases)\n\n        alias_name = \"test_alias\"\n        address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n\n        # Act\n        result = invoke(f\"task wallet add {alias_name} -a {address}\")\n\n        # Assert\n        assert result.exit_code == 1\n        assert alias_name not in mock_keyring\n\n        verify(result.output)\n\n    def test_wallet_add_alias_generic_error(self, mocker: MockerFixture, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n        mocker.patch(\"algokit.cli.tasks.wallet.add_alias\", side_effect=Exception(\"test error\"))\n\n        # Act\n        result = invoke(f\"task wallet add {alias_name} -a {address}\")\n\n        # Assert\n        assert result.exit_code == 1\n        assert alias_name not in mock_keyring\n\n        verify(result.output)\n\n\nclass TestGetAlias:\n    def test_wallet_get_address_alias_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n        mock_keyring[alias_name] = json.dumps({\"alias\": alias_name, \"address\": address, \"private_key\": None})\n\n        # Act\n        result = invoke(f\"task wallet get {alias_name}\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert result.output == f\"Address for alias `{alias_name}`: {address}\\n\"\n\n    def test_wallet_get_account_alias_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        pk, address = algosdk.account.generate_account()  # type: ignore[no-untyped-call]\n        
mock_keyring[alias_name] = json.dumps({\"alias\": alias_name, \"address\": address, \"private_key\": pk})\n\n        # Act\n        result = invoke(f\"task wallet get {alias_name}\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert result.output == f\"Address for alias `{alias_name}`: {address} (🔐 includes private key)\\n\"\n\n    @pytest.mark.usefixtures(\"mock_keyring\")\n    def test_wallet_get_alias_not_found(\n        self,\n    ) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n\n        # Act\n        result = invoke(f\"task wallet get {alias_name}\")\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output)\n\n\nclass TestListAliases:\n    def test_wallet_list_aliases_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n        mock_keyring[\"test_alias_1\"] = json.dumps(\n            {\"alias\": \"test_alias_1\", \"address\": \"test_address_1\", \"private_key\": None},\n        )\n        mock_keyring[\"test_alias_2\"] = json.dumps(\n            {\n                \"alias\": \"test_alias_2\",\n                \"address\": \"test_address_2\",\n                \"private_key\": \"blabla\",\n            }\n        )\n        mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([\"test_alias_1\", \"test_alias_2\"])\n\n        # Act\n        result = invoke(\"task wallet list\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    @pytest.mark.usefixtures(\"mock_keyring\")\n    def test_wallet_list_aliases_not_found(self) -> None:\n        # Arrange\n\n        # Act\n        result = invoke(\"task wallet list\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n\nclass TestRemoveAlias:\n    def test_wallet_remove_alias_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        # 
Arrange\n        alias_name = \"test_alias\"\n        address = algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n        mock_keyring[alias_name] = json.dumps({\"alias\": alias_name, \"address\": address, \"private_key\": None})\n        mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([alias_name])\n\n        # Act\n        result = invoke(f\"task wallet remove {alias_name}\", input=\"y\\n\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert alias_name not in mock_keyring\n\n        verify(result.output)\n\n    @pytest.mark.usefixtures(\"mock_keyring\")\n    def test_wallet_remove_alias_not_found(self) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n\n        # Act\n        result = invoke(f\"task wallet remove {alias_name}\")\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output)\n\n    @pytest.mark.usefixtures(\"mock_keyring\")\n    def test_wallet_remove_alias_generic_error(self, mocker: MockerFixture) -> None:\n        # Arrange\n        alias_name = \"test_alias\"\n        mocker.patch(\"algokit.cli.tasks.wallet.remove_alias\", side_effect=Exception(\"test error\"))\n\n        # Act\n        result = invoke(f\"task wallet remove {alias_name}\")\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output)\n\n\nclass TestResetAliases:\n    def test_wallet_reset_aliases_successful(self, mock_keyring: dict[str, str | None]) -> None:\n        # Arrange\n        algosdk.account.generate_account()[1]  # type: ignore[no-untyped-call]\n        mock_keyring[\"test_alias_1\"] = json.dumps(\n            {\"alias\": \"test_alias_1\", \"address\": \"test_address_1\", \"private_key\": None},\n        )\n        mock_keyring[\"test_alias_2\"] = json.dumps(\n            {\n                \"alias\": \"test_alias_2\",\n                \"address\": \"test_address_2\",\n                \"private_key\": \"blabla\",\n            }\n        )\n    
    mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([\"test_alias_1\", \"test_alias_2\"])\n\n        # Act\n        result = invoke(\"task wallet reset\", input=\"y\\n\")\n\n        # Assert\n        assert result.exit_code == 0\n        assert mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] == \"[]\"\n\n        verify(result.output)\n\n    @pytest.mark.usefixtures(\"mock_keyring\")\n    def test_wallet_reset_aliases_not_found(self) -> None:\n        # Arrange\n\n        # Act\n        result = invoke(\"task wallet reset\")\n\n        # Assert\n        assert result.exit_code == 0\n        verify(result.output)\n\n    def test_wallet_reset_aliases_generic_error(\n        self, mocker: MockerFixture, mock_keyring: dict[str, str | None]\n    ) -> None:\n        # Arrange\n        mock_keyring[WALLET_ALIASES_KEYRING_USERNAME] = json.dumps([\"test_alias_1\"])\n        mock_keyring[\"test_alias_1\"] = json.dumps(\n            {\"alias\": \"test_alias_1\", \"address\": \"test_address_1\", \"private_key\": None},\n        )\n        mocker.patch(\"algokit.cli.tasks.wallet.remove_alias\", side_effect=Exception(\"test error\"))\n\n        # Act\n        result = invoke(\"task wallet reset\", input=\"y\\n\")\n\n        # Assert\n        assert result.exit_code == 1\n        verify(result.output)\n"
  },
  {
    "path": "tests/test_root.py",
    "content": "from approvaltests import verify\n\nfrom tests.utils.click_invoker import invoke\n\n\ndef test_help() -> None:\n    result = invoke(\"-h\")\n\n    assert result.exit_code == 0\n    verify(result.output)\n\n\ndef test_version() -> None:\n    result = invoke(\"--version\")\n\n    assert result.exit_code == 0\n"
  },
  {
    "path": "tests/test_root.test_help.approved.txt",
    "content": "Usage: algokit [OPTIONS] COMMAND [ARGS]...\n\n  AlgoKit is your one-stop shop to develop applications on the Algorand\n  blockchain.\n\n  If you are getting started, please see the quick start tutorial:\n  https://dev.algorand.co/getting-started/algokit-quick-start/.\n\nOptions:\n  --version             Show the version and exit.\n  -v, --verbose         Enable logging of DEBUG messages to the console.\n  --color / --no-color  Force enable or disable of console output styling.\n  --skip-version-check  Skip version checking and prompting.\n  -h, --help            Show this message and exit.\n\nCommands:\n  compile      Compile smart contracts and smart signatures written in a\n               supported high-level language to a format deployable on the\n               Algorand Virtual Machine (AVM).\n  completions  Install and Uninstall AlgoKit shell integrations.\n  config       Configure AlgoKit settings.\n  dispenser    Interact with the AlgoKit TestNet Dispenser.\n  doctor       Diagnose potential environment issues that may affect AlgoKit.\n  explore      Explore the specified network using lora.\n  generate     Generate code for an Algorand project.\n  goal         Run the Algorand goal CLI against the AlgoKit LocalNet.\n  init         Initializes a new project from a template; run from project\n               parent directory.\n  localnet     Manage the AlgoKit LocalNet.\n  project      Provides a suite of commands for managing your AlgoKit project.\n  task         Collection of useful tasks to help you develop on Algorand.\n"
  },
  {
    "path": "tests/utils/__init__.py",
    "content": ""
  },
  {
    "path": "tests/utils/app_dir_mock.py",
    "content": "import dataclasses\nfrom pathlib import Path\n\nfrom pytest_mock import MockerFixture\n\n\n@dataclasses.dataclass\nclass AppDirs:\n    app_config_dir: Path\n    app_state_dir: Path\n\n\ndef tmp_app_dir(mocker: MockerFixture, tmp_path: Path) -> AppDirs:\n    app_config_dir = tmp_path / \"config\"\n    app_config_dir.mkdir()\n    mocker.patch(\"algokit.core.sandbox.get_app_config_dir\").return_value = app_config_dir\n    mocker.patch(\"algokit.core.config_commands.js_package_manager.get_app_config_dir\").return_value = app_config_dir\n    mocker.patch(\"algokit.core.config_commands.py_package_manager.get_app_config_dir\").return_value = app_config_dir\n\n    app_state_dir = tmp_path / \"state\"\n    app_state_dir.mkdir()\n    mocker.patch(\"algokit.core.sandbox.get_app_state_dir\").return_value = app_state_dir\n\n    return AppDirs(app_config_dir=app_config_dir, app_state_dir=app_state_dir)\n"
  },
  {
    "path": "tests/utils/approvals.py",
    "content": "import re\nfrom typing import Any\n\nimport approvaltests\nfrom approvaltests.scrubbers.scrubbers import Scrubber, combine_scrubbers\n\nfrom algokit.core.utils import CLEAR_LINE, SPINNER_FRAMES\n\n__all__ = [\n    \"Scrubber\",\n    \"TokenScrubber\",\n    \"combine_scrubbers\",\n    \"normalize_path\",\n    \"verify\",\n]\n\n\ndef normalize_path(content: str, path: str, token: str) -> str:\n    return re.sub(\n        rf\"{token}\\S+\",\n        lambda m: m[0].replace(\"\\\\\", \"/\"),\n        content.replace(path, token).replace(path.replace(\"\\\\\", \"/\"), token),\n    )\n\n\ndef _normalize_platform_differences(data: str, poetry_version: str = \"99.99.99\") -> str:\n    \"\"\"Normalize platform-specific and version-specific differences.\"\"\"\n    result = data\n\n    # Normalize Git error messages across platforms (Arch Linux vs others)\n    result = re.sub(\n        r\"DEBUG: git: fatal: not a git repository \\(or any parent up to mount point /\\)\",\n        \"DEBUG: git: fatal: not a git repository (or any of the parent directories): .git\",\n        result,\n    )\n\n    # Remove Arch Linux specific Git filesystem boundary message\n    result = re.sub(\n        r\"DEBUG: git: Stopping at filesystem boundary \\(GIT_DISCOVERY_ACROSS_FILESYSTEM not set\\)\\.\\n\", \"\", result\n    )\n\n    # Normalize Poetry version output to avoid test failures on version updates\n    result = re.sub(\n        r\"DEBUG: poetry: Poetry \\(version \\d+\\.\\d+\\.\\d+\\)\", f\"DEBUG: poetry: Poetry (version {poetry_version})\", result\n    )\n\n    # Normalize msgpack/Python TypeError messages for 'in' operator\n    # C-extension msgpack (Python 3.10-3.13) says \"is not a container or iterable\"\n    # Pure-Python msgpack (Python 3.14+, no wheel available) uses native \"is not iterable\"\n    return re.sub(\n        r\"argument of type 'int' is not a container or iterable\",\n        \"argument of type 'int' is not iterable\",\n        result,\n    
)\n\n\nclass TokenScrubber(Scrubber):  # type: ignore[misc]\n    def __init__(self, tokens: dict[str, str]):\n        self._tokens = tokens\n\n    def __call__(self, data: str) -> str:\n        result = data\n        for token, search in self._tokens.items():\n            result = result.replace(search, \"{\" + token + \"}\")\n\n        # Normalize platform and version differences for consistent tests\n        result = _normalize_platform_differences(result)\n\n        # Normalize SPINNER_FRAMES\n        for frame in SPINNER_FRAMES:\n            result = result.replace(frame, \"\")\n        # Normalize CLEAR_LINE\n        return result.replace(CLEAR_LINE, \" \")\n\n\ndef verify(\n    data: Any,  # noqa: ANN401\n    *,\n    options: approvaltests.Options | None = None,\n    scrubber: Scrubber | None = None,\n    poetry_version: str = \"99.99.99\",\n    **kwargs: Any,\n) -> None:\n    options = options or approvaltests.Options()\n    if scrubber is not None:\n        options = options.add_scrubber(scrubber)\n    kwargs.setdefault(\"encoding\", \"utf-8\")\n    normalised_data = str(data).replace(\"\\r\\n\", \"\\n\")\n\n    # Apply global platform/version normalization with configurable poetry version\n    normalised_data = _normalize_platform_differences(normalised_data, poetry_version)\n\n    approvaltests.verify(\n        data=normalised_data,\n        options=options,\n        # Don't normalise newlines\n        newline=\"\",\n        **kwargs,\n    )\n"
  },
  {
    "path": "tests/utils/click_invoker.py",
    "content": "import dataclasses\nimport logging\nimport os\nfrom collections.abc import Mapping\nfrom pathlib import Path\n\nimport click\nimport click.testing\nfrom click.testing import CliRunner\n\nfrom tests.utils.approvals import normalize_path\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclasses.dataclass\nclass ClickInvokeResult:\n    exit_code: int\n    output: str\n    exception: BaseException | None\n\n\ndef invoke(\n    args: str | list[str],\n    *,\n    cwd: Path | None = None,\n    skip_version_check: bool = True,\n    env: Mapping[str, str | None] | None = None,\n    input: str | None = None,  # noqa: A002\n) -> ClickInvokeResult:\n    from algokit.cli import algokit\n\n    runner = CliRunner()\n    prior_cwd = Path.cwd()\n    assert isinstance(algokit, click.BaseCommand)\n    if cwd is not None:\n        os.chdir(cwd)\n    try:\n        test_args = \"-v --no-color\"\n        if skip_version_check:\n            test_args = f\"{test_args} --skip-version-check\"\n        if isinstance(args, str):\n            result = runner.invoke(algokit, f\"{test_args} {args}\", env=env, input=input)\n        else:\n            result = runner.invoke(algokit, args=[*test_args.split(), *args], env=env, input=input)\n        if result.exc_info and not isinstance(result.exc_info[1], SystemExit):\n            logger.error(\"Click invocation error\", exc_info=result.exc_info)\n        output = normalize_path(result.stdout, str(cwd or prior_cwd), \"{current_working_directory}\")\n        return ClickInvokeResult(exit_code=result.exit_code, output=output, exception=result.exception)\n    finally:\n        if cwd is not None:\n            os.chdir(prior_cwd)\n"
  },
  {
    "path": "tests/utils/proc_mock.py",
    "content": "import dataclasses\nfrom collections.abc import Callable, Sequence\nfrom io import StringIO\nfrom typing import IO, Any, TypeVar\n\n\nclass PopenMock:\n    def __init__(self, stdout: str, returncode: int = 0, min_poll_calls: int = 1):\n        self._returncode = returncode\n        self._stdout = StringIO(stdout)\n        self._remaining_poll_calls = min_poll_calls\n\n    def __enter__(self) -> \"PopenMock\":\n        return self\n\n    def __exit__(self, *args: object) -> None:\n        # TODO: we should change the structure of this mocking a bit,\n        #       and check that I/O cleanup was called\n        pass\n\n    @property\n    def returncode(self) -> int:\n        return self._returncode or 0\n\n    @property\n    def stdout(self) -> IO[str] | None:\n        return self._stdout\n\n    def wait(self) -> int:\n        return self._returncode\n\n    def poll(self) -> int | None:\n        if self._remaining_poll_calls > 0:\n            self._remaining_poll_calls -= 1\n            return None\n        return self._returncode\n\n\n@dataclasses.dataclass\nclass CommandMockData:\n    raise_not_found: bool = False\n    raise_permission_denied: bool = False\n    exit_code: int = 0\n    output_lines: list[str] = dataclasses.field(default_factory=lambda: [\"STDOUT\", \"STDERR\"])\n    side_effect: Any | None = None\n    side_effect_args: dict[str, Any] | None = None\n\n\n@dataclasses.dataclass(kw_only=True)\nclass PopenArgs:\n    command: list[str]\n    env: dict[str, str] | None\n\n\nclass ProcMock:\n    def __init__(self) -> None:\n        self._mock_data: dict[tuple[str, ...], CommandMockData] = {}\n        self.called: list[PopenArgs] = []\n\n    def _add_mock_data(self, cmd: list[str] | str, data: CommandMockData) -> None:\n        cmd_list = tuple(cmd.split() if isinstance(cmd, str) else cmd)\n        if cmd_list in self._mock_data:\n            # update if exact match already exists\n            self._mock_data[cmd_list] = data\n            
return\n        # otherwise we quickly check to make sure we won't get surprising results due to ordering,\n        # since if another command is a prefix of the one attempted to be added, and it comes before, this won't work\n        without_overlapping_prefixes = {\n            existing_cmd_prefix: data\n            for existing_cmd_prefix, data in self._mock_data.items()\n            if not sequence_starts_with(existing_cmd_prefix, cmd_list)\n        }\n        without_overlapping_prefixes[cmd_list] = data\n        self._mock_data = without_overlapping_prefixes\n\n    def should_fail_on(self, cmd: list[str] | str) -> None:\n        self._add_mock_data(cmd, CommandMockData(raise_not_found=True))\n\n    def should_deny_on(self, cmd: list[str] | str) -> None:\n        self._add_mock_data(cmd, CommandMockData(raise_permission_denied=True))\n\n    def should_bad_exit_on(self, cmd: list[str] | str, exit_code: int = -1, output: list[str] | None = None) -> None:\n        if exit_code == 0:\n            raise ValueError(\"zero is considered a good exit code\")\n\n        mock_data = CommandMockData(\n            exit_code=exit_code,\n        )\n        if output is not None:\n            mock_data.output_lines = output\n        self._add_mock_data(cmd, mock_data)\n\n    def set_output(\n        self,\n        cmd: list[str] | str,\n        output: list[str],\n        side_effect: Callable[[Any], None] | None = None,\n        side_effect_args: dict[str, Any] | None = None,\n    ) -> None:\n        \"\"\"\n        Set the output of a command, and optionally include a side effect to be executed when the command is run. The\n        side_effect allows you to run additional logic on top of the execution of the proc_mock object. You can pass\n        arguments using side_effect_args. 
A common scenario includes generating dummy files as a side effect of\n        invoking the command.\n\n        Args:\n            cmd: The command to set the output for\n            output: The output to return when the command is run\n            side_effect: A callable to be called when the command is run\n            side_effect_args: Key value paired arguments to pass to the side_effect function (optional)\n        \"\"\"\n\n        self._add_mock_data(\n            cmd, CommandMockData(output_lines=output, side_effect=side_effect, side_effect_args=side_effect_args)\n        )\n\n    def popen(self, cmd: list[str], env: dict[str, str] | None = None, *_args: Any, **_kwargs: Any) -> PopenMock:\n        self.called.append(PopenArgs(command=cmd, env=env))\n        for i in reversed(range(len(cmd))):\n            prefix = cmd[: i + 1]\n            try:\n                mock_data = self._mock_data[tuple(prefix)]\n            except KeyError:\n                pass\n            else:\n                break\n        else:\n            mock_data = CommandMockData()\n\n        if mock_data.raise_not_found:\n            raise FileNotFoundError(f\"No such file or directory: {cmd[0]}\")\n        if mock_data.raise_permission_denied:\n            raise PermissionError(f\"I'm sorry Dave I can't do {cmd[0]}\")\n        exit_code = mock_data.exit_code\n        output = \"\\n\".join(mock_data.output_lines)\n\n        if mock_data.side_effect:\n            side_effect_args = mock_data.side_effect_args or {}\n            mock_data.side_effect(**side_effect_args)\n\n        return PopenMock(output, exit_code)\n\n\nT = TypeVar(\"T\")\n\n\ndef sequence_starts_with(seq: Sequence[T], test: Sequence[T]) -> bool:\n    \"\"\"Like startswith, but for a generic sequence\"\"\"\n    test_len = len(test)\n    if len(seq) < test_len:\n        return False\n    return seq[:test_len] == test\n"
  },
  {
    "path": "tests/utils/which_mock.py",
    "content": "class WhichMock:\n    def __init__(self) -> None:\n        self.paths: dict[str, str] = {}\n\n    def add(self, cmd: str, path: str | None = None) -> str:\n        path = path or f\"/bin/{cmd}\"\n        self.paths[cmd] = path\n        return path\n\n    def remove(self, cmd: str) -> None:\n        self.paths.pop(cmd, None)\n\n    def which(self, cmd: str) -> str | None:\n        return self.paths.get(cmd)\n"
  },
  {
    "path": "tests/version_check/__init__.py",
    "content": ""
  },
  {
    "path": "tests/version_check/test_version_check.py",
    "content": "import os\nfrom importlib import metadata\nfrom time import time\n\nimport pytest\nfrom approvaltests.scrubbers.scrubbers import Scrubber, combine_scrubbers\nfrom pytest_httpx import HTTPXMock\nfrom pytest_mock import MockerFixture\n\nfrom algokit.core.conf import PACKAGE_NAME\nfrom algokit.core.config_commands.version_prompt import LATEST_URL, VERSION_CHECK_INTERVAL\nfrom tests.utils.app_dir_mock import AppDirs\nfrom tests.utils.approvals import normalize_path, verify\nfrom tests.utils.click_invoker import invoke\n\nCURRENT_VERSION = metadata.version(PACKAGE_NAME)\nNEW_VERSION = \"999.99.99\"\n\n\ndef make_scrubber(app_dir_mock: AppDirs) -> Scrubber:\n    return combine_scrubbers(\n        lambda x: normalize_path(x, str(app_dir_mock.app_config_dir), \"{app_config}\"),\n        lambda x: normalize_path(x, str(app_dir_mock.app_state_dir), \"{app_state}\"),\n        lambda x: x.replace(CURRENT_VERSION, \"{current_version}\"),\n        lambda x: x.replace(NEW_VERSION, \"{new_version}\"),\n    )\n\n\n@pytest.fixture(autouse=True)\ndef _setup(mocker: MockerFixture, app_dir_mock: AppDirs) -> None:\n    mocker.patch(\n        \"algokit.core.config_commands.version_prompt.get_app_config_dir\"\n    ).return_value = app_dir_mock.app_config_dir\n    mocker.patch(\n        \"algokit.core.config_commands.version_prompt.get_app_state_dir\"\n    ).return_value = app_dir_mock.app_state_dir\n    # make bootstrap env a no-op\n    mocker.patch(\"algokit.cli.project.bootstrap.bootstrap_env\")\n\n\ndef test_version_check_queries_github_when_no_cache(app_dir_mock: AppDirs, httpx_mock: HTTPXMock) -> None:\n    httpx_mock.add_response(url=LATEST_URL, json={\"tag_name\": f\"v{NEW_VERSION}\"})\n\n    # bootstrap env is a nice simple command we can use to test the version check side effects\n    result = invoke(\"project bootstrap env\", skip_version_check=False)\n\n    assert result.exit_code == 0\n    verify(result.output, 
scrubber=make_scrubber(app_dir_mock))\n\n\n@pytest.mark.parametrize(\n    (\"current_version\", \"latest_version\", \"warning_expected\"),\n    [\n        (\"0.2.0\", \"0.3.0\", True),\n        (\"0.25.0\", \"0.30.0\", True),\n        (\"0.3.0\", \"0.29.0\", True),\n        (\"999.99.99\", \"1000.00.00\", True),\n        (\"999.99.99-beta\", \"1000.00.00\", True),\n        (\"999.99.99-alpha\", \"999.99.99-beta\", True),\n        (\"0.25.0\", \"1.0.0\", True),\n        (\"0.29.0\", \"1.0.0\", True),\n        (\"0.3.0\", \"1.0.0\", True),\n        (\"0.3.0\", \"0.2.0\", False),\n        (\"0.3.0\", \"0.3.0\", False),\n        (\"0.30.0\", \"0.25.0\", False),\n        (\"0.29.0\", \"0.3.0\", False),\n        (\"0.30.0\", \"0.30.0\", False),\n        (\"1.0.0\", \"0.25.0\", False),\n        (\"1.0.0\", \"0.29.0\", False),\n        (\"1.0.0\", \"0.3.0\", False),\n        (\"1.0.0\", \"1.0.0\", False),\n        (\"999.99.99\", \"998.0.0\", False),\n        (\"999.99.99\", \"999.99.0\", False),\n        (\"999.99.99\", \"999.99.99\", False),\n        (\"999.99.99-beta\", \"998.99.99\", False),\n        (\"999.99.99-beta\", \"999.99.99-alpha\", False),\n    ],\n)\ndef test_version_check_only_warns_if_newer_version_is_found(\n    app_dir_mock: AppDirs, mocker: MockerFixture, current_version: str, latest_version: str, *, warning_expected: bool\n) -> None:\n    mocker.patch(\n        \"algokit.core.config_commands.version_prompt.get_current_package_version\"\n    ).return_value = current_version\n    version_cache = app_dir_mock.app_state_dir / \"last-version-check\"\n    version_cache.write_text(latest_version, encoding=\"utf-8\")\n    result = invoke(\"project bootstrap env\", skip_version_check=False)\n\n    if warning_expected:\n        assert f\"version {latest_version} is available\" in result.output\n    else:\n        assert f\"version {latest_version} is available\" not in result.output\n\n\ndef test_version_check_uses_cache(app_dir_mock: AppDirs) -> None:\n    
version_cache = app_dir_mock.app_state_dir / \"last-version-check\"\n    version_cache.write_text(\"1234.56.78\", encoding=\"utf-8\")\n    result = invoke(\"project bootstrap env\", skip_version_check=False)\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_scrubber(app_dir_mock))\n\n\ndef test_version_check_queries_github_when_cache_out_of_date(app_dir_mock: AppDirs, httpx_mock: HTTPXMock) -> None:\n    httpx_mock.add_response(url=LATEST_URL, json={\"tag_name\": f\"v{NEW_VERSION}\"})\n    version_cache = app_dir_mock.app_state_dir / \"last-version-check\"\n    version_cache.write_text(\"1234.56.78\", encoding=\"utf-8\")\n    modified_time = time() - VERSION_CHECK_INTERVAL - 1\n    os.utime(version_cache, (modified_time, modified_time))\n\n    result = invoke(\"project bootstrap env\", skip_version_check=False)\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_scrubber(app_dir_mock))\n\n\ndef test_version_check_respects_disable_config(app_dir_mock: AppDirs) -> None:\n    (app_dir_mock.app_config_dir / \"disable-version-prompt\").touch()\n    result = invoke(\"project bootstrap env\", skip_version_check=False)\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_scrubber(app_dir_mock))\n\n\ndef test_version_check_respects_skip_option(app_dir_mock: AppDirs) -> None:\n    result = invoke(\"--skip-version-check project bootstrap env\", skip_version_check=False)\n\n    assert result.exit_code == 0\n    verify(result.output, scrubber=make_scrubber(app_dir_mock))\n\n\ndef test_version_check_disable_version_check(app_dir_mock: AppDirs) -> None:\n    disable_version_check_path = app_dir_mock.app_config_dir / \"disable-version-prompt\"\n    result = invoke(\"config version-prompt disable\")\n\n    assert result.exit_code == 0\n    assert disable_version_check_path.exists()\n    verify(result.output, scrubber=make_scrubber(app_dir_mock))\n\n\ndef test_version_check_enable_version_check(app_dir_mock: 
AppDirs) -> None:\n    disable_version_check_path = app_dir_mock.app_config_dir / \"disable-version-prompt\"\n    disable_version_check_path.touch()\n    result = invoke(\"config version-prompt enable\")\n\n    assert result.exit_code == 0\n    assert not disable_version_check_path.exists()\n    verify(result.output, scrubber=make_scrubber(app_dir_mock))\n\n\n@pytest.mark.parametrize(\n    (\"method\", \"message\"),\n    [\n        (\"snap\", \"snap refresh algokit\"),\n        (\"brew\", \"brew upgrade algokit\"),\n        (\"winget\", \"winget upgrade algokit\"),\n        (None, \"the tool used to install AlgoKit\"),\n    ],\n)\ndef test_version_prompt_according_to_distribution_method(\n    mocker: MockerFixture, app_dir_mock: AppDirs, method: str, message: str\n) -> None:\n    mocker.patch(\"algokit.core.config_commands.version_prompt._get_distribution_method\").return_value = method\n    mocker.patch(\"algokit.core.config_commands.version_prompt.is_binary_mode\").return_value = True\n    mocker.patch(\"algokit.core.config_commands.version_prompt.get_current_package_version\").return_value = \"1.0.0\"\n    version_cache = app_dir_mock.app_state_dir / \"last-version-check\"\n    version_cache.write_text(\"2.0.0\", encoding=\"utf-8\")\n\n    result = invoke(\"project bootstrap env\", skip_version_check=False)\n    assert result.exit_code == 0\n    assert message in result.output\n"
  },
  {
    "path": "tests/version_check/test_version_check.test_version_check_disable_version_check.approved.txt",
    "content": "🚫 Will stop checking for new versions\n"
  },
  {
    "path": "tests/version_check/test_version_check.test_version_check_enable_version_check.approved.txt",
    "content": "📡 Resuming check for new versions\n"
  },
  {
    "path": "tests/version_check/test_version_check.test_version_check_queries_github_when_cache_out_of_date.approved.txt",
    "content": "DEBUG: 1234.56.78 found in cache {app_state}/last-version-check\nHTTP Request: GET https://api.github.com/repos/algorandfoundation/algokit-cli/releases/latest \"HTTP/1.1 200 OK\"\nDEBUG: Latest version tag: v{new_version}\nYou are using AlgoKit version {current_version}, however version {new_version} is available. Please update using the tool used to install AlgoKit.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n"
  },
  {
    "path": "tests/version_check/test_version_check.test_version_check_queries_github_when_no_cache.approved.txt",
    "content": "DEBUG: {app_state}/last-version-check inaccessible\nHTTP Request: GET https://api.github.com/repos/algorandfoundation/algokit-cli/releases/latest \"HTTP/1.1 200 OK\"\nDEBUG: Latest version tag: v{new_version}\nYou are using AlgoKit version {current_version}, however version {new_version} is available. Please update using the tool used to install AlgoKit.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n"
  },
  {
    "path": "tests/version_check/test_version_check.test_version_check_respects_disable_config.approved.txt",
    "content": "DEBUG: Version prompt disabled\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n"
  },
  {
    "path": "tests/version_check/test_version_check.test_version_check_respects_skip_option.approved.txt",
    "content": "DEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n"
  },
  {
    "path": "tests/version_check/test_version_check.test_version_check_uses_cache.approved.txt",
    "content": "DEBUG: 1234.56.78 found in cache {app_state}/last-version-check\nYou are using AlgoKit version {current_version}, however version 1234.56.78 is available. Please update using the tool used to install AlgoKit.\nDEBUG: Running 'poetry --version' in '{current_working_directory}'\nDEBUG: poetry: Poetry (version 99.99.99)\nDEBUG: Attempting to load project config from {current_working_directory}/.algokit.toml\nDEBUG: No .algokit.toml file found in the project directory.\n"
  }
]