[
  {
    "path": ".gitattributes",
    "content": "pnpm-lock.yaml linguist-generated\n"
  },
  {
    "path": ".github/workflows/frontend.yml",
    "content": "name: Frontend code quality check\n\non:\n  push:\n    branches: [main, \"*dev\"]\n  pull_request:\n    branches: [main, \"*dev\"]\n    types: [opened, synchronize]\n  workflow_dispatch:\n\njobs:\n  checks:\n    runs-on: ubuntu-slim\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v5\n\n      - name: Install pnpm\n        uses: pnpm/action-setup@v4\n        with:\n          version: 10\n\n      - name: Setup Node\n        uses: actions/setup-node@v6\n        with:\n          node-version: latest\n          cache: pnpm\n\n      - name: Setup uv\n        uses: astral-sh/setup-uv@v7\n        with:\n          enable-cache: true\n\n      - name: Cache postinstall artifacts\n        uses: actions/cache@v5\n        with:\n          path: frontend/ui/.cache\n          key: ${{ runner.os }}-frontend-postinstall-cache\n\n      - name: Prepare train folder\n        run: |\n          mkdir -p train\n          echo '[]' > train/infer.json\n          echo '{}' > train/contrib.json\n          echo '??' > train/model.onnx\n        working-directory: frontend/service\n\n      - name: Install dependencies\n        run: pnpm install\n\n      - name: Prettier check\n        run: pnpm exec prettier --check .\n\n      - name: Type check\n        run: pnpm run check\n        working-directory: frontend/ui\n"
  },
  {
    "path": ".github/workflows/prek.yml",
    "content": "name: Prek check\n\non:\n  push:\n    branches: [main, \"*dev\"]\n  pull_request:\n    branches: [main, \"*dev\"]\n    types: [opened, synchronize]\n  workflow_dispatch:\n\njobs:\n  prek:\n    runs-on: ubuntu-slim\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v5\n\n      - name: Prek check\n        uses: j178/prek-action@v1\n"
  },
  {
    "path": ".github/workflows/py-code-check.yml",
    "content": "name: Python code quality check\n\non:\n  push:\n    branches: [main, \"*dev\"]\n  pull_request:\n    branches: [main, \"*dev\"]\n    types: [opened, synchronize]\n  workflow_dispatch:\n\njobs:\n  ruff:\n    runs-on: ubuntu-slim\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v5\n\n      - name: Ruff check\n        uses: astral-sh/ruff-action@v3\n        with:\n          args: \"format --check --diff ./python\"\n          version: \"latest\"\n"
  },
  {
    "path": ".github/workflows/py-weekly-update.yml",
    "content": "name: Weekly updates for symbols\n\non:\n  workflow_dispatch:\n  schedule:\n    # weekly, Monday\n    - cron: \"0 0 * * 1\"\n  push:\n    paths:\n      # on new manual mapping added\n      - \"python/tex_to_typ.yaml\"\njobs:\n  check-typst-symbol-page:\n    if: ${{ github.event_name != 'push' }}\n    runs-on: ubuntu-slim\n    outputs:\n      should_run: ${{ steps.check.outputs.changed }}\n    permissions:\n      contents: write\n    env:\n      GH_TOKEN: ${{ secrets.PAT_TOKEN }} # Required for 'gh variable set'\n      TARGET_URL: \"https://typst.app/docs/reference/symbols/sym/\"\n      GH_REPO: ${{ github.repository }}\n      VAR_NAME: \"TYPST_ETAG\"\n    steps:\n      - name: Check ETag and Update\n        id: check\n        shell: bash\n        run: |\n          CURRENT_ETAG=$(curl -I -s \"$TARGET_URL\" | grep -i \"^etag:\" | sed 's/etag: //I' | tr -d '\\r')\n\n          if [ -z \"$CURRENT_ETAG\" ]; then\n            echo \"::error::Could not fetch ETag. Exiting.\"\n            exit 1\n          fi\n\n          STORED_ETAG=$(gh variable get \"$VAR_NAME\" --json value -q .value 2>/dev/null || echo \"\")\n\n          echo \"Stored:  '$STORED_ETAG'\"\n          echo \"Current: '$CURRENT_ETAG'\"\n\n          if [ \"$CURRENT_ETAG\" != \"$STORED_ETAG\" ]; then\n            echo \"changed=true\" >> \"$GITHUB_OUTPUT\"\n            echo \"⚡ ETag changed from '$STORED_ETAG' to '$CURRENT_ETAG'\"\n            gh variable set \"$VAR_NAME\" --body \"$CURRENT_ETAG\"\n          else\n            echo \"changed=false\" >> \"$GITHUB_OUTPUT\"\n            echo \"😴 No change detected.\"\n          fi\n\n  gen-info:\n    needs: check-typst-symbol-page\n    if: ${{ !failure() && (needs.check-typst-symbol-page.outputs.should_run == 'true' || github.event_name == 'push') }}\n    name: dataset-update\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v5\n\n      - name: Setup uv\n        uses: astral-sh/setup-uv@v7\n        with:\n          
enable-cache: true\n\n      - name: Run Generation\n        env:\n          HF_TOKEN: ${{ secrets.HF_TOKEN }}\n        run: |\n          echo \"ETag changed. Generating data.\"\n          uv run --no-project --with 'typer' --with 'msgspec' --with 'datasets' --with 'polars' --with 'beautifulsoup4' --with 'lxml' --python 3.13 python/proc_data.py --convert-data\n\n      - name: Cache Hugging Face datasets\n        uses: actions/cache@v4\n        with:\n          path: ~/.cache/huggingface\n          key: huggingface-datasets-${{ hashFiles('python/tex_to_typ.yaml') }}\n          restore-keys: |\n            huggingface-datasets-\n\n      - name: Upload Data Artifacts\n        uses: actions/upload-artifact@v4\n        with:\n          name: generated-data\n          path: |\n            build/data/infer.json\n            build/data/contrib.json\n\n  trigger-training:\n    needs: gen-info\n    runs-on: ubuntu-latest\n    env:\n      TITLE: \"Detypify Auto Training\"\n      KAGGLE_API_TOKEN: ${{ secrets.KAGGLE_KEY }}\n    steps:\n      - uses: actions/checkout@v5\n\n      - name: Download Data Artifacts\n        uses: actions/download-artifact@v4\n        with:\n          name: generated-data\n          path: build/data\n\n      - name: Set up Notebook Kernel\n        shell: bash\n        env:\n          REPO_URL: \"https://github.com/${{ github.repository }}\"\n          BRANCH_NAME: ${{ github.ref_name }}\n        run: |\n          python -m pip install --upgrade kaggle --user\n          OUTPUT_NOTEBOOK=\"script.ipynb\"\n\n          # Notice the escaped variables (\\$) in the third block so GitHub Actions doesn't pre-evaluate them\n          cat <<EOM > \"$OUTPUT_NOTEBOOK\"\n          {\n            \"cells\": [\n              {\n                \"cell_type\": \"code\",\n                \"execution_count\": null,\n                \"metadata\": {},\n                \"outputs\": [],\n                \"source\": [\n                  \"!git clone --branch $BRANCH_NAME 
$REPO_URL.git detypify\"\n                ]\n              },\n              {\n                \"cell_type\": \"code\",\n                \"execution_count\": null,\n                \"metadata\": {},\n                \"outputs\": [],\n                \"source\": [\n                  \"!cd detypify && pip install uv && uv --quiet sync && uv run python/train.py\"\n                ]\n              },\n              {\n                \"cell_type\": \"code\",\n                \"execution_count\": null,\n                \"metadata\": {},\n                \"outputs\": [],\n                \"source\": [\n                  \"!for f in detypify/build/train/*/version_*/ckpts/*.onnx; do model=\\$(echo \\$f | cut -d/ -f4); cp \\$f /kaggle/working/\\$model.onnx && echo \\\"Copied \\$f -> /kaggle/working/\\$model.onnx\\\"; done\"\n                ]\n              }\n            ],\n            \"metadata\": {\n              \"kernelspec\": {\n                \"display_name\": \"Python 3\",\n                \"language\": \"python\",\n                \"name\": \"python3\"\n              },\n              \"language_info\": {\n                \"name\": \"python\",\n                \"version\": \"3.13\"\n              }\n            },\n            \"nbformat\": 4,\n            \"nbformat_minor\": 2\n          }\n          EOM\n\n      - name: Push Kernel\n        shell: bash\n        run: |\n          kaggle kernels init -p .\n\n          KAGGLE_USERNAME=$(jq -r '.id' kernel-metadata.json | cut -d'/' -f1)\n\n          KERNEL_SLUG=$(echo \"$TITLE\" | tr '[:upper:]' '[:lower:]' | sed 's/ /-/g')\n          KERNEL_ID=\"$KAGGLE_USERNAME/$KERNEL_SLUG\"\n\n          echo \"KERNEL_ID=$KERNEL_ID\" >> $GITHUB_ENV\n\n          jq --arg id \"$KERNEL_ID\" \\\n             --arg title \"$TITLE\" \\\n             --arg code_file \"script.ipynb\" \\\n             '.id = $id | .title = $title | .code_file = $code_file | .language = \"python\" | .kernel_type = \"notebook\" | .enable_gpu = true | 
.enable_internet = true' \\\n             kernel-metadata.json > kernel-metadata.json.tmp && mv kernel-metadata.json.tmp kernel-metadata.json\n\n          cat kernel-metadata.json\n\n          # 6. Push the kernel\n          kaggle kernels push -p . --accelerator 'NvidiaTeslaT4'\n\n      - name: Check status\n        shell: bash\n        run: |\n          echo \"Checking status for $KERNEL_ID...\"\n\n          MAX_RETRIES=240\n          COUNT=0\n\n          while [ $COUNT -lt $MAX_RETRIES ]; do\n              status=$(kaggle kernels status \"$KERNEL_ID\" 2>&1)\n              echo \"Status: $status\"\n\n              if [[ \"$status\" == *\"ERROR\"* || \"$status\" == *\"CANCEL\"* ]]; then\n                  echo \"::error::Kernel execution failed.\"\n                  exit 1\n              elif [[ \"$status\" == *\"COMPLETE\"* ]]; then\n                  echo \"Kernel execution completed successfully!\"\n                  exit 0\n              fi\n\n              sleep 60\n              COUNT=$((COUNT+1))\n          done\n\n          echo \"::error::Kernel execution timed out after 4 hours.\"\n          exit 1\n\n      - name: Download artifacts\n        shell: bash\n        run: |\n          mkdir -p artifacts\n          # Use the exact KERNEL_ID (owner/slug) that Kaggle CLI expects\n          kaggle kernels output \"$KERNEL_ID\" -p artifacts\n      - name: Upload Trained Models to GitHub\n        uses: actions/upload-artifact@v4\n        with:\n          name: trained-onnx-models\n          path: artifacts/build/train\n"
  },
  {
    "path": ".gitignore",
    "content": ".venv/\n__pycache__/\n\nnode_modules/\n.wrangler/\n.cache/\n\nbuild/\ntrain/\ndist/\n*.woff2\n/*.zsh\n/*.zip\n\n# Ref: https://github.com/astral-sh/uv/issues/6349\nuv.lock\n"
  },
  {
    "path": ".prettierignore",
    "content": "/data\n/external\n\n*.md\n*.yml\n*.yaml\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 QuarticCat\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n    <img src=\"./assets/logo.svg\" alt=\"logo\" width=\"150\"/>\n    <h1>Detypify</h1>\n    <p>\n        Can't find some Typst symbol?\n        <a href=\"https://detypify.quarticcat.com/\">Draw it!</a>\n    </p>\n</div>\n\n## Features\n\n- **PWA**: installable and works offline\n- **Tiny model**: fast to load and run\n- **Decent symbol set**: support 400+ symbols\n\nYou can also use it in [Tinymist](https://github.com/Myriad-Dreamin/tinymist).\n\n## Development\n\n### File Structure\n\n```text\n- python     # training scripts\n- frontend\n  - service  # inference lib\n  - ui       # web UI\n  - worker   # Cloudflare worker\n```\n\nCheck corresponding folders for more information.\n\nBefore you build frontend projects, make sure you have the `train` folder in [frontend/service](./frontend/service) by either:\n\n- Train your own one, or\n- Download from [NPM](https://www.npmjs.com/package/detypify-service?activeTab=code).\n\n### Logo\n\nSource: [assets/manuscript.svg](./assets/manuscript.svg) (requires [*NewComputerModernMath*](https://ctan.org/pkg/newcomputermodern) font)\n\nTo compile it for production:\n\n```console\n$ cd assets\n$ inkscape manuscript.svg --export-text-to-path --export-filename=logo.svg  # convert text to path\n$ bunx svgo --multipass logo.svg  # optimize SVG\n```\n\n## License\n\nMIT\n"
  },
  {
    "path": "frontend/service/README.md",
    "content": "# Detypify Service\n\nIntegrate Detypify into your own projects.\n\n## Example\n\n```typescript\nimport { Detypify, inferSyms } from \"detypify-service\";\n\nconst session = await Detypify.create();\nconst storkes = [[[0, 0], [1, 1]]];\nconst scores = await session.infer(strokes);\nconst candidates = Array.from(scores.keys());\ncandidates.sort((a, b) => scores[b] - scores[a]);\nconsole.log(inferSyms[candidates[0]]);\n```\n\n## API Reference\n\n- `ortEnv`: Re-export of [`onnxruntime-web.env`](https://onnxruntime.ai/docs/tutorials/web/env-flags-and-session-options.html). Used to configure onnxruntime.\n\n  Note: Recent `onnxruntime-web` builds ship bundled wasm by default; use the `onnxruntime-web-use-extern-wasm` export condition to opt into external wasm loading (see the official `exports` in https://github.com/microsoft/onnxruntime/blob/main/js/web/package.json).\n\n- `inferSyms`: Model's output symbol data.\n\n- `contribSyms`: Mapping from Typst symbol names to characters.\n\n- `Detypify`: The main type.\n\n  - Use `Detypify.create()` to create an instance.\n\n  - Use `instance.infer(strokes)` to inference scores of each symbol.\n\n    The higher `scores[i]` is, the more likely your strokes is `inferSyms[i]`.\n"
  },
  {
    "path": "frontend/service/package.json",
    "content": "{\n    \"name\": \"detypify-service\",\n    \"type\": \"module\",\n    \"version\": \"0.3.0\",\n    \"description\": \"Typst symbol classifier\",\n    \"license\": \"MIT\",\n    \"repository\": \"github:QuarticCat/detypify\",\n    \"homepage\": \"https://detypify.quarticcat.com/\",\n    \"author\": \"QuarticCat <QuarticCat@pm.me> (https://github.com/QuarticCat)\",\n    \"keywords\": [\n        \"js\",\n        \"typst\",\n        \"detypify\",\n        \"onnxruntime\"\n    ],\n    \"exports\": {\n        \".\": \"./dist/index.mjs\",\n        \"./package.json\": \"./package.json\"\n    },\n    \"main\": \"./dist/index.mjs\",\n    \"module\": \"./dist/index.mjs\",\n    \"types\": \"./dist/index.d.mts\",\n    \"files\": [\n        \"dist\",\n        \"train/*.json\",\n        \"train/*.onnx\"\n    ],\n    \"scripts\": {\n        \"dev\": \"tsdown --watch\",\n        \"build\": \"tsdown\",\n        \"prepare\": \"tsdown\"\n    },\n    \"dependencies\": {\n        \"onnxruntime-web\": \"=1.23.2\"\n    },\n    \"devDependencies\": {\n        \"tsdown\": \"^0.21.4\",\n        \"typescript\": \"^5.9.3\"\n    }\n}\n"
  },
  {
    "path": "frontend/service/src/index.ts",
    "content": "import contribSymsRaw from \"../train/contrib.json\";\nimport inferSymsRaw from \"../train/infer.json\";\nimport { InferenceSession, Tensor } from \"onnxruntime-web/wasm\";\n\nexport { env as ortEnv } from \"onnxruntime-web/wasm\";\n\nconst modelUrl = new URL(\"../train/model.onnx\", import.meta.url).href;\n\nexport interface SymbolInfo {\n    char: string;\n    names: string[];\n    shorthand?: string;\n    mathShorthand?: string;\n    markupShorthand?: string;\n}\n\nexport type Point = [number, number];\nexport type Stroke = Point[];\nexport type Strokes = Stroke[];\n\n/**\n * Model's output symbol data.\n */\nexport const inferSyms = inferSymsRaw as SymbolInfo[];\n\n/**\n * Mapping from Typst symbol names to characters.\n */\nexport const contribSyms = contribSymsRaw as Record<string, string>;\n\n/**\n * Normalize strokes and draw them to canvas.\n */\nexport function drawStrokes(strokes: Strokes): HTMLCanvasElement {\n    const canvas = document.createElement(\"canvas\");\n    canvas.width = canvas.height = 224;\n\n    const ctx = canvas.getContext(\"2d\", { willReadFrequently: true });\n    if (!ctx) {\n        throw new Error(\"Failed to get 2D canvas context.\");\n    }\n    ctx.fillStyle = \"black\";\n    ctx.strokeStyle = \"white\";\n    ctx.lineWidth = 8;\n\n    // Find bounding rect.\n    let minX = Infinity;\n    let maxX = -Infinity;\n    let minY = Infinity;\n    let maxY = -Infinity;\n    for (const stroke of strokes) {\n        for (const [x, y] of stroke) {\n            minX = Math.min(minX, x);\n            maxX = Math.max(maxX, x);\n            minY = Math.min(minY, y);\n            maxY = Math.max(maxY, y);\n        }\n    }\n\n    // Normalize.\n    const padding = 10;\n    const targetSize = canvas.width - 2 * padding;\n    let width = Math.max(maxX - minX, maxY - minY);\n    const scale = width > 1e-6 ? 
targetSize / width : 1;\n    const centerX = (minX + maxX) / 2;\n    const centerY = (minY + maxY) / 2;\n\n    // Draw to canvas.\n    ctx.fillRect(0, 0, canvas.width, canvas.height);\n    for (const stroke of strokes) {\n        ctx.beginPath();\n        for (const [x, y] of stroke) {\n            const targetX = (x - centerX) * scale + canvas.width / 2;\n            const targetY = (y - centerY) * scale + canvas.width / 2;\n            ctx.lineTo(Math.round(targetX), Math.round(targetY));\n        }\n        ctx.stroke();\n    }\n\n    return canvas;\n}\n\n/**\n * Typst symbol classifier.\n */\nexport class Detypify {\n    private sess: InferenceSession;\n\n    constructor(sess: InferenceSession) {\n        this.sess = sess;\n    }\n\n    /**\n     * Load ONNX runtime and the model.\n     */\n    static async create(): Promise<Detypify> {\n        return new Detypify(await InferenceSession.create(modelUrl));\n    }\n\n    /**\n     * Inference scores of each symbol.\n     *\n     * The higher `scores[i]` is, the more likely your strokes is `inferSyms[i]`.\n     */\n    async infer(strokes: Strokes): Promise<Float32Array> {\n        const canvas = drawStrokes(strokes);\n        const ctx = canvas.getContext(\"2d\", { willReadFrequently: true });\n        if (!ctx) {\n            throw new Error(\"Failed to get 2D canvas context.\");\n        }\n\n        // To greyscale.\n        const rgba = ctx.getImageData(0, 0, canvas.width, canvas.height).data;\n        const grey = new Float32Array(rgba.length / 4);\n        for (let i = 0; i < grey.length; ++i) {\n            grey[i] = rgba[i * 4] / 255;\n        }\n\n        // Infer.\n        const tensor = new Tensor(\"float32\", grey, [1, 1, canvas.width, canvas.height]);\n        const outputs = await this.sess.run({ [this.sess.inputNames[0]]: tensor });\n        return outputs[this.sess.outputNames[0]].data as Float32Array;\n    }\n}\n"
  },
  {
    "path": "frontend/ui/README.md",
    "content": "# Detypify UI\n\nA frontend for the Detypify web experience.\n\n## Development\n\n```console\n$ bun run dev      # start Vite dev server\n$ bun run build    # build for production\n$ bun run preview  # preview production build locally\n$ bun run check    # run type checks\n```\n"
  },
  {
    "path": "frontend/ui/index.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n    <head>\n        <meta charset=\"UTF-8\" />\n        <title>Detypify</title>\n        <meta name=\"description\" content=\"Typst symbol classifier\" />\n        <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, maximum-scale=1\" />\n        <link rel=\"preload\" href=\"/NewCMMath-Detypify.woff2\" as=\"font\" type=\"font/woff2\" crossorigin />\n        <link rel=\"preconnect\" href=\"https://cdn.jsdelivr.net\" crossorigin />\n        <link rel=\"dns-prefetch\" href=\"https://cdn.jsdelivr.net\" />\n        <link\n            rel=\"modulepreload\"\n            href=\"https://cdn.jsdelivr.net/npm/onnxruntime-web@1.23.2/dist/ort-wasm-simd-threaded.mjs\"\n            crossorigin\n        />\n        <link\n            rel=\"preload\"\n            href=\"https://cdn.jsdelivr.net/npm/onnxruntime-web@1.23.2/dist/ort-wasm-simd-threaded.wasm\"\n            as=\"fetch\"\n            type=\"application/wasm\"\n            crossorigin\n        />\n        <!-- TODO: The ONNX file is not preloaded. -->\n    </head>\n    <body class=\"bg-white text-gray-900 dark:bg-gray-800 dark:text-gray-100\">\n        <div id=\"app\"></div>\n        <script type=\"module\" src=\"/src/main.ts\"></script>\n    </body>\n</html>\n"
  },
  {
    "path": "frontend/ui/package.json",
    "content": "{\n    \"name\": \"detypify-ui\",\n    \"private\": true,\n    \"version\": \"0.3.0\",\n    \"type\": \"module\",\n    \"scripts\": {\n        \"dev\": \"vite\",\n        \"build\": \"vite build\",\n        \"preview\": \"vite preview\",\n        \"check\": \"svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json\",\n        \"postinstall\": \"node scripts/subset-font.ts\"\n    },\n    \"dependencies\": {\n        \"detypify-service\": \"workspace:*\"\n    },\n    \"devDependencies\": {\n        \"@sveltejs/vite-plugin-svelte\": \"^7.0.0\",\n        \"@tailwindcss/vite\": \"^4.2.2\",\n        \"@tsconfig/svelte\": \"^5.0.8\",\n        \"@types/node\": \"^25.5.0\",\n        \"@vite-pwa/assets-generator\": \"^1.0.2\",\n        \"flowbite\": \"^4.0.1\",\n        \"flowbite-svelte\": \"^1.31.0\",\n        \"flowbite-svelte-icons\": \"^3.1.0\",\n        \"svelte\": \"^5.54.0\",\n        \"svelte-check\": \"^4.4.5\",\n        \"tailwindcss\": \"^4.2.2\",\n        \"typescript\": \"~5.9.3\",\n        \"vite\": \"^8.0.1\",\n        \"vite-plugin-pwa\": \"^1.2.0\"\n    }\n}\n"
  },
  {
    "path": "frontend/ui/public/CNAME",
    "content": "detypify.quarticcat.com\n"
  },
  {
    "path": "frontend/ui/public/robots.txt",
    "content": "User-agent: *\nAllow: /\n"
  },
  {
    "path": "frontend/ui/scripts/subset-font.ts",
    "content": "import { contribSyms } from \"detypify-service\";\nimport { execSync } from \"node:child_process\";\nimport fs from \"node:fs/promises\";\nimport path from \"node:path\";\nimport { fileURLToPath } from \"node:url\";\n\nconst FONT_URL = \"https://mirrors.ctan.org/fonts/newcomputermodern/otf/NewCMMath-Regular.otf\";\n\nconst root = path.resolve(path.dirname(fileURLToPath(import.meta.url)), \"..\");\nconst cacheDir = path.join(root, \".cache\");\nconst fontPath = path.join(cacheDir, \"NewCMMath-Regular.otf\");\nconst textPath = path.join(cacheDir, \"NewCMMath-Detypify.txt\");\nconst outputDir = path.join(root, \"public\");\nconst outputPath = path.join(outputDir, \"NewCMMath-Detypify.woff2\");\n\n// Download font or read from cache.\nawait fs.access(fontPath).catch(async () => {\n    await fs.mkdir(cacheDir, { recursive: true });\n    const res = await fetch(FONT_URL);\n    if (!res.ok) throw new Error(`Failed to download font: ${res.status} ${res.statusText}`);\n    await fs.writeFile(fontPath, Buffer.from(await res.arrayBuffer()));\n});\n\n// Generate font subset.\nawait fs.writeFile(textPath, Object.values(contribSyms).join(\"\"));\nexecSync(\n    `uvx --from=fonttools[woff] pyftsubset ${fontPath} --text-file=${textPath} --flavor=woff2 --output-file=${outputPath}`,\n    { stdio: \"inherit\" },\n);\n"
  },
  {
    "path": "frontend/ui/src/App.svelte",
    "content": "<script lang=\"ts\">\n    import Contrib from \"./routes/Contrib.svelte\";\n    import FAQ from \"./routes/FAQ.svelte\";\n    import Home from \"./routes/Home.svelte\";\n    import { Detypify, ortEnv } from \"detypify-service\";\n    import { Navbar, NavBrand, NavLi, NavUl, NavHamburger } from \"flowbite-svelte\";\n    import { Spinner, DarkMode, Tooltip, ToolbarButton, Heading } from \"flowbite-svelte\";\n    import { GithubSolid } from \"flowbite-svelte-icons\";\n    import { onMount } from \"svelte\";\n    import { fade } from \"svelte/transition\";\n\n    ortEnv.wasm.numThreads = 1;\n    ortEnv.wasm.wasmPaths = \"https://cdn.jsdelivr.net/npm/onnxruntime-web@1.23.2/dist/\";\n\n    let activeHash = $state(\"#\");\n\n    onMount(() => {\n        const updateHash = () => {\n            activeHash = window.location.hash || \"#\";\n        };\n\n        updateHash();\n        window.addEventListener(\"hashchange\", updateHash);\n        return () => window.removeEventListener(\"hashchange\", updateHash);\n    });\n</script>\n\n<Navbar>\n    <NavBrand href=\"/\">\n        <!--  TODO: logo -->\n        <span class=\"self-center text-2xl font-semibold whitespace-nowrap dark:text-white\">Detypify</span>\n    </NavBrand>\n    <div class=\"flex\">\n        <NavUl activeUrl={activeHash}>\n            <NavLi href=\"#\">Home</NavLi>\n            <NavLi href=\"#contrib\">Contrib</NavLi>\n            <NavLi href=\"#faq\">FAQ</NavLi>\n        </NavUl>\n\n        <ToolbarButton size=\"lg\" class=\"my-auto\" href=\"https://github.com/QuarticCat/detypify\">\n            <GithubSolid size=\"lg\" />\n        </ToolbarButton>\n        <Tooltip class=\"dark:bg-gray-900\" placement=\"bottom\">View on GitHub</Tooltip>\n\n        <DarkMode size=\"lg\" class=\"my-auto\" />\n        <Tooltip class=\"dark:bg-gray-900\" placement=\"bottom\">Toggle dark mode</Tooltip>\n\n        <NavHamburger />\n    </div>\n</Navbar>\n\n{#await Detypify.create()}\n    <div class=\"ui-container 
min-h-80\">\n        <Spinner size=\"16\" class=\"self-center\" />\n    </div>\n{:then session}\n    {#key activeHash}\n        <div class=\"ui-container\" in:fade={{ duration: 50, delay: 50 }} out:fade={{ duration: 50 }}>\n            {#if activeHash === \"#\"}\n                <Home {session} />\n            {:else if activeHash === \"#contrib\"}\n                <Contrib />\n            {:else if activeHash === \"#faq\"}\n                <FAQ />\n            {:else}\n                <Heading>Not Found</Heading>\n            {/if}\n        </div>\n    {/key}\n{/await}\n"
  },
  {
    "path": "frontend/ui/src/app.css",
    "content": "@import \"tailwindcss\";\n\n@plugin \"flowbite/plugin\";\n\n@custom-variant dark (&:where(.dark, .dark *));\n\n@theme {\n    --color-primary-50: #fff5f2;\n    --color-primary-100: #fff1ee;\n    --color-primary-200: #ffe4de;\n    --color-primary-300: #ffd5cc;\n    --color-primary-400: #ffbcad;\n    --color-primary-500: #fe795d;\n    --color-primary-600: #ef562f;\n    --color-primary-700: #eb4f27;\n    --color-primary-800: #cc4522;\n    --color-primary-900: #a5371b;\n\n    --color-secondary-50: #f0f9ff;\n    --color-secondary-100: #e0f2fe;\n    --color-secondary-200: #bae6fd;\n    --color-secondary-300: #7dd3fc;\n    --color-secondary-400: #38bdf8;\n    --color-secondary-500: #0ea5e9;\n    --color-secondary-600: #0284c7;\n    --color-secondary-700: #0369a1;\n    --color-secondary-800: #075985;\n    --color-secondary-900: #0c4a6e;\n}\n\n@source \"../node_modules/flowbite-svelte/dist\";\n@source \"../node_modules/flowbite-svelte-icons/dist\";\n\n@layer base {\n    @font-face {\n        font-family: NewCMMath-Detypify;\n        src:\n            local(NewComputerModernMath),\n            url(/NewCMMath-Detypify.woff2) format(\"woff2\");\n    }\n\n    html {\n        scrollbar-width: none;\n    }\n}\n\n@layer components {\n    .ui-container {\n        @apply m-[2vw] flex flex-wrap justify-center gap-x-16 gap-y-4;\n    }\n\n    .ui-sub-container {\n        @apply flex flex-col gap-4;\n    }\n\n    .ui-hover-btn {\n        @apply text-gray-600 hover:text-black dark:text-gray-400 dark:hover:text-white;\n    }\n\n    .ui-close-btn {\n        @apply absolute top-1 right-1 p-2;\n    }\n}\n"
  },
  {
    "path": "frontend/ui/src/lib/Candidate.svelte",
    "content": "<script lang=\"ts\">\n    import Card from \"./Card.svelte\";\n    import CopyButton from \"./CopyButton.svelte\";\n    import type { SymbolInfo } from \"detypify-service\";\n    import { Avatar, P } from \"flowbite-svelte\";\n\n    const { info }: { info: SymbolInfo } = $props();\n</script>\n\n<Card>\n    {@const escape = `\\\\u{${info.char.codePointAt(0)?.toString(16).toUpperCase().padStart(4, \"0\")}}`}\n    {@const shorthand = info.shorthand ?? info.markupShorthand ?? info.mathShorthand}\n    {@const shorthandKind = info.markupShorthand ? \"markup\" : info.mathShorthand ? \"math\" : \"\"}\n    <CopyButton text={info.char}>\n        <Avatar cornerStyle=\"rounded\" size=\"lg\" class=\"font-[NewCMMath-Detypify] text-5xl\">\n            {info.char}\n        </Avatar>\n    </CopyButton>\n    <div class=\"flex flex-col gap-y-1\">\n        <P>\n            Name:\n            {#each info.names as name, i}\n                {i === 0 ? \"\" : \" | \"}\n                <CopyButton text={name}>\n                    <code class=\"text-base font-medium\">\n                        {name}\n                    </code>\n                </CopyButton>\n            {/each}\n        </P>\n        <P>\n            Escape:\n            <CopyButton text={escape}>\n                <code class=\"text-base font-medium\">\n                    {escape}\n                </code>\n            </CopyButton>\n        </P>\n        {#if shorthand}\n            <P>\n                Shorthand:\n                <CopyButton text={shorthand}>\n                    <code class=\"text-base font-medium\">\n                        {shorthand}\n                    </code>\n                </CopyButton>\n                {#if shorthandKind}\n                    <span class=\"text-sm text-gray-500\">({shorthandKind})</span>\n                {/if}\n            </P>\n        {/if}\n    </div>\n</Card>\n"
  },
  {
    "path": "frontend/ui/src/lib/Canvas.svelte",
    "content": "<script lang=\"ts\">\n    import type { Stroke, Strokes } from \"detypify-service\";\n    import { Tooltip } from \"flowbite-svelte\";\n    import { CloseOutline } from \"flowbite-svelte-icons\";\n    import { onMount } from \"svelte\";\n\n    let { strokes = $bindable() }: { strokes: Strokes } = $props();\n\n    let canvas: HTMLCanvasElement | undefined;\n    let ctx: CanvasRenderingContext2D | null | undefined;\n    let stroke: Stroke = [];\n\n    onMount(() => {\n        if (!canvas) return;\n        ctx = canvas.getContext(\"2d\");\n        if (!ctx) return;\n        ctx.lineWidth = 5;\n        ctx.lineJoin = \"round\";\n        ctx.lineCap = \"round\";\n    });\n\n    // Every time stroke clears (e.g. refresh), clear canvas.\n    $effect(() => {\n        if (strokes.length > 0 || !ctx) return;\n        ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);\n    });\n\n    function drawStart(event: PointerEvent) {\n        if (event.button !== 0 || !ctx) return;\n\n        const roundedX = Math.round(event.offsetX);\n        const roundedY = Math.round(event.offsetY);\n        stroke = [[roundedX, roundedY]];\n\n        ctx.beginPath();\n        ctx.moveTo(roundedX, roundedY);\n        ctx.lineTo(roundedX, roundedY);\n        ctx.stroke();\n    }\n\n    function drawMove(event: PointerEvent) {\n        if (stroke.length === 0 || !ctx) return;\n\n        const roundedX = Math.round(event.offsetX);\n        const roundedY = Math.round(event.offsetY);\n        const [lastX, lastY] = stroke[stroke.length - 1];\n        stroke.push([roundedX, roundedY]);\n\n        ctx.beginPath();\n        ctx.moveTo(lastX, lastY);\n        ctx.lineTo(roundedX, roundedY);\n        ctx.stroke();\n    }\n\n    function drawEnd() {\n        if (stroke.length === 0) return;\n        strokes = [...strokes, stroke];\n        stroke = [];\n    }\n\n    function drawClear() {\n        strokes = [];\n    }\n</script>\n\n<div class=\"relative w-80\">\n    <canvas\n        
width=\"320\"\n        height=\"320\"\n        class=\"touch-none rounded-lg border border-gray-200 bg-gray-100 shadow-md dark:border-0 dark:bg-gray-600\"\n        bind:this={canvas}\n        onpointerdown={drawStart}\n        onpointermove={drawMove}\n        onpointerup={drawEnd}\n        onpointerleave={drawEnd}\n        onpointercancel={drawEnd}\n    ></canvas>\n    <button type=\"button\" class=\"ui-hover-btn ui-close-btn\" onclick={drawClear}>\n        <CloseOutline class=\"size-6\" />\n        <Tooltip class=\"dark:bg-gray-900\">Clear</Tooltip>\n    </button>\n</div>\n"
  },
  {
    "path": "frontend/ui/src/lib/Card.svelte",
    "content": "<script lang=\"ts\">\n    import { fly } from \"svelte/transition\";\n\n    let { class: className = \"\", children, ...restProps } = $props();\n</script>\n\n<div\n    class={`flex flex-row items-center gap-x-4 rounded-lg border border-gray-200 bg-white p-4 text-gray-500 shadow-md dark:border-gray-700 dark:bg-gray-800 dark:text-gray-400 ${className}`}\n    in:fly|local={{ x: 20, duration: 50, delay: 50 }}\n    out:fly|local={{ x: 20, duration: 50 }}\n    {...restProps}\n>\n    {@render children?.()}\n</div>\n"
  },
  {
    "path": "frontend/ui/src/lib/ContribPanel.svelte",
    "content": "<script lang=\"ts\">\n    import { contribSyms } from \"detypify-service\";\n    import type { Strokes } from \"detypify-service\";\n    import { Button, Input } from \"flowbite-svelte\";\n    import { RefreshOutline } from \"flowbite-svelte-icons\";\n\n    export type Sample = {\n        id: Uint32Array;\n        name: string;\n        strokes: Strokes;\n    };\n\n    const symKeys = Object.keys(contribSyms);\n    const contribUrl = import.meta.env.DEV\n        ? \"http://localhost:8787/contrib\"\n        : \"https://detypify.quarticcat.workers.dev/contrib\";\n\n    let {\n        input = $bindable(),\n        strokes = $bindable(),\n        samples = $bindable(),\n    }: {\n        input: string;\n        strokes: Strokes;\n        samples: Sample[];\n    } = $props();\n\n    let submitting = $state(false);\n    const isValid = $derived(Boolean(contribSyms[input]));\n\n    function refresh() {\n        const old = input;\n        while ((input = symKeys[Math.floor(symKeys.length * Math.random())]) === old);\n        strokes = [];\n    }\n\n    function save() {\n        const sample = {\n            id: crypto.getRandomValues(new Uint32Array(4)),\n            name: input,\n            strokes,\n        };\n        samples = [sample, ...samples];\n        strokes = [];\n    }\n\n    function getToken(): number {\n        const existing = localStorage.getItem(\"token\");\n        if (existing) return Number(existing);\n\n        const token = crypto.getRandomValues(new Uint32Array(1))[0].toString();\n        localStorage.setItem(\"token\", token);\n        return Number(token);\n    }\n\n    async function submit() {\n        submitting = true;\n        try {\n            const response = await fetch(contribUrl, {\n                method: \"POST\",\n                headers: {\n                    Accept: \"application/json\",\n                    \"Content-Type\": \"application/json\",\n                },\n                body: JSON.stringify({\n    
                ver: 3,\n                    token: getToken(),\n                    samples: samples.map(({ name, strokes }) => [name, strokes]),\n                }),\n            });\n            const text = await response.text();\n            if (!response.ok) throw new Error(text); // keep samples so the user can retry\n            samples = []; // clear samples only if success\n            window.alert(text);\n        } catch (err) {\n            window.alert(err instanceof Error ? err.message : String(err));\n        }\n        submitting = false;\n    }\n</script>\n\n<Input type=\"text\" placeholder=\"symbol\" color={isValid ? \"green\" : \"red\"} bind:value={input}>\n    {#snippet right()}\n        <button type=\"button\" class=\"ui-hover-btn\" onclick={refresh}>\n            <RefreshOutline />\n        </button>\n    {/snippet}\n</Input>\n\n<div class=\"flex justify-around gap-4\">\n    <Button class=\"w-full\" disabled={!isValid || strokes.length === 0} onclick={save}>Save</Button>\n    <Button class=\"w-full\" disabled={samples.length === 0} onclick={submit} loading={submitting}>Submit</Button>\n</div>\n"
  },
  {
    "path": "frontend/ui/src/lib/CopyButton.svelte",
    "content": "<script lang=\"ts\">\n    import { Tooltip } from \"flowbite-svelte\";\n    import type { Snippet } from \"svelte\";\n\n    let { children, text }: { children?: Snippet; text: string } = $props();\n    let tip = $state(\"Copy\");\n\n    async function copy() {\n        await navigator.clipboard.writeText(text);\n        tip = \"Copied!\";\n    }\n\n    function reset() {\n        tip = \"Copy\";\n    }\n</script>\n\n<button type=\"button\" class=\"ui-hover-btn\" onclick={copy}>\n    {@render children?.()}\n    <Tooltip class=\"dark:bg-gray-900\" onbeforetoggle={reset}>\n        {tip}\n    </Tooltip>\n</button>\n"
  },
  {
    "path": "frontend/ui/src/lib/Preview.svelte",
    "content": "<script lang=\"ts\">\n    import Card from \"./Card.svelte\";\n    import { contribSyms } from \"detypify-service\";\n    import { Avatar, Hr, Tooltip } from \"flowbite-svelte\";\n    import { CloseOutline } from \"flowbite-svelte-icons\";\n\n    const BLANK = \"data:image/gif;base64,R0lGODlhAQABAIAAAP7//wAAACH5BAAAAAAALAAAAAABAAEAAAICRAEAOw==\";\n    const { name, img = BLANK, ondelete }: { name: string; img?: string; ondelete?: () => void } = $props();\n</script>\n\n<Card class=\"relative flex justify-center\">\n    <Avatar cornerStyle=\"rounded\" size=\"lg\" class=\"font-[NewCMMath-Detypify] text-5xl\">\n        <!-- Workaround Safari font rendering issues. -->\n        <span class=\"-m-5 p-5\">{contribSyms[name]}</span>\n    </Avatar>\n    <Hr class=\"h-1 w-12 rounded\" />\n    <Avatar cornerStyle=\"rounded\" size=\"lg\" src={img} />\n\n    {#if ondelete}\n        <button type=\"button\" class=\"ui-hover-btn ui-close-btn\" onclick={ondelete}>\n            <CloseOutline class=\"size-6\" />\n            <Tooltip class=\"dark:bg-gray-900\">Delete</Tooltip>\n        </button>\n    {/if}\n</Card>\n"
  },
  {
    "path": "frontend/ui/src/main.ts",
    "content": "import App from \"./App.svelte\";\nimport \"./app.css\";\nimport { mount } from \"svelte\";\n\nconst app = mount(App, {\n    target: document.getElementById(\"app\")!,\n});\n\nexport default app;\n"
  },
  {
    "path": "frontend/ui/src/routes/Contrib.svelte",
    "content": "<script lang=\"ts\">\n    import Canvas from \"../lib/Canvas.svelte\";\n    import ContribPanel from \"../lib/ContribPanel.svelte\";\n    import type { Sample } from \"../lib/ContribPanel.svelte\";\n    import Preview from \"../lib/Preview.svelte\";\n    import type { Strokes } from \"detypify-service\";\n    import { drawStrokes } from \"detypify-service\";\n    import { Hr, Alert } from \"flowbite-svelte\";\n\n    let input = $state(\"\");\n    let strokes: Strokes = $state([]);\n    let samples: Sample[] = $state([]);\n\n    function draw(strokes: Strokes): string | undefined {\n        if (strokes.length === 0) return;\n        return drawStrokes(strokes)?.toDataURL();\n    }\n</script>\n\n<div class=\"ui-sub-container w-80\">\n    <Canvas bind:strokes />\n    <ContribPanel bind:input bind:strokes bind:samples />\n    <Alert color=\"blue\" border dismissable>\n        Select a symbol, draw it, submit your contribution and make Detypify better!\n    </Alert>\n</div>\n\n<div class=\"ui-sub-container w-100\">\n    <Preview name={input} img={draw(strokes)} />\n    <Hr class=\"mx-auto h-2 w-60 rounded\" />\n    {#each samples as { id, name, strokes }, idx (id)}\n        <Preview {name} img={draw(strokes)} ondelete={() => samples.splice(idx, 1)} />\n    {/each}\n</div>\n"
  },
  {
    "path": "frontend/ui/src/routes/FAQ.svelte",
    "content": "<script lang=\"ts\">\n    import { inferSyms } from \"detypify-service\";\n    import { AccordionItem, Accordion, A, P, Li, List } from \"flowbite-svelte\";\n\n    const supportedSyms = inferSyms.flatMap((info) => info.names);\n    supportedSyms.sort();\n</script>\n\n<div class=\"ui-sub-container w-180\">\n    <Accordion>\n        <AccordionItem>\n            {#snippet header()}Can't find your symbol?{/snippet}\n            <P>\n                Supported symbols are listed in section below. To recognize characters out of this scope, consider\n                giving <A href=\"https://shapecatcher.com/\">shapecatcher</A> a try!\n            </P>\n        </AccordionItem>\n        <AccordionItem>\n            {#snippet header()}Supported symbols?{/snippet}\n            <List>\n                {#each supportedSyms as name}\n                    <Li>{name}</Li>\n                {/each}\n            </List>\n        </AccordionItem>\n        <AccordionItem>\n            {#snippet header()}Results are inaccurate?{/snippet}\n            <P>\n                Some privacy settings (like <A href=\"https://brave.com/shields/\">Brave Shields</A>) have canvas\n                anti-fingerprinting, which might confuse the classifier. Make sure they are turned off for this site.\n                Don't worry! We don't collect any privacy data. 
This project is fully open-source and works locally on\n                your machine.\n            </P>\n        </AccordionItem>\n        <AccordionItem>\n            {#snippet header()}Use it offline?{/snippet}\n            <P>\n                Check the <A href=\"https://support.google.com/chrome/answer/9658361\">guide</A>.\n            </P>\n        </AccordionItem>\n        <AccordionItem>\n            {#snippet header()}Support us{/snippet}\n            <P>\n                Star this project on <A href=\"https://github.com/QuarticCat/detypify\">GitHub</A>!\n            </P>\n        </AccordionItem>\n    </Accordion>\n</div>\n"
  },
  {
    "path": "frontend/ui/src/routes/Home.svelte",
    "content": "<script lang=\"ts\">\n    import Candidate from \"../lib/Candidate.svelte\";\n    import Canvas from \"../lib/Canvas.svelte\";\n    import type { Strokes } from \"detypify-service\";\n    import { Detypify, inferSyms } from \"detypify-service\";\n    import { Alert, Button } from \"flowbite-svelte\";\n    import { fly } from \"svelte/transition\";\n\n    const { session }: { session: Detypify } = $props();\n\n    let strokes: Strokes = $state([]);\n    let candidates: number[] = $state([]);\n    let numToShow = $state(5);\n\n    // Every time stroke changes, reset numToShow and infer candidates.\n    $effect(() => {\n        numToShow = 5;\n\n        if (strokes.length === 0) {\n            candidates = [];\n            return;\n        }\n\n        session.infer(strokes).then((scores) => {\n            const keys = Array.from(scores.keys());\n            keys.sort((a, b) => scores[b] - scores[a]);\n            candidates = keys;\n        });\n    });\n</script>\n\n<div class=\"ui-sub-container w-80\">\n    <Canvas bind:strokes />\n    {#if \"brave\" in navigator}\n        <Alert color=\"yellow\" border dismissable>\n            If you are using Brave, please turn off Shields for this site, or it won't work properly.\n        </Alert>\n    {/if}\n</div>\n\n<div class=\"ui-sub-container w-100\">\n    {#each candidates.slice(0, numToShow) as i (i)}\n        <Candidate info={inferSyms[i]} />\n    {/each}\n    {#if candidates.length > 0}\n        <div\n            in:fly|local={{ x: 20, duration: 50, delay: 50 }}\n            out:fly|local={{ x: 20, duration: 50 }}\n            class=\"w-fit self-center\"\n        >\n            <Button outline size=\"sm\" onclick={() => (numToShow += 5)}>Show More</Button>\n        </div>\n    {/if}\n</div>\n"
  },
  {
    "path": "frontend/ui/svelte.config.js",
    "content": "import { vitePreprocess } from \"@sveltejs/vite-plugin-svelte\";\n\n/** @type {import(\"@sveltejs/vite-plugin-svelte\").SvelteConfig} */\nexport default {\n    preprocess: vitePreprocess(),\n    compilerOptions: {\n        runes: true,\n    },\n};\n"
  },
  {
    "path": "frontend/ui/tsconfig.app.json",
    "content": "{\n    \"extends\": \"@tsconfig/svelte/tsconfig.json\",\n    \"compilerOptions\": {\n        \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.app.tsbuildinfo\",\n        \"target\": \"ES2022\",\n        \"useDefineForClassFields\": true,\n        \"module\": \"ESNext\",\n        \"types\": [\"svelte\", \"vite/client\"],\n        \"baseUrl\": \".\",\n        \"paths\": {\n            \"detypify-service\": [\"../service/dist/index.d.mts\"]\n        },\n        \"resolveJsonModule\": true,\n        \"noEmit\": true,\n        /**\n         * Typecheck JS in `.svelte` and `.js` files by default.\n         * Disable checkJs if you'd like to use dynamic types in JS.\n         * Note that setting allowJs false does not prevent the use\n         * of JS in `.svelte` files.\n         */\n        \"allowJs\": true,\n        \"checkJs\": true,\n        \"moduleDetection\": \"force\"\n    },\n    \"include\": [\"src/**/*.ts\", \"src/**/*.js\", \"src/**/*.svelte\"]\n}\n"
  },
  {
    "path": "frontend/ui/tsconfig.json",
    "content": "{\n    \"files\": [],\n    \"references\": [\n        {\n            \"path\": \"./tsconfig.app.json\"\n        },\n        {\n            \"path\": \"./tsconfig.node.json\"\n        }\n    ]\n}\n"
  },
  {
    "path": "frontend/ui/tsconfig.node.json",
    "content": "{\n    \"compilerOptions\": {\n        \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.node.tsbuildinfo\",\n        \"target\": \"ES2023\",\n        \"lib\": [\"ES2023\"],\n        \"module\": \"ESNext\",\n        \"types\": [\"node\"],\n        \"skipLibCheck\": true,\n\n        /* Bundler mode */\n        \"moduleResolution\": \"bundler\",\n        \"allowImportingTsExtensions\": true,\n        \"verbatimModuleSyntax\": true,\n        \"moduleDetection\": \"force\",\n        \"noEmit\": true,\n\n        /* Linting */\n        \"strict\": true,\n        \"noUnusedLocals\": true,\n        \"noUnusedParameters\": true,\n        \"erasableSyntaxOnly\": true,\n        \"noFallthroughCasesInSwitch\": true,\n        \"noUncheckedSideEffectImports\": true\n    },\n    \"include\": [\"vite.config.ts\", \"scripts/*\"]\n}\n"
  },
  {
    "path": "frontend/ui/vite.config.ts",
    "content": "import { svelte } from \"@sveltejs/vite-plugin-svelte\";\nimport tailwindcss from \"@tailwindcss/vite\";\nimport { defineConfig } from \"vite\";\nimport { VitePWA } from \"vite-plugin-pwa\";\n\nexport default defineConfig({\n    resolve: {\n        conditions: [\"module\", \"browser\", \"onnxruntime-web-use-extern-wasm\"],\n    },\n    assetsInclude: [\"**/*.onnx\"],\n    plugins: [\n        tailwindcss(),\n        svelte(),\n        VitePWA({\n            registerType: \"autoUpdate\",\n            workbox: {\n                globPatterns: [\"**/*.{js,css,html,ico,png,svg,onnx,woff2}\"],\n                runtimeCaching: [\n                    {\n                        urlPattern: ({ url }) => url.pathname.includes(\"onnxruntime-web\"),\n                        handler: \"CacheFirst\",\n                        options: { cacheName: \"ort-cache\" },\n                    },\n                ],\n                maximumFileSizeToCacheInBytes: 10 * 1024 * 1024,\n            },\n            manifest: {\n                name: \"Detypify\",\n                short_name: \"Detypify\",\n                description: \"Typst symbol classifier\",\n                theme_color: \"#ffffff\",\n            },\n            pwaAssets: {\n                image: \"public/favicon.svg\",\n            },\n        }),\n    ],\n});\n"
  },
  {
    "path": "frontend/worker/README.md",
    "content": "# Detypify Worker\n\nA Cloudflare Worker that accepts contributions from the web page and stores them in a D1 database.\n\n## Development\n\n```console\n$ bun run dev         # start local dev server\n$ bun run deploy      # deploy to Cloudflare\n$ bun run cf-typegen  # generate Cloudflare bindings/types (worker-configuration.d.ts)\n```\n"
  },
  {
    "path": "frontend/worker/package.json",
    "content": "{\n    \"name\": \"detypify-worker\",\n    \"version\": \"0.3.0\",\n    \"private\": true,\n    \"scripts\": {\n        \"dev\": \"wrangler dev\",\n        \"deploy\": \"wrangler deploy --minify\",\n        \"cf-typegen\": \"wrangler types --env-interface CloudflareBindings\"\n    },\n    \"dependencies\": {\n        \"hono\": \"^4.12.8\"\n    },\n    \"devDependencies\": {\n        \"wrangler\": \"^4.75.0\"\n    }\n}\n"
  },
  {
    "path": "frontend/worker/schema.sql",
    "content": "-- $ bunx wrangler d1 execute detypify --remote --file=schema.sql\n\nCREATE TABLE IF NOT EXISTS samples (\n    id      INTEGER PRIMARY KEY,\n    ver     INTEGER,\n    token   INTEGER,\n    sym     TEXT,\n    strokes TEXT\n);\n"
  },
  {
    "path": "frontend/worker/src/index.ts",
    "content": "import { Hono } from \"hono\";\nimport { cors } from \"hono/cors\";\n\ntype Bindings = {\n    DB: D1Database;\n};\n\ntype ContribPayload = {\n    ver: number;\n    token: number;\n    samples: [number, number][][];\n};\n\nconst app = new Hono<{ Bindings: Bindings }>();\n\napp.use(\"/*\", cors());\n\napp.post(\"/contrib\", async (c) => {\n    const { ver, token, samples } = await c.req.json<ContribPayload>();\n    const stmt = c.env.DB.prepare(\"INSERT INTO samples (ver, token, sym, strokes) VALUES (?, ?, ?, ?)\");\n    const inserts = samples.map(([sym, strokes]) => stmt.bind(ver, token, sym, JSON.stringify(strokes)));\n    await c.env.DB.batch(inserts);\n    return c.text(\"Thanks for your contributions!\");\n});\n\nexport default app;\n"
  },
  {
    "path": "frontend/worker/tsconfig.json",
    "content": "{\n    \"compilerOptions\": {\n        \"target\": \"ESNext\",\n        \"module\": \"ESNext\",\n        \"moduleResolution\": \"Bundler\",\n        \"strict\": true,\n        \"skipLibCheck\": true,\n        \"lib\": [\"ESNext\"],\n        \"jsx\": \"react-jsx\",\n        \"jsxImportSource\": \"hono/jsx\"\n    }\n}\n"
  },
  {
    "path": "frontend/worker/worker-configuration.d.ts",
    "content": "/* eslint-disable */\n// Generated by Wrangler by running `wrangler types --env-interface CloudflareBindings` (hash: f3858af3f182d14dabf0ceb8f03b1c78)\n// Runtime types generated with workerd@1.20260111.0 2026-01-13\ndeclare namespace Cloudflare {\n    interface GlobalProps {\n        mainModule: typeof import(\"./src/index\");\n    }\n    interface Env {\n        DB: D1Database;\n    }\n}\ninterface CloudflareBindings extends Cloudflare.Env {}\n\n// Begin runtime types\n/*! *****************************************************************************\nCopyright (c) Cloudflare. All rights reserved.\nCopyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use\nthis file except in compliance with the License. You may obtain a copy of the\nLicense at http://www.apache.org/licenses/LICENSE-2.0\nTHIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED\nWARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,\nMERCHANTABLITY OR NON-INFRINGEMENT.\nSee the Apache Version 2.0 License for specific language governing permissions\nand limitations under the License.\n***************************************************************************** */\n/* eslint-disable */\n// noinspection JSUnusedGlobalSymbols\ndeclare var onmessage: never;\n/**\n * The **`DOMException`** interface represents an abnormal event (called an **exception**) that occurs as a result of calling a method or accessing a property of a web API.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException)\n */\ndeclare class DOMException extends Error {\n    constructor(message?: string, name?: string);\n    /**\n     * The **`message`** read-only property of the a message or description associated with the given error name.\n     *\n     * [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/DOMException/message)\n     */\n    readonly message: string;\n    /**\n     * The **`name`** read-only property of the one of the strings associated with an error name.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/name)\n     */\n    readonly name: string;\n    /**\n     * The **`code`** read-only property of the DOMException interface returns one of the legacy error code constants, or `0` if none match.\n     * @deprecated\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/code)\n     */\n    readonly code: number;\n    static readonly INDEX_SIZE_ERR: number;\n    static readonly DOMSTRING_SIZE_ERR: number;\n    static readonly HIERARCHY_REQUEST_ERR: number;\n    static readonly WRONG_DOCUMENT_ERR: number;\n    static readonly INVALID_CHARACTER_ERR: number;\n    static readonly NO_DATA_ALLOWED_ERR: number;\n    static readonly NO_MODIFICATION_ALLOWED_ERR: number;\n    static readonly NOT_FOUND_ERR: number;\n    static readonly NOT_SUPPORTED_ERR: number;\n    static readonly INUSE_ATTRIBUTE_ERR: number;\n    static readonly INVALID_STATE_ERR: number;\n    static readonly SYNTAX_ERR: number;\n    static readonly INVALID_MODIFICATION_ERR: number;\n    static readonly NAMESPACE_ERR: number;\n    static readonly INVALID_ACCESS_ERR: number;\n    static readonly VALIDATION_ERR: number;\n    static readonly TYPE_MISMATCH_ERR: number;\n    static readonly SECURITY_ERR: number;\n    static readonly NETWORK_ERR: number;\n    static readonly ABORT_ERR: number;\n    static readonly URL_MISMATCH_ERR: number;\n    static readonly QUOTA_EXCEEDED_ERR: number;\n    static readonly TIMEOUT_ERR: number;\n    static readonly INVALID_NODE_TYPE_ERR: number;\n    static readonly DATA_CLONE_ERR: number;\n    get stack(): any;\n    set stack(value: any);\n}\ntype WorkerGlobalScopeEventMap = {\n    fetch: FetchEvent;\n    scheduled: ScheduledEvent;\n    
queue: QueueEvent;\n    unhandledrejection: PromiseRejectionEvent;\n    rejectionhandled: PromiseRejectionEvent;\n};\ndeclare abstract class WorkerGlobalScope extends EventTarget<WorkerGlobalScopeEventMap> {\n    EventTarget: typeof EventTarget;\n}\n/* The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox). *\n * The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox).\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console)\n */\ninterface Console {\n    \"assert\"(condition?: boolean, ...data: any[]): void;\n    /**\n     * The **`console.clear()`** static method clears the console if possible.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/clear_static)\n     */\n    clear(): void;\n    /**\n     * The **`console.count()`** static method logs the number of times that this particular call to `count()` has been called.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/count_static)\n     */\n    count(label?: string): void;\n    /**\n     * The **`console.countReset()`** static method resets counter used with console/count_static.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/countReset_static)\n     */\n    countReset(label?: string): void;\n    /**\n     * The **`console.debug()`** static method outputs a message to the console at the 'debug' log level.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/debug_static)\n     */\n    debug(...data: any[]): void;\n    /**\n     * The **`console.dir()`** static method displays a list of the properties of the specified JavaScript object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dir_static)\n     */\n    dir(item?: any, options?: any): void;\n    /**\n     * The **`console.dirxml()`** static method displays an interactive tree of the 
descendant elements of the specified XML/HTML element.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dirxml_static)\n     */\n    dirxml(...data: any[]): void;\n    /**\n     * The **`console.error()`** static method outputs a message to the console at the 'error' log level.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/error_static)\n     */\n    error(...data: any[]): void;\n    /**\n     * The **`console.group()`** static method creates a new inline group in the Web console log, causing any subsequent console messages to be indented by an additional level, until console/groupEnd_static is called.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/group_static)\n     */\n    group(...data: any[]): void;\n    /**\n     * The **`console.groupCollapsed()`** static method creates a new inline group in the console.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupCollapsed_static)\n     */\n    groupCollapsed(...data: any[]): void;\n    /**\n     * The **`console.groupEnd()`** static method exits the current inline group in the console.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupEnd_static)\n     */\n    groupEnd(): void;\n    /**\n     * The **`console.info()`** static method outputs a message to the console at the 'info' log level.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/info_static)\n     */\n    info(...data: any[]): void;\n    /**\n     * The **`console.log()`** static method outputs a message to the console.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/log_static)\n     */\n    log(...data: any[]): void;\n    /**\n     * The **`console.table()`** static method displays tabular data as a table.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/table_static)\n     
*/\n    table(tabularData?: any, properties?: string[]): void;\n    /**\n     * The **`console.time()`** static method starts a timer you can use to track how long an operation takes.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/time_static)\n     */\n    time(label?: string): void;\n    /**\n     * The **`console.timeEnd()`** static method stops a timer that was previously started by calling console/time_static.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeEnd_static)\n     */\n    timeEnd(label?: string): void;\n    /**\n     * The **`console.timeLog()`** static method logs the current value of a timer that was previously started by calling console/time_static.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeLog_static)\n     */\n    timeLog(label?: string, ...data: any[]): void;\n    timeStamp(label?: string): void;\n    /**\n     * The **`console.trace()`** static method outputs a stack trace to the console.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/trace_static)\n     */\n    trace(...data: any[]): void;\n    /**\n     * The **`console.warn()`** static method outputs a warning message to the console at the 'warning' log level.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/warn_static)\n     */\n    warn(...data: any[]): void;\n}\ndeclare const console: Console;\ntype BufferSource = ArrayBufferView | ArrayBuffer;\ntype TypedArray =\n    | Int8Array\n    | Uint8Array\n    | Uint8ClampedArray\n    | Int16Array\n    | Uint16Array\n    | Int32Array\n    | Uint32Array\n    | Float32Array\n    | Float64Array\n    | BigInt64Array\n    | BigUint64Array;\ndeclare namespace WebAssembly {\n    class CompileError extends Error {\n        constructor(message?: string);\n    }\n    class RuntimeError extends Error {\n        constructor(message?: string);\n    }\n    type ValueType 
= \"anyfunc\" | \"externref\" | \"f32\" | \"f64\" | \"i32\" | \"i64\" | \"v128\";\n    interface GlobalDescriptor {\n        value: ValueType;\n        mutable?: boolean;\n    }\n    class Global {\n        constructor(descriptor: GlobalDescriptor, value?: any);\n        value: any;\n        valueOf(): any;\n    }\n    type ImportValue = ExportValue | number;\n    type ModuleImports = Record<string, ImportValue>;\n    type Imports = Record<string, ModuleImports>;\n    type ExportValue = Function | Global | Memory | Table;\n    type Exports = Record<string, ExportValue>;\n    class Instance {\n        constructor(module: Module, imports?: Imports);\n        readonly exports: Exports;\n    }\n    interface MemoryDescriptor {\n        initial: number;\n        maximum?: number;\n        shared?: boolean;\n    }\n    class Memory {\n        constructor(descriptor: MemoryDescriptor);\n        readonly buffer: ArrayBuffer;\n        grow(delta: number): number;\n    }\n    type ImportExportKind = \"function\" | \"global\" | \"memory\" | \"table\";\n    interface ModuleExportDescriptor {\n        kind: ImportExportKind;\n        name: string;\n    }\n    interface ModuleImportDescriptor {\n        kind: ImportExportKind;\n        module: string;\n        name: string;\n    }\n    abstract class Module {\n        static customSections(module: Module, sectionName: string): ArrayBuffer[];\n        static exports(module: Module): ModuleExportDescriptor[];\n        static imports(module: Module): ModuleImportDescriptor[];\n    }\n    type TableKind = \"anyfunc\" | \"externref\";\n    interface TableDescriptor {\n        element: TableKind;\n        initial: number;\n        maximum?: number;\n    }\n    class Table {\n        constructor(descriptor: TableDescriptor, value?: any);\n        readonly length: number;\n        get(index: number): any;\n        grow(delta: number, value?: any): number;\n        set(index: number, value?: any): void;\n    }\n    function 
instantiate(module: Module, imports?: Imports): Promise<Instance>;\n    function validate(bytes: BufferSource): boolean;\n}\n/**\n * The **`ServiceWorkerGlobalScope`** interface of the Service Worker API represents the global execution context of a service worker.\n * Available only in secure contexts.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ServiceWorkerGlobalScope)\n */\ninterface ServiceWorkerGlobalScope extends WorkerGlobalScope {\n    DOMException: typeof DOMException;\n    WorkerGlobalScope: typeof WorkerGlobalScope;\n    btoa(data: string): string;\n    atob(data: string): string;\n    setTimeout(callback: (...args: any[]) => void, msDelay?: number): number;\n    setTimeout<Args extends any[]>(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number;\n    clearTimeout(timeoutId: number | null): void;\n    setInterval(callback: (...args: any[]) => void, msDelay?: number): number;\n    setInterval<Args extends any[]>(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number;\n    clearInterval(timeoutId: number | null): void;\n    queueMicrotask(task: Function): void;\n    structuredClone<T>(value: T, options?: StructuredSerializeOptions): T;\n    reportError(error: any): void;\n    fetch(input: RequestInfo | URL, init?: RequestInit<RequestInitCfProperties>): Promise<Response>;\n    self: ServiceWorkerGlobalScope;\n    crypto: Crypto;\n    caches: CacheStorage;\n    scheduler: Scheduler;\n    performance: Performance;\n    Cloudflare: Cloudflare;\n    readonly origin: string;\n    Event: typeof Event;\n    ExtendableEvent: typeof ExtendableEvent;\n    CustomEvent: typeof CustomEvent;\n    PromiseRejectionEvent: typeof PromiseRejectionEvent;\n    FetchEvent: typeof FetchEvent;\n    TailEvent: typeof TailEvent;\n    TraceEvent: typeof TailEvent;\n    ScheduledEvent: typeof ScheduledEvent;\n    MessageEvent: typeof MessageEvent;\n    CloseEvent: typeof CloseEvent;\n    ReadableStreamDefaultReader: 
typeof ReadableStreamDefaultReader;\n    ReadableStreamBYOBReader: typeof ReadableStreamBYOBReader;\n    ReadableStream: typeof ReadableStream;\n    WritableStream: typeof WritableStream;\n    WritableStreamDefaultWriter: typeof WritableStreamDefaultWriter;\n    TransformStream: typeof TransformStream;\n    ByteLengthQueuingStrategy: typeof ByteLengthQueuingStrategy;\n    CountQueuingStrategy: typeof CountQueuingStrategy;\n    ErrorEvent: typeof ErrorEvent;\n    MessageChannel: typeof MessageChannel;\n    MessagePort: typeof MessagePort;\n    EventSource: typeof EventSource;\n    ReadableStreamBYOBRequest: typeof ReadableStreamBYOBRequest;\n    ReadableStreamDefaultController: typeof ReadableStreamDefaultController;\n    ReadableByteStreamController: typeof ReadableByteStreamController;\n    WritableStreamDefaultController: typeof WritableStreamDefaultController;\n    TransformStreamDefaultController: typeof TransformStreamDefaultController;\n    CompressionStream: typeof CompressionStream;\n    DecompressionStream: typeof DecompressionStream;\n    TextEncoderStream: typeof TextEncoderStream;\n    TextDecoderStream: typeof TextDecoderStream;\n    Headers: typeof Headers;\n    Body: typeof Body;\n    Request: typeof Request;\n    Response: typeof Response;\n    WebSocket: typeof WebSocket;\n    WebSocketPair: typeof WebSocketPair;\n    WebSocketRequestResponsePair: typeof WebSocketRequestResponsePair;\n    AbortController: typeof AbortController;\n    AbortSignal: typeof AbortSignal;\n    TextDecoder: typeof TextDecoder;\n    TextEncoder: typeof TextEncoder;\n    navigator: Navigator;\n    Navigator: typeof Navigator;\n    URL: typeof URL;\n    URLSearchParams: typeof URLSearchParams;\n    URLPattern: typeof URLPattern;\n    Blob: typeof Blob;\n    File: typeof File;\n    FormData: typeof FormData;\n    Crypto: typeof Crypto;\n    SubtleCrypto: typeof SubtleCrypto;\n    CryptoKey: typeof CryptoKey;\n    CacheStorage: typeof CacheStorage;\n    Cache: typeof Cache;\n  
  FixedLengthStream: typeof FixedLengthStream;\n    IdentityTransformStream: typeof IdentityTransformStream;\n    HTMLRewriter: typeof HTMLRewriter;\n}\ndeclare function addEventListener<Type extends keyof WorkerGlobalScopeEventMap>(\n    type: Type,\n    handler: EventListenerOrEventListenerObject<WorkerGlobalScopeEventMap[Type]>,\n    options?: EventTargetAddEventListenerOptions | boolean,\n): void;\ndeclare function removeEventListener<Type extends keyof WorkerGlobalScopeEventMap>(\n    type: Type,\n    handler: EventListenerOrEventListenerObject<WorkerGlobalScopeEventMap[Type]>,\n    options?: EventTargetEventListenerOptions | boolean,\n): void;\n/**\n * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event listeners in the appropriate order.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent)\n */\ndeclare function dispatchEvent(event: WorkerGlobalScopeEventMap[keyof WorkerGlobalScopeEventMap]): boolean;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/btoa) */\ndeclare function btoa(data: string): string;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/atob) */\ndeclare function atob(data: string): string;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */\ndeclare function setTimeout(callback: (...args: any[]) => void, msDelay?: number): number;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */\ndeclare function setTimeout<Args extends any[]>(\n    callback: (...args: Args) => void,\n    msDelay?: number,\n    ...args: Args\n): number;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearTimeout) */\ndeclare function clearTimeout(timeoutId: number | null): void;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */\ndeclare function setInterval(callback: (...args: any[]) => void, 
msDelay?: number): number;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */\ndeclare function setInterval<Args extends any[]>(\n    callback: (...args: Args) => void,\n    msDelay?: number,\n    ...args: Args\n): number;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearInterval) */\ndeclare function clearInterval(timeoutId: number | null): void;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/queueMicrotask) */\ndeclare function queueMicrotask(task: Function): void;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/structuredClone) */\ndeclare function structuredClone<T>(value: T, options?: StructuredSerializeOptions): T;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/reportError) */\ndeclare function reportError(error: any): void;\n/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/fetch) */\ndeclare function fetch(input: RequestInfo | URL, init?: RequestInit<RequestInitCfProperties>): Promise<Response>;\ndeclare const self: ServiceWorkerGlobalScope;\n/**\n * The Web Crypto API provides a set of low-level functions for common cryptographic tasks.\n * The Workers runtime implements the full surface of this API, but with some differences in\n * the [supported algorithms](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/#supported-algorithms)\n * compared to those implemented in most browsers.\n *\n * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/)\n */\ndeclare const crypto: Crypto;\n/**\n * The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache.\n *\n * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/)\n */\ndeclare const caches: CacheStorage;\ndeclare const scheduler: Scheduler;\n/**\n * The Workers runtime supports a subset of the Performance API, used to measure 
timing and performance,\n * as well as timing of subrequests and other operations.\n *\n * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/)\n */\ndeclare const performance: Performance;\ndeclare const Cloudflare: Cloudflare;\ndeclare const origin: string;\ndeclare const navigator: Navigator;\ninterface TestController {}\ninterface ExecutionContext<Props = unknown> {\n    waitUntil(promise: Promise<any>): void;\n    passThroughOnException(): void;\n    readonly exports: Cloudflare.Exports;\n    readonly props: Props;\n}\ntype ExportedHandlerFetchHandler<Env = unknown, CfHostMetadata = unknown> = (\n    request: Request<CfHostMetadata, IncomingRequestCfProperties<CfHostMetadata>>,\n    env: Env,\n    ctx: ExecutionContext,\n) => Response | Promise<Response>;\ntype ExportedHandlerTailHandler<Env = unknown> = (\n    events: TraceItem[],\n    env: Env,\n    ctx: ExecutionContext,\n) => void | Promise<void>;\ntype ExportedHandlerTraceHandler<Env = unknown> = (\n    traces: TraceItem[],\n    env: Env,\n    ctx: ExecutionContext,\n) => void | Promise<void>;\ntype ExportedHandlerTailStreamHandler<Env = unknown> = (\n    event: TailStream.TailEvent<TailStream.Onset>,\n    env: Env,\n    ctx: ExecutionContext,\n) => TailStream.TailEventHandlerType | Promise<TailStream.TailEventHandlerType>;\ntype ExportedHandlerScheduledHandler<Env = unknown> = (\n    controller: ScheduledController,\n    env: Env,\n    ctx: ExecutionContext,\n) => void | Promise<void>;\ntype ExportedHandlerQueueHandler<Env = unknown, Message = unknown> = (\n    batch: MessageBatch<Message>,\n    env: Env,\n    ctx: ExecutionContext,\n) => void | Promise<void>;\ntype ExportedHandlerTestHandler<Env = unknown> = (\n    controller: TestController,\n    env: Env,\n    ctx: ExecutionContext,\n) => void | Promise<void>;\ninterface ExportedHandler<Env = unknown, QueueHandlerMessage = unknown, CfHostMetadata = unknown> {\n    fetch?: ExportedHandlerFetchHandler<Env, 
CfHostMetadata>;\n    tail?: ExportedHandlerTailHandler<Env>;\n    trace?: ExportedHandlerTraceHandler<Env>;\n    tailStream?: ExportedHandlerTailStreamHandler<Env>;\n    scheduled?: ExportedHandlerScheduledHandler<Env>;\n    test?: ExportedHandlerTestHandler<Env>;\n    email?: EmailExportedHandler<Env>;\n    queue?: ExportedHandlerQueueHandler<Env, QueueHandlerMessage>;\n}\ninterface StructuredSerializeOptions {\n    transfer?: any[];\n}\ndeclare abstract class Navigator {\n    sendBeacon(url: string, body?: BodyInit): boolean;\n    readonly userAgent: string;\n    readonly hardwareConcurrency: number;\n    readonly language: string;\n    readonly languages: string[];\n}\ninterface AlarmInvocationInfo {\n    readonly isRetry: boolean;\n    readonly retryCount: number;\n}\ninterface Cloudflare {\n    readonly compatibilityFlags: Record<string, boolean>;\n}\ninterface DurableObject {\n    fetch(request: Request): Response | Promise<Response>;\n    alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise<void>;\n    webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise<void>;\n    webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise<void>;\n    webSocketError?(ws: WebSocket, error: unknown): void | Promise<void>;\n}\ntype DurableObjectStub<T extends Rpc.DurableObjectBranded | undefined = undefined> = Fetcher<\n    T,\n    \"alarm\" | \"webSocketMessage\" | \"webSocketClose\" | \"webSocketError\"\n> & {\n    readonly id: DurableObjectId;\n    readonly name?: string;\n};\ninterface DurableObjectId {\n    toString(): string;\n    equals(other: DurableObjectId): boolean;\n    readonly name?: string;\n}\ndeclare abstract class DurableObjectNamespace<T extends Rpc.DurableObjectBranded | undefined = undefined> {\n    newUniqueId(options?: DurableObjectNamespaceNewUniqueIdOptions): DurableObjectId;\n    idFromName(name: string): DurableObjectId;\n    idFromString(id: string): DurableObjectId;\n    get(id: 
DurableObjectId, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub<T>;\n    getByName(name: string, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub<T>;\n    jurisdiction(jurisdiction: DurableObjectJurisdiction): DurableObjectNamespace<T>;\n}\ntype DurableObjectJurisdiction = \"eu\" | \"fedramp\" | \"fedramp-high\";\ninterface DurableObjectNamespaceNewUniqueIdOptions {\n    jurisdiction?: DurableObjectJurisdiction;\n}\ntype DurableObjectLocationHint = \"wnam\" | \"enam\" | \"sam\" | \"weur\" | \"eeur\" | \"apac\" | \"oc\" | \"afr\" | \"me\";\ntype DurableObjectRoutingMode = \"primary-only\";\ninterface DurableObjectNamespaceGetDurableObjectOptions {\n    locationHint?: DurableObjectLocationHint;\n    routingMode?: DurableObjectRoutingMode;\n}\ninterface DurableObjectClass<_T extends Rpc.DurableObjectBranded | undefined = undefined> {}\ninterface DurableObjectState<Props = unknown> {\n    waitUntil(promise: Promise<any>): void;\n    readonly exports: Cloudflare.Exports;\n    readonly props: Props;\n    readonly id: DurableObjectId;\n    readonly storage: DurableObjectStorage;\n    container?: Container;\n    blockConcurrencyWhile<T>(callback: () => Promise<T>): Promise<T>;\n    acceptWebSocket(ws: WebSocket, tags?: string[]): void;\n    getWebSockets(tag?: string): WebSocket[];\n    setWebSocketAutoResponse(maybeReqResp?: WebSocketRequestResponsePair): void;\n    getWebSocketAutoResponse(): WebSocketRequestResponsePair | null;\n    getWebSocketAutoResponseTimestamp(ws: WebSocket): Date | null;\n    setHibernatableWebSocketEventTimeout(timeoutMs?: number): void;\n    getHibernatableWebSocketEventTimeout(): number | null;\n    getTags(ws: WebSocket): string[];\n    abort(reason?: string): void;\n}\ninterface DurableObjectTransaction {\n    get<T = unknown>(key: string, options?: DurableObjectGetOptions): Promise<T | undefined>;\n    get<T = unknown>(keys: string[], options?: DurableObjectGetOptions): 
Promise<Map<string, T>>;\n    list<T = unknown>(options?: DurableObjectListOptions): Promise<Map<string, T>>;\n    put<T>(key: string, value: T, options?: DurableObjectPutOptions): Promise<void>;\n    put<T>(entries: Record<string, T>, options?: DurableObjectPutOptions): Promise<void>;\n    delete(key: string, options?: DurableObjectPutOptions): Promise<boolean>;\n    delete(keys: string[], options?: DurableObjectPutOptions): Promise<number>;\n    rollback(): void;\n    getAlarm(options?: DurableObjectGetAlarmOptions): Promise<number | null>;\n    setAlarm(scheduledTime: number | Date, options?: DurableObjectSetAlarmOptions): Promise<void>;\n    deleteAlarm(options?: DurableObjectSetAlarmOptions): Promise<void>;\n}\ninterface DurableObjectStorage {\n    get<T = unknown>(key: string, options?: DurableObjectGetOptions): Promise<T | undefined>;\n    get<T = unknown>(keys: string[], options?: DurableObjectGetOptions): Promise<Map<string, T>>;\n    list<T = unknown>(options?: DurableObjectListOptions): Promise<Map<string, T>>;\n    put<T>(key: string, value: T, options?: DurableObjectPutOptions): Promise<void>;\n    put<T>(entries: Record<string, T>, options?: DurableObjectPutOptions): Promise<void>;\n    delete(key: string, options?: DurableObjectPutOptions): Promise<boolean>;\n    delete(keys: string[], options?: DurableObjectPutOptions): Promise<number>;\n    deleteAll(options?: DurableObjectPutOptions): Promise<void>;\n    transaction<T>(closure: (txn: DurableObjectTransaction) => Promise<T>): Promise<T>;\n    getAlarm(options?: DurableObjectGetAlarmOptions): Promise<number | null>;\n    setAlarm(scheduledTime: number | Date, options?: DurableObjectSetAlarmOptions): Promise<void>;\n    deleteAlarm(options?: DurableObjectSetAlarmOptions): Promise<void>;\n    sync(): Promise<void>;\n    sql: SqlStorage;\n    kv: SyncKvStorage;\n    transactionSync<T>(closure: () => T): T;\n    getCurrentBookmark(): Promise<string>;\n    getBookmarkForTime(timestamp: number | Date): 
Promise<string>;\n    onNextSessionRestoreBookmark(bookmark: string): Promise<string>;\n}\ninterface DurableObjectListOptions {\n    start?: string;\n    startAfter?: string;\n    end?: string;\n    prefix?: string;\n    reverse?: boolean;\n    limit?: number;\n    allowConcurrency?: boolean;\n    noCache?: boolean;\n}\ninterface DurableObjectGetOptions {\n    allowConcurrency?: boolean;\n    noCache?: boolean;\n}\ninterface DurableObjectGetAlarmOptions {\n    allowConcurrency?: boolean;\n}\ninterface DurableObjectPutOptions {\n    allowConcurrency?: boolean;\n    allowUnconfirmed?: boolean;\n    noCache?: boolean;\n}\ninterface DurableObjectSetAlarmOptions {\n    allowConcurrency?: boolean;\n    allowUnconfirmed?: boolean;\n}\ndeclare class WebSocketRequestResponsePair {\n    constructor(request: string, response: string);\n    get request(): string;\n    get response(): string;\n}\ninterface AnalyticsEngineDataset {\n    writeDataPoint(event?: AnalyticsEngineDataPoint): void;\n}\ninterface AnalyticsEngineDataPoint {\n    indexes?: ((ArrayBuffer | string) | null)[];\n    doubles?: number[];\n    blobs?: ((ArrayBuffer | string) | null)[];\n}\n/**\n * The **`Event`** interface represents an event which takes place on an `EventTarget`.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event)\n */\ndeclare class Event {\n    constructor(type: string, init?: EventInit);\n    /**\n     * The **`type`** read-only property of the Event interface returns a string containing the event's type.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/type)\n     */\n    get type(): string;\n    /**\n     * The **`eventPhase`** read-only property of the being evaluated.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/eventPhase)\n     */\n    get eventPhase(): number;\n    /**\n     * The read-only **`composed`** property of the or not the event will propagate across the shadow DOM boundary into the 
standard DOM.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composed)\n     */\n    get composed(): boolean;\n    /**\n     * The **`bubbles`** read-only property of the Event interface indicates whether the event bubbles up through the DOM tree or not.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/bubbles)\n     */\n    get bubbles(): boolean;\n    /**\n     * The **`cancelable`** read-only property of the Event interface indicates whether the event can be canceled, and therefore prevented as if the event never happened.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelable)\n     */\n    get cancelable(): boolean;\n    /**\n     * The **`defaultPrevented`** read-only property of the Event interface returns a boolean value indicating whether or not the call to Event.preventDefault() canceled the event.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/defaultPrevented)\n     */\n    get defaultPrevented(): boolean;\n    /**\n     * The Event property **`returnValue`** indicates whether the default action for this event has been prevented or not.\n     * @deprecated\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/returnValue)\n     */\n    get returnValue(): boolean;\n    /**\n     * The **`currentTarget`** read-only property of the Event interface identifies the element to which the event handler has been attached.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/currentTarget)\n     */\n    get currentTarget(): EventTarget | undefined;\n    /**\n     * The read-only **`target`** property of the dispatched.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/target)\n     */\n    get target(): EventTarget | undefined;\n    /**\n     * The deprecated **`Event.srcElement`** is an alias for the Event.target property.\n     * @deprecated\n 
    *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/srcElement)\n     */\n    get srcElement(): EventTarget | undefined;\n    /**\n     * The **`timeStamp`** read-only property of the Event interface returns the time (in milliseconds) at which the event was created.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/timeStamp)\n     */\n    get timeStamp(): number;\n    /**\n     * The **`isTrusted`** read-only property of the when the event was generated by the user agent (including via user actions and programmatic methods such as HTMLElement.focus()), and `false` when the event was dispatched via The only exception is the `click` event, which initializes the `isTrusted` property to `false` in user agents.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/isTrusted)\n     */\n    get isTrusted(): boolean;\n    /**\n     * The **`cancelBubble`** property of the Event interface is deprecated.\n     * @deprecated\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble)\n     */\n    get cancelBubble(): boolean;\n    /**\n     * The **`cancelBubble`** property of the Event interface is deprecated.\n     * @deprecated\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble)\n     */\n    set cancelBubble(value: boolean);\n    /**\n     * The **`stopImmediatePropagation()`** method of the If several listeners are attached to the same element for the same event type, they are called in the order in which they were added.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopImmediatePropagation)\n     */\n    stopImmediatePropagation(): void;\n    /**\n     * The **`preventDefault()`** method of the Event interface tells the user agent that if the event does not get explicitly handled, its default action should not be taken as it normally would be.\n     *\n     * [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/Event/preventDefault)\n     */\n    preventDefault(): void;\n    /**\n     * The **`stopPropagation()`** method of the Event interface prevents further propagation of the current event in the capturing and bubbling phases.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopPropagation)\n     */\n    stopPropagation(): void;\n    /**\n     * The **`composedPath()`** method of the Event interface returns the event's path which is an array of the objects on which listeners will be invoked.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composedPath)\n     */\n    composedPath(): EventTarget[];\n    static readonly NONE: number;\n    static readonly CAPTURING_PHASE: number;\n    static readonly AT_TARGET: number;\n    static readonly BUBBLING_PHASE: number;\n}\ninterface EventInit {\n    bubbles?: boolean;\n    cancelable?: boolean;\n    composed?: boolean;\n}\ntype EventListener<EventType extends Event = Event> = (event: EventType) => void;\ninterface EventListenerObject<EventType extends Event = Event> {\n    handleEvent(event: EventType): void;\n}\ntype EventListenerOrEventListenerObject<EventType extends Event = Event> =\n    | EventListener<EventType>\n    | EventListenerObject<EventType>;\n/**\n * The **`EventTarget`** interface is implemented by objects that can receive events and may have listeners for them.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget)\n */\ndeclare class EventTarget<EventMap extends Record<string, Event> = Record<string, Event>> {\n    constructor();\n    /**\n     * The **`addEventListener()`** method of the EventTarget interface sets up a function that will be called whenever the specified event is delivered to the target.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/addEventListener)\n     */\n    addEventListener<Type extends keyof EventMap>(\n 
       type: Type,\n        handler: EventListenerOrEventListenerObject<EventMap[Type]>,\n        options?: EventTargetAddEventListenerOptions | boolean,\n    ): void;\n    /**\n     * The **`removeEventListener()`** method of the EventTarget interface removes an event listener previously registered with EventTarget.addEventListener() from the target.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/removeEventListener)\n     */\n    removeEventListener<Type extends keyof EventMap>(\n        type: Type,\n        handler: EventListenerOrEventListenerObject<EventMap[Type]>,\n        options?: EventTargetEventListenerOptions | boolean,\n    ): void;\n    /**\n     * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event listeners in the appropriate order.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent)\n     */\n    dispatchEvent(event: EventMap[keyof EventMap]): boolean;\n}\ninterface EventTargetEventListenerOptions {\n    capture?: boolean;\n}\ninterface EventTargetAddEventListenerOptions {\n    capture?: boolean;\n    passive?: boolean;\n    once?: boolean;\n    signal?: AbortSignal;\n}\ninterface EventTargetHandlerObject {\n    handleEvent: (event: Event) => any | undefined;\n}\n/**\n * The **`AbortController`** interface represents a controller object that allows you to abort one or more Web requests as and when desired.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController)\n */\ndeclare class AbortController {\n    constructor();\n    /**\n     * The **`signal`** read-only property of the AbortController interface returns an AbortSignal object instance, which can be used to communicate with/abort an asynchronous operation as desired.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/signal)\n     */\n    get signal(): AbortSignal;\n 
   /**\n     * The **`abort()`** method of the AbortController interface aborts an asynchronous operation before it has completed.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/abort)\n     */\n    abort(reason?: any): void;\n}\n/**\n * The **`AbortSignal`** interface represents a signal object that allows you to communicate with an asynchronous operation (such as a fetch request) and abort it if required via an AbortController object.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal)\n */\ndeclare abstract class AbortSignal extends EventTarget {\n    /**\n     * The **`AbortSignal.abort()`** static method returns an AbortSignal that is already set as aborted (and which does not trigger an AbortSignal/abort_event event).\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_static)\n     */\n    static abort(reason?: any): AbortSignal;\n    /**\n     * The **`AbortSignal.timeout()`** static method returns an AbortSignal that will automatically abort after a specified time.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/timeout_static)\n     */\n    static timeout(delay: number): AbortSignal;\n    /**\n     * The **`AbortSignal.any()`** static method takes an iterable of abort signals and returns an AbortSignal.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/any_static)\n     */\n    static any(signals: AbortSignal[]): AbortSignal;\n    /**\n     * The **`aborted`** read-only property returns a value that indicates whether the asynchronous operations the signal is communicating with are aborted (`true`) or not (`false`).\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/aborted)\n     */\n    get aborted(): boolean;\n    /**\n     * The **`reason`** read-only property returns a JavaScript value that indicates the abort reason.\n     *\n    
 * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/reason)\n     */\n    get reason(): any;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */\n    get onabort(): any | null;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */\n    set onabort(value: any | null);\n    /**\n     * The **`throwIfAborted()`** method throws the signal's abort AbortSignal.reason if the signal has been aborted; otherwise it does nothing.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/throwIfAborted)\n     */\n    throwIfAborted(): void;\n}\ninterface Scheduler {\n    wait(delay: number, maybeOptions?: SchedulerWaitOptions): Promise<void>;\n}\ninterface SchedulerWaitOptions {\n    signal?: AbortSignal;\n}\n/**\n * The **`ExtendableEvent`** interface extends the lifetime of the `install` and `activate` events dispatched on the global scope as part of the service worker lifecycle.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent)\n */\ndeclare abstract class ExtendableEvent extends Event {\n    /**\n     * The **`ExtendableEvent.waitUntil()`** method tells the event dispatcher that work is ongoing.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent/waitUntil)\n     */\n    waitUntil(promise: Promise<any>): void;\n}\n/**\n * The **`CustomEvent`** interface represents events initialized by an application for any purpose.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent)\n */\ndeclare class CustomEvent<T = any> extends Event {\n    constructor(type: string, init?: CustomEventCustomEventInit);\n    /**\n     * The read-only **`detail`** property of the CustomEvent interface returns any data passed when initializing the event.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent/detail)\n     */\n    get 
detail(): T;\n}\ninterface CustomEventCustomEventInit {\n    bubbles?: boolean;\n    cancelable?: boolean;\n    composed?: boolean;\n    detail?: any;\n}\n/**\n * The **`Blob`** interface represents a blob, which is a file-like object of immutable, raw data; they can be read as text or binary data, or converted into a ReadableStream so its methods can be used for processing the data.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob)\n */\ndeclare class Blob {\n    constructor(type?: ((ArrayBuffer | ArrayBufferView) | string | Blob)[], options?: BlobOptions);\n    /**\n     * The **`size`** read-only property of the Blob interface returns the size of the Blob or File in bytes.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size)\n     */\n    get size(): number;\n    /**\n     * The **`type`** read-only property of the Blob interface returns the MIME type of the file.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type)\n     */\n    get type(): string;\n    /**\n     * The **`slice()`** method of the Blob interface creates and returns a new `Blob` object which contains data from a subset of the blob on which it's called.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice)\n     */\n    slice(start?: number, end?: number, type?: string): Blob;\n    /**\n     * The **`arrayBuffer()`** method of the Blob interface returns a Promise that resolves with the contents of the blob as binary data contained in an ArrayBuffer.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/arrayBuffer)\n     */\n    arrayBuffer(): Promise<ArrayBuffer>;\n    /**\n     * The **`bytes()`** method of the Blob interface returns a Promise that resolves with a Uint8Array containing the contents of the blob as an array of bytes.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/bytes)\n     */\n    bytes(): 
Promise<Uint8Array>;\n    /**\n     * The **`text()`** method of the string containing the contents of the blob, interpreted as UTF-8.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text)\n     */\n    text(): Promise<string>;\n    /**\n     * The **`stream()`** method of the Blob interface returns a ReadableStream which upon reading returns the data contained within the `Blob`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/stream)\n     */\n    stream(): ReadableStream;\n}\ninterface BlobOptions {\n    type?: string;\n}\n/**\n * The **`File`** interface provides information about files and allows JavaScript in a web page to access their content.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File)\n */\ndeclare class File extends Blob {\n    constructor(\n        bits: ((ArrayBuffer | ArrayBufferView) | string | Blob)[] | undefined,\n        name: string,\n        options?: FileOptions,\n    );\n    /**\n     * The **`name`** read-only property of the File interface returns the name of the file represented by a File object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name)\n     */\n    get name(): string;\n    /**\n     * The **`lastModified`** read-only property of the File interface provides the last modified date of the file as the number of milliseconds since the Unix epoch (January 1, 1970 at midnight).\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified)\n     */\n    get lastModified(): number;\n}\ninterface FileOptions {\n    type?: string;\n    lastModified?: number;\n}\n/**\n * The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache.\n *\n * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/)\n */\ndeclare abstract class CacheStorage {\n    /**\n     * The **`open()`** method of the the Cache object 
matching the `cacheName`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CacheStorage/open)\n     */\n    open(cacheName: string): Promise<Cache>;\n    readonly default: Cache;\n}\n/**\n * The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache.\n *\n * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/)\n */\ndeclare abstract class Cache {\n    /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#delete) */\n    delete(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<boolean>;\n    /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#match) */\n    match(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<Response | undefined>;\n    /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#put) */\n    put(request: RequestInfo | URL, response: Response): Promise<void>;\n}\ninterface CacheQueryOptions {\n    ignoreMethod?: boolean;\n}\n/**\n * The Web Crypto API provides a set of low-level functions for common cryptographic tasks.\n * The Workers runtime implements the full surface of this API, but with some differences in\n * the [supported algorithms](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/#supported-algorithms)\n * compared to those implemented in most browsers.\n *\n * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/)\n */\ndeclare abstract class Crypto {\n    /**\n     * The **`Crypto.subtle`** read-only property returns a cryptographic operations.\n     * Available only in secure contexts.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/subtle)\n     */\n    get subtle(): SubtleCrypto;\n    /**\n     * The **`Crypto.getRandomValues()`** method lets you get cryptographically strong random 
values.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/getRandomValues)\n     */\n    getRandomValues<\n        T extends\n            | Int8Array\n            | Uint8Array\n            | Int16Array\n            | Uint16Array\n            | Int32Array\n            | Uint32Array\n            | BigInt64Array\n            | BigUint64Array,\n    >(buffer: T): T;\n    /**\n     * The **`randomUUID()`** method of the Crypto interface is used to generate a v4 UUID using a cryptographically secure random number generator.\n     * Available only in secure contexts.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/randomUUID)\n     */\n    randomUUID(): string;\n    DigestStream: typeof DigestStream;\n}\n/**\n * The **`SubtleCrypto`** interface of the Web Crypto API provides a number of low-level cryptographic functions.\n * Available only in secure contexts.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto)\n */\ndeclare abstract class SubtleCrypto {\n    /**\n     * The **`encrypt()`** method of the SubtleCrypto interface encrypts data.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/encrypt)\n     */\n    encrypt(\n        algorithm: string | SubtleCryptoEncryptAlgorithm,\n        key: CryptoKey,\n        plainText: ArrayBuffer | ArrayBufferView,\n    ): Promise<ArrayBuffer>;\n    /**\n     * The **`decrypt()`** method of the SubtleCrypto interface decrypts some encrypted data.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/decrypt)\n     */\n    decrypt(\n        algorithm: string | SubtleCryptoEncryptAlgorithm,\n        key: CryptoKey,\n        cipherText: ArrayBuffer | ArrayBufferView,\n    ): Promise<ArrayBuffer>;\n    /**\n     * The **`sign()`** method of the SubtleCrypto interface generates a digital signature.\n     *\n     * [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/sign)\n     */\n    sign(\n        algorithm: string | SubtleCryptoSignAlgorithm,\n        key: CryptoKey,\n        data: ArrayBuffer | ArrayBufferView,\n    ): Promise<ArrayBuffer>;\n    /**\n     * The **`verify()`** method of the SubtleCrypto interface verifies a digital signature.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/verify)\n     */\n    verify(\n        algorithm: string | SubtleCryptoSignAlgorithm,\n        key: CryptoKey,\n        signature: ArrayBuffer | ArrayBufferView,\n        data: ArrayBuffer | ArrayBufferView,\n    ): Promise<boolean>;\n    /**\n     * The **`digest()`** method of the SubtleCrypto interface generates a _digest_ of the given data, using the specified hash function.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/digest)\n     */\n    digest(algorithm: string | SubtleCryptoHashAlgorithm, data: ArrayBuffer | ArrayBufferView): Promise<ArrayBuffer>;\n    /**\n     * The **`generateKey()`** method of the SubtleCrypto interface is used to generate a new key (for symmetric algorithms) or key pair (for public-key algorithms).\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/generateKey)\n     */\n    generateKey(\n        algorithm: string | SubtleCryptoGenerateKeyAlgorithm,\n        extractable: boolean,\n        keyUsages: string[],\n    ): Promise<CryptoKey | CryptoKeyPair>;\n    /**\n     * The **`deriveKey()`** method of the SubtleCrypto interface can be used to derive a secret key from a master key.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveKey)\n     */\n    deriveKey(\n        algorithm: string | SubtleCryptoDeriveKeyAlgorithm,\n        baseKey: CryptoKey,\n        derivedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm,\n        extractable: boolean,\n        keyUsages: string[],\n    
): Promise<CryptoKey>;\n    /**\n     * The **`deriveBits()`** method of the key.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveBits)\n     */\n    deriveBits(\n        algorithm: string | SubtleCryptoDeriveKeyAlgorithm,\n        baseKey: CryptoKey,\n        length?: number | null,\n    ): Promise<ArrayBuffer>;\n    /**\n     * The **`importKey()`** method of the SubtleCrypto interface imports a key: that is, it takes as input a key in an external, portable format and gives you a CryptoKey object that you can use in the Web Crypto API.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/importKey)\n     */\n    importKey(\n        format: string,\n        keyData: (ArrayBuffer | ArrayBufferView) | JsonWebKey,\n        algorithm: string | SubtleCryptoImportKeyAlgorithm,\n        extractable: boolean,\n        keyUsages: string[],\n    ): Promise<CryptoKey>;\n    /**\n     * The **`exportKey()`** method of the SubtleCrypto interface exports a key: that is, it takes as input a CryptoKey object and gives you the key in an external, portable format.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/exportKey)\n     */\n    exportKey(format: string, key: CryptoKey): Promise<ArrayBuffer | JsonWebKey>;\n    /**\n     * The **`wrapKey()`** method of the SubtleCrypto interface 'wraps' a key.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/wrapKey)\n     */\n    wrapKey(\n        format: string,\n        key: CryptoKey,\n        wrappingKey: CryptoKey,\n        wrapAlgorithm: string | SubtleCryptoEncryptAlgorithm,\n    ): Promise<ArrayBuffer>;\n    /**\n     * The **`unwrapKey()`** method of the SubtleCrypto interface 'unwraps' a key.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/unwrapKey)\n     */\n    unwrapKey(\n        format: string,\n        wrappedKey: ArrayBuffer | 
ArrayBufferView,\n        unwrappingKey: CryptoKey,\n        unwrapAlgorithm: string | SubtleCryptoEncryptAlgorithm,\n        unwrappedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm,\n        extractable: boolean,\n        keyUsages: string[],\n    ): Promise<CryptoKey>;\n    timingSafeEqual(a: ArrayBuffer | ArrayBufferView, b: ArrayBuffer | ArrayBufferView): boolean;\n}\n/**\n * The **`CryptoKey`** interface of the Web Crypto API represents a cryptographic key obtained from one of the SubtleCrypto methods SubtleCrypto.generateKey, SubtleCrypto.deriveKey, SubtleCrypto.importKey, or SubtleCrypto.unwrapKey.\n * Available only in secure contexts.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey)\n */\ndeclare abstract class CryptoKey {\n    /**\n     * The read-only **`type`** property of the CryptoKey interface indicates which kind of key is represented by the object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/type)\n     */\n    readonly type: string;\n    /**\n     * The read-only **`extractable`** property of the CryptoKey interface indicates whether or not the key may be extracted using `SubtleCrypto.exportKey()` or `SubtleCrypto.wrapKey()`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/extractable)\n     */\n    readonly extractable: boolean;\n    /**\n     * The read-only **`algorithm`** property of the CryptoKey interface returns an object describing the algorithm for which this key can be used, and any associated extra parameters.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/algorithm)\n     */\n    readonly algorithm:\n        | CryptoKeyKeyAlgorithm\n        | CryptoKeyAesKeyAlgorithm\n        | CryptoKeyHmacKeyAlgorithm\n        | CryptoKeyRsaKeyAlgorithm\n        | CryptoKeyEllipticKeyAlgorithm\n        | CryptoKeyArbitraryKeyAlgorithm;\n    /**\n     * The read-only **`usages`** property of the 
CryptoKey interface indicates what can be done with the key.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/usages)\n     */\n    readonly usages: string[];\n}\ninterface CryptoKeyPair {\n    publicKey: CryptoKey;\n    privateKey: CryptoKey;\n}\ninterface JsonWebKey {\n    kty: string;\n    use?: string;\n    key_ops?: string[];\n    alg?: string;\n    ext?: boolean;\n    crv?: string;\n    x?: string;\n    y?: string;\n    d?: string;\n    n?: string;\n    e?: string;\n    p?: string;\n    q?: string;\n    dp?: string;\n    dq?: string;\n    qi?: string;\n    oth?: RsaOtherPrimesInfo[];\n    k?: string;\n}\ninterface RsaOtherPrimesInfo {\n    r?: string;\n    d?: string;\n    t?: string;\n}\ninterface SubtleCryptoDeriveKeyAlgorithm {\n    name: string;\n    salt?: ArrayBuffer | ArrayBufferView;\n    iterations?: number;\n    hash?: string | SubtleCryptoHashAlgorithm;\n    $public?: CryptoKey;\n    info?: ArrayBuffer | ArrayBufferView;\n}\ninterface SubtleCryptoEncryptAlgorithm {\n    name: string;\n    iv?: ArrayBuffer | ArrayBufferView;\n    additionalData?: ArrayBuffer | ArrayBufferView;\n    tagLength?: number;\n    counter?: ArrayBuffer | ArrayBufferView;\n    length?: number;\n    label?: ArrayBuffer | ArrayBufferView;\n}\ninterface SubtleCryptoGenerateKeyAlgorithm {\n    name: string;\n    hash?: string | SubtleCryptoHashAlgorithm;\n    modulusLength?: number;\n    publicExponent?: ArrayBuffer | ArrayBufferView;\n    length?: number;\n    namedCurve?: string;\n}\ninterface SubtleCryptoHashAlgorithm {\n    name: string;\n}\ninterface SubtleCryptoImportKeyAlgorithm {\n    name: string;\n    hash?: string | SubtleCryptoHashAlgorithm;\n    length?: number;\n    namedCurve?: string;\n    compressed?: boolean;\n}\ninterface SubtleCryptoSignAlgorithm {\n    name: string;\n    hash?: string | SubtleCryptoHashAlgorithm;\n    dataLength?: number;\n    saltLength?: number;\n}\ninterface CryptoKeyKeyAlgorithm {\n    name: 
string;\n}\ninterface CryptoKeyAesKeyAlgorithm {\n    name: string;\n    length: number;\n}\ninterface CryptoKeyHmacKeyAlgorithm {\n    name: string;\n    hash: CryptoKeyKeyAlgorithm;\n    length: number;\n}\ninterface CryptoKeyRsaKeyAlgorithm {\n    name: string;\n    modulusLength: number;\n    publicExponent: ArrayBuffer | ArrayBufferView;\n    hash?: CryptoKeyKeyAlgorithm;\n}\ninterface CryptoKeyEllipticKeyAlgorithm {\n    name: string;\n    namedCurve: string;\n}\ninterface CryptoKeyArbitraryKeyAlgorithm {\n    name: string;\n    hash?: CryptoKeyKeyAlgorithm;\n    namedCurve?: string;\n    length?: number;\n}\ndeclare class DigestStream extends WritableStream<ArrayBuffer | ArrayBufferView> {\n    constructor(algorithm: string | SubtleCryptoHashAlgorithm);\n    readonly digest: Promise<ArrayBuffer>;\n    get bytesWritten(): number | bigint;\n}\n/**\n * The **`TextDecoder`** interface represents a decoder for a specific text encoding, such as `UTF-8`, `ISO-8859-2`, `KOI8-R`, `GBK`, etc.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder)\n */\ndeclare class TextDecoder {\n    constructor(label?: string, options?: TextDecoderConstructorOptions);\n    /**\n     * The **`TextDecoder.decode()`** method returns a string containing text decoded from the buffer passed as a parameter.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder/decode)\n     */\n    decode(input?: ArrayBuffer | ArrayBufferView, options?: TextDecoderDecodeOptions): string;\n    get encoding(): string;\n    get fatal(): boolean;\n    get ignoreBOM(): boolean;\n}\n/**\n * The **`TextEncoder`** interface takes a stream of code points as input and emits a stream of UTF-8 bytes.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder)\n */\ndeclare class TextEncoder {\n    constructor();\n    /**\n     * The **`TextEncoder.encode()`** method takes a string as input, and returns a Global_Objects/Uint8Array containing 
the text given in parameters encoded with the specific method for that TextEncoder object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encode)\n     */\n    encode(input?: string): Uint8Array;\n    /**\n     * The **`TextEncoder.encodeInto()`** method takes a string to encode and a destination Uint8Array to put resulting UTF-8 encoded text into, and returns a dictionary object indicating the progress of the encoding.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encodeInto)\n     */\n    encodeInto(input: string, buffer: Uint8Array): TextEncoderEncodeIntoResult;\n    get encoding(): string;\n}\ninterface TextDecoderConstructorOptions {\n    fatal: boolean;\n    ignoreBOM: boolean;\n}\ninterface TextDecoderDecodeOptions {\n    stream: boolean;\n}\ninterface TextEncoderEncodeIntoResult {\n    read: number;\n    written: number;\n}\n/**\n * The **`ErrorEvent`** interface represents events providing information related to errors in scripts or in files.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent)\n */\ndeclare class ErrorEvent extends Event {\n    constructor(type: string, init?: ErrorEventErrorEventInit);\n    /**\n     * The **`filename`** read-only property of the ErrorEvent interface returns a string containing the name of the script file in which the error occurred.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/filename)\n     */\n    get filename(): string;\n    /**\n     * The **`message`** read-only property of the ErrorEvent interface returns a string containing a human-readable error message describing the problem.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/message)\n     */\n    get message(): string;\n    /**\n     * The **`lineno`** read-only property of the ErrorEvent interface returns an integer containing the line number of the script file on which the 
error occurred.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/lineno)\n     */\n    get lineno(): number;\n    /**\n     * The **`colno`** read-only property of the ErrorEvent interface returns an integer containing the column number of the script file on which the error occurred.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/colno)\n     */\n    get colno(): number;\n    /**\n     * The **`error`** read-only property of the ErrorEvent interface returns a JavaScript value, such as an Error or DOMException, representing the error associated with this event.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/error)\n     */\n    get error(): any;\n}\ninterface ErrorEventErrorEventInit {\n    message?: string;\n    filename?: string;\n    lineno?: number;\n    colno?: number;\n    error?: any;\n}\n/**\n * The **`MessageEvent`** interface represents a message received by a target object.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent)\n */\ndeclare class MessageEvent extends Event {\n    constructor(type: string, initializer: MessageEventInit);\n    /**\n     * The **`data`** read-only property of the The data sent by the message emitter; this can be any data type, depending on what originated this event.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/data)\n     */\n    readonly data: any;\n    /**\n     * The **`origin`** read-only property of the origin of the message emitter.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/origin)\n     */\n    readonly origin: string | null;\n    /**\n     * The **`lastEventId`** read-only property of the unique ID for the event.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/lastEventId)\n     */\n    readonly lastEventId: string;\n    /**\n     * The **`source`** 
read-only property of the a WindowProxy, MessagePort, or a `MessageEventSource` (which can be a WindowProxy, message emitter.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/source)\n     */\n    readonly source: MessagePort | null;\n    /**\n     * The **`ports`** read-only property of the containing all MessagePort objects sent with the message, in order.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/ports)\n     */\n    readonly ports: MessagePort[];\n}\ninterface MessageEventInit {\n    data: ArrayBuffer | string;\n}\n/**\n * The **`PromiseRejectionEvent`** interface represents events which are sent to the global script context when JavaScript Promises are rejected.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent)\n */\ndeclare abstract class PromiseRejectionEvent extends Event {\n    /**\n     * The PromiseRejectionEvent interface's **`promise`** read-only property indicates the JavaScript rejected.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/promise)\n     */\n    readonly promise: Promise<any>;\n    /**\n     * The PromiseRejectionEvent **`reason`** read-only property is any JavaScript value or Object which provides the reason passed into Promise.reject().\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/reason)\n     */\n    readonly reason: any;\n}\n/**\n * The **`FormData`** interface provides a way to construct a set of key/value pairs representing form fields and their values, which can be sent using the Window/fetch, XMLHttpRequest.send() or navigator.sendBeacon() methods.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData)\n */\ndeclare class FormData {\n    constructor();\n    /**\n     * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` 
object, or adds the key if it does not already exist.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append)\n     */\n    append(name: string, value: string): void;\n    /**\n     * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append)\n     */\n    append(name: string, value: Blob, filename?: string): void;\n    /**\n     * The **`delete()`** method of the FormData interface deletes a key and its value(s) from a `FormData` object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/delete)\n     */\n    delete(name: string): void;\n    /**\n     * The **`get()`** method of the FormData interface returns the first value associated with a given key from within a `FormData` object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/get)\n     */\n    get(name: string): (File | string) | null;\n    /**\n     * The **`getAll()`** method of the FormData interface returns all the values associated with a given key from within a `FormData` object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/getAll)\n     */\n    getAll(name: string): (File | string)[];\n    /**\n     * The **`has()`** method of the FormData interface returns whether a `FormData` object contains a certain key.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/has)\n     */\n    has(name: string): boolean;\n    /**\n     * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set)\n     */\n    set(name: string, value: string): void;\n    
/**\n     * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set)\n     */\n    set(name: string, value: Blob, filename?: string): void;\n    /* Returns an array of key, value pairs for every entry in the list. */\n    entries(): IterableIterator<[key: string, value: File | string]>;\n    /* Returns a list of keys in the list. */\n    keys(): IterableIterator<string>;\n    /* Returns a list of values in the list. */\n    values(): IterableIterator<File | string>;\n    forEach<This = unknown>(\n        callback: (this: This, value: File | string, key: string, parent: FormData) => void,\n        thisArg?: This,\n    ): void;\n    [Symbol.iterator](): IterableIterator<[key: string, value: File | string]>;\n}\ninterface ContentOptions {\n    html?: boolean;\n}\ndeclare class HTMLRewriter {\n    constructor();\n    on(selector: string, handlers: HTMLRewriterElementContentHandlers): HTMLRewriter;\n    onDocument(handlers: HTMLRewriterDocumentContentHandlers): HTMLRewriter;\n    transform(response: Response): Response;\n}\ninterface HTMLRewriterElementContentHandlers {\n    element?(element: Element): void | Promise<void>;\n    comments?(comment: Comment): void | Promise<void>;\n    text?(element: Text): void | Promise<void>;\n}\ninterface HTMLRewriterDocumentContentHandlers {\n    doctype?(doctype: Doctype): void | Promise<void>;\n    comments?(comment: Comment): void | Promise<void>;\n    text?(text: Text): void | Promise<void>;\n    end?(end: DocumentEnd): void | Promise<void>;\n}\ninterface Doctype {\n    readonly name: string | null;\n    readonly publicId: string | null;\n    readonly systemId: string | null;\n}\ninterface Element {\n    tagName: string;\n    readonly attributes: IterableIterator<string[]>;\n    readonly removed: boolean;\n    readonly namespaceURI: 
string;\n    getAttribute(name: string): string | null;\n    hasAttribute(name: string): boolean;\n    setAttribute(name: string, value: string): Element;\n    removeAttribute(name: string): Element;\n    before(content: string | ReadableStream | Response, options?: ContentOptions): Element;\n    after(content: string | ReadableStream | Response, options?: ContentOptions): Element;\n    prepend(content: string | ReadableStream | Response, options?: ContentOptions): Element;\n    append(content: string | ReadableStream | Response, options?: ContentOptions): Element;\n    replace(content: string | ReadableStream | Response, options?: ContentOptions): Element;\n    remove(): Element;\n    removeAndKeepContent(): Element;\n    setInnerContent(content: string | ReadableStream | Response, options?: ContentOptions): Element;\n    onEndTag(handler: (tag: EndTag) => void | Promise<void>): void;\n}\ninterface EndTag {\n    name: string;\n    before(content: string | ReadableStream | Response, options?: ContentOptions): EndTag;\n    after(content: string | ReadableStream | Response, options?: ContentOptions): EndTag;\n    remove(): EndTag;\n}\ninterface Comment {\n    text: string;\n    readonly removed: boolean;\n    before(content: string, options?: ContentOptions): Comment;\n    after(content: string, options?: ContentOptions): Comment;\n    replace(content: string, options?: ContentOptions): Comment;\n    remove(): Comment;\n}\ninterface Text {\n    readonly text: string;\n    readonly lastInTextNode: boolean;\n    readonly removed: boolean;\n    before(content: string | ReadableStream | Response, options?: ContentOptions): Text;\n    after(content: string | ReadableStream | Response, options?: ContentOptions): Text;\n    replace(content: string | ReadableStream | Response, options?: ContentOptions): Text;\n    remove(): Text;\n}\ninterface DocumentEnd {\n    append(content: string, options?: ContentOptions): DocumentEnd;\n}\n/**\n * This is the event type for `fetch` 
events dispatched on the ServiceWorkerGlobalScope.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent)\n */\ndeclare abstract class FetchEvent extends ExtendableEvent {\n    /**\n     * The **`request`** read-only property of the the event handler.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/request)\n     */\n    readonly request: Request;\n    /**\n     * The **`respondWith()`** method of allows you to provide a promise for a Response yourself.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/respondWith)\n     */\n    respondWith(promise: Response | Promise<Response>): void;\n    passThroughOnException(): void;\n}\ntype HeadersInit = Headers | Iterable<Iterable<string>> | Record<string, string>;\n/**\n * The **`Headers`** interface of the Fetch API allows you to perform various actions on HTTP request and response headers.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers)\n */\ndeclare class Headers {\n    constructor(init?: HeadersInit);\n    /**\n     * The **`get()`** method of the Headers interface returns a byte string of all the values of a header within a `Headers` object with a given name.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/get)\n     */\n    get(name: string): string | null;\n    getAll(name: string): string[];\n    /**\n     * The **`getSetCookie()`** method of the Headers interface returns an array containing the values of all Set-Cookie headers associated with a response.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/getSetCookie)\n     */\n    getSetCookie(): string[];\n    /**\n     * The **`has()`** method of the Headers interface returns a boolean stating whether a `Headers` object contains a certain header.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/has)\n     */\n    has(name: string): 
boolean;\n    /**\n     * The **`set()`** method of the Headers interface sets a new value for an existing header inside a `Headers` object, or adds the header if it does not already exist.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/set)\n     */\n    set(name: string, value: string): void;\n    /**\n     * The **`append()`** method of the Headers interface appends a new value onto an existing header inside a `Headers` object, or adds the header if it does not already exist.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/append)\n     */\n    append(name: string, value: string): void;\n    /**\n     * The **`delete()`** method of the Headers interface deletes a header from the current `Headers` object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/delete)\n     */\n    delete(name: string): void;\n    forEach<This = unknown>(\n        callback: (this: This, value: string, key: string, parent: Headers) => void,\n        thisArg?: This,\n    ): void;\n    /* Returns an iterator allowing to go through all key/value pairs contained in this object. */\n    entries(): IterableIterator<[key: string, value: string]>;\n    /* Returns an iterator allowing to go through all keys of the key/value pairs contained in this object. */\n    keys(): IterableIterator<string>;\n    /* Returns an iterator allowing to go through all values of the key/value pairs contained in this object. 
*/\n    values(): IterableIterator<string>;\n    [Symbol.iterator](): IterableIterator<[key: string, value: string]>;\n}\ntype BodyInit = ReadableStream<Uint8Array> | string | ArrayBuffer | ArrayBufferView | Blob | URLSearchParams | FormData;\ndeclare abstract class Body {\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/body) */\n    get body(): ReadableStream | null;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bodyUsed) */\n    get bodyUsed(): boolean;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/arrayBuffer) */\n    arrayBuffer(): Promise<ArrayBuffer>;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bytes) */\n    bytes(): Promise<Uint8Array>;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/text) */\n    text(): Promise<string>;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/json) */\n    json<T>(): Promise<T>;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/formData) */\n    formData(): Promise<FormData>;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/blob) */\n    blob(): Promise<Blob>;\n}\n/**\n * The **`Response`** interface of the Fetch API represents the response to a request.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response)\n */\ndeclare var Response: {\n    prototype: Response;\n    new (body?: BodyInit | null, init?: ResponseInit): Response;\n    error(): Response;\n    redirect(url: string, status?: number): Response;\n    json(any: any, maybeInit?: ResponseInit | Response): Response;\n};\n/**\n * The **`Response`** interface of the Fetch API represents the response to a request.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response)\n */\ninterface Response extends Body {\n    /**\n     * The **`clone()`** method of the Response interface creates a clone of a response object, 
identical in every way, but stored in a different variable.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/clone)\n     */\n    clone(): Response;\n    /**\n     * The **`status`** read-only property of the Response interface contains the HTTP status codes of the response.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/status)\n     */\n    status: number;\n    /**\n     * The **`statusText`** read-only property of the Response interface contains the status message corresponding to the HTTP status code in Response.status.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/statusText)\n     */\n    statusText: string;\n    /**\n     * The **`headers`** read-only property of the with the response.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/headers)\n     */\n    headers: Headers;\n    /**\n     * The **`ok`** read-only property of the Response interface contains a Boolean stating whether the response was successful (status in the range 200-299) or not.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/ok)\n     */\n    ok: boolean;\n    /**\n     * The **`redirected`** read-only property of the Response interface indicates whether or not the response is the result of a request you made which was redirected.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/redirected)\n     */\n    redirected: boolean;\n    /**\n     * The **`url`** read-only property of the Response interface contains the URL of the response.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/url)\n     */\n    url: string;\n    webSocket: WebSocket | null;\n    cf: any | undefined;\n    /**\n     * The **`type`** read-only property of the Response interface contains the type of the response.\n     *\n     * [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/Response/type)\n     */\n    type: \"default\" | \"error\";\n}\ninterface ResponseInit {\n    status?: number;\n    statusText?: string;\n    headers?: HeadersInit;\n    cf?: any;\n    webSocket?: WebSocket | null;\n    encodeBody?: \"automatic\" | \"manual\";\n}\ntype RequestInfo<CfHostMetadata = unknown, Cf = CfProperties<CfHostMetadata>> = Request<CfHostMetadata, Cf> | string;\n/**\n * The **`Request`** interface of the Fetch API represents a resource request.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request)\n */\ndeclare var Request: {\n    prototype: Request;\n    new <CfHostMetadata = unknown, Cf = CfProperties<CfHostMetadata>>(\n        input: RequestInfo<CfProperties> | URL,\n        init?: RequestInit<Cf>,\n    ): Request<CfHostMetadata, Cf>;\n};\n/**\n * The **`Request`** interface of the Fetch API represents a resource request.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request)\n */\ninterface Request<CfHostMetadata = unknown, Cf = CfProperties<CfHostMetadata>> extends Body {\n    /**\n     * The **`clone()`** method of the Request interface creates a copy of the current `Request` object.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/clone)\n     */\n    clone(): Request<CfHostMetadata, Cf>;\n    /**\n     * The **`method`** read-only property of the `POST`, etc.) 
A String indicating the method of the request.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/method)\n     */\n    method: string;\n    /**\n     * The **`url`** read-only property of the Request interface contains the URL of the request.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/url)\n     */\n    url: string;\n    /**\n     * The **`headers`** read-only property of the with the request.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/headers)\n     */\n    headers: Headers;\n    /**\n     * The **`redirect`** read-only property of the Request interface contains the mode for how redirects are handled.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/redirect)\n     */\n    redirect: string;\n    fetcher: Fetcher | null;\n    /**\n     * The read-only **`signal`** property of the Request interface returns the AbortSignal associated with the request.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/signal)\n     */\n    signal: AbortSignal;\n    cf: Cf | undefined;\n    /**\n     * The **`integrity`** read-only property of the Request interface contains the subresource integrity value of the request.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/integrity)\n     */\n    integrity: string;\n    /**\n     * The **`keepalive`** read-only property of the Request interface contains the request's `keepalive` setting (`true` or `false`), which indicates whether the browser will keep the associated request alive if the page that initiated it is unloaded before the request is complete.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/keepalive)\n     */\n    keepalive: boolean;\n    /**\n     * The **`cache`** read-only property of the Request interface contains the cache mode of the request.\n     *\n     * [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/Request/cache)\n     */\n    cache?: \"no-store\" | \"no-cache\";\n}\ninterface RequestInit<Cf = CfProperties> {\n    /* A string to set request's method. */\n    method?: string;\n    /* A Headers object, an object literal, or an array of two-item arrays to set request's headers. */\n    headers?: HeadersInit;\n    /* A BodyInit object or null to set request's body. */\n    body?: BodyInit | null;\n    /* A string indicating whether request follows redirects, results in an error upon encountering a redirect, or returns the redirect (in an opaque fashion). Sets request's redirect. */\n    redirect?: string;\n    fetcher?: Fetcher | null;\n    cf?: Cf;\n    /* A string indicating how the request will interact with the browser's cache to set request's cache. */\n    cache?: \"no-store\" | \"no-cache\";\n    /* A cryptographic hash of the resource to be fetched by request. Sets request's integrity. */\n    integrity?: string;\n    /* An AbortSignal to set request's signal. */\n    signal?: AbortSignal | null;\n    encodeResponseBody?: \"automatic\" | \"manual\";\n}\ntype Service<\n    T extends\n        | (new (...args: any[]) => Rpc.WorkerEntrypointBranded)\n        | Rpc.WorkerEntrypointBranded\n        | ExportedHandler<any, any, any>\n        | undefined = undefined,\n> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded\n    ? Fetcher<InstanceType<T>>\n    : T extends Rpc.WorkerEntrypointBranded\n      ? Fetcher<T>\n      : T extends Exclude<Rpc.EntrypointBranded, Rpc.WorkerEntrypointBranded>\n        ? never\n        : Fetcher<undefined>;\ntype Fetcher<\n    T extends Rpc.EntrypointBranded | undefined = undefined,\n    Reserved extends string = never,\n> = (T extends Rpc.EntrypointBranded ? 
Rpc.Provider<T, Reserved | \"fetch\" | \"connect\"> : unknown) & {\n    fetch(input: RequestInfo | URL, init?: RequestInit): Promise<Response>;\n    connect(address: SocketAddress | string, options?: SocketOptions): Socket;\n};\ninterface KVNamespaceListKey<Metadata, Key extends string = string> {\n    name: Key;\n    expiration?: number;\n    metadata?: Metadata;\n}\ntype KVNamespaceListResult<Metadata, Key extends string = string> =\n    | {\n          list_complete: false;\n          keys: KVNamespaceListKey<Metadata, Key>[];\n          cursor: string;\n          cacheStatus: string | null;\n      }\n    | {\n          list_complete: true;\n          keys: KVNamespaceListKey<Metadata, Key>[];\n          cacheStatus: string | null;\n      };\ninterface KVNamespace<Key extends string = string> {\n    get(key: Key, options?: Partial<KVNamespaceGetOptions<undefined>>): Promise<string | null>;\n    get(key: Key, type: \"text\"): Promise<string | null>;\n    get<ExpectedValue = unknown>(key: Key, type: \"json\"): Promise<ExpectedValue | null>;\n    get(key: Key, type: \"arrayBuffer\"): Promise<ArrayBuffer | null>;\n    get(key: Key, type: \"stream\"): Promise<ReadableStream | null>;\n    get(key: Key, options?: KVNamespaceGetOptions<\"text\">): Promise<string | null>;\n    get<ExpectedValue = unknown>(key: Key, options?: KVNamespaceGetOptions<\"json\">): Promise<ExpectedValue | null>;\n    get(key: Key, options?: KVNamespaceGetOptions<\"arrayBuffer\">): Promise<ArrayBuffer | null>;\n    get(key: Key, options?: KVNamespaceGetOptions<\"stream\">): Promise<ReadableStream | null>;\n    get(key: Array<Key>, type: \"text\"): Promise<Map<string, string | null>>;\n    get<ExpectedValue = unknown>(key: Array<Key>, type: \"json\"): Promise<Map<string, ExpectedValue | null>>;\n    get(key: Array<Key>, options?: Partial<KVNamespaceGetOptions<undefined>>): Promise<Map<string, string | null>>;\n    get(key: Array<Key>, options?: KVNamespaceGetOptions<\"text\">): Promise<Map<string, 
string | null>>;\n    get<ExpectedValue = unknown>(\n        key: Array<Key>,\n        options?: KVNamespaceGetOptions<\"json\">,\n    ): Promise<Map<string, ExpectedValue | null>>;\n    list<Metadata = unknown>(options?: KVNamespaceListOptions): Promise<KVNamespaceListResult<Metadata, Key>>;\n    put(\n        key: Key,\n        value: string | ArrayBuffer | ArrayBufferView | ReadableStream,\n        options?: KVNamespacePutOptions,\n    ): Promise<void>;\n    getWithMetadata<Metadata = unknown>(\n        key: Key,\n        options?: Partial<KVNamespaceGetOptions<undefined>>,\n    ): Promise<KVNamespaceGetWithMetadataResult<string, Metadata>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Key,\n        type: \"text\",\n    ): Promise<KVNamespaceGetWithMetadataResult<string, Metadata>>;\n    getWithMetadata<ExpectedValue = unknown, Metadata = unknown>(\n        key: Key,\n        type: \"json\",\n    ): Promise<KVNamespaceGetWithMetadataResult<ExpectedValue, Metadata>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Key,\n        type: \"arrayBuffer\",\n    ): Promise<KVNamespaceGetWithMetadataResult<ArrayBuffer, Metadata>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Key,\n        type: \"stream\",\n    ): Promise<KVNamespaceGetWithMetadataResult<ReadableStream, Metadata>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Key,\n        options: KVNamespaceGetOptions<\"text\">,\n    ): Promise<KVNamespaceGetWithMetadataResult<string, Metadata>>;\n    getWithMetadata<ExpectedValue = unknown, Metadata = unknown>(\n        key: Key,\n        options: KVNamespaceGetOptions<\"json\">,\n    ): Promise<KVNamespaceGetWithMetadataResult<ExpectedValue, Metadata>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Key,\n        options: KVNamespaceGetOptions<\"arrayBuffer\">,\n    ): Promise<KVNamespaceGetWithMetadataResult<ArrayBuffer, Metadata>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Key,\n        options: 
KVNamespaceGetOptions<\"stream\">,\n    ): Promise<KVNamespaceGetWithMetadataResult<ReadableStream, Metadata>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Array<Key>,\n        type: \"text\",\n    ): Promise<Map<string, KVNamespaceGetWithMetadataResult<string, Metadata>>>;\n    getWithMetadata<ExpectedValue = unknown, Metadata = unknown>(\n        key: Array<Key>,\n        type: \"json\",\n    ): Promise<Map<string, KVNamespaceGetWithMetadataResult<ExpectedValue, Metadata>>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Array<Key>,\n        options?: Partial<KVNamespaceGetOptions<undefined>>,\n    ): Promise<Map<string, KVNamespaceGetWithMetadataResult<string, Metadata>>>;\n    getWithMetadata<Metadata = unknown>(\n        key: Array<Key>,\n        options?: KVNamespaceGetOptions<\"text\">,\n    ): Promise<Map<string, KVNamespaceGetWithMetadataResult<string, Metadata>>>;\n    getWithMetadata<ExpectedValue = unknown, Metadata = unknown>(\n        key: Array<Key>,\n        options?: KVNamespaceGetOptions<\"json\">,\n    ): Promise<Map<string, KVNamespaceGetWithMetadataResult<ExpectedValue, Metadata>>>;\n    delete(key: Key): Promise<void>;\n}\ninterface KVNamespaceListOptions {\n    limit?: number;\n    prefix?: string | null;\n    cursor?: string | null;\n}\ninterface KVNamespaceGetOptions<Type> {\n    type: Type;\n    cacheTtl?: number;\n}\ninterface KVNamespacePutOptions {\n    expiration?: number;\n    expirationTtl?: number;\n    metadata?: any | null;\n}\ninterface KVNamespaceGetWithMetadataResult<Value, Metadata> {\n    value: Value | null;\n    metadata: Metadata | null;\n    cacheStatus: string | null;\n}\ntype QueueContentType = \"text\" | \"bytes\" | \"json\" | \"v8\";\ninterface Queue<Body = unknown> {\n    send(message: Body, options?: QueueSendOptions): Promise<void>;\n    sendBatch(messages: Iterable<MessageSendRequest<Body>>, options?: QueueSendBatchOptions): Promise<void>;\n}\ninterface QueueSendOptions {\n    contentType?: 
QueueContentType;\n    delaySeconds?: number;\n}\ninterface QueueSendBatchOptions {\n    delaySeconds?: number;\n}\ninterface MessageSendRequest<Body = unknown> {\n    body: Body;\n    contentType?: QueueContentType;\n    delaySeconds?: number;\n}\ninterface QueueRetryOptions {\n    delaySeconds?: number;\n}\ninterface Message<Body = unknown> {\n    readonly id: string;\n    readonly timestamp: Date;\n    readonly body: Body;\n    readonly attempts: number;\n    retry(options?: QueueRetryOptions): void;\n    ack(): void;\n}\ninterface QueueEvent<Body = unknown> extends ExtendableEvent {\n    readonly messages: readonly Message<Body>[];\n    readonly queue: string;\n    retryAll(options?: QueueRetryOptions): void;\n    ackAll(): void;\n}\ninterface MessageBatch<Body = unknown> {\n    readonly messages: readonly Message<Body>[];\n    readonly queue: string;\n    retryAll(options?: QueueRetryOptions): void;\n    ackAll(): void;\n}\ninterface R2Error extends Error {\n    readonly name: string;\n    readonly code: number;\n    readonly message: string;\n    readonly action: string;\n    readonly stack: any;\n}\ninterface R2ListOptions {\n    limit?: number;\n    prefix?: string;\n    cursor?: string;\n    delimiter?: string;\n    startAfter?: string;\n    include?: (\"httpMetadata\" | \"customMetadata\")[];\n}\ndeclare abstract class R2Bucket {\n    head(key: string): Promise<R2Object | null>;\n    get(\n        key: string,\n        options: R2GetOptions & {\n            onlyIf: R2Conditional | Headers;\n        },\n    ): Promise<R2ObjectBody | R2Object | null>;\n    get(key: string, options?: R2GetOptions): Promise<R2ObjectBody | null>;\n    put(\n        key: string,\n        value: ReadableStream | ArrayBuffer | ArrayBufferView | string | null | Blob,\n        options?: R2PutOptions & {\n            onlyIf: R2Conditional | Headers;\n        },\n    ): Promise<R2Object | null>;\n    put(\n        key: string,\n        value: ReadableStream | ArrayBuffer | 
ArrayBufferView | string | null | Blob,\n        options?: R2PutOptions,\n    ): Promise<R2Object>;\n    createMultipartUpload(key: string, options?: R2MultipartOptions): Promise<R2MultipartUpload>;\n    resumeMultipartUpload(key: string, uploadId: string): R2MultipartUpload;\n    delete(keys: string | string[]): Promise<void>;\n    list(options?: R2ListOptions): Promise<R2Objects>;\n}\ninterface R2MultipartUpload {\n    readonly key: string;\n    readonly uploadId: string;\n    uploadPart(\n        partNumber: number,\n        value: ReadableStream | (ArrayBuffer | ArrayBufferView) | string | Blob,\n        options?: R2UploadPartOptions,\n    ): Promise<R2UploadedPart>;\n    abort(): Promise<void>;\n    complete(uploadedParts: R2UploadedPart[]): Promise<R2Object>;\n}\ninterface R2UploadedPart {\n    partNumber: number;\n    etag: string;\n}\ndeclare abstract class R2Object {\n    readonly key: string;\n    readonly version: string;\n    readonly size: number;\n    readonly etag: string;\n    readonly httpEtag: string;\n    readonly checksums: R2Checksums;\n    readonly uploaded: Date;\n    readonly httpMetadata?: R2HTTPMetadata;\n    readonly customMetadata?: Record<string, string>;\n    readonly range?: R2Range;\n    readonly storageClass: string;\n    readonly ssecKeyMd5?: string;\n    writeHttpMetadata(headers: Headers): void;\n}\ninterface R2ObjectBody extends R2Object {\n    get body(): ReadableStream;\n    get bodyUsed(): boolean;\n    arrayBuffer(): Promise<ArrayBuffer>;\n    bytes(): Promise<Uint8Array>;\n    text(): Promise<string>;\n    json<T>(): Promise<T>;\n    blob(): Promise<Blob>;\n}\ntype R2Range =\n    | {\n          offset: number;\n          length?: number;\n      }\n    | {\n          offset?: number;\n          length: number;\n      }\n    | {\n          suffix: number;\n      };\ninterface R2Conditional {\n    etagMatches?: string;\n    etagDoesNotMatch?: string;\n    uploadedBefore?: Date;\n    uploadedAfter?: Date;\n    
secondsGranularity?: boolean;\n}\ninterface R2GetOptions {\n    onlyIf?: R2Conditional | Headers;\n    range?: R2Range | Headers;\n    ssecKey?: ArrayBuffer | string;\n}\ninterface R2PutOptions {\n    onlyIf?: R2Conditional | Headers;\n    httpMetadata?: R2HTTPMetadata | Headers;\n    customMetadata?: Record<string, string>;\n    md5?: (ArrayBuffer | ArrayBufferView) | string;\n    sha1?: (ArrayBuffer | ArrayBufferView) | string;\n    sha256?: (ArrayBuffer | ArrayBufferView) | string;\n    sha384?: (ArrayBuffer | ArrayBufferView) | string;\n    sha512?: (ArrayBuffer | ArrayBufferView) | string;\n    storageClass?: string;\n    ssecKey?: ArrayBuffer | string;\n}\ninterface R2MultipartOptions {\n    httpMetadata?: R2HTTPMetadata | Headers;\n    customMetadata?: Record<string, string>;\n    storageClass?: string;\n    ssecKey?: ArrayBuffer | string;\n}\ninterface R2Checksums {\n    readonly md5?: ArrayBuffer;\n    readonly sha1?: ArrayBuffer;\n    readonly sha256?: ArrayBuffer;\n    readonly sha384?: ArrayBuffer;\n    readonly sha512?: ArrayBuffer;\n    toJSON(): R2StringChecksums;\n}\ninterface R2StringChecksums {\n    md5?: string;\n    sha1?: string;\n    sha256?: string;\n    sha384?: string;\n    sha512?: string;\n}\ninterface R2HTTPMetadata {\n    contentType?: string;\n    contentLanguage?: string;\n    contentDisposition?: string;\n    contentEncoding?: string;\n    cacheControl?: string;\n    cacheExpiry?: Date;\n}\ntype R2Objects = {\n    objects: R2Object[];\n    delimitedPrefixes: string[];\n} & (\n    | {\n          truncated: true;\n          cursor: string;\n      }\n    | {\n          truncated: false;\n      }\n);\ninterface R2UploadPartOptions {\n    ssecKey?: ArrayBuffer | string;\n}\ndeclare abstract class ScheduledEvent extends ExtendableEvent {\n    readonly scheduledTime: number;\n    readonly cron: string;\n    noRetry(): void;\n}\ninterface ScheduledController {\n    readonly scheduledTime: number;\n    readonly cron: string;\n    noRetry(): 
void;\n}\ninterface QueuingStrategy<T = any> {\n    highWaterMark?: number | bigint;\n    size?: (chunk: T) => number | bigint;\n}\ninterface UnderlyingSink<W = any> {\n    type?: string;\n    start?: (controller: WritableStreamDefaultController) => void | Promise<void>;\n    write?: (chunk: W, controller: WritableStreamDefaultController) => void | Promise<void>;\n    abort?: (reason: any) => void | Promise<void>;\n    close?: () => void | Promise<void>;\n}\ninterface UnderlyingByteSource {\n    type: \"bytes\";\n    autoAllocateChunkSize?: number;\n    start?: (controller: ReadableByteStreamController) => void | Promise<void>;\n    pull?: (controller: ReadableByteStreamController) => void | Promise<void>;\n    cancel?: (reason: any) => void | Promise<void>;\n}\ninterface UnderlyingSource<R = any> {\n    type?: \"\" | undefined;\n    start?: (controller: ReadableStreamDefaultController<R>) => void | Promise<void>;\n    pull?: (controller: ReadableStreamDefaultController<R>) => void | Promise<void>;\n    cancel?: (reason: any) => void | Promise<void>;\n    expectedLength?: number | bigint;\n}\ninterface Transformer<I = any, O = any> {\n    readableType?: string;\n    writableType?: string;\n    start?: (controller: TransformStreamDefaultController<O>) => void | Promise<void>;\n    transform?: (chunk: I, controller: TransformStreamDefaultController<O>) => void | Promise<void>;\n    flush?: (controller: TransformStreamDefaultController<O>) => void | Promise<void>;\n    cancel?: (reason: any) => void | Promise<void>;\n    expectedLength?: number;\n}\ninterface StreamPipeOptions {\n    preventAbort?: boolean;\n    preventCancel?: boolean;\n    /**\n     * Pipes this readable stream to a given writable stream destination. The way in which the piping process behaves under various error conditions can be customized with a number of passed options. 
It returns a promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered.\n     *\n     * Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader.\n     *\n     * Errors and closures of the source and destination streams propagate as follows:\n     *\n     * An error in this source readable stream will abort destination, unless preventAbort is truthy. The returned promise will be rejected with the source's error, or with any error that occurs during aborting the destination.\n     *\n     * An error in destination will cancel this source readable stream, unless preventCancel is truthy. The returned promise will be rejected with the destination's error, or with any error that occurs during canceling the source.\n     *\n     * When this source readable stream closes, destination will be closed, unless preventClose is truthy. The returned promise will be fulfilled once this process completes, unless an error is encountered while closing the destination, in which case it will be rejected with that error.\n     *\n     * If destination starts out closed or closing, this source readable stream will be canceled, unless preventCancel is true. The returned promise will be rejected with an error indicating piping to a closed stream failed, or with any error that occurs during canceling the source.\n     *\n     * The signal option can be set to an AbortSignal to allow aborting an ongoing pipe operation via the corresponding AbortController. 
In this case, this source readable stream will be canceled, and destination aborted, unless the respective options preventCancel or preventAbort are set.\n     */\n    preventClose?: boolean;\n    signal?: AbortSignal;\n}\ntype ReadableStreamReadResult<R = any> =\n    | {\n          done: false;\n          value: R;\n      }\n    | {\n          done: true;\n          value?: undefined;\n      };\n/**\n * The `ReadableStream` interface of the Streams API represents a readable stream of byte data.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream)\n */\ninterface ReadableStream<R = any> {\n    /**\n     * The **`locked`** read-only property of the ReadableStream interface returns whether or not the readable stream is locked to a reader.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/locked)\n     */\n    get locked(): boolean;\n    /**\n     * The **`cancel()`** method of the ReadableStream interface returns a Promise that resolves when the stream is canceled.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/cancel)\n     */\n    cancel(reason?: any): Promise<void>;\n    /**\n     * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader)\n     */\n    getReader(): ReadableStreamDefaultReader<R>;\n    /**\n     * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader)\n     */\n    getReader(options: ReadableStreamGetReaderOptions): ReadableStreamBYOBReader;\n    /**\n     * The **`pipeThrough()`** method of the ReadableStream interface provides a chainable way of piping the current stream through a transform stream or any other writable/readable pair.\n     
*\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeThrough)\n     */\n    pipeThrough<T>(transform: ReadableWritablePair<T, R>, options?: StreamPipeOptions): ReadableStream<T>;\n    /**\n     * The **`pipeTo()`** method of the ReadableStream interface pipes the current `ReadableStream` to a given WritableStream and returns a Promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeTo)\n     */\n    pipeTo(destination: WritableStream<R>, options?: StreamPipeOptions): Promise<void>;\n    /**\n     * The **`tee()`** method of the two-element array containing the two resulting branches as new ReadableStream instances.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/tee)\n     */\n    tee(): [ReadableStream<R>, ReadableStream<R>];\n    values(options?: ReadableStreamValuesOptions): AsyncIterableIterator<R>;\n    [Symbol.asyncIterator](options?: ReadableStreamValuesOptions): AsyncIterableIterator<R>;\n}\n/**\n * The `ReadableStream` interface of the Streams API represents a readable stream of byte data.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream)\n */\ndeclare const ReadableStream: {\n    prototype: ReadableStream;\n    new (underlyingSource: UnderlyingByteSource, strategy?: QueuingStrategy<Uint8Array>): ReadableStream<Uint8Array>;\n    new <R = any>(underlyingSource?: UnderlyingSource<R>, strategy?: QueuingStrategy<R>): ReadableStream<R>;\n};\n/**\n * The **`ReadableStreamDefaultReader`** interface of the Streams API represents a default reader that can be used to read stream data supplied from a network (such as a fetch request).\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader)\n */\ndeclare class ReadableStreamDefaultReader<R = any> {\n    constructor(stream: 
ReadableStream);\n    get closed(): Promise<void>;\n    cancel(reason?: any): Promise<void>;\n    /**\n     * The **`read()`** method of the ReadableStreamDefaultReader interface returns a Promise providing access to the next chunk in the stream's internal queue.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/read)\n     */\n    read(): Promise<ReadableStreamReadResult<R>>;\n    /**\n     * The **`releaseLock()`** method of the ReadableStreamDefaultReader interface releases the reader's lock on the stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/releaseLock)\n     */\n    releaseLock(): void;\n}\n/**\n * The `ReadableStreamBYOBReader` interface of the Streams API defines a reader for a ReadableStream that supports zero-copy reading from an underlying byte source.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader)\n */\ndeclare class ReadableStreamBYOBReader {\n    constructor(stream: ReadableStream);\n    get closed(): Promise<void>;\n    cancel(reason?: any): Promise<void>;\n    /**\n     * The **`read()`** method of the ReadableStreamBYOBReader interface is used to read data into a view on a user-supplied buffer from an associated readable byte stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/read)\n     */\n    read<T extends ArrayBufferView>(view: T): Promise<ReadableStreamReadResult<T>>;\n    /**\n     * The **`releaseLock()`** method of the ReadableStreamBYOBReader interface releases the reader's lock on the stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/releaseLock)\n     */\n    releaseLock(): void;\n    readAtLeast<T extends ArrayBufferView>(minElements: number, view: T): Promise<ReadableStreamReadResult<T>>;\n}\ninterface 
ReadableStreamBYOBReaderReadableStreamBYOBReaderReadOptions {\n    min?: number;\n}\ninterface ReadableStreamGetReaderOptions {\n    /**\n     * Creates a ReadableStreamBYOBReader and locks the stream to the new reader.\n     *\n     * This call behaves the same way as the no-argument variant, except that it only works on readable byte streams, i.e. streams which were constructed specifically with the ability to handle \"bring your own buffer\" reading. The returned BYOB reader provides the ability to directly read individual chunks from the stream via its read() method, into developer-supplied buffers, allowing more precise control over allocation.\n     */\n    mode: \"byob\";\n}\n/**\n * The **`ReadableStreamBYOBRequest`** interface of the Streams API represents a 'pull request' for data from an underlying source that will made as a zero-copy transfer to a consumer (bypassing the stream's internal queues).\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest)\n */\ndeclare abstract class ReadableStreamBYOBRequest {\n    /**\n     * The **`view`** getter property of the ReadableStreamBYOBRequest interface returns the current view.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/view)\n     */\n    get view(): Uint8Array | null;\n    /**\n     * The **`respond()`** method of the ReadableStreamBYOBRequest interface is used to signal to the associated readable byte stream that the specified number of bytes were written into the ReadableStreamBYOBRequest.view.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respond)\n     */\n    respond(bytesWritten: number): void;\n    /**\n     * The **`respondWithNewView()`** method of the ReadableStreamBYOBRequest interface specifies a new view that the consumer of the associated readable byte stream should write to instead of ReadableStreamBYOBRequest.view.\n     *\n     * [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respondWithNewView)\n     */\n    respondWithNewView(view: ArrayBuffer | ArrayBufferView): void;\n    get atLeast(): number | null;\n}\n/**\n * The **`ReadableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a ReadableStream's state and internal queue.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController)\n */\ndeclare abstract class ReadableStreamDefaultController<R = any> {\n    /**\n     * The **`desiredSize`** read-only property of the required to fill the stream's internal queue.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/desiredSize)\n     */\n    get desiredSize(): number | null;\n    /**\n     * The **`close()`** method of the ReadableStreamDefaultController interface closes the associated stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/close)\n     */\n    close(): void;\n    /**\n     * The **`enqueue()`** method of the ```js-nolint enqueue(chunk) ``` - `chunk` - : The chunk to enqueue.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/enqueue)\n     */\n    enqueue(chunk?: R): void;\n    /**\n     * The **`error()`** method of the with the associated stream to error.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/error)\n     */\n    error(reason: any): void;\n}\n/**\n * The **`ReadableByteStreamController`** interface of the Streams API represents a controller for a readable byte stream.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController)\n */\ndeclare abstract class ReadableByteStreamController {\n    /**\n     * The **`byobRequest`** read-only property of the ReadableByteStreamController 
interface returns the current BYOB request, or `null` if there are no pending requests.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/byobRequest)\n     */\n    get byobRequest(): ReadableStreamBYOBRequest | null;\n    /**\n     * The **`desiredSize`** read-only property of the ReadableByteStreamController interface returns the number of bytes required to fill the stream's internal queue to its 'desired size'.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/desiredSize)\n     */\n    get desiredSize(): number | null;\n    /**\n     * The **`close()`** method of the ReadableByteStreamController interface closes the associated stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/close)\n     */\n    close(): void;\n    /**\n     * The **`enqueue()`** method of the ReadableByteStreamController interface enqueues a given chunk on the associated readable byte stream (the chunk is copied into the stream's internal queues).\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/enqueue)\n     */\n    enqueue(chunk: ArrayBuffer | ArrayBufferView): void;\n    /**\n     * The **`error()`** method of the ReadableByteStreamController interface causes any future interactions with the associated stream to error with the specified reason.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/error)\n     */\n    error(reason: any): void;\n}\n/**\n * The **`WritableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a WritableStream's state.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController)\n */\ndeclare abstract class WritableStreamDefaultController {\n    /**\n     * The read-only **`signal`** property of the 
WritableStreamDefaultController interface returns the AbortSignal associated with the controller.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/signal)\n     */\n    get signal(): AbortSignal;\n    /**\n     * The **`error()`** method of the with the associated stream to error.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/error)\n     */\n    error(reason?: any): void;\n}\n/**\n * The **`TransformStreamDefaultController`** interface of the Streams API provides methods to manipulate the associated ReadableStream and WritableStream.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController)\n */\ndeclare abstract class TransformStreamDefaultController<O = any> {\n    /**\n     * The **`desiredSize`** read-only property of the TransformStreamDefaultController interface returns the desired size to fill the queue of the associated ReadableStream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/desiredSize)\n     */\n    get desiredSize(): number | null;\n    /**\n     * The **`enqueue()`** method of the TransformStreamDefaultController interface enqueues the given chunk in the readable side of the stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/enqueue)\n     */\n    enqueue(chunk?: O): void;\n    /**\n     * The **`error()`** method of the TransformStreamDefaultController interface errors both sides of the stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/error)\n     */\n    error(reason: any): void;\n    /**\n     * The **`terminate()`** method of the TransformStreamDefaultController interface closes the readable side and errors the writable side of the stream.\n     *\n     * [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/terminate)\n     */\n    terminate(): void;\n}\ninterface ReadableWritablePair<R = any, W = any> {\n    readable: ReadableStream<R>;\n    /**\n     * Provides a convenient, chainable way of piping this readable stream through a transform stream (or any other { writable, readable } pair). It simply pipes the stream into the writable side of the supplied pair, and returns the readable side for further use.\n     *\n     * Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader.\n     */\n    writable: WritableStream<W>;\n}\n/**\n * The **`WritableStream`** interface of the Streams API provides a standard abstraction for writing streaming data to a destination, known as a sink.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream)\n */\ndeclare class WritableStream<W = any> {\n    constructor(underlyingSink?: UnderlyingSink, queuingStrategy?: QueuingStrategy);\n    /**\n     * The **`locked`** read-only property of the WritableStream interface returns a boolean indicating whether the `WritableStream` is locked to a writer.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/locked)\n     */\n    get locked(): boolean;\n    /**\n     * The **`abort()`** method of the WritableStream interface aborts the stream, signaling that the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/abort)\n     */\n    abort(reason?: any): Promise<void>;\n    /**\n     * The **`close()`** method of the WritableStream interface closes the associated stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/close)\n     */\n    close(): Promise<void>;\n    /**\n     
* The **`getWriter()`** method of the WritableStream interface returns a new instance of WritableStreamDefaultWriter and locks the stream to that instance.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/getWriter)\n     */\n    getWriter(): WritableStreamDefaultWriter<W>;\n}\n/**\n * The **`WritableStreamDefaultWriter`** interface of the Streams API is the object returned by WritableStream.getWriter() and once created locks the writer to the `WritableStream` ensuring that no other streams can write to the underlying sink.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter)\n */\ndeclare class WritableStreamDefaultWriter<W = any> {\n    constructor(stream: WritableStream);\n    /**\n     * The **`closed`** read-only property of the the stream errors or the writer's lock is released.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/closed)\n     */\n    get closed(): Promise<void>;\n    /**\n     * The **`ready`** read-only property of the that resolves when the desired size of the stream's internal queue transitions from non-positive to positive, signaling that it is no longer applying backpressure.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/ready)\n     */\n    get ready(): Promise<void>;\n    /**\n     * The **`desiredSize`** read-only property of the to fill the stream's internal queue.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/desiredSize)\n     */\n    get desiredSize(): number | null;\n    /**\n     * The **`abort()`** method of the the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/abort)\n     */\n    
abort(reason?: any): Promise<void>;\n    /**\n     * The **`close()`** method of the stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/close)\n     */\n    close(): Promise<void>;\n    /**\n     * The **`write()`** method of the operation.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/write)\n     */\n    write(chunk?: W): Promise<void>;\n    /**\n     * The **`releaseLock()`** method of the corresponding stream.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/releaseLock)\n     */\n    releaseLock(): void;\n}\n/**\n * The **`TransformStream`** interface of the Streams API represents a concrete implementation of the pipe chain _transform stream_ concept.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream)\n */\ndeclare class TransformStream<I = any, O = any> {\n    constructor(\n        transformer?: Transformer<I, O>,\n        writableStrategy?: QueuingStrategy<I>,\n        readableStrategy?: QueuingStrategy<O>,\n    );\n    /**\n     * The **`readable`** read-only property of the TransformStream interface returns the ReadableStream instance controlled by this `TransformStream`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/readable)\n     */\n    get readable(): ReadableStream<O>;\n    /**\n     * The **`writable`** read-only property of the TransformStream interface returns the WritableStream instance controlled by this `TransformStream`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/writable)\n     */\n    get writable(): WritableStream<I>;\n}\ndeclare class FixedLengthStream extends IdentityTransformStream {\n    constructor(expectedLength: number | bigint, queuingStrategy?: IdentityTransformStreamQueuingStrategy);\n}\ndeclare class IdentityTransformStream extends 
TransformStream<ArrayBuffer | ArrayBufferView, Uint8Array> {\n    constructor(queuingStrategy?: IdentityTransformStreamQueuingStrategy);\n}\ninterface IdentityTransformStreamQueuingStrategy {\n    highWaterMark?: number | bigint;\n}\ninterface ReadableStreamValuesOptions {\n    preventCancel?: boolean;\n}\n/**\n * The **`CompressionStream`** interface of the Compression Streams API is an API for compressing a stream of data.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CompressionStream)\n */\ndeclare class CompressionStream extends TransformStream<ArrayBuffer | ArrayBufferView, Uint8Array> {\n    constructor(format: \"gzip\" | \"deflate\" | \"deflate-raw\");\n}\n/**\n * The **`DecompressionStream`** interface of the Compression Streams API is an API for decompressing a stream of data.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DecompressionStream)\n */\ndeclare class DecompressionStream extends TransformStream<ArrayBuffer | ArrayBufferView, Uint8Array> {\n    constructor(format: \"gzip\" | \"deflate\" | \"deflate-raw\");\n}\n/**\n * The **`TextEncoderStream`** interface of the Encoding API converts a stream of strings into bytes in the UTF-8 encoding.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoderStream)\n */\ndeclare class TextEncoderStream extends TransformStream<string, Uint8Array> {\n    constructor();\n    get encoding(): string;\n}\n/**\n * The **`TextDecoderStream`** interface of the Encoding API converts a stream of text in a binary encoding, such as UTF-8 etc., to a stream of strings.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoderStream)\n */\ndeclare class TextDecoderStream extends TransformStream<ArrayBuffer | ArrayBufferView, string> {\n    constructor(label?: string, options?: TextDecoderStreamTextDecoderStreamInit);\n    get encoding(): string;\n    get fatal(): boolean;\n    get ignoreBOM(): boolean;\n}\ninterface 
TextDecoderStreamTextDecoderStreamInit {\n    fatal?: boolean;\n    ignoreBOM?: boolean;\n}\n/**\n * The **`ByteLengthQueuingStrategy`** interface of the Streams API provides a built-in byte length queuing strategy that can be used when constructing streams.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy)\n */\ndeclare class ByteLengthQueuingStrategy implements QueuingStrategy<ArrayBufferView> {\n    constructor(init: QueuingStrategyInit);\n    /**\n     * The read-only **`ByteLengthQueuingStrategy.highWaterMark`** property returns the total number of bytes that can be contained in the internal queue before backpressure is applied.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/highWaterMark)\n     */\n    get highWaterMark(): number;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/size) */\n    get size(): (chunk?: any) => number;\n}\n/**\n * The **`CountQueuingStrategy`** interface of the Streams API provides a built-in chunk counting queuing strategy that can be used when constructing streams.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy)\n */\ndeclare class CountQueuingStrategy implements QueuingStrategy {\n    constructor(init: QueuingStrategyInit);\n    /**\n     * The read-only **`CountQueuingStrategy.highWaterMark`** property returns the total number of chunks that can be contained in the internal queue before backpressure is applied.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/highWaterMark)\n     */\n    get highWaterMark(): number;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/size) */\n    get size(): (chunk?: any) => number;\n}\ninterface QueuingStrategyInit {\n    /**\n     * Creates a new ByteLengthQueuingStrategy with the provided high water mark.\n     *\n     * Note that 
the provided high water mark will not be validated ahead of time. Instead, if it is negative, NaN, or not a number, the resulting ByteLengthQueuingStrategy will cause the corresponding stream constructor to throw.\n     */\n    highWaterMark: number;\n}\ninterface ScriptVersion {\n    id?: string;\n    tag?: string;\n    message?: string;\n}\ndeclare abstract class TailEvent extends ExtendableEvent {\n    readonly events: TraceItem[];\n    readonly traces: TraceItem[];\n}\ninterface TraceItem {\n    readonly event:\n        | (\n              | TraceItemFetchEventInfo\n              | TraceItemJsRpcEventInfo\n              | TraceItemScheduledEventInfo\n              | TraceItemAlarmEventInfo\n              | TraceItemQueueEventInfo\n              | TraceItemEmailEventInfo\n              | TraceItemTailEventInfo\n              | TraceItemCustomEventInfo\n              | TraceItemHibernatableWebSocketEventInfo\n          )\n        | null;\n    readonly eventTimestamp: number | null;\n    readonly logs: TraceLog[];\n    readonly exceptions: TraceException[];\n    readonly diagnosticsChannelEvents: TraceDiagnosticChannelEvent[];\n    readonly scriptName: string | null;\n    readonly entrypoint?: string;\n    readonly scriptVersion?: ScriptVersion;\n    readonly dispatchNamespace?: string;\n    readonly scriptTags?: string[];\n    readonly durableObjectId?: string;\n    readonly outcome: string;\n    readonly executionModel: string;\n    readonly truncated: boolean;\n    readonly cpuTime: number;\n    readonly wallTime: number;\n}\ninterface TraceItemAlarmEventInfo {\n    readonly scheduledTime: Date;\n}\ninterface TraceItemCustomEventInfo {}\ninterface TraceItemScheduledEventInfo {\n    readonly scheduledTime: number;\n    readonly cron: string;\n}\ninterface TraceItemQueueEventInfo {\n    readonly queue: string;\n    readonly batchSize: number;\n}\ninterface TraceItemEmailEventInfo {\n    readonly mailFrom: string;\n    readonly rcptTo: string;\n    readonly 
rawSize: number;\n}\ninterface TraceItemTailEventInfo {\n    readonly consumedEvents: TraceItemTailEventInfoTailItem[];\n}\ninterface TraceItemTailEventInfoTailItem {\n    readonly scriptName: string | null;\n}\ninterface TraceItemFetchEventInfo {\n    readonly response?: TraceItemFetchEventInfoResponse;\n    readonly request: TraceItemFetchEventInfoRequest;\n}\ninterface TraceItemFetchEventInfoRequest {\n    readonly cf?: any;\n    readonly headers: Record<string, string>;\n    readonly method: string;\n    readonly url: string;\n    getUnredacted(): TraceItemFetchEventInfoRequest;\n}\ninterface TraceItemFetchEventInfoResponse {\n    readonly status: number;\n}\ninterface TraceItemJsRpcEventInfo {\n    readonly rpcMethod: string;\n}\ninterface TraceItemHibernatableWebSocketEventInfo {\n    readonly getWebSocketEvent:\n        | TraceItemHibernatableWebSocketEventInfoMessage\n        | TraceItemHibernatableWebSocketEventInfoClose\n        | TraceItemHibernatableWebSocketEventInfoError;\n}\ninterface TraceItemHibernatableWebSocketEventInfoMessage {\n    readonly webSocketEventType: string;\n}\ninterface TraceItemHibernatableWebSocketEventInfoClose {\n    readonly webSocketEventType: string;\n    readonly code: number;\n    readonly wasClean: boolean;\n}\ninterface TraceItemHibernatableWebSocketEventInfoError {\n    readonly webSocketEventType: string;\n}\ninterface TraceLog {\n    readonly timestamp: number;\n    readonly level: string;\n    readonly message: any;\n}\ninterface TraceException {\n    readonly timestamp: number;\n    readonly message: string;\n    readonly name: string;\n    readonly stack?: string;\n}\ninterface TraceDiagnosticChannelEvent {\n    readonly timestamp: number;\n    readonly channel: string;\n    readonly message: any;\n}\ninterface TraceMetrics {\n    readonly cpuTime: number;\n    readonly wallTime: number;\n}\ninterface UnsafeTraceMetrics {\n    fromTrace(item: TraceItem): TraceMetrics;\n}\n/**\n * The **`URL`** interface is used to 
parse, construct, normalize, and encode URL.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL)\n */\ndeclare class URL {\n    constructor(url: string | URL, base?: string | URL);\n    /**\n     * The **`origin`** read-only property of the URL interface returns a string containing the Unicode serialization of the origin of the represented URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/origin)\n     */\n    get origin(): string;\n    /**\n     * The **`href`** property of the URL interface is a string containing the whole URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href)\n     */\n    get href(): string;\n    /**\n     * The **`href`** property of the URL interface is a string containing the whole URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href)\n     */\n    set href(value: string);\n    /**\n     * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol)\n     */\n    get protocol(): string;\n    /**\n     * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol)\n     */\n    set protocol(value: string);\n    /**\n     * The **`username`** property of the URL interface is a string containing the username component of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username)\n     */\n    get username(): string;\n    /**\n     * The **`username`** property of the URL interface is a string containing the username component of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username)\n     */\n    set username(value: 
string);\n    /**\n     * The **`password`** property of the URL interface is a string containing the password component of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password)\n     */\n    get password(): string;\n    /**\n     * The **`password`** property of the URL interface is a string containing the password component of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password)\n     */\n    set password(value: string);\n    /**\n     * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host)\n     */\n    get host(): string;\n    /**\n     * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host)\n     */\n    set host(value: string);\n    /**\n     * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname)\n     */\n    get hostname(): string;\n    /**\n     * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname)\n     */\n    set hostname(value: string);\n    /**\n     * The **`port`** property of the URL interface is a string containing the port number of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port)\n     */\n    get port(): string;\n    /**\n     * The **`port`** property of 
the URL interface is a string containing the port number of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port)\n     */\n    set port(value: string);\n    /**\n     * The **`pathname`** property of the URL interface represents a location in a hierarchical structure.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname)\n     */\n    get pathname(): string;\n    /**\n     * The **`pathname`** property of the URL interface represents a location in a hierarchical structure.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname)\n     */\n    set pathname(value: string);\n    /**\n     * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search)\n     */\n    get search(): string;\n    /**\n     * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search)\n     */\n    set search(value: string);\n    /**\n     * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash)\n     */\n    get hash(): string;\n    /**\n     * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash)\n     */\n    set hash(value: string);\n    /**\n     * The **`searchParams`** read-only property of the access to the [MISSING: httpmethod('GET')] decoded query arguments contained in the URL.\n    
 *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/searchParams)\n     */\n    get searchParams(): URLSearchParams;\n    /**\n     * The **`toJSON()`** method of the URL interface returns a string containing a serialized version of the URL, although in practice it seems to have the same effect as ```js-nolint toJSON() ``` None.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/toJSON)\n     */\n    toJSON(): string;\n    /*function toString() { [native code] }*/\n    toString(): string;\n    /**\n     * The **`URL.canParse()`** static method of the URL interface returns a boolean indicating whether or not an absolute URL, or a relative URL combined with a base URL, are parsable and valid.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/canParse_static)\n     */\n    static canParse(url: string, base?: string): boolean;\n    /**\n     * The **`URL.parse()`** static method of the URL interface returns a newly created URL object representing the URL defined by the parameters.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/parse_static)\n     */\n    static parse(url: string, base?: string): URL | null;\n    /**\n     * The **`createObjectURL()`** static method of the URL interface creates a string containing a URL representing the object given in the parameter.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/createObjectURL_static)\n     */\n    static createObjectURL(object: File | Blob): string;\n    /**\n     * The **`revokeObjectURL()`** static method of the URL interface releases an existing object URL which was previously created by calling Call this method when you've finished using an object URL to let the browser know not to keep the reference to the file any longer.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/revokeObjectURL_static)\n     */\n    static revokeObjectURL(object_url: 
string): void;\n}\n/**\n * The **`URLSearchParams`** interface defines utility methods to work with the query string of a URL.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams)\n */\ndeclare class URLSearchParams {\n    constructor(init?: Iterable<Iterable<string>> | Record<string, string> | string);\n    /**\n     * The **`size`** read-only property of the URLSearchParams interface indicates the total number of search parameter entries.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/size)\n     */\n    get size(): number;\n    /**\n     * The **`append()`** method of the URLSearchParams interface appends a specified key/value pair as a new search parameter.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/append)\n     */\n    append(name: string, value: string): void;\n    /**\n     * The **`delete()`** method of the URLSearchParams interface deletes specified parameters and their associated value(s) from the list of all search parameters.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/delete)\n     */\n    delete(name: string, value?: string): void;\n    /**\n     * The **`get()`** method of the URLSearchParams interface returns the first value associated to the given search parameter.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/get)\n     */\n    get(name: string): string | null;\n    /**\n     * The **`getAll()`** method of the URLSearchParams interface returns all the values associated with a given search parameter as an array.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/getAll)\n     */\n    getAll(name: string): string[];\n    /**\n     * The **`has()`** method of the URLSearchParams interface returns a boolean value that indicates whether the specified parameter is in the search parameters.\n     *\n    
 * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/has)\n     */\n    has(name: string, value?: string): boolean;\n    /**\n     * The **`set()`** method of the URLSearchParams interface sets the value associated with a given search parameter to the given value.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/set)\n     */\n    set(name: string, value: string): void;\n    /**\n     * The **`URLSearchParams.sort()`** method sorts all key/value pairs contained in this object in place and returns `undefined`.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/sort)\n     */\n    sort(): void;\n    /* Returns an array of key, value pairs for every entry in the search params. */\n    entries(): IterableIterator<[key: string, value: string]>;\n    /* Returns a list of keys in the search params. */\n    keys(): IterableIterator<string>;\n    /* Returns a list of values in the search params. 
*/\n    values(): IterableIterator<string>;\n    forEach<This = unknown>(\n        callback: (this: This, value: string, key: string, parent: URLSearchParams) => void,\n        thisArg?: This,\n    ): void;\n    /*function toString() { [native code] }*/\n    toString(): string;\n    [Symbol.iterator](): IterableIterator<[key: string, value: string]>;\n}\ndeclare class URLPattern {\n    constructor(\n        input?: string | URLPatternInit,\n        baseURL?: string | URLPatternOptions,\n        patternOptions?: URLPatternOptions,\n    );\n    get protocol(): string;\n    get username(): string;\n    get password(): string;\n    get hostname(): string;\n    get port(): string;\n    get pathname(): string;\n    get search(): string;\n    get hash(): string;\n    get hasRegExpGroups(): boolean;\n    test(input?: string | URLPatternInit, baseURL?: string): boolean;\n    exec(input?: string | URLPatternInit, baseURL?: string): URLPatternResult | null;\n}\ninterface URLPatternInit {\n    protocol?: string;\n    username?: string;\n    password?: string;\n    hostname?: string;\n    port?: string;\n    pathname?: string;\n    search?: string;\n    hash?: string;\n    baseURL?: string;\n}\ninterface URLPatternComponentResult {\n    input: string;\n    groups: Record<string, string>;\n}\ninterface URLPatternResult {\n    inputs: (string | URLPatternInit)[];\n    protocol: URLPatternComponentResult;\n    username: URLPatternComponentResult;\n    password: URLPatternComponentResult;\n    hostname: URLPatternComponentResult;\n    port: URLPatternComponentResult;\n    pathname: URLPatternComponentResult;\n    search: URLPatternComponentResult;\n    hash: URLPatternComponentResult;\n}\ninterface URLPatternOptions {\n    ignoreCase?: boolean;\n}\n/**\n * A `CloseEvent` is sent to clients using WebSockets when the connection is closed.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent)\n */\ndeclare class CloseEvent extends Event {\n    
constructor(type: string, initializer?: CloseEventInit);\n    /**\n     * The **`code`** read-only property of the CloseEvent interface returns a WebSocket connection close code indicating the reason the connection was closed.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/code)\n     */\n    readonly code: number;\n    /**\n     * The **`reason`** read-only property of the CloseEvent interface returns the WebSocket connection close reason the server gave for closing the connection; that is, a concise human-readable prose explanation for the closure.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/reason)\n     */\n    readonly reason: string;\n    /**\n     * The **`wasClean`** read-only property of the CloseEvent interface returns `true` if the connection closed cleanly.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/wasClean)\n     */\n    readonly wasClean: boolean;\n}\ninterface CloseEventInit {\n    code?: number;\n    reason?: string;\n    wasClean?: boolean;\n}\ntype WebSocketEventMap = {\n    close: CloseEvent;\n    message: MessageEvent;\n    open: Event;\n    error: ErrorEvent;\n};\n/**\n * The `WebSocket` object provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket)\n */\ndeclare var WebSocket: {\n    prototype: WebSocket;\n    new (url: string, protocols?: string[] | string): WebSocket;\n    readonly READY_STATE_CONNECTING: number;\n    readonly CONNECTING: number;\n    readonly READY_STATE_OPEN: number;\n    readonly OPEN: number;\n    readonly READY_STATE_CLOSING: number;\n    readonly CLOSING: number;\n    readonly READY_STATE_CLOSED: number;\n    readonly CLOSED: number;\n};\n/**\n * The `WebSocket` object provides the API for creating and managing a WebSocket connection to 
a server, as well as for sending and receiving data on the connection.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket)\n */\ninterface WebSocket extends EventTarget<WebSocketEventMap> {\n    accept(): void;\n    /**\n     * The **`WebSocket.send()`** method enqueues the specified data to be transmitted to the server over the WebSocket connection, increasing the value of `bufferedAmount` by the number of bytes needed to contain the data.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/send)\n     */\n    send(message: (ArrayBuffer | ArrayBufferView) | string): void;\n    /**\n     * The **`WebSocket.close()`** method closes the already `CLOSED`, this method does nothing.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/close)\n     */\n    close(code?: number, reason?: string): void;\n    serializeAttachment(attachment: any): void;\n    deserializeAttachment(): any | null;\n    /**\n     * The **`WebSocket.readyState`** read-only property returns the current state of the WebSocket connection.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/readyState)\n     */\n    readyState: number;\n    /**\n     * The **`WebSocket.url`** read-only property returns the absolute URL of the WebSocket as resolved by the constructor.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/url)\n     */\n    url: string | null;\n    /**\n     * The **`WebSocket.protocol`** read-only property returns the name of the sub-protocol the server selected; this will be one of the strings specified in the `protocols` parameter when creating the WebSocket object, or the empty string if no connection is established.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/protocol)\n     */\n    protocol: string | null;\n    /**\n     * The **`WebSocket.extensions`** read-only property returns the 
extensions selected by the server.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/extensions)\n     */\n    extensions: string | null;\n}\ndeclare const WebSocketPair: {\n    new (): {\n        0: WebSocket;\n        1: WebSocket;\n    };\n};\ninterface SqlStorage {\n    exec<T extends Record<string, SqlStorageValue>>(query: string, ...bindings: any[]): SqlStorageCursor<T>;\n    get databaseSize(): number;\n    Cursor: typeof SqlStorageCursor;\n    Statement: typeof SqlStorageStatement;\n}\ndeclare abstract class SqlStorageStatement {}\ntype SqlStorageValue = ArrayBuffer | string | number | null;\ndeclare abstract class SqlStorageCursor<T extends Record<string, SqlStorageValue>> {\n    next():\n        | {\n              done?: false;\n              value: T;\n          }\n        | {\n              done: true;\n              value?: never;\n          };\n    toArray(): T[];\n    one(): T;\n    raw<U extends SqlStorageValue[]>(): IterableIterator<U>;\n    columnNames: string[];\n    get rowsRead(): number;\n    get rowsWritten(): number;\n    [Symbol.iterator](): IterableIterator<T>;\n}\ninterface Socket {\n    get readable(): ReadableStream;\n    get writable(): WritableStream;\n    get closed(): Promise<void>;\n    get opened(): Promise<SocketInfo>;\n    get upgraded(): boolean;\n    get secureTransport(): \"on\" | \"off\" | \"starttls\";\n    close(): Promise<void>;\n    startTls(options?: TlsOptions): Socket;\n}\ninterface SocketOptions {\n    secureTransport?: string;\n    allowHalfOpen: boolean;\n    highWaterMark?: number | bigint;\n}\ninterface SocketAddress {\n    hostname: string;\n    port: number;\n}\ninterface TlsOptions {\n    expectedServerHostname?: string;\n}\ninterface SocketInfo {\n    remoteAddress?: string;\n    localAddress?: string;\n}\n/**\n * The **`EventSource`** interface is web content's interface to server-sent events.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource)\n 
*/\ndeclare class EventSource extends EventTarget {\n    constructor(url: string, init?: EventSourceEventSourceInit);\n    /**\n     * The **`close()`** method of the EventSource interface closes the connection, if one is made, and sets the ```js-nolint close() ``` None.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/close)\n     */\n    close(): void;\n    /**\n     * The **`url`** read-only property of the URL of the source.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/url)\n     */\n    get url(): string;\n    /**\n     * The **`withCredentials`** read-only property of the the `EventSource` object was instantiated with CORS credentials set.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/withCredentials)\n     */\n    get withCredentials(): boolean;\n    /**\n     * The **`readyState`** read-only property of the connection.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/readyState)\n     */\n    get readyState(): number;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/open_event) */\n    get onopen(): any | null;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/open_event) */\n    set onopen(value: any | null);\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/message_event) */\n    get onmessage(): any | null;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/message_event) */\n    set onmessage(value: any | null);\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/error_event) */\n    get onerror(): any | null;\n    /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/error_event) */\n    set onerror(value: any | null);\n    static readonly CONNECTING: number;\n    static readonly OPEN: number;\n    static readonly CLOSED: number;\n    
static from(stream: ReadableStream): EventSource;\n}\ninterface EventSourceEventSourceInit {\n    withCredentials?: boolean;\n    fetcher?: Fetcher;\n}\ninterface Container {\n    get running(): boolean;\n    start(options?: ContainerStartupOptions): void;\n    monitor(): Promise<void>;\n    destroy(error?: any): Promise<void>;\n    signal(signo: number): void;\n    getTcpPort(port: number): Fetcher;\n    setInactivityTimeout(durationMs: number | bigint): Promise<void>;\n}\ninterface ContainerStartupOptions {\n    entrypoint?: string[];\n    enableInternet: boolean;\n    env?: Record<string, string>;\n    hardTimeout?: number | bigint;\n}\n/**\n * The **`MessagePort`** interface of the Channel Messaging API represents one of the two ports of a MessageChannel, allowing messages to be sent from one port and listening out for them arriving at the other.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort)\n */\ndeclare abstract class MessagePort extends EventTarget {\n    /**\n     * The **`postMessage()`** method of the transfers ownership of objects to other browsing contexts.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/postMessage)\n     */\n    postMessage(data?: any, options?: any[] | MessagePortPostMessageOptions): void;\n    /**\n     * The **`close()`** method of the MessagePort interface disconnects the port, so it is no longer active.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/close)\n     */\n    close(): void;\n    /**\n     * The **`start()`** method of the MessagePort interface starts the sending of messages queued on the port.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/start)\n     */\n    start(): void;\n    get onmessage(): any | null;\n    set onmessage(value: any | null);\n}\n/**\n * The **`MessageChannel`** interface of the Channel Messaging API allows us to create a new message channel and 
send data through it via its two MessagePort properties.\n *\n * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel)\n */\ndeclare class MessageChannel {\n    constructor();\n    /**\n     * The **`port1`** read-only property of the the port attached to the context that originated the channel.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port1)\n     */\n    readonly port1: MessagePort;\n    /**\n     * The **`port2`** read-only property of the the port attached to the context at the other end of the channel, which the message is initially sent to.\n     *\n     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port2)\n     */\n    readonly port2: MessagePort;\n}\ninterface MessagePortPostMessageOptions {\n    transfer?: any[];\n}\ntype LoopbackForExport<\n    T extends (new (...args: any[]) => Rpc.EntrypointBranded) | ExportedHandler<any, any, any> | undefined = undefined,\n> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded\n    ? LoopbackServiceStub<InstanceType<T>>\n    : T extends new (...args: any[]) => Rpc.DurableObjectBranded\n      ? LoopbackDurableObjectClass<InstanceType<T>>\n      : T extends ExportedHandler<any, any, any>\n        ? LoopbackServiceStub<undefined>\n        : undefined;\ntype LoopbackServiceStub<T extends Rpc.WorkerEntrypointBranded | undefined = undefined> = Fetcher<T> &\n    (T extends CloudflareWorkersModule.WorkerEntrypoint<any, infer Props>\n        ? (opts: { props?: Props }) => Fetcher<T>\n        : (opts: { props?: any }) => Fetcher<T>);\ntype LoopbackDurableObjectClass<T extends Rpc.DurableObjectBranded | undefined = undefined> = DurableObjectClass<T> &\n    (T extends CloudflareWorkersModule.DurableObject<any, infer Props>\n        ? 
(opts: { props?: Props }) => DurableObjectClass<T>\n        : (opts: { props?: any }) => DurableObjectClass<T>);\ninterface SyncKvStorage {\n    get<T = unknown>(key: string): T | undefined;\n    list<T = unknown>(options?: SyncKvListOptions): Iterable<[string, T]>;\n    put<T>(key: string, value: T): void;\n    delete(key: string): boolean;\n}\ninterface SyncKvListOptions {\n    start?: string;\n    startAfter?: string;\n    end?: string;\n    prefix?: string;\n    reverse?: boolean;\n    limit?: number;\n}\ninterface WorkerStub {\n    getEntrypoint<T extends Rpc.WorkerEntrypointBranded | undefined>(\n        name?: string,\n        options?: WorkerStubEntrypointOptions,\n    ): Fetcher<T>;\n}\ninterface WorkerStubEntrypointOptions {\n    props?: any;\n}\ninterface WorkerLoader {\n    get(name: string | null, getCode: () => WorkerLoaderWorkerCode | Promise<WorkerLoaderWorkerCode>): WorkerStub;\n}\ninterface WorkerLoaderModule {\n    js?: string;\n    cjs?: string;\n    text?: string;\n    data?: ArrayBuffer;\n    json?: any;\n    py?: string;\n    wasm?: ArrayBuffer;\n}\ninterface WorkerLoaderWorkerCode {\n    compatibilityDate: string;\n    compatibilityFlags?: string[];\n    allowExperimental?: boolean;\n    mainModule: string;\n    modules: Record<string, WorkerLoaderModule | string>;\n    env?: any;\n    globalOutbound?: Fetcher | null;\n    tails?: Fetcher[];\n    streamingTails?: Fetcher[];\n}\n/**\n * The Workers runtime supports a subset of the Performance API, used to measure timing and performance,\n * as well as timing of subrequests and other operations.\n *\n * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/)\n */\ndeclare abstract class Performance {\n    /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */\n    get timeOrigin(): number;\n    /* [Cloudflare Docs 
Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */\n    now(): number;\n}\ntype AiImageClassificationInput = {\n    image: number[];\n};\ntype AiImageClassificationOutput = {\n    score?: number;\n    label?: string;\n}[];\ndeclare abstract class BaseAiImageClassification {\n    inputs: AiImageClassificationInput;\n    postProcessedOutputs: AiImageClassificationOutput;\n}\ntype AiImageToTextInput = {\n    image: number[];\n    prompt?: string;\n    max_tokens?: number;\n    temperature?: number;\n    top_p?: number;\n    top_k?: number;\n    seed?: number;\n    repetition_penalty?: number;\n    frequency_penalty?: number;\n    presence_penalty?: number;\n    raw?: boolean;\n    messages?: RoleScopedChatInput[];\n};\ntype AiImageToTextOutput = {\n    description: string;\n};\ndeclare abstract class BaseAiImageToText {\n    inputs: AiImageToTextInput;\n    postProcessedOutputs: AiImageToTextOutput;\n}\ntype AiImageTextToTextInput = {\n    image: string;\n    prompt?: string;\n    max_tokens?: number;\n    temperature?: number;\n    ignore_eos?: boolean;\n    top_p?: number;\n    top_k?: number;\n    seed?: number;\n    repetition_penalty?: number;\n    frequency_penalty?: number;\n    presence_penalty?: number;\n    raw?: boolean;\n    messages?: RoleScopedChatInput[];\n};\ntype AiImageTextToTextOutput = {\n    description: string;\n};\ndeclare abstract class BaseAiImageTextToText {\n    inputs: AiImageTextToTextInput;\n    postProcessedOutputs: AiImageTextToTextOutput;\n}\ntype AiMultimodalEmbeddingsInput = {\n    image: string;\n    text: string[];\n};\ntype AiIMultimodalEmbeddingsOutput = {\n    data: number[][];\n    shape: number[];\n};\ndeclare abstract class BaseAiMultimodalEmbeddings {\n    inputs: AiImageTextToTextInput;\n    postProcessedOutputs: AiImageTextToTextOutput;\n}\ntype AiObjectDetectionInput = {\n    image: number[];\n};\ntype AiObjectDetectionOutput = {\n    score?: number;\n    label?: 
string;\n}[];\ndeclare abstract class BaseAiObjectDetection {\n    inputs: AiObjectDetectionInput;\n    postProcessedOutputs: AiObjectDetectionOutput;\n}\ntype AiSentenceSimilarityInput = {\n    source: string;\n    sentences: string[];\n};\ntype AiSentenceSimilarityOutput = number[];\ndeclare abstract class BaseAiSentenceSimilarity {\n    inputs: AiSentenceSimilarityInput;\n    postProcessedOutputs: AiSentenceSimilarityOutput;\n}\ntype AiAutomaticSpeechRecognitionInput = {\n    audio: number[];\n};\ntype AiAutomaticSpeechRecognitionOutput = {\n    text?: string;\n    words?: {\n        word: string;\n        start: number;\n        end: number;\n    }[];\n    vtt?: string;\n};\ndeclare abstract class BaseAiAutomaticSpeechRecognition {\n    inputs: AiAutomaticSpeechRecognitionInput;\n    postProcessedOutputs: AiAutomaticSpeechRecognitionOutput;\n}\ntype AiSummarizationInput = {\n    input_text: string;\n    max_length?: number;\n};\ntype AiSummarizationOutput = {\n    summary: string;\n};\ndeclare abstract class BaseAiSummarization {\n    inputs: AiSummarizationInput;\n    postProcessedOutputs: AiSummarizationOutput;\n}\ntype AiTextClassificationInput = {\n    text: string;\n};\ntype AiTextClassificationOutput = {\n    score?: number;\n    label?: string;\n}[];\ndeclare abstract class BaseAiTextClassification {\n    inputs: AiTextClassificationInput;\n    postProcessedOutputs: AiTextClassificationOutput;\n}\ntype AiTextEmbeddingsInput = {\n    text: string | string[];\n};\ntype AiTextEmbeddingsOutput = {\n    shape: number[];\n    data: number[][];\n};\ndeclare abstract class BaseAiTextEmbeddings {\n    inputs: AiTextEmbeddingsInput;\n    postProcessedOutputs: AiTextEmbeddingsOutput;\n}\ntype RoleScopedChatInput = {\n    role: \"user\" | \"assistant\" | \"system\" | \"tool\" | (string & NonNullable<unknown>);\n    content: string;\n    name?: string;\n};\ntype AiTextGenerationToolLegacyInput = {\n    name: string;\n    description: string;\n    parameters?: {\n     
   type: \"object\" | (string & NonNullable<unknown>);\n        properties: {\n            [key: string]: {\n                type: string;\n                description?: string;\n            };\n        };\n        required: string[];\n    };\n};\ntype AiTextGenerationToolInput = {\n    type: \"function\" | (string & NonNullable<unknown>);\n    function: {\n        name: string;\n        description: string;\n        parameters?: {\n            type: \"object\" | (string & NonNullable<unknown>);\n            properties: {\n                [key: string]: {\n                    type: string;\n                    description?: string;\n                };\n            };\n            required: string[];\n        };\n    };\n};\ntype AiTextGenerationFunctionsInput = {\n    name: string;\n    code: string;\n};\ntype AiTextGenerationResponseFormat = {\n    type: string;\n    json_schema?: any;\n};\ntype AiTextGenerationInput = {\n    prompt?: string;\n    raw?: boolean;\n    stream?: boolean;\n    max_tokens?: number;\n    temperature?: number;\n    top_p?: number;\n    top_k?: number;\n    seed?: number;\n    repetition_penalty?: number;\n    frequency_penalty?: number;\n    presence_penalty?: number;\n    messages?: RoleScopedChatInput[];\n    response_format?: AiTextGenerationResponseFormat;\n    tools?: AiTextGenerationToolInput[] | AiTextGenerationToolLegacyInput[] | (object & NonNullable<unknown>);\n    functions?: AiTextGenerationFunctionsInput[];\n};\ntype AiTextGenerationToolLegacyOutput = {\n    name: string;\n    arguments: unknown;\n};\ntype AiTextGenerationToolOutput = {\n    id: string;\n    type: \"function\";\n    function: {\n        name: string;\n        arguments: string;\n    };\n};\ntype UsageTags = {\n    prompt_tokens: number;\n    completion_tokens: number;\n    total_tokens: number;\n};\ntype AiTextGenerationOutput = {\n    response?: string;\n    tool_calls?: AiTextGenerationToolLegacyOutput[] & AiTextGenerationToolOutput[];\n    usage?: 
UsageTags;\n};\ndeclare abstract class BaseAiTextGeneration {\n    inputs: AiTextGenerationInput;\n    postProcessedOutputs: AiTextGenerationOutput;\n}\ntype AiTextToSpeechInput = {\n    prompt: string;\n    lang?: string;\n};\ntype AiTextToSpeechOutput =\n    | Uint8Array\n    | {\n          audio: string;\n      };\ndeclare abstract class BaseAiTextToSpeech {\n    inputs: AiTextToSpeechInput;\n    postProcessedOutputs: AiTextToSpeechOutput;\n}\ntype AiTextToImageInput = {\n    prompt: string;\n    negative_prompt?: string;\n    height?: number;\n    width?: number;\n    image?: number[];\n    image_b64?: string;\n    mask?: number[];\n    num_steps?: number;\n    strength?: number;\n    guidance?: number;\n    seed?: number;\n};\ntype AiTextToImageOutput = ReadableStream<Uint8Array>;\ndeclare abstract class BaseAiTextToImage {\n    inputs: AiTextToImageInput;\n    postProcessedOutputs: AiTextToImageOutput;\n}\ntype AiTranslationInput = {\n    text: string;\n    target_lang: string;\n    source_lang?: string;\n};\ntype AiTranslationOutput = {\n    translated_text?: string;\n};\ndeclare abstract class BaseAiTranslation {\n    inputs: AiTranslationInput;\n    postProcessedOutputs: AiTranslationOutput;\n}\n/**\n * Workers AI support for OpenAI's Responses API\n * Reference: https://github.com/openai/openai-node/blob/master/src/resources/responses/responses.ts\n *\n * It's a stripped down version from its source.\n * It currently supports basic function calling, json mode and accepts images as input.\n *\n * It does not include types for WebSearch, CodeInterpreter, FileInputs, MCP, CustomTools.\n * We plan to add those incrementally as model + platform capabilities evolve.\n */\ntype ResponsesInput = {\n    background?: boolean | null;\n    conversation?: string | ResponseConversationParam | null;\n    include?: Array<ResponseIncludable> | null;\n    input?: string | ResponseInput;\n    instructions?: string | null;\n    max_output_tokens?: number | null;\n    
parallel_tool_calls?: boolean | null;\n    previous_response_id?: string | null;\n    prompt_cache_key?: string;\n    reasoning?: Reasoning | null;\n    safety_identifier?: string;\n    service_tier?: \"auto\" | \"default\" | \"flex\" | \"scale\" | \"priority\" | null;\n    stream?: boolean | null;\n    stream_options?: StreamOptions | null;\n    temperature?: number | null;\n    text?: ResponseTextConfig;\n    tool_choice?: ToolChoiceOptions | ToolChoiceFunction;\n    tools?: Array<Tool>;\n    top_p?: number | null;\n    truncation?: \"auto\" | \"disabled\" | null;\n};\ntype ResponsesOutput = {\n    id?: string;\n    created_at?: number;\n    output_text?: string;\n    error?: ResponseError | null;\n    incomplete_details?: ResponseIncompleteDetails | null;\n    instructions?: string | Array<ResponseInputItem> | null;\n    object?: \"response\";\n    output?: Array<ResponseOutputItem>;\n    parallel_tool_calls?: boolean;\n    temperature?: number | null;\n    tool_choice?: ToolChoiceOptions | ToolChoiceFunction;\n    tools?: Array<Tool>;\n    top_p?: number | null;\n    max_output_tokens?: number | null;\n    previous_response_id?: string | null;\n    prompt?: ResponsePrompt | null;\n    reasoning?: Reasoning | null;\n    safety_identifier?: string;\n    service_tier?: \"auto\" | \"default\" | \"flex\" | \"scale\" | \"priority\" | null;\n    status?: ResponseStatus;\n    text?: ResponseTextConfig;\n    truncation?: \"auto\" | \"disabled\" | null;\n    usage?: ResponseUsage;\n};\ntype EasyInputMessage = {\n    content: string | ResponseInputMessageContentList;\n    role: \"user\" | \"assistant\" | \"system\" | \"developer\";\n    type?: \"message\";\n};\ntype ResponsesFunctionTool = {\n    name: string;\n    parameters: {\n        [key: string]: unknown;\n    } | null;\n    strict: boolean | null;\n    type: \"function\";\n    description?: string | null;\n};\ntype ResponseIncompleteDetails = {\n    reason?: \"max_output_tokens\" | \"content_filter\";\n};\ntype 
ResponsePrompt = {\n    id: string;\n    variables?: {\n        [key: string]: string | ResponseInputText | ResponseInputImage;\n    } | null;\n    version?: string | null;\n};\ntype Reasoning = {\n    effort?: ReasoningEffort | null;\n    generate_summary?: \"auto\" | \"concise\" | \"detailed\" | null;\n    summary?: \"auto\" | \"concise\" | \"detailed\" | null;\n};\ntype ResponseContent =\n    | ResponseInputText\n    | ResponseInputImage\n    | ResponseOutputText\n    | ResponseOutputRefusal\n    | ResponseContentReasoningText;\ntype ResponseContentReasoningText = {\n    text: string;\n    type: \"reasoning_text\";\n};\ntype ResponseConversationParam = {\n    id: string;\n};\ntype ResponseCreatedEvent = {\n    response: Response;\n    sequence_number: number;\n    type: \"response.created\";\n};\ntype ResponseCustomToolCallOutput = {\n    call_id: string;\n    output: string | Array<ResponseInputText | ResponseInputImage>;\n    type: \"custom_tool_call_output\";\n    id?: string;\n};\ntype ResponseError = {\n    code:\n        | \"server_error\"\n        | \"rate_limit_exceeded\"\n        | \"invalid_prompt\"\n        | \"vector_store_timeout\"\n        | \"invalid_image\"\n        | \"invalid_image_format\"\n        | \"invalid_base64_image\"\n        | \"invalid_image_url\"\n        | \"image_too_large\"\n        | \"image_too_small\"\n        | \"image_parse_error\"\n        | \"image_content_policy_violation\"\n        | \"invalid_image_mode\"\n        | \"image_file_too_large\"\n        | \"unsupported_image_media_type\"\n        | \"empty_image_file\"\n        | \"failed_to_download_image\"\n        | \"image_file_not_found\";\n    message: string;\n};\ntype ResponseErrorEvent = {\n    code: string | null;\n    message: string;\n    param: string | null;\n    sequence_number: number;\n    type: \"error\";\n};\ntype ResponseFailedEvent = {\n    response: Response;\n    sequence_number: number;\n    type: \"response.failed\";\n};\ntype ResponseFormatText = 
{\n    type: \"text\";\n};\ntype ResponseFormatJSONObject = {\n    type: \"json_object\";\n};\ntype ResponseFormatTextConfig = ResponseFormatText | ResponseFormatTextJSONSchemaConfig | ResponseFormatJSONObject;\ntype ResponseFormatTextJSONSchemaConfig = {\n    name: string;\n    schema: {\n        [key: string]: unknown;\n    };\n    type: \"json_schema\";\n    description?: string;\n    strict?: boolean | null;\n};\ntype ResponseFunctionCallArgumentsDeltaEvent = {\n    delta: string;\n    item_id: string;\n    output_index: number;\n    sequence_number: number;\n    type: \"response.function_call_arguments.delta\";\n};\ntype ResponseFunctionCallArgumentsDoneEvent = {\n    arguments: string;\n    item_id: string;\n    name: string;\n    output_index: number;\n    sequence_number: number;\n    type: \"response.function_call_arguments.done\";\n};\ntype ResponseFunctionCallOutputItem = ResponseInputTextContent | ResponseInputImageContent;\ntype ResponseFunctionCallOutputItemList = Array<ResponseFunctionCallOutputItem>;\ntype ResponseFunctionToolCall = {\n    arguments: string;\n    call_id: string;\n    name: string;\n    type: \"function_call\";\n    id?: string;\n    status?: \"in_progress\" | \"completed\" | \"incomplete\";\n};\ninterface ResponseFunctionToolCallItem extends ResponseFunctionToolCall {\n    id: string;\n}\ntype ResponseFunctionToolCallOutputItem = {\n    id: string;\n    call_id: string;\n    output: string | Array<ResponseInputText | ResponseInputImage>;\n    type: \"function_call_output\";\n    status?: \"in_progress\" | \"completed\" | \"incomplete\";\n};\ntype ResponseIncludable = \"message.input_image.image_url\" | \"message.output_text.logprobs\";\ntype ResponseIncompleteEvent = {\n    response: Response;\n    sequence_number: number;\n    type: \"response.incomplete\";\n};\ntype ResponseInput = Array<ResponseInputItem>;\ntype ResponseInputContent = ResponseInputText | ResponseInputImage;\ntype ResponseInputImage = {\n    detail: \"low\" | 
\"high\" | \"auto\";\n    type: \"input_image\";\n    /**\n     * Base64 encoded image\n     */\n    image_url?: string | null;\n};\ntype ResponseInputImageContent = {\n    type: \"input_image\";\n    detail?: \"low\" | \"high\" | \"auto\" | null;\n    /**\n     * Base64 encoded image\n     */\n    image_url?: string | null;\n};\ntype ResponseInputItem =\n    | EasyInputMessage\n    | ResponseInputItemMessage\n    | ResponseOutputMessage\n    | ResponseFunctionToolCall\n    | ResponseInputItemFunctionCallOutput\n    | ResponseReasoningItem;\ntype ResponseInputItemFunctionCallOutput = {\n    call_id: string;\n    output: string | ResponseFunctionCallOutputItemList;\n    type: \"function_call_output\";\n    id?: string | null;\n    status?: \"in_progress\" | \"completed\" | \"incomplete\" | null;\n};\ntype ResponseInputItemMessage = {\n    content: ResponseInputMessageContentList;\n    role: \"user\" | \"system\" | \"developer\";\n    status?: \"in_progress\" | \"completed\" | \"incomplete\";\n    type?: \"message\";\n};\ntype ResponseInputMessageContentList = Array<ResponseInputContent>;\ntype ResponseInputMessageItem = {\n    id: string;\n    content: ResponseInputMessageContentList;\n    role: \"user\" | \"system\" | \"developer\";\n    status?: \"in_progress\" | \"completed\" | \"incomplete\";\n    type?: \"message\";\n};\ntype ResponseInputText = {\n    text: string;\n    type: \"input_text\";\n};\ntype ResponseInputTextContent = {\n    text: string;\n    type: \"input_text\";\n};\ntype ResponseItem =\n    | ResponseInputMessageItem\n    | ResponseOutputMessage\n    | ResponseFunctionToolCallItem\n    | ResponseFunctionToolCallOutputItem;\ntype ResponseOutputItem = ResponseOutputMessage | ResponseFunctionToolCall | ResponseReasoningItem;\ntype ResponseOutputItemAddedEvent = {\n    item: ResponseOutputItem;\n    output_index: number;\n    sequence_number: number;\n    type: \"response.output_item.added\";\n};\ntype ResponseOutputItemDoneEvent = {\n    item: 
ResponseOutputItem;\n    output_index: number;\n    sequence_number: number;\n    type: \"response.output_item.done\";\n};\ntype ResponseOutputMessage = {\n    id: string;\n    content: Array<ResponseOutputText | ResponseOutputRefusal>;\n    role: \"assistant\";\n    status: \"in_progress\" | \"completed\" | \"incomplete\";\n    type: \"message\";\n};\ntype ResponseOutputRefusal = {\n    refusal: string;\n    type: \"refusal\";\n};\ntype ResponseOutputText = {\n    text: string;\n    type: \"output_text\";\n    logprobs?: Array<Logprob>;\n};\ntype ResponseReasoningItem = {\n    id: string;\n    summary: Array<ResponseReasoningSummaryItem>;\n    type: \"reasoning\";\n    content?: Array<ResponseReasoningContentItem>;\n    encrypted_content?: string | null;\n    status?: \"in_progress\" | \"completed\" | \"incomplete\";\n};\ntype ResponseReasoningSummaryItem = {\n    text: string;\n    type: \"summary_text\";\n};\ntype ResponseReasoningContentItem = {\n    text: string;\n    type: \"reasoning_text\";\n};\ntype ResponseReasoningTextDeltaEvent = {\n    content_index: number;\n    delta: string;\n    item_id: string;\n    output_index: number;\n    sequence_number: number;\n    type: \"response.reasoning_text.delta\";\n};\ntype ResponseReasoningTextDoneEvent = {\n    content_index: number;\n    item_id: string;\n    output_index: number;\n    sequence_number: number;\n    text: string;\n    type: \"response.reasoning_text.done\";\n};\ntype ResponseRefusalDeltaEvent = {\n    content_index: number;\n    delta: string;\n    item_id: string;\n    output_index: number;\n    sequence_number: number;\n    type: \"response.refusal.delta\";\n};\ntype ResponseRefusalDoneEvent = {\n    content_index: number;\n    item_id: string;\n    output_index: number;\n    refusal: string;\n    sequence_number: number;\n    type: \"response.refusal.done\";\n};\ntype ResponseStatus = \"completed\" | \"failed\" | \"in_progress\" | \"cancelled\" | \"queued\" | \"incomplete\";\ntype 
ResponseStreamEvent =\n    | ResponseCompletedEvent\n    | ResponseCreatedEvent\n    | ResponseErrorEvent\n    | ResponseFunctionCallArgumentsDeltaEvent\n    | ResponseFunctionCallArgumentsDoneEvent\n    | ResponseFailedEvent\n    | ResponseIncompleteEvent\n    | ResponseOutputItemAddedEvent\n    | ResponseOutputItemDoneEvent\n    | ResponseReasoningTextDeltaEvent\n    | ResponseReasoningTextDoneEvent\n    | ResponseRefusalDeltaEvent\n    | ResponseRefusalDoneEvent\n    | ResponseTextDeltaEvent\n    | ResponseTextDoneEvent;\ntype ResponseCompletedEvent = {\n    response: Response;\n    sequence_number: number;\n    type: \"response.completed\";\n};\ntype ResponseTextConfig = {\n    format?: ResponseFormatTextConfig;\n    verbosity?: \"low\" | \"medium\" | \"high\" | null;\n};\ntype ResponseTextDeltaEvent = {\n    content_index: number;\n    delta: string;\n    item_id: string;\n    logprobs: Array<Logprob>;\n    output_index: number;\n    sequence_number: number;\n    type: \"response.output_text.delta\";\n};\ntype ResponseTextDoneEvent = {\n    content_index: number;\n    item_id: string;\n    logprobs: Array<Logprob>;\n    output_index: number;\n    sequence_number: number;\n    text: string;\n    type: \"response.output_text.done\";\n};\ntype Logprob = {\n    token: string;\n    logprob: number;\n    top_logprobs?: Array<TopLogprob>;\n};\ntype TopLogprob = {\n    token?: string;\n    logprob?: number;\n};\ntype ResponseUsage = {\n    input_tokens: number;\n    output_tokens: number;\n    total_tokens: number;\n};\ntype Tool = ResponsesFunctionTool;\ntype ToolChoiceFunction = {\n    name: string;\n    type: \"function\";\n};\ntype ToolChoiceOptions = \"none\";\ntype ReasoningEffort = \"minimal\" | \"low\" | \"medium\" | \"high\" | null;\ntype StreamOptions = {\n    include_obfuscation?: boolean;\n};\ntype Ai_Cf_Baai_Bge_Base_En_V1_5_Input =\n    | {\n          text: string | string[];\n          /**\n           * The pooling method used in the embedding process. 
`cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy.\n           */\n          pooling?: \"mean\" | \"cls\";\n      }\n    | {\n          /**\n           * Batch of the embeddings requests to run using async-queue\n           */\n          requests: {\n              text: string | string[];\n              /**\n               * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy.\n               */\n              pooling?: \"mean\" | \"cls\";\n          }[];\n      };\ntype Ai_Cf_Baai_Bge_Base_En_V1_5_Output =\n    | {\n          shape?: number[];\n          /**\n           * Embeddings of the requested text values\n           */\n          data?: number[][];\n          /**\n           * The pooling method used in the embedding process.\n           */\n          pooling?: \"mean\" | \"cls\";\n      }\n    | Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse;\ninterface Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Baai_Bge_Base_En_V1_5 {\n    inputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Input;\n    postProcessedOutputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Output;\n}\ntype Ai_Cf_Openai_Whisper_Input =\n    | string\n    | {\n          /**\n           * An array of integers that represent the audio data constrained to 8-bit unsigned integer 
values\n           */\n          audio: number[];\n      };\ninterface Ai_Cf_Openai_Whisper_Output {\n    /**\n     * The transcription\n     */\n    text: string;\n    word_count?: number;\n    words?: {\n        word?: string;\n        /**\n         * The second this word begins in the recording\n         */\n        start?: number;\n        /**\n         * The ending second when the word completes\n         */\n        end?: number;\n    }[];\n    vtt?: string;\n}\ndeclare abstract class Base_Ai_Cf_Openai_Whisper {\n    inputs: Ai_Cf_Openai_Whisper_Input;\n    postProcessedOutputs: Ai_Cf_Openai_Whisper_Output;\n}\ntype Ai_Cf_Meta_M2M100_1_2B_Input =\n    | {\n          /**\n           * The text to be translated\n           */\n          text: string;\n          /**\n           * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified\n           */\n          source_lang?: string;\n          /**\n           * The language code to translate the text into (e.g., 'es' for Spanish)\n           */\n          target_lang: string;\n      }\n    | {\n          /**\n           * Batch of the embeddings requests to run using async-queue\n           */\n          requests: {\n              /**\n               * The text to be translated\n               */\n              text: string;\n              /**\n               * The language code of the source text (e.g., 'en' for English). 
Defaults to 'en' if not specified\n               */\n              source_lang?: string;\n              /**\n               * The language code to translate the text into (e.g., 'es' for Spanish)\n               */\n              target_lang: string;\n          }[];\n      };\ntype Ai_Cf_Meta_M2M100_1_2B_Output =\n    | {\n          /**\n           * The translated text in the target language\n           */\n          translated_text?: string;\n      }\n    | Ai_Cf_Meta_M2M100_1_2B_AsyncResponse;\ninterface Ai_Cf_Meta_M2M100_1_2B_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Meta_M2M100_1_2B {\n    inputs: Ai_Cf_Meta_M2M100_1_2B_Input;\n    postProcessedOutputs: Ai_Cf_Meta_M2M100_1_2B_Output;\n}\ntype Ai_Cf_Baai_Bge_Small_En_V1_5_Input =\n    | {\n          text: string | string[];\n          /**\n           * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy.\n           */\n          pooling?: \"mean\" | \"cls\";\n      }\n    | {\n          /**\n           * Batch of the embeddings requests to run using async-queue\n           */\n          requests: {\n              text: string | string[];\n              /**\n               * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. 
The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy.\n               */\n              pooling?: \"mean\" | \"cls\";\n          }[];\n      };\ntype Ai_Cf_Baai_Bge_Small_En_V1_5_Output =\n    | {\n          shape?: number[];\n          /**\n           * Embeddings of the requested text values\n           */\n          data?: number[][];\n          /**\n           * The pooling method used in the embedding process.\n           */\n          pooling?: \"mean\" | \"cls\";\n      }\n    | Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse;\ninterface Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Baai_Bge_Small_En_V1_5 {\n    inputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Input;\n    postProcessedOutputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Output;\n}\ntype Ai_Cf_Baai_Bge_Large_En_V1_5_Input =\n    | {\n          text: string | string[];\n          /**\n           * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy.\n           */\n          pooling?: \"mean\" | \"cls\";\n      }\n    | {\n          /**\n           * Batch of the embeddings requests to run using async-queue\n           */\n          requests: {\n              text: string | string[];\n              /**\n               * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. 
The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy.\n               */\n              pooling?: \"mean\" | \"cls\";\n          }[];\n      };\ntype Ai_Cf_Baai_Bge_Large_En_V1_5_Output =\n    | {\n          shape?: number[];\n          /**\n           * Embeddings of the requested text values\n           */\n          data?: number[][];\n          /**\n           * The pooling method used in the embedding process.\n           */\n          pooling?: \"mean\" | \"cls\";\n      }\n    | Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse;\ninterface Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Baai_Bge_Large_En_V1_5 {\n    inputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Input;\n    postProcessedOutputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Output;\n}\ntype Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input =\n    | string\n    | {\n          /**\n           * The input text prompt for the model to generate a response.\n           */\n          prompt?: string;\n          /**\n           * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n           */\n          raw?: boolean;\n          /**\n           * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n           */\n          top_p?: number;\n          /**\n           * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n           */\n          top_k?: number;\n          /**\n           * Random seed for reproducibility of the generation.\n           */\n          seed?: number;\n          /**\n           * Penalty for repeated tokens; higher values discourage repetition.\n           */\n          repetition_penalty?: number;\n          /**\n           * Decreases the likelihood of the model repeating the same lines verbatim.\n           */\n          frequency_penalty?: number;\n          /**\n           * Increases the likelihood of the model introducing new topics.\n           */\n          presence_penalty?: number;\n          image: number[] | (string & NonNullable<unknown>);\n          /**\n           * The maximum number of tokens to generate in the response.\n           */\n          max_tokens?: number;\n      };\ninterface Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output {\n    description?: string;\n}\ndeclare abstract class Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M {\n    inputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input;\n    postProcessedOutputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output;\n}\ntype Ai_Cf_Openai_Whisper_Tiny_En_Input =\n    | string\n    | {\n          /**\n           * An array of integers that represent the audio data constrained to 8-bit unsigned integer values\n           */\n          audio: number[];\n      };\ninterface Ai_Cf_Openai_Whisper_Tiny_En_Output {\n    /**\n     * The transcription\n     */\n    text: string;\n    word_count?: number;\n    words?: {\n        word?: string;\n        /**\n         * The second this word begins in the recording\n         */\n        start?: number;\n        /**\n         * The ending second when the word completes\n         */\n        end?: number;\n    }[];\n    vtt?: string;\n}\ndeclare abstract class Base_Ai_Cf_Openai_Whisper_Tiny_En {\n    inputs: Ai_Cf_Openai_Whisper_Tiny_En_Input;\n    postProcessedOutputs: 
Ai_Cf_Openai_Whisper_Tiny_En_Output;\n}\ninterface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input {\n    /**\n     * Base64 encoded value of the audio data.\n     */\n    audio: string;\n    /**\n     * Supported tasks are 'translate' or 'transcribe'.\n     */\n    task?: string;\n    /**\n     * The language of the audio being transcribed or translated.\n     */\n    language?: string;\n    /**\n     * Preprocess the audio with a voice activity detection model.\n     */\n    vad_filter?: boolean;\n    /**\n     * A text prompt to help provide context to the model on the contents of the audio.\n     */\n    initial_prompt?: string;\n    /**\n     * The prefix it appended the the beginning of the output of the transcription and can guide the transcription result.\n     */\n    prefix?: string;\n}\ninterface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output {\n    transcription_info?: {\n        /**\n         * The language of the audio being transcribed or translated.\n         */\n        language?: string;\n        /**\n         * The confidence level or probability of the detected language being accurate, represented as a decimal between 0 and 1.\n         */\n        language_probability?: number;\n        /**\n         * The total duration of the original audio file, in seconds.\n         */\n        duration?: number;\n        /**\n         * The duration of the audio after applying Voice Activity Detection (VAD) to remove silent or irrelevant sections, in seconds.\n         */\n        duration_after_vad?: number;\n    };\n    /**\n     * The complete transcription of the audio.\n     */\n    text: string;\n    /**\n     * The total number of words in the transcription.\n     */\n    word_count?: number;\n    segments?: {\n        /**\n         * The starting time of the segment within the audio, in seconds.\n         */\n        start?: number;\n        /**\n         * The ending time of the segment within the audio, in seconds.\n         */\n        end?: number;\n     
   /**\n         * The transcription of the segment.\n         */\n        text?: string;\n        /**\n         * The temperature used in the decoding process, controlling randomness in predictions. Lower values result in more deterministic outputs.\n         */\n        temperature?: number;\n        /**\n         * The average log probability of the predictions for the words in this segment, indicating overall confidence.\n         */\n        avg_logprob?: number;\n        /**\n         * The compression ratio of the input to the output, measuring how much the text was compressed during the transcription process.\n         */\n        compression_ratio?: number;\n        /**\n         * The probability that the segment contains no speech, represented as a decimal between 0 and 1.\n         */\n        no_speech_prob?: number;\n        words?: {\n            /**\n             * The individual word transcribed from the audio.\n             */\n            word?: string;\n            /**\n             * The starting time of the word within the audio, in seconds.\n             */\n            start?: number;\n            /**\n             * The ending time of the word within the audio, in seconds.\n             */\n            end?: number;\n        }[];\n    }[];\n    /**\n     * The transcription in WebVTT format, which includes timing and text information for use in subtitles.\n     */\n    vtt?: string;\n}\ndeclare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo {\n    inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input;\n    postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output;\n}\ntype Ai_Cf_Baai_Bge_M3_Input =\n    | Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts\n    | Ai_Cf_Baai_Bge_M3_Input_Embedding\n    | {\n          /**\n           * Batch of the embeddings requests to run using async-queue\n           */\n          requests: (Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 | Ai_Cf_Baai_Bge_M3_Input_Embedding_1)[];\n      };\ninterface 
Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts {\n    /**\n     * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts\n     */\n    query?: string;\n    /**\n     * List of provided contexts. Note that the index in this array is important, as the response will refer to it.\n     */\n    contexts: {\n        /**\n         * One of the provided context content\n         */\n        text?: string;\n    }[];\n    /**\n     * When provided with too long context should the model error out or truncate the context to fit?\n     */\n    truncate_inputs?: boolean;\n}\ninterface Ai_Cf_Baai_Bge_M3_Input_Embedding {\n    text: string | string[];\n    /**\n     * When provided with too long context should the model error out or truncate the context to fit?\n     */\n    truncate_inputs?: boolean;\n}\ninterface Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 {\n    /**\n     * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts\n     */\n    query?: string;\n    /**\n     * List of provided contexts. 
Note that the index in this array is important, as the response will refer to it.\n     */\n    contexts: {\n        /**\n         * One of the provided context content\n         */\n        text?: string;\n    }[];\n    /**\n     * When provided with too long context should the model error out or truncate the context to fit?\n     */\n    truncate_inputs?: boolean;\n}\ninterface Ai_Cf_Baai_Bge_M3_Input_Embedding_1 {\n    text: string | string[];\n    /**\n     * When provided with too long context should the model error out or truncate the context to fit?\n     */\n    truncate_inputs?: boolean;\n}\ntype Ai_Cf_Baai_Bge_M3_Output =\n    | Ai_Cf_Baai_Bge_M3_Ouput_Query\n    | Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts\n    | Ai_Cf_Baai_Bge_M3_Ouput_Embedding\n    | Ai_Cf_Baai_Bge_M3_AsyncResponse;\ninterface Ai_Cf_Baai_Bge_M3_Ouput_Query {\n    response?: {\n        /**\n         * Index of the context in the request\n         */\n        id?: number;\n        /**\n         * Score of the context under the index.\n         */\n        score?: number;\n    }[];\n}\ninterface Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts {\n    response?: number[][];\n    shape?: number[];\n    /**\n     * The pooling method used in the embedding process.\n     */\n    pooling?: \"mean\" | \"cls\";\n}\ninterface Ai_Cf_Baai_Bge_M3_Ouput_Embedding {\n    shape?: number[];\n    /**\n     * Embeddings of the requested text values\n     */\n    data?: number[][];\n    /**\n     * The pooling method used in the embedding process.\n     */\n    pooling?: \"mean\" | \"cls\";\n}\ninterface Ai_Cf_Baai_Bge_M3_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Baai_Bge_M3 {\n    inputs: Ai_Cf_Baai_Bge_M3_Input;\n    postProcessedOutputs: Ai_Cf_Baai_Bge_M3_Output;\n}\ninterface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input {\n    /**\n     * A text description of the image you 
want to generate.\n     */\n    prompt: string;\n    /**\n     * The number of diffusion steps; higher values can improve quality but take longer.\n     */\n    steps?: number;\n}\ninterface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output {\n    /**\n     * The generated image in Base64 format.\n     */\n    image?: string;\n}\ndeclare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell {\n    inputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input;\n    postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output;\n}\ntype Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input =\n    | Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt\n    | Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages;\ninterface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    image?: number[] | (string & NonNullable<unknown>);\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n    /**\n     * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model.\n     */\n    lora?: string;\n}\ninterface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role?: string;\n        /**\n         * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001\n         */\n        tool_call_id?: string;\n        content?:\n            | string\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              }[]\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              };\n    }[];\n    image?: number[] | (string & NonNullable<unknown>);\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n            
               */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    /**\n     * If true, the response will be streamed back incrementally.\n     */\n    stream?: boolean;\n    /**\n     * The maximum 
number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ntype Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output = {\n    /**\n     * The generated text response from the model\n     */\n    response?: string;\n    /**\n     * An array of tool calls requests made during the response generation\n     */\n    tool_calls?: {\n        /**\n         * The arguments passed to be passed to the tool call request\n         */\n        arguments?: object;\n        /**\n         * The name of the tool to be called\n         */\n        name?: string;\n    }[];\n};\ndeclare abstract class Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct {\n    inputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input;\n    postProcessedOutputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output;\n}\ntype Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input =\n    | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt\n    | 
Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages\n    | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch;\ninterface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model.\n     */\n    lora?: string;\n    response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role: string;\n        /**\n         * The content of the message as a string.\n         */\n        content: string;\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. 
More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The 
type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch {\n    requests?: {\n        /**\n         * User-supplied reference. This field will be present in the response as well it can be used to reference the request and response. It's NOT validated to be unique.\n         */\n        external_reference?: string;\n        /**\n         * Prompt for the text generation model\n         */\n        prompt?: string;\n        /**\n         * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n         */\n        stream?: boolean;\n        /**\n         * The maximum number of tokens to generate in the response.\n         */\n        max_tokens?: number;\n        /**\n         * Controls the randomness of the output; higher values produce more random results.\n         */\n        temperature?: number;\n        /**\n         * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n         */\n        top_p?: number;\n        /**\n         * Random seed for reproducibility of the generation.\n         */\n        seed?: number;\n        /**\n         * Penalty for repeated tokens; higher values discourage repetition.\n         */\n        repetition_penalty?: number;\n        /**\n         * Decreases the likelihood of the model repeating the same lines verbatim.\n         */\n        frequency_penalty?: number;\n        /**\n         * Increases the likelihood of the model introducing new topics.\n         */\n        presence_penalty?: number;\n        response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2;\n    }[];\n}\ninterface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ntype Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output =\n    | {\n          /**\n           * The generated text response from the model\n           */\n          response: string;\n          /**\n           * Usage statistics for the inference request\n           */\n          usage?: {\n              /**\n               * Total number of tokens in input\n               */\n              prompt_tokens?: number;\n              /**\n               * Total number of tokens in output\n               */\n              completion_tokens?: number;\n              /**\n               * Total number of input and output tokens\n               */\n              total_tokens?: number;\n          };\n          /**\n           * An array of tool calls requests made during the response generation\n           */\n          tool_calls?: {\n              /**\n               * The arguments passed to be passed to the tool call request\n               */\n              arguments?: object;\n              /**\n               * The name of the tool to be called\n               
*/\n              name?: string;\n          }[];\n      }\n    | string\n    | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse;\ninterface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast {\n    inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input;\n    postProcessedOutputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output;\n}\ninterface Ai_Cf_Meta_Llama_Guard_3_8B_Input {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender must alternate between 'user' and 'assistant'.\n         */\n        role: \"user\" | \"assistant\";\n        /**\n         * The content of the message as a string.\n         */\n        content: string;\n    }[];\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Dictate the output format of the generated response.\n     */\n    response_format?: {\n        /**\n         * Set to json_object to process and output generated text as JSON.\n         */\n        type?: string;\n    };\n}\ninterface Ai_Cf_Meta_Llama_Guard_3_8B_Output {\n    response?:\n        | string\n        | {\n              /**\n               * Whether the conversation is safe or not.\n               */\n              safe?: boolean;\n              /**\n               * A list of what hazard categories predicted for the conversation, if the conversation is deemed unsafe.\n               */\n              categories?: string[];\n          };\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * 
Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n}\ndeclare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B {\n    inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input;\n    postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output;\n}\ninterface Ai_Cf_Baai_Bge_Reranker_Base_Input {\n    /**\n     * A query you wish to perform against the provided contexts.\n     */\n    /**\n     * Number of returned results starting with the best score.\n     */\n    top_k?: number;\n    /**\n     * List of provided contexts. Note that the index in this array is important, as the response will refer to it.\n     */\n    contexts: {\n        /**\n         * One of the provided context content\n         */\n        text?: string;\n    }[];\n}\ninterface Ai_Cf_Baai_Bge_Reranker_Base_Output {\n    response?: {\n        /**\n         * Index of the context in the request\n         */\n        id?: number;\n        /**\n         * Score of the context under the index.\n         */\n        score?: number;\n    }[];\n}\ndeclare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base {\n    inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input;\n    postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output;\n}\ntype Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input =\n    | Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt\n    | Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages;\ninterface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model.\n     */\n    lora?: string;\n    response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode;\n    /**\n     * If true, a chat template is not 
applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role: string;\n        /**\n         * The content of the message as a string.\n         */\n        content: string;\n    }[];\n    
functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  
description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ntype Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output = {\n    /**\n     * The generated text response from the model\n     */\n    response: string;\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n    /**\n     * An array of tool calls requests made during the response generation\n     */\n    tool_calls?: {\n        /**\n         * The arguments passed to be passed to the tool call request\n         */\n        arguments?: object;\n        /**\n         * The name of the tool to be called\n         */\n        name?: string;\n    }[];\n};\ndeclare abstract class Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct {\n    inputs: 
Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input;\n    postProcessedOutputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output;\n}\ntype Ai_Cf_Qwen_Qwq_32B_Input = Ai_Cf_Qwen_Qwq_32B_Prompt | Ai_Cf_Qwen_Qwq_32B_Messages;\ninterface Ai_Cf_Qwen_Qwq_32B_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * JSON schema that should be fulfilled for the response.\n     */\n    guided_json?: object;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Qwen_Qwq_32B_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role?: string;\n        /**\n         * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001\n         */\n        tool_call_id?: string;\n        content?:\n            | string\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              }[]\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              };\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  
name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    /**\n     * JSON schema that should be fufilled for the response.\n     */\n    guided_json?: object;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     
* Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ntype Ai_Cf_Qwen_Qwq_32B_Output = {\n    /**\n     * The generated text response from the model\n     */\n    response: string;\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n    /**\n     * An array of tool calls requests made during the response generation\n     */\n    tool_calls?: {\n        /**\n         * The arguments passed to be passed to the tool call request\n         */\n        arguments?: object;\n        /**\n         * The name of the tool to be called\n         */\n        name?: string;\n    }[];\n};\ndeclare abstract class Base_Ai_Cf_Qwen_Qwq_32B {\n    inputs: Ai_Cf_Qwen_Qwq_32B_Input;\n    postProcessedOutputs: Ai_Cf_Qwen_Qwq_32B_Output;\n}\ntype 
Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input =\n    | Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt\n    | Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages;\ninterface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * JSON schema that should be fulfilled for the response.\n     */\n    guided_json?: object;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role?: string;\n        /**\n         * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001\n         */\n        tool_call_id?: string;\n        content?:\n            | string\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              }[]\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              };\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  
name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    /**\n     * JSON schema that should be fufilled for the response.\n     */\n    guided_json?: object;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     
* Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ntype Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output = {\n    /**\n     * The generated text response from the model\n     */\n    response: string;\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n    /**\n     * An array of tool calls requests made during the response generation\n     */\n    tool_calls?: {\n        /**\n         * The arguments passed to be passed to the tool call request\n         */\n        arguments?: object;\n        /**\n         * The name of the tool to be called\n         */\n        name?: string;\n    }[];\n};\ndeclare abstract class Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct {\n    inputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input;\n    
postProcessedOutputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output;\n}\ntype Ai_Cf_Google_Gemma_3_12B_It_Input = Ai_Cf_Google_Gemma_3_12B_It_Prompt | Ai_Cf_Google_Gemma_3_12B_It_Messages;\ninterface Ai_Cf_Google_Gemma_3_12B_It_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * JSON schema that should be fufilled for the response.\n     */\n    guided_json?: object;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Google_Gemma_3_12B_It_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role?: string;\n        content?:\n            | string\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              }[];\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. 
More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The 
type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    /**\n     * JSON schema that should be fufilled for the response.\n     */\n    guided_json?: object;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ntype Ai_Cf_Google_Gemma_3_12B_It_Output = {\n    /**\n     * The generated text response from the model\n     */\n    response: string;\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n    /**\n     * An array of tool calls requests made during the response generation\n     */\n    tool_calls?: {\n        /**\n         * The arguments passed to be passed to the tool call request\n         */\n        arguments?: object;\n        /**\n         * The name of the tool to be called\n         */\n        name?: string;\n    }[];\n};\ndeclare abstract class Base_Ai_Cf_Google_Gemma_3_12B_It {\n    inputs: Ai_Cf_Google_Gemma_3_12B_It_Input;\n    postProcessedOutputs: Ai_Cf_Google_Gemma_3_12B_It_Output;\n}\ntype Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input =\n    | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt\n    | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages\n    | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch;\ninterface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt {\n    /**\n     * The 
input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * JSON schema that should be fulfilled for the response.\n     */\n    guided_json?: object;\n    response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role?: string;\n        /**\n         * The tool call id. If you don't know what to put here you can fall back to 000000001\n         */\n        tool_call_id?: string;\n        content?:\n            | string\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              }[]\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              };\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          
description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode;\n    /**\n     * JSON schema that should be fufilled for the response.\n     */\n    guided_json?: object;\n  
  /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch {\n    requests: (\n        | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner\n        | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner\n    )[];\n}\ninterface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * JSON schema that should be fulfilled for the response.\n     */\n    guided_json?: object;\n    
response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role?: string;\n        /**\n         * The tool call id. 
If you don't know what to put here you can fall back to 000000001\n         */\n        tool_call_id?: string;\n        content?:\n            | string\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              }[]\n            | {\n                  /**\n                   * Type of the content provided\n                   */\n                  type?: string;\n                  text?: string;\n                  image_url?: {\n                      /**\n                       * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted\n                       */\n                      url?: string;\n                  };\n              };\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. 
More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The 
type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode;\n    /**\n     * JSON schema that should be fufilled for the response.\n     */\n    guided_json?: object;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ntype Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = {\n    /**\n     * The generated text response from the model\n     */\n    response: string;\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n    /**\n     * An array of tool calls requests made during the response generation\n     */\n    tool_calls?: {\n        /**\n         * The tool call id.\n         */\n        id?: string;\n        /**\n         * Specifies the type of tool (e.g., 'function').\n         */\n        type?: string;\n        /**\n         * Details of the function tool.\n         */\n        function?: {\n            /**\n             * The name of the tool to be called\n             */\n            name?: string;\n            /**\n             * The arguments passed to be passed to the tool call request\n             
*/\n            arguments?: object;\n        };\n    }[];\n};\ndeclare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct {\n    inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input;\n    postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output;\n}\ntype Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input =\n    | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt\n    | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages\n    | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch;\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model.\n     */\n    lora?: string;\n    response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role: string;\n        /**\n         * The content of the message as a string.\n         */\n        content: string;\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. 
More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The 
type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch {\n    requests: (Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1)[];\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model.\n     */\n    lora?: string;\n    response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1 {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role: string;\n        /**\n         * The content of the message as a string.\n         */\n        content: string;\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. 
More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The 
type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ntype Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output =\n    | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response\n    | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response\n    | string\n    | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse;\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response {\n    /**\n     * Unique identifier for the completion\n     */\n    id?: string;\n    /**\n     * Object type identifier\n     */\n    object?: \"chat.completion\";\n    /**\n     * Unix timestamp of when the completion was created\n     */\n    created?: number;\n    /**\n     * Model used for the completion\n     */\n    model?: string;\n    /**\n     * List of completion choices\n     */\n    choices?: {\n        /**\n         * Index of the choice in the list\n         */\n        index?: number;\n        /**\n         * The message generated by the model\n         */\n        message?: {\n            /**\n             * Role of the message author\n             */\n            role: string;\n            /**\n             * The content of the message\n             */\n            content: string;\n            /**\n             * Internal reasoning content (if available)\n             */\n            reasoning_content?: string;\n            
/**\n             * Tool calls made by the assistant\n             */\n            tool_calls?: {\n                /**\n                 * Unique identifier for the tool call\n                 */\n                id: string;\n                /**\n                 * Type of tool call\n                 */\n                type: \"function\";\n                function: {\n                    /**\n                     * Name of the function to call\n                     */\n                    name: string;\n                    /**\n                     * JSON string of arguments for the function\n                     */\n                    arguments: string;\n                };\n            }[];\n        };\n        /**\n         * Reason why the model stopped generating\n         */\n        finish_reason?: string;\n        /**\n         * Stop reason (may be null)\n         */\n        stop_reason?: string | null;\n        /**\n         * Log probabilities (if requested)\n         */\n        logprobs?: {} | null;\n    }[];\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n    /**\n     * Log probabilities for the prompt (if requested)\n     */\n    prompt_logprobs?: {} | null;\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response {\n    /**\n     * Unique identifier for the completion\n     */\n    id?: string;\n    /**\n     * Object type identifier\n     */\n    object?: \"text_completion\";\n    /**\n     * Unix timestamp of when the completion was created\n     */\n    created?: number;\n    /**\n     * Model used for the completion\n     */\n    model?: string;\n    /**\n     * List of 
completion choices\n     */\n    choices?: {\n        /**\n         * Index of the choice in the list\n         */\n        index: number;\n        /**\n         * The generated text completion\n         */\n        text: string;\n        /**\n         * Reason why the model stopped generating\n         */\n        finish_reason: string;\n        /**\n         * Stop reason (may be null)\n         */\n        stop_reason?: string | null;\n        /**\n         * Log probabilities (if requested)\n         */\n        logprobs?: {} | null;\n        /**\n         * Log probabilities for the prompt (if requested)\n         */\n        prompt_logprobs?: {} | null;\n    }[];\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n}\ninterface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8 {\n    inputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input;\n    postProcessedOutputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output;\n}\ninterface Ai_Cf_Deepgram_Nova_3_Input {\n    audio: {\n        body: object;\n        contentType: string;\n    };\n    /**\n     * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. 
When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param.\n     */\n    custom_topic_mode?: \"extended\" | \"strict\";\n    /**\n     * Custom topics you want the model to detect within your input audio or text if present Submit up to 100\n     */\n    custom_topic?: string;\n    /**\n     * Sets how the model will interpret intents submitted to the custom_intent param. When strict, the model will only return intents submitted using the custom_intent param. When extended, the model will return its own detected intents in addition those submitted using the custom_intents param\n     */\n    custom_intent_mode?: \"extended\" | \"strict\";\n    /**\n     * Custom intents you want the model to detect within your input audio if present\n     */\n    custom_intent?: string;\n    /**\n     * Identifies and extracts key entities from content in submitted audio\n     */\n    detect_entities?: boolean;\n    /**\n     * Identifies the dominant language spoken in submitted audio\n     */\n    detect_language?: boolean;\n    /**\n     * Recognize speaker changes. 
Each word in the transcript will be assigned a speaker number starting at 0\n     */\n    diarize?: boolean;\n    /**\n     * Identify and extract key entities from content in submitted audio\n     */\n    dictation?: boolean;\n    /**\n     * Specify the expected encoding of your submitted audio\n     */\n    encoding?: \"linear16\" | \"flac\" | \"mulaw\" | \"amr-nb\" | \"amr-wb\" | \"opus\" | \"speex\" | \"g729\";\n    /**\n     * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing\n     */\n    extra?: string;\n    /**\n     * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um'\n     */\n    filler_words?: boolean;\n    /**\n     * Key term prompting can boost or suppress specialized terminology and brands.\n     */\n    keyterm?: string;\n    /**\n     * Keywords can boost or suppress specialized terminology and brands.\n     */\n    keywords?: string;\n    /**\n     * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available.\n     */\n    language?: string;\n    /**\n     * Spoken measurements will be converted to their corresponding abbreviations.\n     */\n    measurements?: boolean;\n    /**\n     * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. 
https://dpgr.am/deepgram-mip.\n     */\n    mip_opt_out?: boolean;\n    /**\n     * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio\n     */\n    mode?: \"general\" | \"medical\" | \"finance\";\n    /**\n     * Transcribe each audio channel independently.\n     */\n    multichannel?: boolean;\n    /**\n     * Numerals converts numbers from written format to numerical format.\n     */\n    numerals?: boolean;\n    /**\n     * Splits audio into paragraphs to improve transcript readability.\n     */\n    paragraphs?: boolean;\n    /**\n     * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely.\n     */\n    profanity_filter?: boolean;\n    /**\n     * Add punctuation and capitalization to the transcript.\n     */\n    punctuate?: boolean;\n    /**\n     * Redaction removes sensitive information from your transcripts.\n     */\n    redact?: string;\n    /**\n     * Search for terms or phrases in submitted audio and replaces them.\n     */\n    replace?: string;\n    /**\n     * Search for terms or phrases in submitted audio.\n     */\n    search?: string;\n    /**\n     * Recognizes the sentiment throughout a transcript or text.\n     */\n    sentiment?: boolean;\n    /**\n     * Apply formatting to transcript output. 
When set to true, additional formatting will be applied to transcripts to improve readability.\n     */\n    smart_format?: boolean;\n    /**\n     * Detect topics throughout a transcript or text.\n     */\n    topics?: boolean;\n    /**\n     * Segments speech into meaningful semantic units.\n     */\n    utterances?: boolean;\n    /**\n     * Seconds to wait before detecting a pause between words in submitted audio.\n     */\n    utt_split?: number;\n    /**\n     * The number of channels in the submitted audio\n     */\n    channels?: number;\n    /**\n     * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for webosockets.\n     */\n    interim_results?: boolean;\n    /**\n     * Indicates how long model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing\n     */\n    endpointing?: string;\n    /**\n     * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for webosockets.\n     */\n    vad_events?: boolean;\n    /**\n     * Indicates how long model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. 
Note: Supported only for webosockets.\n     */\n    utterance_end_ms?: boolean;\n}\ninterface Ai_Cf_Deepgram_Nova_3_Output {\n    results?: {\n        channels?: {\n            alternatives?: {\n                confidence?: number;\n                transcript?: string;\n                words?: {\n                    confidence?: number;\n                    end?: number;\n                    start?: number;\n                    word?: string;\n                }[];\n            }[];\n        }[];\n        summary?: {\n            result?: string;\n            short?: string;\n        };\n        sentiments?: {\n            segments?: {\n                text?: string;\n                start_word?: number;\n                end_word?: number;\n                sentiment?: string;\n                sentiment_score?: number;\n            }[];\n            average?: {\n                sentiment?: string;\n                sentiment_score?: number;\n            };\n        };\n    };\n}\ndeclare abstract class Base_Ai_Cf_Deepgram_Nova_3 {\n    inputs: Ai_Cf_Deepgram_Nova_3_Input;\n    postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output;\n}\ninterface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input {\n    queries?: string | string[];\n    /**\n     * Optional instruction for the task\n     */\n    instruction?: string;\n    documents?: string | string[];\n    text?: string | string[];\n}\ninterface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output {\n    data?: number[][];\n    shape?: number[];\n}\ndeclare abstract class Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B {\n    inputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input;\n    postProcessedOutputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output;\n}\ntype Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input =\n    | {\n          /**\n           * readable stream with audio data and content-type specified for that data\n           */\n          audio: {\n              body: object;\n              contentType: string;\n          };\n          /**\n           * type of data PCM data 
that's sent to the inference server as raw array\n           */\n          dtype?: \"uint8\" | \"float32\" | \"float64\";\n      }\n    | {\n          /**\n           * base64 encoded audio data\n           */\n          audio: string;\n          /**\n           * type of data PCM data that's sent to the inference server as raw array\n           */\n          dtype?: \"uint8\" | \"float32\" | \"float64\";\n      };\ninterface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output {\n    /**\n     * if true, end-of-turn was detected\n     */\n    is_complete?: boolean;\n    /**\n     * probability of the end-of-turn detection\n     */\n    probability?: number;\n}\ndeclare abstract class Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 {\n    inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input;\n    postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output;\n}\ndeclare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B {\n    inputs: ResponsesInput;\n    postProcessedOutputs: ResponsesOutput;\n}\ndeclare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B {\n    inputs: ResponsesInput;\n    postProcessedOutputs: ResponsesOutput;\n}\ninterface Ai_Cf_Leonardo_Phoenix_1_0_Input {\n    /**\n     * A text description of the image you want to generate.\n     */\n    prompt: string;\n    /**\n     * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt\n     */\n    guidance?: number;\n    /**\n     * Random seed for reproducibility of the image generation\n     */\n    seed?: number;\n    /**\n     * The height of the generated image in pixels\n     */\n    height?: number;\n    /**\n     * The width of the generated image in pixels\n     */\n    width?: number;\n    /**\n     * The number of diffusion steps; higher values can improve quality but take longer\n     */\n    num_steps?: number;\n    /**\n     * Specify what to exclude from the generated images\n     */\n    negative_prompt?: string;\n}\n/**\n * The generated image in JPEG format\n 
*/\ntype Ai_Cf_Leonardo_Phoenix_1_0_Output = string;\ndeclare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 {\n    inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input;\n    postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output;\n}\ninterface Ai_Cf_Leonardo_Lucid_Origin_Input {\n    /**\n     * A text description of the image you want to generate.\n     */\n    prompt: string;\n    /**\n     * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt\n     */\n    guidance?: number;\n    /**\n     * Random seed for reproducibility of the image generation\n     */\n    seed?: number;\n    /**\n     * The height of the generated image in pixels\n     */\n    height?: number;\n    /**\n     * The width of the generated image in pixels\n     */\n    width?: number;\n    /**\n     * The number of diffusion steps; higher values can improve quality but take longer\n     */\n    num_steps?: number;\n    /**\n     * The number of diffusion steps; higher values can improve quality but take longer\n     */\n    steps?: number;\n}\ninterface Ai_Cf_Leonardo_Lucid_Origin_Output {\n    /**\n     * The generated image in Base64 format.\n     */\n    image?: string;\n}\ndeclare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin {\n    inputs: Ai_Cf_Leonardo_Lucid_Origin_Input;\n    postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output;\n}\ninterface Ai_Cf_Deepgram_Aura_1_Input {\n    /**\n     * Speaker used to produce the audio.\n     */\n    speaker?:\n        | \"angus\"\n        | \"asteria\"\n        | \"arcas\"\n        | \"orion\"\n        | \"orpheus\"\n        | \"athena\"\n        | \"luna\"\n        | \"zeus\"\n        | \"perseus\"\n        | \"helios\"\n        | \"hera\"\n        | \"stella\";\n    /**\n     * Encoding of the output audio.\n     */\n    encoding?: \"linear16\" | \"flac\" | \"mulaw\" | \"alaw\" | \"mp3\" | \"opus\" | \"aac\";\n    /**\n     * Container specifies the file format wrapper for 
the output audio. The available options depend on the encoding type..\n     */\n    container?: \"none\" | \"wav\" | \"ogg\";\n    /**\n     * The text content to be converted to speech\n     */\n    text: string;\n    /**\n     * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable\n     */\n    sample_rate?: number;\n    /**\n     * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type.\n     */\n    bit_rate?: number;\n}\n/**\n * The generated audio in MP3 format\n */\ntype Ai_Cf_Deepgram_Aura_1_Output = string;\ndeclare abstract class Base_Ai_Cf_Deepgram_Aura_1 {\n    inputs: Ai_Cf_Deepgram_Aura_1_Input;\n    postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output;\n}\ninterface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input {\n    /**\n     * Input text to translate. Can be a single string or a list of strings.\n     */\n    text: string | string[];\n    /**\n     * Target langauge to translate to\n     */\n    target_language:\n        | \"asm_Beng\"\n        | \"awa_Deva\"\n        | \"ben_Beng\"\n        | \"bho_Deva\"\n        | \"brx_Deva\"\n        | \"doi_Deva\"\n        | \"eng_Latn\"\n        | \"gom_Deva\"\n        | \"gon_Deva\"\n        | \"guj_Gujr\"\n        | \"hin_Deva\"\n        | \"hne_Deva\"\n        | \"kan_Knda\"\n        | \"kas_Arab\"\n        | \"kas_Deva\"\n        | \"kha_Latn\"\n        | \"lus_Latn\"\n        | \"mag_Deva\"\n        | \"mai_Deva\"\n        | \"mal_Mlym\"\n        | \"mar_Deva\"\n        | \"mni_Beng\"\n        | \"mni_Mtei\"\n        | \"npi_Deva\"\n        | \"ory_Orya\"\n        | \"pan_Guru\"\n        | \"san_Deva\"\n        | \"sat_Olck\"\n        | \"snd_Arab\"\n        | \"snd_Deva\"\n        | \"tam_Taml\"\n        | \"tel_Telu\"\n        | \"urd_Arab\"\n        | \"unr_Deva\";\n}\ninterface 
Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output {\n    /**\n     * Translated texts\n     */\n    translations: string[];\n}\ndeclare abstract class Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B {\n    inputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input;\n    postProcessedOutputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output;\n}\ntype Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input =\n    | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt\n    | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages\n    | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch;\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model.\n     */\n    lora?: string;\n    response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role: string;\n        /**\n         * The content of the message as a string.\n         */\n        content: string;\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. 
More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The 
type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch {\n    requests: (\n        | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1\n        | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1\n    )[];\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1 {\n    /**\n     * The input text prompt for the model to generate a response.\n     */\n    prompt: string;\n    /**\n     * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model.\n     */\n    lora?: string;\n    response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it 
considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1 {\n    /**\n     * An array of message objects representing the conversation history.\n     */\n    messages: {\n        /**\n         * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool').\n         */\n        role: string;\n        /**\n         * The content of the message as a string.\n         */\n        content: string;\n    }[];\n    functions?: {\n        name: string;\n        code: string;\n    }[];\n    /**\n     * A list of tools available for the assistant to use.\n     */\n    tools?: (\n        | {\n              /**\n               * The name of the tool. 
More descriptive the better.\n               */\n              name: string;\n              /**\n               * A brief description of what the tool does.\n               */\n              description: string;\n              /**\n               * Schema defining the parameters accepted by the tool.\n               */\n              parameters: {\n                  /**\n                   * The type of the parameters object (usually 'object').\n                   */\n                  type: string;\n                  /**\n                   * List of required parameter names.\n                   */\n                  required?: string[];\n                  /**\n                   * Definitions of each parameter.\n                   */\n                  properties: {\n                      [k: string]: {\n                          /**\n                           * The data type of the parameter.\n                           */\n                          type: string;\n                          /**\n                           * A description of the expected parameter.\n                           */\n                          description: string;\n                      };\n                  };\n              };\n          }\n        | {\n              /**\n               * Specifies the type of tool (e.g., 'function').\n               */\n              type: string;\n              /**\n               * Details of the function tool.\n               */\n              function: {\n                  /**\n                   * The name of the function.\n                   */\n                  name: string;\n                  /**\n                   * A brief description of what the function does.\n                   */\n                  description: string;\n                  /**\n                   * Schema defining the parameters accepted by the function.\n                   */\n                  parameters: {\n                      /**\n                       * The 
type of the parameters object (usually 'object').\n                       */\n                      type: string;\n                      /**\n                       * List of required parameter names.\n                       */\n                      required?: string[];\n                      /**\n                       * Definitions of each parameter.\n                       */\n                      properties: {\n                          [k: string]: {\n                              /**\n                               * The data type of the parameter.\n                               */\n                              type: string;\n                              /**\n                               * A description of the expected parameter.\n                               */\n                              description: string;\n                          };\n                      };\n                  };\n              };\n          }\n    )[];\n    response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3;\n    /**\n     * If true, a chat template is not applied and you must adhere to the specific model's expected formatting.\n     */\n    raw?: boolean;\n    /**\n     * If true, the response will be streamed back incrementally using SSE, Server Sent Events.\n     */\n    stream?: boolean;\n    /**\n     * The maximum number of tokens to generate in the response.\n     */\n    max_tokens?: number;\n    /**\n     * Controls the randomness of the output; higher values produce more random results.\n     */\n    temperature?: number;\n    /**\n     * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses.\n     */\n    top_p?: number;\n    /**\n     * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises.\n     */\n    top_k?: number;\n    /**\n     * Random seed for reproducibility of the generation.\n     */\n    seed?: number;\n    /**\n     * Penalty for repeated tokens; higher values discourage repetition.\n     */\n    repetition_penalty?: number;\n    /**\n     * Decreases the likelihood of the model repeating the same lines verbatim.\n     */\n    frequency_penalty?: number;\n    /**\n     * Increases the likelihood of the model introducing new topics.\n     */\n    presence_penalty?: number;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3 {\n    type?: \"json_object\" | \"json_schema\";\n    json_schema?: unknown;\n}\ntype Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output =\n    | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response\n    | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response\n    | string\n    | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse;\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response {\n    /**\n     * Unique identifier for the completion\n     */\n    id?: string;\n    /**\n     * Object type identifier\n     */\n    object?: \"chat.completion\";\n    /**\n     * Unix timestamp of when the completion was created\n     */\n    created?: number;\n    /**\n     * Model used for the completion\n     */\n    model?: string;\n    /**\n     * List of completion choices\n     */\n    choices?: {\n        /**\n         * Index of the choice in the list\n         */\n        index?: number;\n        /**\n         * The message generated by the model\n         */\n        message?: {\n            /**\n             * Role of the message author\n             */\n            role: string;\n            /**\n             * The content of the message\n             */\n            content: string;\n            /**\n             * Internal reasoning content 
(if available)\n             */\n            reasoning_content?: string;\n            /**\n             * Tool calls made by the assistant\n             */\n            tool_calls?: {\n                /**\n                 * Unique identifier for the tool call\n                 */\n                id: string;\n                /**\n                 * Type of tool call\n                 */\n                type: \"function\";\n                function: {\n                    /**\n                     * Name of the function to call\n                     */\n                    name: string;\n                    /**\n                     * JSON string of arguments for the function\n                     */\n                    arguments: string;\n                };\n            }[];\n        };\n        /**\n         * Reason why the model stopped generating\n         */\n        finish_reason?: string;\n        /**\n         * Stop reason (may be null)\n         */\n        stop_reason?: string | null;\n        /**\n         * Log probabilities (if requested)\n         */\n        logprobs?: {} | null;\n    }[];\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n    /**\n     * Log probabilities for the prompt (if requested)\n     */\n    prompt_logprobs?: {} | null;\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response {\n    /**\n     * Unique identifier for the completion\n     */\n    id?: string;\n    /**\n     * Object type identifier\n     */\n    object?: \"text_completion\";\n    /**\n     * Unix timestamp of when the completion was created\n     */\n    created?: number;\n    
/**\n     * Model used for the completion\n     */\n    model?: string;\n    /**\n     * List of completion choices\n     */\n    choices?: {\n        /**\n         * Index of the choice in the list\n         */\n        index: number;\n        /**\n         * The generated text completion\n         */\n        text: string;\n        /**\n         * Reason why the model stopped generating\n         */\n        finish_reason: string;\n        /**\n         * Stop reason (may be null)\n         */\n        stop_reason?: string | null;\n        /**\n         * Log probabilities (if requested)\n         */\n        logprobs?: {} | null;\n        /**\n         * Log probabilities for the prompt (if requested)\n         */\n        prompt_logprobs?: {} | null;\n    }[];\n    /**\n     * Usage statistics for the inference request\n     */\n    usage?: {\n        /**\n         * Total number of tokens in input\n         */\n        prompt_tokens?: number;\n        /**\n         * Total number of tokens in output\n         */\n        completion_tokens?: number;\n        /**\n         * Total number of input and output tokens\n         */\n        total_tokens?: number;\n    };\n}\ninterface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse {\n    /**\n     * The async request id that can be used to obtain the results.\n     */\n    request_id?: string;\n}\ndeclare abstract class Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It {\n    inputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input;\n    postProcessedOutputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output;\n}\ninterface Ai_Cf_Pfnet_Plamo_Embedding_1B_Input {\n    /**\n     * Input text to embed. 
Can be a single string or a list of strings.\n     */\n    text: string | string[];\n}\ninterface Ai_Cf_Pfnet_Plamo_Embedding_1B_Output {\n    /**\n     * Embedding vectors, where each vector is a list of floats.\n     */\n    data: number[][];\n    /**\n     * Shape of the embedding data as [number_of_embeddings, embedding_dimension].\n     *\n     * @minItems 2\n     * @maxItems 2\n     */\n    shape: [number, number];\n}\ndeclare abstract class Base_Ai_Cf_Pfnet_Plamo_Embedding_1B {\n    inputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Input;\n    postProcessedOutputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Output;\n}\ninterface Ai_Cf_Deepgram_Flux_Input {\n    /**\n     * Encoding of the audio stream. Currently only supports raw signed little-endian 16-bit PCM.\n     */\n    encoding: \"linear16\";\n    /**\n     * Sample rate of the audio stream in Hz.\n     */\n    sample_rate: string;\n    /**\n     * End-of-turn confidence required to fire an eager end-of-turn event. When set, enables EagerEndOfTurn and TurnResumed events. Valid Values 0.3 - 0.9.\n     */\n    eager_eot_threshold?: string;\n    /**\n     * End-of-turn confidence required to finish a turn. Valid Values 0.5 - 0.9.\n     */\n    eot_threshold?: string;\n    /**\n     * A turn will be finished when this much time has passed after speech, regardless of EOT confidence.\n     */\n    eot_timeout_ms?: string;\n    /**\n     * Keyterm prompting can improve recognition of specialized terminology. Pass multiple keyterm query parameters to boost multiple keyterms.\n     */\n    keyterm?: string;\n    /**\n     * Opts out requests from the Deepgram Model Improvement Program. Refer to Deepgram Docs for pricing impacts before setting this to true. 
https://dpgr.am/deepgram-mip\n     */\n    mip_opt_out?: \"true\" | \"false\";\n    /**\n     * Label your requests for the purpose of identification during usage reporting\n     */\n    tag?: string;\n}\n/**\n * Output will be returned as websocket messages.\n */\ninterface Ai_Cf_Deepgram_Flux_Output {\n    /**\n     * The unique identifier of the request (uuid)\n     */\n    request_id?: string;\n    /**\n     * Starts at 0 and increments for each message the server sends to the client.\n     */\n    sequence_id?: number;\n    /**\n     * The type of event being reported.\n     */\n    event?: \"Update\" | \"StartOfTurn\" | \"EagerEndOfTurn\" | \"TurnResumed\" | \"EndOfTurn\";\n    /**\n     * The index of the current turn\n     */\n    turn_index?: number;\n    /**\n     * Start time in seconds of the audio range that was transcribed\n     */\n    audio_window_start?: number;\n    /**\n     * End time in seconds of the audio range that was transcribed\n     */\n    audio_window_end?: number;\n    /**\n     * Text that was said over the course of the current turn\n     */\n    transcript?: string;\n    /**\n     * The words in the transcript\n     */\n    words?: {\n        /**\n         * The individual punctuated, properly-cased word from the transcript\n         */\n        word: string;\n        /**\n         * Confidence that this word was transcribed correctly\n         */\n        confidence: number;\n    }[];\n    /**\n     * Confidence that no more speech is coming in this turn\n     */\n    end_of_turn_confidence?: number;\n}\ndeclare abstract class Base_Ai_Cf_Deepgram_Flux {\n    inputs: Ai_Cf_Deepgram_Flux_Input;\n    postProcessedOutputs: Ai_Cf_Deepgram_Flux_Output;\n}\ninterface Ai_Cf_Deepgram_Aura_2_En_Input {\n    /**\n     * Speaker used to produce the audio.\n     */\n    speaker?:\n        | \"amalthea\"\n        | \"andromeda\"\n        | \"apollo\"\n        | \"arcas\"\n        | \"aries\"\n        | \"asteria\"\n        | \"athena\"\n        
| \"atlas\"\n        | \"aurora\"\n        | \"callista\"\n        | \"cora\"\n        | \"cordelia\"\n        | \"delia\"\n        | \"draco\"\n        | \"electra\"\n        | \"harmonia\"\n        | \"helena\"\n        | \"hera\"\n        | \"hermes\"\n        | \"hyperion\"\n        | \"iris\"\n        | \"janus\"\n        | \"juno\"\n        | \"jupiter\"\n        | \"luna\"\n        | \"mars\"\n        | \"minerva\"\n        | \"neptune\"\n        | \"odysseus\"\n        | \"ophelia\"\n        | \"orion\"\n        | \"orpheus\"\n        | \"pandora\"\n        | \"phoebe\"\n        | \"pluto\"\n        | \"saturn\"\n        | \"thalia\"\n        | \"theia\"\n        | \"vesta\"\n        | \"zeus\";\n    /**\n     * Encoding of the output audio.\n     */\n    encoding?: \"linear16\" | \"flac\" | \"mulaw\" | \"alaw\" | \"mp3\" | \"opus\" | \"aac\";\n    /**\n     * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type..\n     */\n    container?: \"none\" | \"wav\" | \"ogg\";\n    /**\n     * The text content to be converted to speech\n     */\n    text: string;\n    /**\n     * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable\n     */\n    sample_rate?: number;\n    /**\n     * The bitrate of the audio in bits per second. 
Choose from predefined ranges or specific values based on the encoding type.\n     */\n    bit_rate?: number;\n}\n/**\n * The generated audio in MP3 format\n */\ntype Ai_Cf_Deepgram_Aura_2_En_Output = string;\ndeclare abstract class Base_Ai_Cf_Deepgram_Aura_2_En {\n    inputs: Ai_Cf_Deepgram_Aura_2_En_Input;\n    postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_En_Output;\n}\ninterface Ai_Cf_Deepgram_Aura_2_Es_Input {\n    /**\n     * Speaker used to produce the audio.\n     */\n    speaker?:\n        | \"sirio\"\n        | \"nestor\"\n        | \"carina\"\n        | \"celeste\"\n        | \"alvaro\"\n        | \"diana\"\n        | \"aquila\"\n        | \"selena\"\n        | \"estrella\"\n        | \"javier\";\n    /**\n     * Encoding of the output audio.\n     */\n    encoding?: \"linear16\" | \"flac\" | \"mulaw\" | \"alaw\" | \"mp3\" | \"opus\" | \"aac\";\n    /**\n     * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type..\n     */\n    container?: \"none\" | \"wav\" | \"ogg\";\n    /**\n     * The text content to be converted to speech\n     */\n    text: string;\n    /**\n     * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable\n     */\n    sample_rate?: number;\n    /**\n     * The bitrate of the audio in bits per second. 
Choose from predefined ranges or specific values based on the encoding type.\n     */\n    bit_rate?: number;\n}\n/**\n * The generated audio in MP3 format\n */\ntype Ai_Cf_Deepgram_Aura_2_Es_Output = string;\ndeclare abstract class Base_Ai_Cf_Deepgram_Aura_2_Es {\n    inputs: Ai_Cf_Deepgram_Aura_2_Es_Input;\n    postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_Es_Output;\n}\ninterface AiModels {\n    \"@cf/huggingface/distilbert-sst-2-int8\": BaseAiTextClassification;\n    \"@cf/stabilityai/stable-diffusion-xl-base-1.0\": BaseAiTextToImage;\n    \"@cf/runwayml/stable-diffusion-v1-5-inpainting\": BaseAiTextToImage;\n    \"@cf/runwayml/stable-diffusion-v1-5-img2img\": BaseAiTextToImage;\n    \"@cf/lykon/dreamshaper-8-lcm\": BaseAiTextToImage;\n    \"@cf/bytedance/stable-diffusion-xl-lightning\": BaseAiTextToImage;\n    \"@cf/myshell-ai/melotts\": BaseAiTextToSpeech;\n    \"@cf/google/embeddinggemma-300m\": BaseAiTextEmbeddings;\n    \"@cf/microsoft/resnet-50\": BaseAiImageClassification;\n    \"@cf/meta/llama-2-7b-chat-int8\": BaseAiTextGeneration;\n    \"@cf/mistral/mistral-7b-instruct-v0.1\": BaseAiTextGeneration;\n    \"@cf/meta/llama-2-7b-chat-fp16\": BaseAiTextGeneration;\n    \"@hf/thebloke/llama-2-13b-chat-awq\": BaseAiTextGeneration;\n    \"@hf/thebloke/mistral-7b-instruct-v0.1-awq\": BaseAiTextGeneration;\n    \"@hf/thebloke/zephyr-7b-beta-awq\": BaseAiTextGeneration;\n    \"@hf/thebloke/openhermes-2.5-mistral-7b-awq\": BaseAiTextGeneration;\n    \"@hf/thebloke/neural-chat-7b-v3-1-awq\": BaseAiTextGeneration;\n    \"@hf/thebloke/llamaguard-7b-awq\": BaseAiTextGeneration;\n    \"@hf/thebloke/deepseek-coder-6.7b-base-awq\": BaseAiTextGeneration;\n    \"@hf/thebloke/deepseek-coder-6.7b-instruct-awq\": BaseAiTextGeneration;\n    \"@cf/deepseek-ai/deepseek-math-7b-instruct\": BaseAiTextGeneration;\n    \"@cf/defog/sqlcoder-7b-2\": BaseAiTextGeneration;\n    \"@cf/openchat/openchat-3.5-0106\": BaseAiTextGeneration;\n    \"@cf/tiiuae/falcon-7b-instruct\": 
BaseAiTextGeneration;\n    \"@cf/thebloke/discolm-german-7b-v1-awq\": BaseAiTextGeneration;\n    \"@cf/qwen/qwen1.5-0.5b-chat\": BaseAiTextGeneration;\n    \"@cf/qwen/qwen1.5-7b-chat-awq\": BaseAiTextGeneration;\n    \"@cf/qwen/qwen1.5-14b-chat-awq\": BaseAiTextGeneration;\n    \"@cf/tinyllama/tinyllama-1.1b-chat-v1.0\": BaseAiTextGeneration;\n    \"@cf/microsoft/phi-2\": BaseAiTextGeneration;\n    \"@cf/qwen/qwen1.5-1.8b-chat\": BaseAiTextGeneration;\n    \"@cf/mistral/mistral-7b-instruct-v0.2-lora\": BaseAiTextGeneration;\n    \"@hf/nousresearch/hermes-2-pro-mistral-7b\": BaseAiTextGeneration;\n    \"@hf/nexusflow/starling-lm-7b-beta\": BaseAiTextGeneration;\n    \"@hf/google/gemma-7b-it\": BaseAiTextGeneration;\n    \"@cf/meta-llama/llama-2-7b-chat-hf-lora\": BaseAiTextGeneration;\n    \"@cf/google/gemma-2b-it-lora\": BaseAiTextGeneration;\n    \"@cf/google/gemma-7b-it-lora\": BaseAiTextGeneration;\n    \"@hf/mistral/mistral-7b-instruct-v0.2\": BaseAiTextGeneration;\n    \"@cf/meta/llama-3-8b-instruct\": BaseAiTextGeneration;\n    \"@cf/fblgit/una-cybertron-7b-v2-bf16\": BaseAiTextGeneration;\n    \"@cf/meta/llama-3-8b-instruct-awq\": BaseAiTextGeneration;\n    \"@cf/meta/llama-3.1-8b-instruct-fp8\": BaseAiTextGeneration;\n    \"@cf/meta/llama-3.1-8b-instruct-awq\": BaseAiTextGeneration;\n    \"@cf/meta/llama-3.2-3b-instruct\": BaseAiTextGeneration;\n    \"@cf/meta/llama-3.2-1b-instruct\": BaseAiTextGeneration;\n    \"@cf/deepseek-ai/deepseek-r1-distill-qwen-32b\": BaseAiTextGeneration;\n    \"@cf/ibm-granite/granite-4.0-h-micro\": BaseAiTextGeneration;\n    \"@cf/facebook/bart-large-cnn\": BaseAiSummarization;\n    \"@cf/llava-hf/llava-1.5-7b-hf\": BaseAiImageToText;\n    \"@cf/baai/bge-base-en-v1.5\": Base_Ai_Cf_Baai_Bge_Base_En_V1_5;\n    \"@cf/openai/whisper\": Base_Ai_Cf_Openai_Whisper;\n    \"@cf/meta/m2m100-1.2b\": Base_Ai_Cf_Meta_M2M100_1_2B;\n    \"@cf/baai/bge-small-en-v1.5\": Base_Ai_Cf_Baai_Bge_Small_En_V1_5;\n    \"@cf/baai/bge-large-en-v1.5\": 
Base_Ai_Cf_Baai_Bge_Large_En_V1_5;\n    \"@cf/unum/uform-gen2-qwen-500m\": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M;\n    \"@cf/openai/whisper-tiny-en\": Base_Ai_Cf_Openai_Whisper_Tiny_En;\n    \"@cf/openai/whisper-large-v3-turbo\": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo;\n    \"@cf/baai/bge-m3\": Base_Ai_Cf_Baai_Bge_M3;\n    \"@cf/black-forest-labs/flux-1-schnell\": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell;\n    \"@cf/meta/llama-3.2-11b-vision-instruct\": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct;\n    \"@cf/meta/llama-3.3-70b-instruct-fp8-fast\": Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast;\n    \"@cf/meta/llama-guard-3-8b\": Base_Ai_Cf_Meta_Llama_Guard_3_8B;\n    \"@cf/baai/bge-reranker-base\": Base_Ai_Cf_Baai_Bge_Reranker_Base;\n    \"@cf/qwen/qwen2.5-coder-32b-instruct\": Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct;\n    \"@cf/qwen/qwq-32b\": Base_Ai_Cf_Qwen_Qwq_32B;\n    \"@cf/mistralai/mistral-small-3.1-24b-instruct\": Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct;\n    \"@cf/google/gemma-3-12b-it\": Base_Ai_Cf_Google_Gemma_3_12B_It;\n    \"@cf/meta/llama-4-scout-17b-16e-instruct\": Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct;\n    \"@cf/qwen/qwen3-30b-a3b-fp8\": Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8;\n    \"@cf/deepgram/nova-3\": Base_Ai_Cf_Deepgram_Nova_3;\n    \"@cf/qwen/qwen3-embedding-0.6b\": Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B;\n    \"@cf/pipecat-ai/smart-turn-v2\": Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2;\n    \"@cf/openai/gpt-oss-120b\": Base_Ai_Cf_Openai_Gpt_Oss_120B;\n    \"@cf/openai/gpt-oss-20b\": Base_Ai_Cf_Openai_Gpt_Oss_20B;\n    \"@cf/leonardo/phoenix-1.0\": Base_Ai_Cf_Leonardo_Phoenix_1_0;\n    \"@cf/leonardo/lucid-origin\": Base_Ai_Cf_Leonardo_Lucid_Origin;\n    \"@cf/deepgram/aura-1\": Base_Ai_Cf_Deepgram_Aura_1;\n    \"@cf/ai4bharat/indictrans2-en-indic-1B\": Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B;\n    \"@cf/aisingapore/gemma-sea-lion-v4-27b-it\": Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It;\n    
\"@cf/pfnet/plamo-embedding-1b\": Base_Ai_Cf_Pfnet_Plamo_Embedding_1B;\n    \"@cf/deepgram/flux\": Base_Ai_Cf_Deepgram_Flux;\n    \"@cf/deepgram/aura-2-en\": Base_Ai_Cf_Deepgram_Aura_2_En;\n    \"@cf/deepgram/aura-2-es\": Base_Ai_Cf_Deepgram_Aura_2_Es;\n}\ntype AiOptions = {\n    /**\n     * Send requests as an asynchronous batch job, only works for supported models\n     * https://developers.cloudflare.com/workers-ai/features/batch-api\n     */\n    queueRequest?: boolean;\n    /**\n     * Establish websocket connections, only works for supported models\n     */\n    websocket?: boolean;\n    /**\n     * Tag your requests to group and view them in Cloudflare dashboard.\n     *\n     * Rules:\n     * Tags must only contain letters, numbers, and the symbols: : - . / @\n     * Each tag can have maximum 50 characters.\n     * Maximum 5 tags are allowed each request.\n     * Duplicate tags will removed.\n     */\n    tags?: string[];\n    gateway?: GatewayOptions;\n    returnRawResponse?: boolean;\n    prefix?: string;\n    extraHeaders?: object;\n};\ntype AiModelsSearchParams = {\n    author?: string;\n    hide_experimental?: boolean;\n    page?: number;\n    per_page?: number;\n    search?: string;\n    source?: number;\n    task?: string;\n};\ntype AiModelsSearchObject = {\n    id: string;\n    source: number;\n    name: string;\n    description: string;\n    task: {\n        id: string;\n        name: string;\n        description: string;\n    };\n    tags: string[];\n    properties: {\n        property_id: string;\n        value: string;\n    }[];\n};\ninterface InferenceUpstreamError extends Error {}\ninterface AiInternalError extends Error {}\ntype AiModelListType = Record<string, any>;\ndeclare abstract class Ai<AiModelList extends AiModelListType = AiModels> {\n    aiGatewayLogId: string | null;\n    gateway(gatewayId: string): AiGateway;\n    autorag(autoragId: string): AutoRAG;\n    run<Name extends keyof AiModelList, Options extends AiOptions, InputOptions 
extends AiModelList[Name][\"inputs\"]>(\n        model: Name,\n        inputs: InputOptions,\n        options?: Options,\n    ): Promise<\n        Options extends\n            | {\n                  returnRawResponse: true;\n              }\n            | {\n                  websocket: true;\n              }\n            ? Response\n            : InputOptions extends {\n                    stream: true;\n                }\n              ? ReadableStream\n              : AiModelList[Name][\"postProcessedOutputs\"]\n    >;\n    models(params?: AiModelsSearchParams): Promise<AiModelsSearchObject[]>;\n    toMarkdown(): ToMarkdownService;\n    toMarkdown(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise<ConversionResponse[]>;\n    toMarkdown(files: MarkdownDocument, options?: ConversionRequestOptions): Promise<ConversionResponse>;\n}\ntype GatewayRetries = {\n    maxAttempts?: 1 | 2 | 3 | 4 | 5;\n    retryDelayMs?: number;\n    backoff?: \"constant\" | \"linear\" | \"exponential\";\n};\ntype GatewayOptions = {\n    id: string;\n    cacheKey?: string;\n    cacheTtl?: number;\n    skipCache?: boolean;\n    metadata?: Record<string, number | string | boolean | null | bigint>;\n    collectLog?: boolean;\n    eventId?: string;\n    requestTimeoutMs?: number;\n    retries?: GatewayRetries;\n};\ntype UniversalGatewayOptions = Exclude<GatewayOptions, \"id\"> & {\n    /**\n     ** @deprecated\n     */\n    id?: string;\n};\ntype AiGatewayPatchLog = {\n    score?: number | null;\n    feedback?: -1 | 1 | null;\n    metadata?: Record<string, number | string | boolean | null | bigint> | null;\n};\ntype AiGatewayLog = {\n    id: string;\n    provider: string;\n    model: string;\n    model_type?: string;\n    path: string;\n    duration: number;\n    request_type?: string;\n    request_content_type?: string;\n    status_code: number;\n    response_content_type?: string;\n    success: boolean;\n    cached: boolean;\n    tokens_in?: number;\n    tokens_out?: 
number;\n    metadata?: Record<string, number | string | boolean | null | bigint>;\n    step?: number;\n    cost?: number;\n    custom_cost?: boolean;\n    request_size: number;\n    request_head?: string;\n    request_head_complete: boolean;\n    response_size: number;\n    response_head?: string;\n    response_head_complete: boolean;\n    created_at: Date;\n};\ntype AIGatewayProviders =\n    | \"workers-ai\"\n    | \"anthropic\"\n    | \"aws-bedrock\"\n    | \"azure-openai\"\n    | \"google-vertex-ai\"\n    | \"huggingface\"\n    | \"openai\"\n    | \"perplexity-ai\"\n    | \"replicate\"\n    | \"groq\"\n    | \"cohere\"\n    | \"google-ai-studio\"\n    | \"mistral\"\n    | \"grok\"\n    | \"openrouter\"\n    | \"deepseek\"\n    | \"cerebras\"\n    | \"cartesia\"\n    | \"elevenlabs\"\n    | \"adobe-firefly\";\ntype AIGatewayHeaders = {\n    \"cf-aig-metadata\": Record<string, number | string | boolean | null | bigint> | string;\n    \"cf-aig-custom-cost\":\n        | {\n              per_token_in?: number;\n              per_token_out?: number;\n          }\n        | {\n              total_cost?: number;\n          }\n        | string;\n    \"cf-aig-cache-ttl\": number | string;\n    \"cf-aig-skip-cache\": boolean | string;\n    \"cf-aig-cache-key\": string;\n    \"cf-aig-event-id\": string;\n    \"cf-aig-request-timeout\": number | string;\n    \"cf-aig-max-attempts\": number | string;\n    \"cf-aig-retry-delay\": number | string;\n    \"cf-aig-backoff\": string;\n    \"cf-aig-collect-log\": boolean | string;\n    Authorization: string;\n    \"Content-Type\": string;\n    [key: string]: string | number | boolean | object;\n};\ntype AIGatewayUniversalRequest = {\n    provider: AIGatewayProviders | string; // eslint-disable-line\n    endpoint: string;\n    headers: Partial<AIGatewayHeaders>;\n    query: unknown;\n};\ninterface AiGatewayInternalError extends Error {}\ninterface AiGatewayLogNotFound extends Error {}\ndeclare abstract class AiGateway {\n    
patchLog(logId: string, data: AiGatewayPatchLog): Promise<void>;\n    getLog(logId: string): Promise<AiGatewayLog>;\n    run(\n        data: AIGatewayUniversalRequest | AIGatewayUniversalRequest[],\n        options?: {\n            gateway?: UniversalGatewayOptions;\n            extraHeaders?: object;\n        },\n    ): Promise<Response>;\n    getUrl(provider?: AIGatewayProviders | string): Promise<string>; // eslint-disable-line\n}\ninterface AutoRAGInternalError extends Error {}\ninterface AutoRAGNotFoundError extends Error {}\ninterface AutoRAGUnauthorizedError extends Error {}\ninterface AutoRAGNameNotSetError extends Error {}\ntype ComparisonFilter = {\n    key: string;\n    type: \"eq\" | \"ne\" | \"gt\" | \"gte\" | \"lt\" | \"lte\";\n    value: string | number | boolean;\n};\ntype CompoundFilter = {\n    type: \"and\" | \"or\";\n    filters: ComparisonFilter[];\n};\ntype AutoRagSearchRequest = {\n    query: string;\n    filters?: CompoundFilter | ComparisonFilter;\n    max_num_results?: number;\n    ranking_options?: {\n        ranker?: string;\n        score_threshold?: number;\n    };\n    reranking?: {\n        enabled?: boolean;\n        model?: string;\n    };\n    rewrite_query?: boolean;\n};\ntype AutoRagAiSearchRequest = AutoRagSearchRequest & {\n    stream?: boolean;\n    system_prompt?: string;\n};\ntype AutoRagAiSearchRequestStreaming = Omit<AutoRagAiSearchRequest, \"stream\"> & {\n    stream: true;\n};\ntype AutoRagSearchResponse = {\n    object: \"vector_store.search_results.page\";\n    search_query: string;\n    data: {\n        file_id: string;\n        filename: string;\n        score: number;\n        attributes: Record<string, string | number | boolean | null>;\n        content: {\n            type: \"text\";\n            text: string;\n        }[];\n    }[];\n    has_more: boolean;\n    next_page: string | null;\n};\ntype AutoRagListResponse = {\n    id: string;\n    enable: boolean;\n    type: string;\n    source: string;\n    
vectorize_name: string;\n    paused: boolean;\n    status: string;\n}[];\ntype AutoRagAiSearchResponse = AutoRagSearchResponse & {\n    response: string;\n};\ndeclare abstract class AutoRAG {\n    list(): Promise<AutoRagListResponse>;\n    search(params: AutoRagSearchRequest): Promise<AutoRagSearchResponse>;\n    aiSearch(params: AutoRagAiSearchRequestStreaming): Promise<Response>;\n    aiSearch(params: AutoRagAiSearchRequest): Promise<AutoRagAiSearchResponse>;\n    aiSearch(params: AutoRagAiSearchRequest): Promise<AutoRagAiSearchResponse | Response>;\n}\ninterface BasicImageTransformations {\n    /**\n     * Maximum width in image pixels. The value must be an integer.\n     */\n    width?: number;\n    /**\n     * Maximum height in image pixels. The value must be an integer.\n     */\n    height?: number;\n    /**\n     * Resizing mode as a string. It affects interpretation of width and height\n     * options:\n     *  - scale-down: Similar to contain, but the image is never enlarged. If\n     *    the image is larger than given width or height, it will be resized.\n     *    Otherwise its original size will be kept.\n     *  - contain: Resizes to maximum size that fits within the given width and\n     *    height. If only a single dimension is given (e.g. only width), the\n     *    image will be shrunk or enlarged to exactly match that dimension.\n     *    Aspect ratio is always preserved.\n     *  - cover: Resizes (shrinks or enlarges) to fill the entire area of width\n     *    and height. If the image has an aspect ratio different from the ratio\n     *    of width and height, it will be cropped to fit.\n     *  - crop: The image will be shrunk and cropped to fit within the area\n     *    specified by width and height. The image will not be enlarged. For images\n     *    smaller than the given dimensions it's the same as scale-down. 
For\n     *    images larger than the given dimensions, it's the same as cover.\n     *    See also trim.\n     *  - pad: Resizes to the maximum size that fits within the given width and\n     *    height, and then fills the remaining area with a background color\n     *    (white by default). Use of this mode is not recommended, as the same\n     *    effect can be more efficiently achieved with the contain mode and the\n     *    CSS object-fit: contain property.\n     *  - squeeze: Stretches and deforms to the width and height given, even if it\n     *    breaks aspect ratio\n     */\n    fit?: \"scale-down\" | \"contain\" | \"cover\" | \"crop\" | \"pad\" | \"squeeze\";\n    /**\n     * Image segmentation using artificial intelligence models. Sets pixels not\n     * within selected segment area to transparent e.g \"foreground\" sets every\n     * background pixel as transparent.\n     */\n    segment?: \"foreground\";\n    /**\n     * When cropping with fit: \"cover\", this defines the side or point that should\n     * be left uncropped. The value is either a string\n     * \"left\", \"right\", \"top\", \"bottom\", \"auto\", or \"center\" (the default),\n     * or an object {x, y} containing focal point coordinates in the original\n     * image expressed as fractions ranging from 0.0 (top or left) to 1.0\n     * (bottom or right), 0.5 being the center. {fit: \"cover\", gravity: \"top\"} will\n     * crop bottom or left and right sides as necessary, but won’t crop anything\n     * from the top. {fit: \"cover\", gravity: {x:0.5, y:0.2}} will crop each side to\n     * preserve as much as possible around a point at 20% of the height of the\n     * source image.\n     */\n    gravity?:\n        | \"face\"\n        | \"left\"\n        | \"right\"\n        | \"top\"\n        | \"bottom\"\n        | \"center\"\n        | \"auto\"\n        | \"entropy\"\n        | BasicImageTransformationsGravityCoordinates;\n    /**\n     * Background color to add underneath the image. 
Applies only to images with\n     * transparency (such as PNG). Accepts any CSS color (#RRGGBB, rgba(…),\n     * hsl(…), etc.)\n     */\n    background?: string;\n    /**\n     * Number of degrees (90, 180, 270) to rotate the image by. width and height\n     * options refer to axes after rotation.\n     */\n    rotate?: 0 | 90 | 180 | 270 | 360;\n}\ninterface BasicImageTransformationsGravityCoordinates {\n    x?: number;\n    y?: number;\n    mode?: \"remainder\" | \"box-center\";\n}\n/**\n * In addition to the properties you can set in the RequestInit dict\n * that you pass as an argument to the Request constructor, you can\n * set certain properties of a `cf` object to control how Cloudflare\n * features are applied to that new Request.\n *\n * Note: Currently, these properties cannot be tested in the\n * playground.\n */\ninterface RequestInitCfProperties extends Record<string, unknown> {\n    cacheEverything?: boolean;\n    /**\n     * A request's cache key is what determines if two requests are\n     * \"the same\" for caching purposes. If a request has the same cache key\n     * as some previous request, then we can serve the same cached response for\n     * both. (e.g. 'some-key')\n     *\n     * Only available for Enterprise customers.\n     */\n    cacheKey?: string;\n    /**\n     * This allows you to append additional Cache-Tag response headers\n     * to the origin response without modifications to the origin server.\n     * This will allow for greater control over the Purge by Cache Tag feature\n     * utilizing changes only in the Workers process.\n     *\n     * Only available for Enterprise customers.\n     */\n    cacheTags?: string[];\n    /**\n     * Force response to be cached for a given number of seconds. (e.g. 300)\n     */\n    cacheTtl?: number;\n    /**\n     * Force response to be cached for a given number of seconds based on the Origin status code.\n     * (e.g. 
{ '200-299': 86400, '404': 1, '500-599': 0 })\n     */\n    cacheTtlByStatus?: Record<string, number>;\n    scrapeShield?: boolean;\n    apps?: boolean;\n    image?: RequestInitCfPropertiesImage;\n    minify?: RequestInitCfPropertiesImageMinify;\n    mirage?: boolean;\n    polish?: \"lossy\" | \"lossless\" | \"off\";\n    r2?: RequestInitCfPropertiesR2;\n    /**\n     * Redirects the request to an alternate origin server. You can use this,\n     * for example, to implement load balancing across several origins.\n     * (e.g.us-east.example.com)\n     *\n     * Note - For security reasons, the hostname set in resolveOverride must\n     * be proxied on the same Cloudflare zone of the incoming request.\n     * Otherwise, the setting is ignored. CNAME hosts are allowed, so to\n     * resolve to a host under a different domain or a DNS only domain first\n     * declare a CNAME record within your own zone’s DNS mapping to the\n     * external hostname, set proxy on Cloudflare, then set resolveOverride\n     * to point to that CNAME record.\n     */\n    resolveOverride?: string;\n}\ninterface RequestInitCfPropertiesImageDraw extends BasicImageTransformations {\n    /**\n     * Absolute URL of the image file to use for the drawing. It can be any of\n     * the supported file formats. For drawing of watermarks or non-rectangular\n     * overlays we recommend using PNG or WebP images.\n     */\n    url: string;\n    /**\n     * Floating-point number between 0 (transparent) and 1 (opaque).\n     * For example, opacity: 0.5 makes overlay semitransparent.\n     */\n    opacity?: number;\n    /**\n     * - If set to true, the overlay image will be tiled to cover the entire\n     *   area. 
This is useful for stock-photo-like watermarks.\n     * - If set to \"x\", the overlay image will be tiled horizontally only\n     *   (form a line).\n     * - If set to \"y\", the overlay image will be tiled vertically only\n     *   (form a line).\n     */\n    repeat?: true | \"x\" | \"y\";\n    /**\n     * Position of the overlay image relative to a given edge. Each property is\n     * an offset in pixels. 0 aligns exactly to the edge. For example, left: 10\n     * positions left side of the overlay 10 pixels from the left edge of the\n     * image it's drawn over. bottom: 0 aligns bottom of the overlay with bottom\n     * of the background image.\n     *\n     * Setting both left & right, or both top & bottom is an error.\n     *\n     * If no position is specified, the image will be centered.\n     */\n    top?: number;\n    left?: number;\n    bottom?: number;\n    right?: number;\n}\ninterface RequestInitCfPropertiesImage extends BasicImageTransformations {\n    /**\n     * Device Pixel Ratio. Default 1. Multiplier for width/height that makes it\n     * easier to specify higher-DPI sizes in <img srcset>.\n     */\n    dpr?: number;\n    /**\n     * Allows you to trim your image. Takes dpr into account and is performed before\n     * resizing or rotation.\n     *\n     * It can be used as:\n     * - left, top, right, bottom - it will specify the number of pixels to cut\n     *   off each side\n     * - width, height - the width/height you'd like to end up with - can be used\n     *   in combination with the properties above\n     * - border - this will automatically trim the surroundings of an image based on\n     *   it's color. 
It consists of three properties:\n     *    - color: rgb or hex representation of the color you wish to trim (todo: verify the rgba bit)\n     *    - tolerance: difference from color to treat as color\n     *    - keep: the number of pixels of border to keep\n     */\n    trim?:\n        | \"border\"\n        | {\n              top?: number;\n              bottom?: number;\n              left?: number;\n              right?: number;\n              width?: number;\n              height?: number;\n              border?:\n                  | boolean\n                  | {\n                        color?: string;\n                        tolerance?: number;\n                        keep?: number;\n                    };\n          };\n    /**\n     * Quality setting from 1-100 (useful values are in 60-90 range). Lower values\n     * make images look worse, but load faster. The default is 85. It applies only\n     * to JPEG and WebP images. It doesn’t have any effect on PNG.\n     */\n    quality?: number | \"low\" | \"medium-low\" | \"medium-high\" | \"high\";\n    /**\n     * Output format to generate. It can be:\n     *  - avif: generate images in AVIF format.\n     *  - webp: generate images in Google WebP format. Set quality to 100 to get\n     *    the WebP-lossless format.\n     *  - json: instead of generating an image, outputs information about the\n     *    image, in JSON format. The JSON object will contain image size\n     *    (before and after resizing), source image’s MIME type, file size, etc.\n     * - jpeg: generate images in JPEG format.\n     * - png: generate images in PNG format.\n     */\n    format?: \"avif\" | \"webp\" | \"json\" | \"jpeg\" | \"png\" | \"baseline-jpeg\" | \"png-force\" | \"svg\";\n    /**\n     * Whether to preserve animation frames from input files. Default is true.\n     * Setting it to false reduces animations to still images. 
This setting is\n     * recommended when enlarging images or processing arbitrary user content,\n     * because large GIF animations can weigh tens or even hundreds of megabytes.\n     * It is also useful to set anim:false when using format:\"json\" to get the\n     * response quicker without the number of frames.\n     */\n    anim?: boolean;\n    /**\n     * What EXIF data should be preserved in the output image. Note that EXIF\n     * rotation and embedded color profiles are always applied (\"baked in\" into\n     * the image), and aren't affected by this option. Note that if the Polish\n     * feature is enabled, all metadata may have been removed already and this\n     * option may have no effect.\n     *  - keep: Preserve most of EXIF metadata, including GPS location if there's\n     *    any.\n     *  - copyright: Only keep the copyright tag, and discard everything else.\n     *    This is the default behavior for JPEG files.\n     *  - none: Discard all invisible EXIF metadata. Currently WebP and PNG\n     *    output formats always discard metadata.\n     */\n    metadata?: \"keep\" | \"copyright\" | \"none\";\n    /**\n     * Strength of sharpening filter to apply to the image. Floating-point\n     * number between 0 (no sharpening, default) and 10 (maximum). 1.0 is a\n     * recommended value for downscaled images.\n     */\n    sharpen?: number;\n    /**\n     * Radius of a blur filter (approximate gaussian). Maximum supported radius\n     * is 250.\n     */\n    blur?: number;\n    /**\n     * Overlays are drawn in the order they appear in the array (last array\n     * entry is the topmost layer).\n     */\n    draw?: RequestInitCfPropertiesImageDraw[];\n    /**\n     * Fetching image from authenticated origin. Setting this property will\n     * pass authentication headers (Authorization, Cookie, etc.) through to\n     * the origin.\n     */\n    \"origin-auth\"?: \"share-publicly\";\n    /**\n     * Adds a border around the image. 
The border is added after resizing. Border\n     * width takes dpr into account, and can be specified either using a single\n     * width property, or individually for each side.\n     */\n    border?:\n        | {\n              color: string;\n              width: number;\n          }\n        | {\n              color: string;\n              top: number;\n              right: number;\n              bottom: number;\n              left: number;\n          };\n    /**\n     * Increase brightness by a factor. A value of 1.0 equals no change, a value\n     * of 0.5 equals half brightness, and a value of 2.0 equals twice as bright.\n     * 0 is ignored.\n     */\n    brightness?: number;\n    /**\n     * Increase contrast by a factor. A value of 1.0 equals no change, a value of\n     * 0.5 equals low contrast, and a value of 2.0 equals high contrast. 0 is\n     * ignored.\n     */\n    contrast?: number;\n    /**\n     * Increase exposure by a factor. A value of 1.0 equals no change, a value of\n     * 0.5 darkens the image, and a value of 2.0 lightens the image. 0 is ignored.\n     */\n    gamma?: number;\n    /**\n     * Increase contrast by a factor. A value of 1.0 equals no change, a value of\n     * 0.5 equals low contrast, and a value of 2.0 equals high contrast. 0 is\n     * ignored.\n     */\n    saturation?: number;\n    /**\n     * Flips the images horizontally, vertically, or both. Flipping is applied before\n     * rotation, so if you apply flip=h,rotate=90 then the image will be flipped\n     * horizontally, then rotated by 90 degrees.\n     */\n    flip?: \"h\" | \"v\" | \"hv\";\n    /**\n     * Slightly reduces latency on a cache miss by selecting a\n     * quickest-to-compress file format, at a cost of increased file size and\n     * lower image quality. It will usually override the format option and choose\n     * JPEG over WebP or AVIF. 
We do not recommend using this option, except in\n     * unusual circumstances like resizing uncacheable dynamically-generated\n     * images.\n     */\n    compression?: \"fast\";\n}\ninterface RequestInitCfPropertiesImageMinify {\n    javascript?: boolean;\n    css?: boolean;\n    html?: boolean;\n}\ninterface RequestInitCfPropertiesR2 {\n    /**\n     * Colo id of bucket that an object is stored in\n     */\n    bucketColoId?: number;\n}\n/**\n * Request metadata provided by Cloudflare's edge.\n */\ntype IncomingRequestCfProperties<HostMetadata = unknown> = IncomingRequestCfPropertiesBase &\n    IncomingRequestCfPropertiesBotManagementEnterprise &\n    IncomingRequestCfPropertiesCloudflareForSaaSEnterprise<HostMetadata> &\n    IncomingRequestCfPropertiesGeographicInformation &\n    IncomingRequestCfPropertiesCloudflareAccessOrApiShield;\ninterface IncomingRequestCfPropertiesBase extends Record<string, unknown> {\n    /**\n     * [ASN](https://www.iana.org/assignments/as-numbers/as-numbers.xhtml) of the incoming request.\n     *\n     * @example 395747\n     */\n    asn?: number;\n    /**\n     * The organization which owns the ASN of the incoming request.\n     *\n     * @example \"Google Cloud\"\n     */\n    asOrganization?: string;\n    /**\n     * The original value of the `Accept-Encoding` header if Cloudflare modified it.\n     *\n     * @example \"gzip, deflate, br\"\n     */\n    clientAcceptEncoding?: string;\n    /**\n     * The number of milliseconds it took for the request to reach your worker.\n     *\n     * @example 22\n     */\n    clientTcpRtt?: number;\n    /**\n     * The three-letter [IATA](https://en.wikipedia.org/wiki/IATA_airport_code)\n     * airport code of the data center that the request hit.\n     *\n     * @example \"DFW\"\n     */\n    colo: string;\n    /**\n     * Represents the upstream's response to a\n     * [TCP `keepalive` message](https://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html)\n     * from cloudflare.\n     *\n    
 * For workers with no upstream, this will always be `1`.\n     *\n     * @example 3\n     */\n    edgeRequestKeepAliveStatus: IncomingRequestCfPropertiesEdgeRequestKeepAliveStatus;\n    /**\n     * The HTTP Protocol the request used.\n     *\n     * @example \"HTTP/2\"\n     */\n    httpProtocol: string;\n    /**\n     * The browser-requested prioritization information in the request object.\n     *\n     * If no information was set, defaults to the empty string `\"\"`\n     *\n     * @example \"weight=192;exclusive=0;group=3;group-weight=127\"\n     * @default \"\"\n     */\n    requestPriority: string;\n    /**\n     * The TLS version of the connection to Cloudflare.\n     * In requests served over plaintext (without TLS), this property is the empty string `\"\"`.\n     *\n     * @example \"TLSv1.3\"\n     */\n    tlsVersion: string;\n    /**\n     * The cipher for the connection to Cloudflare.\n     * In requests served over plaintext (without TLS), this property is the empty string `\"\"`.\n     *\n     * @example \"AEAD-AES128-GCM-SHA256\"\n     */\n    tlsCipher: string;\n    /**\n     * Metadata containing the [`HELLO`](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2) and [`FINISHED`](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9) messages from this request's TLS handshake.\n     *\n     * If the incoming request was served over plaintext (without TLS) this field is undefined.\n     */\n    tlsExportedAuthenticator?: IncomingRequestCfPropertiesExportedAuthenticatorMetadata;\n}\ninterface IncomingRequestCfPropertiesBotManagementBase {\n    /**\n     * Cloudflare’s [level of certainty](https://developers.cloudflare.com/bots/concepts/bot-score/) that a request comes from a bot,\n     * represented as an integer percentage between `1` (almost certainly a bot) and `99` (almost certainly human).\n     *\n     * @example 54\n     */\n    score: number;\n    /**\n     * A boolean value that is true if the request comes from a good bot, like Google or 
Bing.\n     * Most customers choose to allow this traffic. For more details, see [Traffic from known bots](https://developers.cloudflare.com/firewall/known-issues-and-faq/#how-does-firewall-rules-handle-traffic-from-known-bots).\n     */\n    verifiedBot: boolean;\n    /**\n     * A boolean value that is true if the request originates from a\n     * Cloudflare-verified proxy service.\n     */\n    corporateProxy: boolean;\n    /**\n     * A boolean value that's true if the request matches [file extensions](https://developers.cloudflare.com/bots/reference/static-resources/) for many types of static resources.\n     */\n    staticResource: boolean;\n    /**\n     * List of IDs that correlate to the Bot Management heuristic detections made on a request (you can have multiple heuristic detections on the same request).\n     */\n    detectionIds: number[];\n}\ninterface IncomingRequestCfPropertiesBotManagement {\n    /**\n     * Results of Cloudflare's Bot Management analysis\n     */\n    botManagement: IncomingRequestCfPropertiesBotManagementBase;\n    /**\n     * Duplicate of `botManagement.score`.\n     *\n     * @deprecated\n     */\n    clientTrustScore: number;\n}\ninterface IncomingRequestCfPropertiesBotManagementEnterprise extends IncomingRequestCfPropertiesBotManagement {\n    /**\n     * Results of Cloudflare's Bot Management analysis\n     */\n    botManagement: IncomingRequestCfPropertiesBotManagementBase & {\n        /**\n         * A [JA3 Fingerprint](https://developers.cloudflare.com/bots/concepts/ja3-fingerprint/) to help profile specific SSL/TLS clients\n         * across different destination IPs, Ports, and X509 certificates.\n         */\n        ja3Hash: string;\n    };\n}\ninterface IncomingRequestCfPropertiesCloudflareForSaaSEnterprise<HostMetadata> {\n    /**\n     * Custom metadata set per-host in [Cloudflare for SaaS](https://developers.cloudflare.com/cloudflare-for-platforms/cloudflare-for-saas/).\n     *\n     * This field is only present if 
you have Cloudflare for SaaS enabled on your account\n     * and you have followed the [required steps to enable it]((https://developers.cloudflare.com/cloudflare-for-platforms/cloudflare-for-saas/domain-support/custom-metadata/)).\n     */\n    hostMetadata?: HostMetadata;\n}\ninterface IncomingRequestCfPropertiesCloudflareAccessOrApiShield {\n    /**\n     * Information about the client certificate presented to Cloudflare.\n     *\n     * This is populated when the incoming request is served over TLS using\n     * either Cloudflare Access or API Shield (mTLS)\n     * and the presented SSL certificate has a valid\n     * [Certificate Serial Number](https://ldapwiki.com/wiki/Certificate%20Serial%20Number)\n     * (i.e., not `null` or `\"\"`).\n     *\n     * Otherwise, a set of placeholder values are used.\n     *\n     * The property `certPresented` will be set to `\"1\"` when\n     * the object is populated (i.e. the above conditions were met).\n     */\n    tlsClientAuth: IncomingRequestCfPropertiesTLSClientAuth | IncomingRequestCfPropertiesTLSClientAuthPlaceholder;\n}\n/**\n * Metadata about the request's TLS handshake\n */\ninterface IncomingRequestCfPropertiesExportedAuthenticatorMetadata {\n    /**\n     * The client's [`HELLO` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2), encoded in hexadecimal\n     *\n     * @example \"44372ba35fa1270921d318f34c12f155dc87b682cf36a790cfaa3ba8737a1b5d\"\n     */\n    clientHandshake: string;\n    /**\n     * The server's [`HELLO` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2), encoded in hexadecimal\n     *\n     * @example \"44372ba35fa1270921d318f34c12f155dc87b682cf36a790cfaa3ba8737a1b5d\"\n     */\n    serverHandshake: string;\n    /**\n     * The client's [`FINISHED` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9), encoded in hexadecimal\n     *\n     * @example \"084ee802fe1348f688220e2a6040a05b2199a761f33cf753abb1b006792d3f8b\"\n     */\n    clientFinished: 
string;\n    /**\n     * The server's [`FINISHED` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9), encoded in hexadecimal\n     *\n     * @example \"084ee802fe1348f688220e2a6040a05b2199a761f33cf753abb1b006792d3f8b\"\n     */\n    serverFinished: string;\n}\n/**\n * Geographic data about the request's origin.\n */\ninterface IncomingRequestCfPropertiesGeographicInformation {\n    /**\n     * The [ISO 3166-1 Alpha 2](https://www.iso.org/iso-3166-country-codes.html) country code the request originated from.\n     *\n     * If your worker is [configured to accept TOR connections](https://support.cloudflare.com/hc/en-us/articles/203306930-Understanding-Cloudflare-Tor-support-and-Onion-Routing), this may also be `\"T1\"`, indicating a request that originated over TOR.\n     *\n     * If Cloudflare is unable to determine where the request originated this property is omitted.\n     *\n     * The country code `\"T1\"` is used for requests originating on TOR.\n     *\n     * @example \"GB\"\n     */\n    country?: Iso3166Alpha2Code | \"T1\";\n    /**\n     * If present, this property indicates that the request originated in the EU\n     *\n     * @example \"1\"\n     */\n    isEUCountry?: \"1\";\n    /**\n     * A two-letter code indicating the continent the request originated from.\n     *\n     * @example \"AN\"\n     */\n    continent?: ContinentCode;\n    /**\n     * The city the request originated from\n     *\n     * @example \"Austin\"\n     */\n    city?: string;\n    /**\n     * Postal code of the incoming request\n     *\n     * @example \"78701\"\n     */\n    postalCode?: string;\n    /**\n     * Latitude of the incoming request\n     *\n     * @example \"30.27130\"\n     */\n    latitude?: string;\n    /**\n     * Longitude of the incoming request\n     *\n     * @example \"-97.74260\"\n     */\n    longitude?: string;\n    /**\n     * Timezone of the incoming request\n     *\n     * @example \"America/Chicago\"\n     */\n    timezone?: string;\n  
  /**\n     * If known, the ISO 3166-2 name for the first level region associated with\n     * the IP address of the incoming request\n     *\n     * @example \"Texas\"\n     */\n    region?: string;\n    /**\n     * If known, the ISO 3166-2 code for the first-level region associated with\n     * the IP address of the incoming request\n     *\n     * @example \"TX\"\n     */\n    regionCode?: string;\n    /**\n     * Metro code (DMA) of the incoming request\n     *\n     * @example \"635\"\n     */\n    metroCode?: string;\n}\n/** Data about the incoming request's TLS certificate */\ninterface IncomingRequestCfPropertiesTLSClientAuth {\n    /** Always `\"1\"`, indicating that the certificate was presented */\n    certPresented: \"1\";\n    /**\n     * Result of certificate verification.\n     *\n     * @example \"FAILED:self signed certificate\"\n     */\n    certVerified: Exclude<CertVerificationStatus, \"NONE\">;\n    /** The presented certificate's revokation status.\n     *\n     * - A value of `\"1\"` indicates the certificate has been revoked\n     * - A value of `\"0\"` indicates the certificate has not been revoked\n     */\n    certRevoked: \"1\" | \"0\";\n    /**\n     * The certificate issuer's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html)\n     *\n     * @example \"CN=cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare\"\n     */\n    certIssuerDN: string;\n    /**\n     * The certificate subject's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html)\n     *\n     * @example \"CN=*.cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare\"\n     */\n    certSubjectDN: string;\n    /**\n     * The certificate issuer's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) ([RFC 2253](https://www.rfc-editor.org/rfc/rfc2253.html) formatted)\n     *\n     * @example \"CN=cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare\"\n     
*/\n    certIssuerDNRFC2253: string;\n    /**\n     * The certificate subject's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) ([RFC 2253](https://www.rfc-editor.org/rfc/rfc2253.html) formatted)\n     *\n     * @example \"CN=*.cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare\"\n     */\n    certSubjectDNRFC2253: string;\n    /** The certificate issuer's distinguished name (legacy policies) */\n    certIssuerDNLegacy: string;\n    /** The certificate subject's distinguished name (legacy policies) */\n    certSubjectDNLegacy: string;\n    /**\n     * The certificate's serial number\n     *\n     * @example \"00936EACBE07F201DF\"\n     */\n    certSerial: string;\n    /**\n     * The certificate issuer's serial number\n     *\n     * @example \"2489002934BDFEA34\"\n     */\n    certIssuerSerial: string;\n    /**\n     * The certificate's Subject Key Identifier\n     *\n     * @example \"BB:AF:7E:02:3D:FA:A6:F1:3C:84:8E:AD:EE:38:98:EC:D9:32:32:D4\"\n     */\n    certSKI: string;\n    /**\n     * The certificate issuer's Subject Key Identifier\n     *\n     * @example \"BB:AF:7E:02:3D:FA:A6:F1:3C:84:8E:AD:EE:38:98:EC:D9:32:32:D4\"\n     */\n    certIssuerSKI: string;\n    /**\n     * The certificate's SHA-1 fingerprint\n     *\n     * @example \"6b9109f323999e52259cda7373ff0b4d26bd232e\"\n     */\n    certFingerprintSHA1: string;\n    /**\n     * The certificate's SHA-256 fingerprint\n     *\n     * @example \"acf77cf37b4156a2708e34c4eb755f9b5dbbe5ebb55adfec8f11493438d19e6ad3f157f81fa3b98278453d5652b0c1fd1d71e5695ae4d709803a4d3f39de9dea\"\n     */\n    certFingerprintSHA256: string;\n    /**\n     * The effective starting date of the certificate\n     *\n     * @example \"Dec 22 19:39:00 2018 GMT\"\n     */\n    certNotBefore: string;\n    /**\n     * The effective expiration date of the certificate\n     *\n     * @example \"Dec 22 19:39:00 2018 GMT\"\n     */\n    certNotAfter: string;\n}\n/** Placeholder values for 
TLS Client Authorization */\ninterface IncomingRequestCfPropertiesTLSClientAuthPlaceholder {\n    certPresented: \"0\";\n    certVerified: \"NONE\";\n    certRevoked: \"0\";\n    certIssuerDN: \"\";\n    certSubjectDN: \"\";\n    certIssuerDNRFC2253: \"\";\n    certSubjectDNRFC2253: \"\";\n    certIssuerDNLegacy: \"\";\n    certSubjectDNLegacy: \"\";\n    certSerial: \"\";\n    certIssuerSerial: \"\";\n    certSKI: \"\";\n    certIssuerSKI: \"\";\n    certFingerprintSHA1: \"\";\n    certFingerprintSHA256: \"\";\n    certNotBefore: \"\";\n    certNotAfter: \"\";\n}\n/** Possible outcomes of TLS verification */\ndeclare type CertVerificationStatus =\n    /** Authentication succeeded */\n    | \"SUCCESS\"\n    /** No certificate was presented */\n    | \"NONE\"\n    /** Failed because the certificate was self-signed */\n    | \"FAILED:self signed certificate\"\n    /** Failed because the certificate failed a trust chain check */\n    | \"FAILED:unable to verify the first certificate\"\n    /** Failed because the certificate not yet valid */\n    | \"FAILED:certificate is not yet valid\"\n    /** Failed because the certificate is expired */\n    | \"FAILED:certificate has expired\"\n    /** Failed for another unspecified reason */\n    | \"FAILED\";\n/**\n * An upstream endpoint's response to a TCP `keepalive` message from Cloudflare.\n */\ndeclare type IncomingRequestCfPropertiesEdgeRequestKeepAliveStatus =\n    | 0 /** Unknown */\n    | 1 /** no keepalives (not found) */\n    | 2 /** no connection re-use, opening keepalive connection failed */\n    | 3 /** no connection re-use, keepalive accepted and saved */\n    | 4 /** connection re-use, refused by the origin server (`TCP FIN`) */\n    | 5; /** connection re-use, accepted by the origin server */\n/** ISO 3166-1 Alpha-2 codes */\ndeclare type Iso3166Alpha2Code =\n    | \"AD\"\n    | \"AE\"\n    | \"AF\"\n    | \"AG\"\n    | \"AI\"\n    | \"AL\"\n    | \"AM\"\n    | \"AO\"\n    | \"AQ\"\n    | \"AR\"\n    | \"AS\"\n 
   | \"AT\"\n    | \"AU\"\n    | \"AW\"\n    | \"AX\"\n    | \"AZ\"\n    | \"BA\"\n    | \"BB\"\n    | \"BD\"\n    | \"BE\"\n    | \"BF\"\n    | \"BG\"\n    | \"BH\"\n    | \"BI\"\n    | \"BJ\"\n    | \"BL\"\n    | \"BM\"\n    | \"BN\"\n    | \"BO\"\n    | \"BQ\"\n    | \"BR\"\n    | \"BS\"\n    | \"BT\"\n    | \"BV\"\n    | \"BW\"\n    | \"BY\"\n    | \"BZ\"\n    | \"CA\"\n    | \"CC\"\n    | \"CD\"\n    | \"CF\"\n    | \"CG\"\n    | \"CH\"\n    | \"CI\"\n    | \"CK\"\n    | \"CL\"\n    | \"CM\"\n    | \"CN\"\n    | \"CO\"\n    | \"CR\"\n    | \"CU\"\n    | \"CV\"\n    | \"CW\"\n    | \"CX\"\n    | \"CY\"\n    | \"CZ\"\n    | \"DE\"\n    | \"DJ\"\n    | \"DK\"\n    | \"DM\"\n    | \"DO\"\n    | \"DZ\"\n    | \"EC\"\n    | \"EE\"\n    | \"EG\"\n    | \"EH\"\n    | \"ER\"\n    | \"ES\"\n    | \"ET\"\n    | \"FI\"\n    | \"FJ\"\n    | \"FK\"\n    | \"FM\"\n    | \"FO\"\n    | \"FR\"\n    | \"GA\"\n    | \"GB\"\n    | \"GD\"\n    | \"GE\"\n    | \"GF\"\n    | \"GG\"\n    | \"GH\"\n    | \"GI\"\n    | \"GL\"\n    | \"GM\"\n    | \"GN\"\n    | \"GP\"\n    | \"GQ\"\n    | \"GR\"\n    | \"GS\"\n    | \"GT\"\n    | \"GU\"\n    | \"GW\"\n    | \"GY\"\n    | \"HK\"\n    | \"HM\"\n    | \"HN\"\n    | \"HR\"\n    | \"HT\"\n    | \"HU\"\n    | \"ID\"\n    | \"IE\"\n    | \"IL\"\n    | \"IM\"\n    | \"IN\"\n    | \"IO\"\n    | \"IQ\"\n    | \"IR\"\n    | \"IS\"\n    | \"IT\"\n    | \"JE\"\n    | \"JM\"\n    | \"JO\"\n    | \"JP\"\n    | \"KE\"\n    | \"KG\"\n    | \"KH\"\n    | \"KI\"\n    | \"KM\"\n    | \"KN\"\n    | \"KP\"\n    | \"KR\"\n    | \"KW\"\n    | \"KY\"\n    | \"KZ\"\n    | \"LA\"\n    | \"LB\"\n    | \"LC\"\n    | \"LI\"\n    | \"LK\"\n    | \"LR\"\n    | \"LS\"\n    | \"LT\"\n    | \"LU\"\n    | \"LV\"\n    | \"LY\"\n    | \"MA\"\n    | \"MC\"\n    | \"MD\"\n    | \"ME\"\n    | \"MF\"\n    | \"MG\"\n    | \"MH\"\n    | \"MK\"\n    | \"ML\"\n    | \"MM\"\n    | \"MN\"\n    | \"MO\"\n    | \"MP\"\n    | \"MQ\"\n    | \"MR\"\n    | \"MS\"\n    | \"MT\"\n    | 
\"MU\"\n    | \"MV\"\n    | \"MW\"\n    | \"MX\"\n    | \"MY\"\n    | \"MZ\"\n    | \"NA\"\n    | \"NC\"\n    | \"NE\"\n    | \"NF\"\n    | \"NG\"\n    | \"NI\"\n    | \"NL\"\n    | \"NO\"\n    | \"NP\"\n    | \"NR\"\n    | \"NU\"\n    | \"NZ\"\n    | \"OM\"\n    | \"PA\"\n    | \"PE\"\n    | \"PF\"\n    | \"PG\"\n    | \"PH\"\n    | \"PK\"\n    | \"PL\"\n    | \"PM\"\n    | \"PN\"\n    | \"PR\"\n    | \"PS\"\n    | \"PT\"\n    | \"PW\"\n    | \"PY\"\n    | \"QA\"\n    | \"RE\"\n    | \"RO\"\n    | \"RS\"\n    | \"RU\"\n    | \"RW\"\n    | \"SA\"\n    | \"SB\"\n    | \"SC\"\n    | \"SD\"\n    | \"SE\"\n    | \"SG\"\n    | \"SH\"\n    | \"SI\"\n    | \"SJ\"\n    | \"SK\"\n    | \"SL\"\n    | \"SM\"\n    | \"SN\"\n    | \"SO\"\n    | \"SR\"\n    | \"SS\"\n    | \"ST\"\n    | \"SV\"\n    | \"SX\"\n    | \"SY\"\n    | \"SZ\"\n    | \"TC\"\n    | \"TD\"\n    | \"TF\"\n    | \"TG\"\n    | \"TH\"\n    | \"TJ\"\n    | \"TK\"\n    | \"TL\"\n    | \"TM\"\n    | \"TN\"\n    | \"TO\"\n    | \"TR\"\n    | \"TT\"\n    | \"TV\"\n    | \"TW\"\n    | \"TZ\"\n    | \"UA\"\n    | \"UG\"\n    | \"UM\"\n    | \"US\"\n    | \"UY\"\n    | \"UZ\"\n    | \"VA\"\n    | \"VC\"\n    | \"VE\"\n    | \"VG\"\n    | \"VI\"\n    | \"VN\"\n    | \"VU\"\n    | \"WF\"\n    | \"WS\"\n    | \"YE\"\n    | \"YT\"\n    | \"ZA\"\n    | \"ZM\"\n    | \"ZW\";\n/** The 2-letter continent codes Cloudflare uses */\ndeclare type ContinentCode = \"AF\" | \"AN\" | \"AS\" | \"EU\" | \"NA\" | \"OC\" | \"SA\";\ntype CfProperties<HostMetadata = unknown> = IncomingRequestCfProperties<HostMetadata> | RequestInitCfProperties;\ninterface D1Meta {\n    duration: number;\n    size_after: number;\n    rows_read: number;\n    rows_written: number;\n    last_row_id: number;\n    changed_db: boolean;\n    changes: number;\n    /**\n     * The region of the database instance that executed the query.\n     */\n    served_by_region?: string;\n    /**\n     * True if-and-only-if the database instance that executed the query was the 
primary.\n     */\n    served_by_primary?: boolean;\n    timings?: {\n        /**\n         * The duration of the SQL query execution by the database instance. It doesn't include any network time.\n         */\n        sql_duration_ms: number;\n    };\n    /**\n     * Number of total attempts to execute the query, due to automatic retries.\n     * Note: All other fields in the response like `timings` only apply to the last attempt.\n     */\n    total_attempts?: number;\n}\ninterface D1Response {\n    success: true;\n    meta: D1Meta & Record<string, unknown>;\n    error?: never;\n}\ntype D1Result<T = unknown> = D1Response & {\n    results: T[];\n};\ninterface D1ExecResult {\n    count: number;\n    duration: number;\n}\ntype D1SessionConstraint =\n    // Indicates that the first query should go to the primary, and the rest queries\n    // using the same D1DatabaseSession will go to any replica that is consistent with\n    // the bookmark maintained by the session (returned by the first query).\n    | \"first-primary\"\n    // Indicates that the first query can go anywhere (primary or replica), and the rest queries\n    // using the same D1DatabaseSession will go to any replica that is consistent with\n    // the bookmark maintained by the session (returned by the first query).\n    | \"first-unconstrained\";\ntype D1SessionBookmark = string;\ndeclare abstract class D1Database {\n    prepare(query: string): D1PreparedStatement;\n    batch<T = unknown>(statements: D1PreparedStatement[]): Promise<D1Result<T>[]>;\n    exec(query: string): Promise<D1ExecResult>;\n    /**\n     * Creates a new D1 Session anchored at the given constraint or the bookmark.\n     * All queries executed using the created session will have sequential consistency,\n     * meaning that all writes done through the session will be visible in subsequent reads.\n     *\n     * @param constraintOrBookmark Either the session constraint or the explicit bookmark to anchor the created session.\n     
*/\n    withSession(constraintOrBookmark?: D1SessionBookmark | D1SessionConstraint): D1DatabaseSession;\n    /**\n     * @deprecated dump() will be removed soon, only applies to deprecated alpha v1 databases.\n     */\n    dump(): Promise<ArrayBuffer>;\n}\ndeclare abstract class D1DatabaseSession {\n    prepare(query: string): D1PreparedStatement;\n    batch<T = unknown>(statements: D1PreparedStatement[]): Promise<D1Result<T>[]>;\n    /**\n     * @returns The latest session bookmark across all executed queries on the session.\n     *          If no query has been executed yet, `null` is returned.\n     */\n    getBookmark(): D1SessionBookmark | null;\n}\ndeclare abstract class D1PreparedStatement {\n    bind(...values: unknown[]): D1PreparedStatement;\n    first<T = unknown>(colName: string): Promise<T | null>;\n    first<T = Record<string, unknown>>(): Promise<T | null>;\n    run<T = Record<string, unknown>>(): Promise<D1Result<T>>;\n    all<T = Record<string, unknown>>(): Promise<D1Result<T>>;\n    raw<T = unknown[]>(options: { columnNames: true }): Promise<[string[], ...T[]]>;\n    raw<T = unknown[]>(options?: { columnNames?: false }): Promise<T[]>;\n}\n// `Disposable` was added to TypeScript's standard lib types in version 5.2.\n// To support older TypeScript versions, define an empty `Disposable` interface.\n// Users won't be able to use `using`/`Symbol.dispose` without upgrading to 5.2,\n// but this will ensure type checking on older versions still passes.\n// TypeScript's interface merging will ensure our empty interface is effectively\n// ignored when `Disposable` is included in the standard lib.\ninterface Disposable {}\n/**\n * An email message that can be sent from a Worker.\n */\ninterface EmailMessage {\n    /**\n     * Envelope From attribute of the email message.\n     */\n    readonly from: string;\n    /**\n     * Envelope To attribute of the email message.\n     */\n    readonly to: string;\n}\n/**\n * An email message that is sent to a consumer 
Worker and can be rejected/forwarded.\n */\ninterface ForwardableEmailMessage extends EmailMessage {\n    /**\n     * Stream of the email message content.\n     */\n    readonly raw: ReadableStream<Uint8Array>;\n    /**\n     * An [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers).\n     */\n    readonly headers: Headers;\n    /**\n     * Size of the email message content.\n     */\n    readonly rawSize: number;\n    /**\n     * Reject this email message by returning a permanent SMTP error back to the connecting client including the given reason.\n     * @param reason The reject reason.\n     * @returns void\n     */\n    setReject(reason: string): void;\n    /**\n     * Forward this email message to a verified destination address of the account.\n     * @param rcptTo Verified destination address.\n     * @param headers A [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers).\n     * @returns A promise that resolves when the email message is forwarded.\n     */\n    forward(rcptTo: string, headers?: Headers): Promise<void>;\n    /**\n     * Reply to the sender of this email message with a new EmailMessage object.\n     * @param message The reply message.\n     * @returns A promise that resolves when the email message is replied.\n     */\n    reply(message: EmailMessage): Promise<void>;\n}\n/**\n * A binding that allows a Worker to send email messages.\n */\ninterface SendEmail {\n    send(message: EmailMessage): Promise<void>;\n}\ndeclare abstract class EmailEvent extends ExtendableEvent {\n    readonly message: ForwardableEmailMessage;\n}\ndeclare type EmailExportedHandler<Env = unknown> = (\n    message: ForwardableEmailMessage,\n    env: Env,\n    ctx: ExecutionContext,\n) => void | Promise<void>;\ndeclare module \"cloudflare:email\" {\n    let _EmailMessage: {\n        prototype: EmailMessage;\n        new (from: string, to: string, raw: ReadableStream | string): EmailMessage;\n    };\n    export { _EmailMessage as 
EmailMessage };\n}\n/**\n * Hello World binding to serve as an explanatory example. DO NOT USE\n */\ninterface HelloWorldBinding {\n    /**\n     * Retrieve the current stored value\n     */\n    get(): Promise<{\n        value: string;\n        ms?: number;\n    }>;\n    /**\n     * Set a new stored value\n     */\n    set(value: string): Promise<void>;\n}\ninterface Hyperdrive {\n    /**\n     * Connect directly to Hyperdrive as if it's your database, returning a TCP socket.\n     *\n     * Calling this method returns an idential socket to if you call\n     * `connect(\"host:port\")` using the `host` and `port` fields from this object.\n     * Pick whichever approach works better with your preferred DB client library.\n     *\n     * Note that this socket is not yet authenticated -- it's expected that your\n     * code (or preferably, the client library of your choice) will authenticate\n     * using the information in this class's readonly fields.\n     */\n    connect(): Socket;\n    /**\n     * A valid DB connection string that can be passed straight into the typical\n     * client library/driver/ORM. This will typically be the easiest way to use\n     * Hyperdrive.\n     */\n    readonly connectionString: string;\n    /*\n     * A randomly generated hostname that is only valid within the context of the\n     * currently running Worker which, when passed into `connect()` function from\n     * the \"cloudflare:sockets\" module, will connect to the Hyperdrive instance\n     * for your database.\n     */\n    readonly host: string;\n    /*\n     * The port that must be paired the the host field when connecting.\n     */\n    readonly port: number;\n    /*\n     * The username to use when authenticating to your database via Hyperdrive.\n     * Unlike the host and password, this will be the same every time\n     */\n    readonly user: string;\n    /*\n     * The randomly generated password to use when authenticating to your\n     * database via Hyperdrive. 
Like the host field, this password is only valid\n     * within the context of the currently running Worker instance from which\n     * it's read.\n     */\n    readonly password: string;\n    /*\n     * The name of the database to connect to.\n     */\n    readonly database: string;\n}\n// Copyright (c) 2024 Cloudflare, Inc.\n// Licensed under the Apache 2.0 license found in the LICENSE file or at:\n//     https://opensource.org/licenses/Apache-2.0\ntype ImageInfoResponse =\n    | {\n          format: \"image/svg+xml\";\n      }\n    | {\n          format: string;\n          fileSize: number;\n          width: number;\n          height: number;\n      };\ntype ImageTransform = {\n    width?: number;\n    height?: number;\n    background?: string;\n    blur?: number;\n    border?:\n        | {\n              color?: string;\n              width?: number;\n          }\n        | {\n              top?: number;\n              bottom?: number;\n              left?: number;\n              right?: number;\n          };\n    brightness?: number;\n    contrast?: number;\n    fit?: \"scale-down\" | \"contain\" | \"pad\" | \"squeeze\" | \"cover\" | \"crop\";\n    flip?: \"h\" | \"v\" | \"hv\";\n    gamma?: number;\n    segment?: \"foreground\";\n    gravity?:\n        | \"face\"\n        | \"left\"\n        | \"right\"\n        | \"top\"\n        | \"bottom\"\n        | \"center\"\n        | \"auto\"\n        | \"entropy\"\n        | {\n              x?: number;\n              y?: number;\n              mode: \"remainder\" | \"box-center\";\n          };\n    rotate?: 0 | 90 | 180 | 270;\n    saturation?: number;\n    sharpen?: number;\n    trim?:\n        | \"border\"\n        | {\n              top?: number;\n              bottom?: number;\n              left?: number;\n              right?: number;\n              width?: number;\n              height?: number;\n              border?:\n                  | boolean\n                  | {\n                        color?: 
string;\n                        tolerance?: number;\n                        keep?: number;\n                    };\n          };\n};\ntype ImageDrawOptions = {\n    opacity?: number;\n    repeat?: boolean | string;\n    top?: number;\n    left?: number;\n    bottom?: number;\n    right?: number;\n};\ntype ImageInputOptions = {\n    encoding?: \"base64\";\n};\ntype ImageOutputOptions = {\n    format: \"image/jpeg\" | \"image/png\" | \"image/gif\" | \"image/webp\" | \"image/avif\" | \"rgb\" | \"rgba\";\n    quality?: number;\n    background?: string;\n    anim?: boolean;\n};\ninterface ImagesBinding {\n    /**\n     * Get image metadata (type, width and height)\n     * @throws {@link ImagesError} with code 9412 if input is not an image\n     * @param stream The image bytes\n     */\n    info(stream: ReadableStream<Uint8Array>, options?: ImageInputOptions): Promise<ImageInfoResponse>;\n    /**\n     * Begin applying a series of transformations to an image\n     * @param stream The image bytes\n     * @returns A transform handle\n     */\n    input(stream: ReadableStream<Uint8Array>, options?: ImageInputOptions): ImageTransformer;\n}\ninterface ImageTransformer {\n    /**\n     * Apply transform next, returning a transform handle.\n     * You can then apply more transformations, draw, or retrieve the output.\n     * @param transform\n     */\n    transform(transform: ImageTransform): ImageTransformer;\n    /**\n     * Draw an image on this transformer, returning a transform handle.\n     * You can then apply more transformations, draw, or retrieve the output.\n     * @param image The image (or transformer that will give the image) to draw\n     * @param options The options configuring how to draw the image\n     */\n    draw(image: ReadableStream<Uint8Array> | ImageTransformer, options?: ImageDrawOptions): ImageTransformer;\n    /**\n     * Retrieve the image that results from applying the transforms to the\n     * provided input\n     * @param options Options that 
apply to the output e.g. output format\n     */\n    output(options: ImageOutputOptions): Promise<ImageTransformationResult>;\n}\ntype ImageTransformationOutputOptions = {\n    encoding?: \"base64\";\n};\ninterface ImageTransformationResult {\n    /**\n     * The image as a response, ready to store in cache or return to users\n     */\n    response(): Response;\n    /**\n     * The content type of the returned image\n     */\n    contentType(): string;\n    /**\n     * The bytes of the response\n     */\n    image(options?: ImageTransformationOutputOptions): ReadableStream<Uint8Array>;\n}\ninterface ImagesError extends Error {\n    readonly code: number;\n    readonly message: string;\n    readonly stack?: string;\n}\n/**\n * Media binding for transforming media streams.\n * Provides the entry point for media transformation operations.\n */\ninterface MediaBinding {\n    /**\n     * Creates a media transformer from an input stream.\n     * @param media - The input media bytes\n     * @returns A MediaTransformer instance for applying transformations\n     */\n    input(media: ReadableStream<Uint8Array>): MediaTransformer;\n}\n/**\n * Media transformer for applying transformation operations to media content.\n * Handles sizing, fitting, and other input transformation parameters.\n */\ninterface MediaTransformer {\n    /**\n     * Applies transformation options to the media content.\n     * @param transform - Configuration for how the media should be transformed\n     * @returns A generator for producing the transformed media output\n     */\n    transform(transform: MediaTransformationInputOptions): MediaTransformationGenerator;\n}\n/**\n * Generator for producing media transformation results.\n * Configures the output format and parameters for the transformed media.\n */\ninterface MediaTransformationGenerator {\n    /**\n     * Generates the final media output with specified options.\n     * @param output - Configuration for the output format and parameters\n     * 
@returns The final transformation result containing the transformed media\n     */\n    output(output: MediaTransformationOutputOptions): MediaTransformationResult;\n}\n/**\n * Result of a media transformation operation.\n * Provides multiple ways to access the transformed media content.\n */\ninterface MediaTransformationResult {\n    /**\n     * Returns the transformed media as a readable stream of bytes.\n     * @returns A stream containing the transformed media data\n     */\n    media(): ReadableStream<Uint8Array>;\n    /**\n     * Returns the transformed media as an HTTP response object.\n     * @returns The transformed media as a Response, ready to store in cache or return to users\n     */\n    response(): Response;\n    /**\n     * Returns the MIME type of the transformed media.\n     * @returns The content type string (e.g., 'image/jpeg', 'video/mp4')\n     */\n    contentType(): string;\n}\n/**\n * Configuration options for transforming media input.\n * Controls how the media should be resized and fitted.\n */\ntype MediaTransformationInputOptions = {\n    /** How the media should be resized to fit the specified dimensions */\n    fit?: \"contain\" | \"cover\" | \"scale-down\";\n    /** Target width in pixels */\n    width?: number;\n    /** Target height in pixels */\n    height?: number;\n};\n/**\n * Configuration options for Media Transformations output.\n * Controls the format, timing, and type of the generated output.\n */\ntype MediaTransformationOutputOptions = {\n    /**\n     * Output mode determining the type of media to generate\n     */\n    mode?: \"video\" | \"spritesheet\" | \"frame\" | \"audio\";\n    /** Whether to include audio in the output */\n    audio?: boolean;\n    /**\n     * Starting timestamp for frame extraction or start time for clips. (e.g. '2s').\n     */\n    time?: string;\n    /**\n     * Duration for video clips, audio extraction, and spritesheet generation (e.g. 
'5s').\n     */\n    duration?: string;\n    /**\n     * Number of frames in the spritesheet.\n     */\n    imageCount?: number;\n    /**\n     * Output format for the generated media.\n     */\n    format?: \"jpg\" | \"png\" | \"m4a\";\n};\n/**\n * Error object for media transformation operations.\n * Extends the standard Error interface with additional media-specific information.\n */\ninterface MediaError extends Error {\n    readonly code: number;\n    readonly message: string;\n    readonly stack?: string;\n}\ndeclare module \"cloudflare:node\" {\n    interface NodeStyleServer {\n        listen(...args: unknown[]): this;\n        address(): {\n            port?: number | null | undefined;\n        };\n    }\n    export function httpServerHandler(port: number): ExportedHandler;\n    export function httpServerHandler(options: { port: number }): ExportedHandler;\n    export function httpServerHandler(server: NodeStyleServer): ExportedHandler;\n}\ntype Params<P extends string = any> = Record<P, string | string[]>;\ntype EventContext<Env, P extends string, Data> = {\n    request: Request<unknown, IncomingRequestCfProperties<unknown>>;\n    functionPath: string;\n    waitUntil: (promise: Promise<any>) => void;\n    passThroughOnException: () => void;\n    next: (input?: Request | string, init?: RequestInit) => Promise<Response>;\n    env: Env & {\n        ASSETS: {\n            fetch: typeof fetch;\n        };\n    };\n    params: Params<P>;\n    data: Data;\n};\ntype PagesFunction<\n    Env = unknown,\n    Params extends string = any,\n    Data extends Record<string, unknown> = Record<string, unknown>,\n> = (context: EventContext<Env, Params, Data>) => Response | Promise<Response>;\ntype EventPluginContext<Env, P extends string, Data, PluginArgs> = {\n    request: Request<unknown, IncomingRequestCfProperties<unknown>>;\n    functionPath: string;\n    waitUntil: (promise: Promise<any>) => void;\n    passThroughOnException: () => void;\n    next: (input?: Request | 
string, init?: RequestInit) => Promise<Response>;\n    env: Env & {\n        ASSETS: {\n            fetch: typeof fetch;\n        };\n    };\n    params: Params<P>;\n    data: Data;\n    pluginArgs: PluginArgs;\n};\ntype PagesPluginFunction<\n    Env = unknown,\n    Params extends string = any,\n    Data extends Record<string, unknown> = Record<string, unknown>,\n    PluginArgs = unknown,\n> = (context: EventPluginContext<Env, Params, Data, PluginArgs>) => Response | Promise<Response>;\ndeclare module \"assets:*\" {\n    export const onRequest: PagesFunction;\n}\n// Copyright (c) 2022-2023 Cloudflare, Inc.\n// Licensed under the Apache 2.0 license found in the LICENSE file or at:\n//     https://opensource.org/licenses/Apache-2.0\ndeclare module \"cloudflare:pipelines\" {\n    export abstract class PipelineTransformationEntrypoint<\n        Env = unknown,\n        I extends PipelineRecord = PipelineRecord,\n        O extends PipelineRecord = PipelineRecord,\n    > {\n        protected env: Env;\n        protected ctx: ExecutionContext;\n        constructor(ctx: ExecutionContext, env: Env);\n        /**\n         * run recieves an array of PipelineRecord which can be\n         * transformed and returned to the pipeline\n         * @param records Incoming records from the pipeline to be transformed\n         * @param metadata Information about the specific pipeline calling the transformation entrypoint\n         * @returns A promise containing the transformed PipelineRecord array\n         */\n        public run(records: I[], metadata: PipelineBatchMetadata): Promise<O[]>;\n    }\n    export type PipelineRecord = Record<string, unknown>;\n    export type PipelineBatchMetadata = {\n        pipelineId: string;\n        pipelineName: string;\n    };\n    export interface Pipeline<T extends PipelineRecord = PipelineRecord> {\n        /**\n         * The Pipeline interface represents the type of a binding to a Pipeline\n         *\n         * @param records The records to 
send to the pipeline\n         */\n        send(records: T[]): Promise<void>;\n    }\n}\n// PubSubMessage represents an incoming PubSub message.\n// The message includes metadata about the broker, the client, and the payload\n// itself.\n// https://developers.cloudflare.com/pub-sub/\ninterface PubSubMessage {\n    // Message ID\n    readonly mid: number;\n    // MQTT broker FQDN in the form mqtts://BROKER.NAMESPACE.cloudflarepubsub.com:PORT\n    readonly broker: string;\n    // The MQTT topic the message was sent on.\n    readonly topic: string;\n    // The client ID of the client that published this message.\n    readonly clientId: string;\n    // The unique identifier (JWT ID) used by the client to authenticate, if token\n    // auth was used.\n    readonly jti?: string;\n    // A Unix timestamp (seconds from Jan 1, 1970), set when the Pub/Sub Broker\n    // received the message from the client.\n    readonly receivedAt: number;\n    // An (optional) string with the MIME type of the payload, if set by the\n    // client.\n    readonly contentType: string;\n    // Set to 1 when the payload is a UTF-8 string\n    // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901063\n    readonly payloadFormatIndicator: number;\n    // Pub/Sub (MQTT) payloads can be UTF-8 strings, or byte arrays.\n    // You can use payloadFormatIndicator to inspect this before decoding.\n    payload: string | Uint8Array;\n}\n// JsonWebKey extended by kid parameter\ninterface JsonWebKeyWithKid extends JsonWebKey {\n    // Key Identifier of the JWK\n    readonly kid: string;\n}\ninterface RateLimitOptions {\n    key: string;\n}\ninterface RateLimitOutcome {\n    success: boolean;\n}\ninterface RateLimit {\n    /**\n     * Rate limit a request based on the provided options.\n     * @see https://developers.cloudflare.com/workers/runtime-apis/bindings/rate-limit/\n     * @returns A promise that resolves with the outcome of the rate limit.\n     */\n    limit(options: 
RateLimitOptions): Promise<RateLimitOutcome>;\n}\n// Namespace for RPC utility types. Unfortunately, we can't use a `module` here as these types need\n// to referenced by `Fetcher`. This is included in the \"importable\" version of the types which\n// strips all `module` blocks.\ndeclare namespace Rpc {\n    // Branded types for identifying `WorkerEntrypoint`/`DurableObject`/`Target`s.\n    // TypeScript uses *structural* typing meaning anything with the same shape as type `T` is a `T`.\n    // For the classes exported by `cloudflare:workers` we want *nominal* typing (i.e. we only want to\n    // accept `WorkerEntrypoint` from `cloudflare:workers`, not any other class with the same shape)\n    export const __RPC_STUB_BRAND: \"__RPC_STUB_BRAND\";\n    export const __RPC_TARGET_BRAND: \"__RPC_TARGET_BRAND\";\n    export const __WORKER_ENTRYPOINT_BRAND: \"__WORKER_ENTRYPOINT_BRAND\";\n    export const __DURABLE_OBJECT_BRAND: \"__DURABLE_OBJECT_BRAND\";\n    export const __WORKFLOW_ENTRYPOINT_BRAND: \"__WORKFLOW_ENTRYPOINT_BRAND\";\n    export interface RpcTargetBranded {\n        [__RPC_TARGET_BRAND]: never;\n    }\n    export interface WorkerEntrypointBranded {\n        [__WORKER_ENTRYPOINT_BRAND]: never;\n    }\n    export interface DurableObjectBranded {\n        [__DURABLE_OBJECT_BRAND]: never;\n    }\n    export interface WorkflowEntrypointBranded {\n        [__WORKFLOW_ENTRYPOINT_BRAND]: never;\n    }\n    export type EntrypointBranded = WorkerEntrypointBranded | DurableObjectBranded | WorkflowEntrypointBranded;\n    // Types that can be used through `Stub`s\n    export type Stubable = RpcTargetBranded | ((...args: any[]) => any);\n    // Types that can be passed over RPC\n    // The reason for using a generic type here is to build a serializable subset of structured\n    //   cloneable composite types. This allows types defined with the \"interface\" keyword to pass the\n    //   serializable check as well. 
Otherwise, only types defined with the \"type\" keyword would pass.\n    type Serializable<T> =\n        // Structured cloneables\n        | BaseType\n        // Structured cloneable composites\n        | Map<\n              T extends Map<infer U, unknown> ? Serializable<U> : never,\n              T extends Map<unknown, infer U> ? Serializable<U> : never\n          >\n        | Set<T extends Set<infer U> ? Serializable<U> : never>\n        | ReadonlyArray<T extends ReadonlyArray<infer U> ? Serializable<U> : never>\n        | {\n              [K in keyof T]: K extends number | string ? Serializable<T[K]> : never;\n          }\n        // Special types\n        | Stub<Stubable>\n        // Serialized as stubs, see `Stubify`\n        | Stubable;\n    // Base type for all RPC stubs, including common memory management methods.\n    // `T` is used as a marker type for unwrapping `Stub`s later.\n    interface StubBase<T extends Stubable> extends Disposable {\n        [__RPC_STUB_BRAND]: T;\n        dup(): this;\n    }\n    export type Stub<T extends Stubable> = Provider<T> & StubBase<T>;\n    // This represents all the types that can be sent as-is over an RPC boundary\n    type BaseType =\n        | void\n        | undefined\n        | null\n        | boolean\n        | number\n        | bigint\n        | string\n        | TypedArray\n        | ArrayBuffer\n        | DataView\n        | Date\n        | Error\n        | RegExp\n        | ReadableStream<Uint8Array>\n        | WritableStream<Uint8Array>\n        | Request\n        | Response\n        | Headers;\n    // Recursively rewrite all `Stubable` types with `Stub`s\n    // prettier-ignore\n    type Stubify<T> = T extends Stubable ? Stub<T> : T extends Map<infer K, infer V> ? Map<Stubify<K>, Stubify<V>> : T extends Set<infer V> ? Set<Stubify<V>> : T extends Array<infer V> ? Array<Stubify<V>> : T extends ReadonlyArray<infer V> ? ReadonlyArray<Stubify<V>> : T extends BaseType ? 
T : T extends {\n        [key: string | number]: any;\n    } ? {\n        [K in keyof T]: Stubify<T[K]>;\n    } : T;\n    // Recursively rewrite all `Stub<T>`s with the corresponding `T`s.\n    // Note we use `StubBase` instead of `Stub` here to avoid circular dependencies:\n    // `Stub` depends on `Provider`, which depends on `Unstubify`, which would depend on `Stub`.\n    // prettier-ignore\n    type Unstubify<T> = T extends StubBase<infer V> ? V : T extends Map<infer K, infer V> ? Map<Unstubify<K>, Unstubify<V>> : T extends Set<infer V> ? Set<Unstubify<V>> : T extends Array<infer V> ? Array<Unstubify<V>> : T extends ReadonlyArray<infer V> ? ReadonlyArray<Unstubify<V>> : T extends BaseType ? T : T extends {\n        [key: string | number]: unknown;\n    } ? {\n        [K in keyof T]: Unstubify<T[K]>;\n    } : T;\n    type UnstubifyAll<A extends any[]> = {\n        [I in keyof A]: Unstubify<A[I]>;\n    };\n    // Utility type for adding `Provider`/`Disposable`s to `object` types only.\n    // Note `unknown & T` is equivalent to `T`.\n    type MaybeProvider<T> = T extends object ? Provider<T> : unknown;\n    type MaybeDisposable<T> = T extends object ? Disposable : unknown;\n    // Type for method return or property on an RPC interface.\n    // - Stubable types are replaced by stubs.\n    // - Serializable types are passed by value, with stubable types replaced by stubs\n    //   and a top-level `Disposer`.\n    // Everything else can't be passed over PRC.\n    // Technically, we use custom thenables here, but they quack like `Promise`s.\n    // Intersecting with `(Maybe)Provider` allows pipelining.\n    // prettier-ignore\n    type Result<R> = R extends Stubable ? Promise<Stub<R>> & Provider<R> : R extends Serializable<R> ? 
Promise<Stubify<R> & MaybeDisposable<R>> & MaybeProvider<R> : never;\n    // Type for method or property on an RPC interface.\n    // For methods, unwrap `Stub`s in parameters, and rewrite returns to be `Result`s.\n    // Unwrapping `Stub`s allows calling with `Stubable` arguments.\n    // For properties, rewrite types to be `Result`s.\n    // In each case, unwrap `Promise`s.\n    type MethodOrProperty<V> = V extends (...args: infer P) => infer R\n        ? (...args: UnstubifyAll<P>) => Result<Awaited<R>>\n        : Result<Awaited<V>>;\n    // Type for the callable part of an `Provider` if `T` is callable.\n    // This is intersected with methods/properties.\n    type MaybeCallableProvider<T> = T extends (...args: any[]) => any ? MethodOrProperty<T> : unknown;\n    // Base type for all other types providing RPC-like interfaces.\n    // Rewrites all methods/properties to be `MethodOrProperty`s, while preserving callable types.\n    // `Reserved` names (e.g. stub method names like `dup()`) and symbols can't be accessed over RPC.\n    export type Provider<T extends object, Reserved extends string = never> = MaybeCallableProvider<T> &\n        Pick<\n            {\n                [K in keyof T]: MethodOrProperty<T[K]>;\n            },\n            Exclude<keyof T, Reserved | symbol | keyof StubBase<never>>\n        >;\n}\ndeclare namespace Cloudflare {\n    // Type of `env`.\n    //\n    // The specific project can extend `Env` by redeclaring it in project-specific files. 
Typescript\n    // will merge all declarations.\n    //\n    // You can use `wrangler types` to generate the `Env` type automatically.\n    interface Env {}\n    // Project-specific parameters used to inform types.\n    //\n    // This interface is, again, intended to be declared in project-specific files, and then that\n    // declaration will be merged with this one.\n    //\n    // A project should have a declaration like this:\n    //\n    //     interface GlobalProps {\n    //       // Declares the main module's exports. Used to populate Cloudflare.Exports aka the type\n    //       // of `ctx.exports`.\n    //       mainModule: typeof import(\"my-main-module\");\n    //\n    //       // Declares which of the main module's exports are configured with durable storage, and\n    //       // thus should behave as Durable Object namsepace bindings.\n    //       durableNamespaces: \"MyDurableObject\" | \"AnotherDurableObject\";\n    //     }\n    //\n    // You can use `wrangler types` to generate `GlobalProps` automatically.\n    interface GlobalProps {}\n    // Evaluates to the type of a property in GlobalProps, defaulting to `Default` if it is not\n    // present.\n    type GlobalProp<K extends string, Default> = K extends keyof GlobalProps ? GlobalProps[K] : Default;\n    // The type of the program's main module exports, if known. Requires `GlobalProps` to declare the\n    // `mainModule` property.\n    type MainModule = GlobalProp<\"mainModule\", {}>;\n    // The type of ctx.exports, which contains loopback bindings for all top-level exports.\n    type Exports = {\n        [K in keyof MainModule]: LoopbackForExport<MainModule[K]> &\n            // If the export is listed in `durableNamespaces`, then it is also a\n            // DurableObjectNamespace.\n            (K extends GlobalProp<\"durableNamespaces\", never>\n                ? MainModule[K] extends new (...args: any[]) => infer DoInstance\n                    ? 
DoInstance extends Rpc.DurableObjectBranded\n                        ? DurableObjectNamespace<DoInstance>\n                        : DurableObjectNamespace<undefined>\n                    : DurableObjectNamespace<undefined>\n                : {});\n    };\n}\ndeclare namespace CloudflareWorkersModule {\n    export type RpcStub<T extends Rpc.Stubable> = Rpc.Stub<T>;\n    export const RpcStub: {\n        new <T extends Rpc.Stubable>(value: T): Rpc.Stub<T>;\n    };\n    export abstract class RpcTarget implements Rpc.RpcTargetBranded {\n        [Rpc.__RPC_TARGET_BRAND]: never;\n    }\n    // `protected` fields don't appear in `keyof`s, so can't be accessed over RPC\n    export abstract class WorkerEntrypoint<Env = Cloudflare.Env, Props = {}> implements Rpc.WorkerEntrypointBranded {\n        [Rpc.__WORKER_ENTRYPOINT_BRAND]: never;\n        protected ctx: ExecutionContext<Props>;\n        protected env: Env;\n        constructor(ctx: ExecutionContext, env: Env);\n        email?(message: ForwardableEmailMessage): void | Promise<void>;\n        fetch?(request: Request): Response | Promise<Response>;\n        queue?(batch: MessageBatch<unknown>): void | Promise<void>;\n        scheduled?(controller: ScheduledController): void | Promise<void>;\n        tail?(events: TraceItem[]): void | Promise<void>;\n        tailStream?(\n            event: TailStream.TailEvent<TailStream.Onset>,\n        ): TailStream.TailEventHandlerType | Promise<TailStream.TailEventHandlerType>;\n        test?(controller: TestController): void | Promise<void>;\n        trace?(traces: TraceItem[]): void | Promise<void>;\n    }\n    export abstract class DurableObject<Env = Cloudflare.Env, Props = {}> implements Rpc.DurableObjectBranded {\n        [Rpc.__DURABLE_OBJECT_BRAND]: never;\n        protected ctx: DurableObjectState<Props>;\n        protected env: Env;\n        constructor(ctx: DurableObjectState, env: Env);\n        alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise<void>;\n        
fetch?(request: Request): Response | Promise<Response>;\n        webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise<void>;\n        webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise<void>;\n        webSocketError?(ws: WebSocket, error: unknown): void | Promise<void>;\n    }\n    export type WorkflowDurationLabel = \"second\" | \"minute\" | \"hour\" | \"day\" | \"week\" | \"month\" | \"year\";\n    export type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${\"s\" | \"\"}` | number;\n    export type WorkflowDelayDuration = WorkflowSleepDuration;\n    export type WorkflowTimeoutDuration = WorkflowSleepDuration;\n    export type WorkflowRetentionDuration = WorkflowSleepDuration;\n    export type WorkflowBackoff = \"constant\" | \"linear\" | \"exponential\";\n    export type WorkflowStepConfig = {\n        retries?: {\n            limit: number;\n            delay: WorkflowDelayDuration | number;\n            backoff?: WorkflowBackoff;\n        };\n        timeout?: WorkflowTimeoutDuration | number;\n    };\n    export type WorkflowEvent<T> = {\n        payload: Readonly<T>;\n        timestamp: Date;\n        instanceId: string;\n    };\n    export type WorkflowStepEvent<T> = {\n        payload: Readonly<T>;\n        timestamp: Date;\n        type: string;\n    };\n    export abstract class WorkflowStep {\n        do<T extends Rpc.Serializable<T>>(name: string, callback: () => Promise<T>): Promise<T>;\n        do<T extends Rpc.Serializable<T>>(\n            name: string,\n            config: WorkflowStepConfig,\n            callback: () => Promise<T>,\n        ): Promise<T>;\n        sleep: (name: string, duration: WorkflowSleepDuration) => Promise<void>;\n        sleepUntil: (name: string, timestamp: Date | number) => Promise<void>;\n        waitForEvent<T extends Rpc.Serializable<T>>(\n            name: string,\n            options: {\n                type: string;\n                
timeout?: WorkflowTimeoutDuration | number;\n            },\n        ): Promise<WorkflowStepEvent<T>>;\n    }\n    export abstract class WorkflowEntrypoint<Env = unknown, T extends Rpc.Serializable<T> | unknown = unknown>\n        implements Rpc.WorkflowEntrypointBranded\n    {\n        [Rpc.__WORKFLOW_ENTRYPOINT_BRAND]: never;\n        protected ctx: ExecutionContext;\n        protected env: Env;\n        constructor(ctx: ExecutionContext, env: Env);\n        run(event: Readonly<WorkflowEvent<T>>, step: WorkflowStep): Promise<unknown>;\n    }\n    export function waitUntil(promise: Promise<unknown>): void;\n    export function withEnv(newEnv: unknown, fn: () => unknown): unknown;\n    export function withExports(newExports: unknown, fn: () => unknown): unknown;\n    export function withEnvAndExports(newEnv: unknown, newExports: unknown, fn: () => unknown): unknown;\n    export const env: Cloudflare.Env;\n    export const exports: Cloudflare.Exports;\n}\ndeclare module \"cloudflare:workers\" {\n    export = CloudflareWorkersModule;\n}\ninterface SecretsStoreSecret {\n    /**\n     * Get a secret from the Secrets Store, returning a string of the secret value\n     * if it exists, or throws an error if it does not exist\n     */\n    get(): Promise<string>;\n}\ndeclare module \"cloudflare:sockets\" {\n    function _connect(address: string | SocketAddress, options?: SocketOptions): Socket;\n    export { _connect as connect };\n}\ntype MarkdownDocument = {\n    name: string;\n    blob: Blob;\n};\ntype ConversionResponse =\n    | {\n          name: string;\n          mimeType: string;\n          format: \"markdown\";\n          tokens: number;\n          data: string;\n      }\n    | {\n          name: string;\n          mimeType: string;\n          format: \"error\";\n          error: string;\n      };\ntype ImageConversionOptions = {\n    descriptionLanguage?: \"en\" | \"es\" | \"fr\" | \"it\" | \"pt\" | \"de\";\n};\ntype EmbeddedImageConversionOptions = 
ImageConversionOptions & {\n    convert?: boolean;\n    maxConvertedImages?: number;\n};\ntype ConversionOptions = {\n    html?: {\n        images?: EmbeddedImageConversionOptions & {\n            convertOGImage?: boolean;\n        };\n    };\n    docx?: {\n        images?: EmbeddedImageConversionOptions;\n    };\n    image?: ImageConversionOptions;\n    pdf?: {\n        images?: EmbeddedImageConversionOptions;\n        metadata?: boolean;\n    };\n};\ntype ConversionRequestOptions = {\n    gateway?: GatewayOptions;\n    extraHeaders?: object;\n    conversionOptions?: ConversionOptions;\n};\ntype SupportedFileFormat = {\n    mimeType: string;\n    extension: string;\n};\ndeclare abstract class ToMarkdownService {\n    transform(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise<ConversionResponse[]>;\n    transform(files: MarkdownDocument, options?: ConversionRequestOptions): Promise<ConversionResponse>;\n    supported(): Promise<SupportedFileFormat[]>;\n}\ndeclare namespace TailStream {\n    interface Header {\n        readonly name: string;\n        readonly value: string;\n    }\n    interface FetchEventInfo {\n        readonly type: \"fetch\";\n        readonly method: string;\n        readonly url: string;\n        readonly cfJson?: object;\n        readonly headers: Header[];\n    }\n    interface JsRpcEventInfo {\n        readonly type: \"jsrpc\";\n    }\n    interface ScheduledEventInfo {\n        readonly type: \"scheduled\";\n        readonly scheduledTime: Date;\n        readonly cron: string;\n    }\n    interface AlarmEventInfo {\n        readonly type: \"alarm\";\n        readonly scheduledTime: Date;\n    }\n    interface QueueEventInfo {\n        readonly type: \"queue\";\n        readonly queueName: string;\n        readonly batchSize: number;\n    }\n    interface EmailEventInfo {\n        readonly type: \"email\";\n        readonly mailFrom: string;\n        readonly rcptTo: string;\n        readonly rawSize: number;\n    }\n 
   interface TraceEventInfo {\n        readonly type: \"trace\";\n        readonly traces: (string | null)[];\n    }\n    interface HibernatableWebSocketEventInfoMessage {\n        readonly type: \"message\";\n    }\n    interface HibernatableWebSocketEventInfoError {\n        readonly type: \"error\";\n    }\n    interface HibernatableWebSocketEventInfoClose {\n        readonly type: \"close\";\n        readonly code: number;\n        readonly wasClean: boolean;\n    }\n    interface HibernatableWebSocketEventInfo {\n        readonly type: \"hibernatableWebSocket\";\n        readonly info:\n            | HibernatableWebSocketEventInfoClose\n            | HibernatableWebSocketEventInfoError\n            | HibernatableWebSocketEventInfoMessage;\n    }\n    interface CustomEventInfo {\n        readonly type: \"custom\";\n    }\n    interface FetchResponseInfo {\n        readonly type: \"fetch\";\n        readonly statusCode: number;\n    }\n    type EventOutcome =\n        | \"ok\"\n        | \"canceled\"\n        | \"exception\"\n        | \"unknown\"\n        | \"killSwitch\"\n        | \"daemonDown\"\n        | \"exceededCpu\"\n        | \"exceededMemory\"\n        | \"loadShed\"\n        | \"responseStreamDisconnected\"\n        | \"scriptNotFound\";\n    interface ScriptVersion {\n        readonly id: string;\n        readonly tag?: string;\n        readonly message?: string;\n    }\n    interface Onset {\n        readonly type: \"onset\";\n        readonly attributes: Attribute[];\n        // id for the span being opened by this Onset event.\n        readonly spanId: string;\n        readonly dispatchNamespace?: string;\n        readonly entrypoint?: string;\n        readonly executionModel: string;\n        readonly scriptName?: string;\n        readonly scriptTags?: string[];\n        readonly scriptVersion?: ScriptVersion;\n        readonly info:\n            | FetchEventInfo\n            | JsRpcEventInfo\n            | ScheduledEventInfo\n            | 
AlarmEventInfo\n            | QueueEventInfo\n            | EmailEventInfo\n            | TraceEventInfo\n            | HibernatableWebSocketEventInfo\n            | CustomEventInfo;\n    }\n    interface Outcome {\n        readonly type: \"outcome\";\n        readonly outcome: EventOutcome;\n        readonly cpuTime: number;\n        readonly wallTime: number;\n    }\n    interface SpanOpen {\n        readonly type: \"spanOpen\";\n        readonly name: string;\n        // id for the span being opened by this SpanOpen event.\n        readonly spanId: string;\n        readonly info?: FetchEventInfo | JsRpcEventInfo | Attributes;\n    }\n    interface SpanClose {\n        readonly type: \"spanClose\";\n        readonly outcome: EventOutcome;\n    }\n    interface DiagnosticChannelEvent {\n        readonly type: \"diagnosticChannel\";\n        readonly channel: string;\n        readonly message: any;\n    }\n    interface Exception {\n        readonly type: \"exception\";\n        readonly name: string;\n        readonly message: string;\n        readonly stack?: string;\n    }\n    interface Log {\n        readonly type: \"log\";\n        readonly level: \"debug\" | \"error\" | \"info\" | \"log\" | \"warn\";\n        readonly message: object;\n    }\n    // This marks the worker handler return information.\n    // This is separate from Outcome because the worker invocation can live for a long time after\n    // returning. 
For example - Websockets that return an http upgrade response but then continue\n    // streaming information or SSE http connections.\n    interface Return {\n        readonly type: \"return\";\n        readonly info?: FetchResponseInfo;\n    }\n    interface Attribute {\n        readonly name: string;\n        readonly value: string | string[] | boolean | boolean[] | number | number[] | bigint | bigint[];\n    }\n    interface Attributes {\n        readonly type: \"attributes\";\n        readonly info: Attribute[];\n    }\n    type EventType =\n        | Onset\n        | Outcome\n        | SpanOpen\n        | SpanClose\n        | DiagnosticChannelEvent\n        | Exception\n        | Log\n        | Return\n        | Attributes;\n    // Context in which this trace event lives.\n    interface SpanContext {\n        // Single id for the entire top-level invocation\n        // This should be a new traceId for the first worker stage invoked in the eyeball request and then\n        // same-account service-bindings should reuse the same traceId but cross-account service-bindings\n        // should use a new traceId.\n        readonly traceId: string;\n        // spanId in which this event is handled\n        // for Onset and SpanOpen events this would be the parent span id\n        // for Outcome and SpanClose these this would be the span id of the opening Onset and SpanOpen events\n        // For Hibernate and Mark this would be the span under which they were emitted.\n        // spanId is not set ONLY if:\n        //  1. This is an Onset event\n        //  2. We are not inherting any SpanContext. (e.g. 
this is a cross-account service binding or a new top-level invocation)\n        readonly spanId?: string;\n    }\n    interface TailEvent<Event extends EventType> {\n        // invocation id of the currently invoked worker stage.\n        // invocation id will always be unique to every Onset event and will be the same until the Outcome event.\n        readonly invocationId: string;\n        // Inherited spanContext for this event.\n        readonly spanContext: SpanContext;\n        readonly timestamp: Date;\n        readonly sequence: number;\n        readonly event: Event;\n    }\n    type TailEventHandler<Event extends EventType = EventType> = (event: TailEvent<Event>) => void | Promise<void>;\n    type TailEventHandlerObject = {\n        outcome?: TailEventHandler<Outcome>;\n        spanOpen?: TailEventHandler<SpanOpen>;\n        spanClose?: TailEventHandler<SpanClose>;\n        diagnosticChannel?: TailEventHandler<DiagnosticChannelEvent>;\n        exception?: TailEventHandler<Exception>;\n        log?: TailEventHandler<Log>;\n        return?: TailEventHandler<Return>;\n        attributes?: TailEventHandler<Attributes>;\n    };\n    type TailEventHandlerType = TailEventHandler | TailEventHandlerObject;\n}\n// Copyright (c) 2022-2023 Cloudflare, Inc.\n// Licensed under the Apache 2.0 license found in the LICENSE file or at:\n//     https://opensource.org/licenses/Apache-2.0\n/**\n * Data types supported for holding vector metadata.\n */\ntype VectorizeVectorMetadataValue = string | number | boolean | string[];\n/**\n * Additional information to associate with a vector.\n */\ntype VectorizeVectorMetadata = VectorizeVectorMetadataValue | Record<string, VectorizeVectorMetadataValue>;\ntype VectorFloatArray = Float32Array | Float64Array;\ninterface VectorizeError {\n    code?: number;\n    error: string;\n}\n/**\n * Comparison logic/operation to use for metadata filtering.\n *\n * This list is expected to grow as support for more operations are released.\n */\ntype 
VectorizeVectorMetadataFilterOp = \"$eq\" | \"$ne\" | \"$lt\" | \"$lte\" | \"$gt\" | \"$gte\";\ntype VectorizeVectorMetadataFilterCollectionOp = \"$in\" | \"$nin\";\n/**\n * Filter criteria for vector metadata used to limit the retrieved query result set.\n */\ntype VectorizeVectorMetadataFilter = {\n    [field: string]:\n        | Exclude<VectorizeVectorMetadataValue, string[]>\n        | null\n        | {\n              [Op in VectorizeVectorMetadataFilterOp]?: Exclude<VectorizeVectorMetadataValue, string[]> | null;\n          }\n        | {\n              [Op in VectorizeVectorMetadataFilterCollectionOp]?: Exclude<VectorizeVectorMetadataValue, string[]>[];\n          };\n};\n/**\n * Supported distance metrics for an index.\n * Distance metrics determine how other \"similar\" vectors are determined.\n */\ntype VectorizeDistanceMetric = \"euclidean\" | \"cosine\" | \"dot-product\";\n/**\n * Metadata return levels for a Vectorize query.\n *\n * Default to \"none\".\n *\n * @property all      Full metadata for the vector return set, including all fields (including those un-indexed) without truncation. This is a more expensive retrieval, as it requires additional fetching & reading of un-indexed data.\n * @property indexed  Return all metadata fields configured for indexing in the vector return set. This level of retrieval is \"free\" in that no additional overhead is incurred returning this data. 
However, note that indexed metadata is subject to truncation (especially for larger strings).\n * @property none     No indexed metadata will be returned.\n */\ntype VectorizeMetadataRetrievalLevel = \"all\" | \"indexed\" | \"none\";\ninterface VectorizeQueryOptions {\n    topK?: number;\n    namespace?: string;\n    returnValues?: boolean;\n    returnMetadata?: boolean | VectorizeMetadataRetrievalLevel;\n    filter?: VectorizeVectorMetadataFilter;\n}\n/**\n * Information about the configuration of an index.\n */\ntype VectorizeIndexConfig =\n    | {\n          dimensions: number;\n          metric: VectorizeDistanceMetric;\n      }\n    | {\n          preset: string; // keep this generic, as we'll be adding more presets in the future and this is only in a read capacity\n      };\n/**\n * Metadata about an existing index.\n *\n * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released.\n * See {@link VectorizeIndexInfo} for its post-beta equivalent.\n */\ninterface VectorizeIndexDetails {\n    /** The unique ID of the index */\n    readonly id: string;\n    /** The name of the index. */\n    name: string;\n    /** (optional) A human readable description for the index. */\n    description?: string;\n    /** The index configuration, including the dimension size and distance metric. */\n    config: VectorizeIndexConfig;\n    /** The number of records containing vectors within the index. */\n    vectorsCount: number;\n}\n/**\n * Metadata about an existing index.\n */\ninterface VectorizeIndexInfo {\n    /** The number of records containing vectors within the index. */\n    vectorCount: number;\n    /** Number of dimensions the index has been configured for. */\n    dimensions: number;\n    /** ISO 8601 datetime of the last processed mutation on in the index. All changes before this mutation will be reflected in the index state. 
*/\n    processedUpToDatetime: number;\n    /** UUIDv4 of the last mutation processed by the index. All changes before this mutation will be reflected in the index state. */\n    processedUpToMutation: number;\n}\n/**\n * Represents a single vector value set along with its associated metadata.\n */\ninterface VectorizeVector {\n    /** The ID for the vector. This can be user-defined, and must be unique. It should uniquely identify the object, and is best set based on the ID of what the vector represents. */\n    id: string;\n    /** The vector values */\n    values: VectorFloatArray | number[];\n    /** The namespace this vector belongs to. */\n    namespace?: string;\n    /** Metadata associated with the vector. Includes the values of other fields and potentially additional details. */\n    metadata?: Record<string, VectorizeVectorMetadata>;\n}\n/**\n * Represents a matched vector for a query along with its score and (if specified) the matching vector information.\n */\ntype VectorizeMatch = Pick<Partial<VectorizeVector>, \"values\"> &\n    Omit<VectorizeVector, \"values\"> & {\n        /** The score or rank for similarity, when returned as a result */\n        score: number;\n    };\n/**\n * A set of matching {@link VectorizeMatch} for a particular query.\n */\ninterface VectorizeMatches {\n    matches: VectorizeMatch[];\n    count: number;\n}\n/**\n * Results of an operation that performed a mutation on a set of vectors.\n * Here, `ids` is a list of vectors that were successfully processed.\n *\n * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released.\n * See {@link VectorizeAsyncMutation} for its post-beta equivalent.\n */\ninterface VectorizeVectorMutation {\n    /* List of ids of vectors that were successfully processed. */\n    ids: string[];\n    /* Total count of the number of processed vectors. 
*/\n    count: number;\n}\n/**\n * Result type indicating a mutation on the Vectorize Index.\n * Actual mutations are processed async where the `mutationId` is the unique identifier for the operation.\n */\ninterface VectorizeAsyncMutation {\n    /** The unique identifier for the async mutation operation containing the changeset. */\n    mutationId: string;\n}\n/**\n * A Vectorize Vector Search Index for querying vectors/embeddings.\n *\n * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released.\n * See {@link Vectorize} for its new implementation.\n */\ndeclare abstract class VectorizeIndex {\n    /**\n     * Get information about the currently bound index.\n     * @returns A promise that resolves with information about the current index.\n     */\n    public describe(): Promise<VectorizeIndexDetails>;\n    /**\n     * Use the provided vector to perform a similarity search across the index.\n     * @param vector Input vector that will be used to drive the similarity search.\n     * @param options Configuration options to massage the returned data.\n     * @returns A promise that resolves with matched and scored vectors.\n     */\n    public query(vector: VectorFloatArray | number[], options?: VectorizeQueryOptions): Promise<VectorizeMatches>;\n    /**\n     * Insert a list of vectors into the index dataset. If a provided id exists, an error will be thrown.\n     * @param vectors List of vectors that will be inserted.\n     * @returns A promise that resolves with the ids & count of records that were successfully processed.\n     */\n    public insert(vectors: VectorizeVector[]): Promise<VectorizeVectorMutation>;\n    /**\n     * Upsert a list of vectors into the index dataset. 
If a provided id exists, it will be replaced with the new values.\n     * @param vectors List of vectors that will be upserted.\n     * @returns A promise that resolves with the ids & count of records that were successfully processed.\n     */\n    public upsert(vectors: VectorizeVector[]): Promise<VectorizeVectorMutation>;\n    /**\n     * Delete a list of vectors with a matching id.\n     * @param ids List of vector ids that should be deleted.\n     * @returns A promise that resolves with the ids & count of records that were successfully processed (and thus deleted).\n     */\n    public deleteByIds(ids: string[]): Promise<VectorizeVectorMutation>;\n    /**\n     * Get a list of vectors with a matching id.\n     * @param ids List of vector ids that should be returned.\n     * @returns A promise that resolves with the raw unscored vectors matching the id set.\n     */\n    public getByIds(ids: string[]): Promise<VectorizeVector[]>;\n}\n/**\n * A Vectorize Vector Search Index for querying vectors/embeddings.\n *\n * Mutations in this version are async, returning a mutation id.\n */\ndeclare abstract class Vectorize {\n    /**\n     * Get information about the currently bound index.\n     * @returns A promise that resolves with information about the current index.\n     */\n    public describe(): Promise<VectorizeIndexInfo>;\n    /**\n     * Use the provided vector to perform a similarity search across the index.\n     * @param vector Input vector that will be used to drive the similarity search.\n     * @param options Configuration options to massage the returned data.\n     * @returns A promise that resolves with matched and scored vectors.\n     */\n    public query(vector: VectorFloatArray | number[], options?: VectorizeQueryOptions): Promise<VectorizeMatches>;\n    /**\n     * Use the provided vector-id to perform a similarity search across the index.\n     * @param vectorId Id for a vector in the index against which the index should be queried.\n     * @param 
options Configuration options to massage the returned data.\n     * @returns A promise that resolves with matched and scored vectors.\n     */\n    public queryById(vectorId: string, options?: VectorizeQueryOptions): Promise<VectorizeMatches>;\n    /**\n     * Insert a list of vectors into the index dataset. If a provided id exists, an error will be thrown.\n     * @param vectors List of vectors that will be inserted.\n     * @returns A promise that resolves with a unique identifier of a mutation containing the insert changeset.\n     */\n    public insert(vectors: VectorizeVector[]): Promise<VectorizeAsyncMutation>;\n    /**\n     * Upsert a list of vectors into the index dataset. If a provided id exists, it will be replaced with the new values.\n     * @param vectors List of vectors that will be upserted.\n     * @returns A promise that resolves with a unique identifier of a mutation containing the upsert changeset.\n     */\n    public upsert(vectors: VectorizeVector[]): Promise<VectorizeAsyncMutation>;\n    /**\n     * Delete a list of vectors with a matching id.\n     * @param ids List of vector ids that should be deleted.\n     * @returns A promise that resolves with a unique identifier of a mutation containing the delete changeset.\n     */\n    public deleteByIds(ids: string[]): Promise<VectorizeAsyncMutation>;\n    /**\n     * Get a list of vectors with a matching id.\n     * @param ids List of vector ids that should be returned.\n     * @returns A promise that resolves with the raw unscored vectors matching the id set.\n     */\n    public getByIds(ids: string[]): Promise<VectorizeVector[]>;\n}\n/**\n * The interface for \"version_metadata\" binding\n * providing metadata about the Worker Version using this binding.\n */\ntype WorkerVersionMetadata = {\n    /** The ID of the Worker Version using this binding */\n    id: string;\n    /** The tag of the Worker Version using this binding */\n    tag: string;\n    /** The timestamp of when the Worker Version 
was uploaded */\n    timestamp: string;\n};\ninterface DynamicDispatchLimits {\n    /**\n     * Limit CPU time in milliseconds.\n     */\n    cpuMs?: number;\n    /**\n     * Limit number of subrequests.\n     */\n    subRequests?: number;\n}\ninterface DynamicDispatchOptions {\n    /**\n     * Limit resources of invoked Worker script.\n     */\n    limits?: DynamicDispatchLimits;\n    /**\n     * Arguments for outbound Worker script, if configured.\n     */\n    outbound?: {\n        [key: string]: any;\n    };\n}\ninterface DispatchNamespace {\n    /**\n     * @param name Name of the Worker script.\n     * @param args Arguments to Worker script.\n     * @param options Options for Dynamic Dispatch invocation.\n     * @returns A Fetcher object that allows you to send requests to the Worker script.\n     * @throws If the Worker script does not exist in this dispatch namespace, an error will be thrown.\n     */\n    get(\n        name: string,\n        args?: {\n            [key: string]: any;\n        },\n        options?: DynamicDispatchOptions,\n    ): Fetcher;\n}\ndeclare module \"cloudflare:workflows\" {\n    /**\n     * NonRetryableError allows for a user to throw a fatal error\n     * that makes a Workflow instance fail immediately without triggering a retry\n     */\n    export class NonRetryableError extends Error {\n        public constructor(message: string, name?: string);\n    }\n}\ndeclare abstract class Workflow<PARAMS = unknown> {\n    /**\n     * Get a handle to an existing instance of the Workflow.\n     * @param id Id for the instance of this Workflow\n     * @returns A promise that resolves with a handle for the Instance\n     */\n    public get(id: string): Promise<WorkflowInstance>;\n    /**\n     * Create a new instance and return a handle to it. 
If a provided id exists, an error will be thrown.\n     * @param options Options when creating an instance including id and params\n     * @returns A promise that resolves with a handle for the Instance\n     */\n    public create(options?: WorkflowInstanceCreateOptions<PARAMS>): Promise<WorkflowInstance>;\n    /**\n     * Create a batch of instances and return handle for all of them. If a provided id exists, an error will be thrown.\n     * `createBatch` is limited at 100 instances at a time or when the RPC limit for the batch (1MiB) is reached.\n     * @param batch List of Options when creating an instance including name and params\n     * @returns A promise that resolves with a list of handles for the created instances.\n     */\n    public createBatch(batch: WorkflowInstanceCreateOptions<PARAMS>[]): Promise<WorkflowInstance[]>;\n}\ntype WorkflowDurationLabel = \"second\" | \"minute\" | \"hour\" | \"day\" | \"week\" | \"month\" | \"year\";\ntype WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${\"s\" | \"\"}` | number;\ntype WorkflowRetentionDuration = WorkflowSleepDuration;\ninterface WorkflowInstanceCreateOptions<PARAMS = unknown> {\n    /**\n     * An id for your Workflow instance. 
Must be unique within the Workflow.\n     */\n    id?: string;\n    /**\n     * The event payload the Workflow instance is triggered with\n     */\n    params?: PARAMS;\n    /**\n     * The retention policy for Workflow instance.\n     * Defaults to the maximum retention period available for the owner's account.\n     */\n    retention?: {\n        successRetention?: WorkflowRetentionDuration;\n        errorRetention?: WorkflowRetentionDuration;\n    };\n}\ntype InstanceStatus = {\n    status:\n        | \"queued\" // means that instance is waiting to be started (see concurrency limits)\n        | \"running\"\n        | \"paused\"\n        | \"errored\"\n        | \"terminated\" // user terminated the instance while it was running\n        | \"complete\"\n        | \"waiting\" // instance is hibernating and waiting for sleep or event to finish\n        | \"waitingForPause\" // instance is finishing the current work to pause\n        | \"unknown\";\n    error?: {\n        name: string;\n        message: string;\n    };\n    output?: unknown;\n};\ninterface WorkflowError {\n    code?: number;\n    message: string;\n}\ndeclare abstract class WorkflowInstance {\n    public id: string;\n    /**\n     * Pause the instance.\n     */\n    public pause(): Promise<void>;\n    /**\n     * Resume the instance. If it is already running, an error will be thrown.\n     */\n    public resume(): Promise<void>;\n    /**\n     * Terminate the instance. If it is errored, terminated or complete, an error will be thrown.\n     */\n    public terminate(): Promise<void>;\n    /**\n     * Restart the instance.\n     */\n    public restart(): Promise<void>;\n    /**\n     * Returns the current status of the instance.\n     */\n    public status(): Promise<InstanceStatus>;\n    /**\n     * Send an event to this instance.\n     */\n    public sendEvent({ type, payload }: { type: string; payload: unknown }): Promise<void>;\n}\n"
  },
  {
    "path": "frontend/worker/wrangler.toml",
    "content": "name = \"detypify\"\nmain = \"src/index.ts\"\ncompatibility_date = \"2026-01-13\"\n\n[[d1_databases]]\nbinding = \"DB\"\ndatabase_name = \"detypify\"\ndatabase_id = \"440614c0-0265-4051-ae67-cec65b5b5c00\"\n"
  },
  {
    "path": "package.json",
    "content": "{\n    \"name\": \"detypify-frontend\",\n    \"scripts\": {\n        \"format\": \"prettier --write .\"\n    },\n    \"devDependencies\": {\n        \"@trivago/prettier-plugin-sort-imports\": \"^6.0.2\",\n        \"prettier\": \"^3.8.2\",\n        \"prettier-plugin-svelte\": \"^3.5.1\",\n        \"prettier-plugin-tailwindcss\": \"^0.7.2\"\n    },\n    \"prettier\": {\n        \"tabWidth\": 4,\n        \"printWidth\": 120,\n        \"tailwindStylesheet\": \"frontend/ui/src/app.css\",\n        \"plugins\": [\n            \"prettier-plugin-svelte\",\n            \"prettier-plugin-tailwindcss\",\n            \"@trivago/prettier-plugin-sort-imports\"\n        ],\n        \"overrides\": [\n            {\n                \"files\": \"*.svelte\",\n                \"options\": {\n                    \"parser\": \"svelte\"\n                }\n            }\n        ]\n    },\n    \"pnpm\": {\n        \"peerDependencyRules\": {\n            \"allowedVersions\": {\n                \"vite\": \"8.x\"\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "pnpm-workspace.yaml",
    "content": "packages:\n  - 'frontend/*'\n"
  },
  {
    "path": "prek.toml",
    "content": "[[repos]]\nrepo = \"builtin\"\nhooks = [\n    { id = \"trailing-whitespace\" },\n    { id = \"end-of-file-fixer\" },\n    { id = \"mixed-line-ending\" },\n    { id = \"check-symlinks\" },\n]\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[project]\nname = \"detypify-model\"\nversion = \"0.3.0\"\nrequires-python = \">=3.13, <3.14\"\nauthors = [\n  { name = \"QuarticCat\", email = \"QuarticCat@pm.me\" },\n  { name = \"Cloud0310\", email = \"gh@touchingfish.day\" },\n]\ndependencies = [\n  \"datasets>=4.5.0\",\n  \"lightning>=2.6.0\",\n  \"msgspec>=0.20.0\",\n  \"onnx>=1.20.0\",\n  \"onnxscript>=0.5.7\",\n  \"opencv-python-headless>=4.13.0.90\",\n  \"tensorrt>=10.13.3.9.post1\",\n  \"timm>=1.0.22\",\n  \"torch\",\n  \"torch-tensorrt>=2.9.0\",\n  \"torchvision\",\n  \"typer>=0.21.1\",\n]\n\n[project.optional-dependencies]\ndata = [\n  \"lxml>=6.0.2\",\n  \"lxml-stubs>=0.5.1\",\n  \"beautifulsoup4>=4.14.3\",\n  \"polars>=1.34.0\",\n  \"huggingface-hub>=1.3.3\",\n]\n\n[dependency-groups]\ndev = [\n  # data wrangler\n  \"ipykernel\",\n  # ort export\n  \"matplotlib>=3.10.8\",\n  \"onnxruntime>=1.23.2\",\n  # lsp\n  \"pyrefly\",\n  # formatter and linter\n  \"ruff\",\n  # training logs\n  \"setuptools==81.0\",\n  \"tensorboard>=2.20.0\",\n  \"tensorboardx>=2.6.4\",\n  \"torch-tb-profiler>=0.4.3\",\n  # profiler\n  \"viztracer>=1.1.1\",\n  # lsp, type checker\n  \"zuban\",\n]\n\n[tool.pyrefly]\nsearch-path = [\"./python\"]\n\n# linter and formatter, line length 88 as black default\n[tool.ruff]\nline-length = 120\nexclude = [\n  \".git\",\n  \".mypy_cache\",\n  \".ruff_cache\",\n  \".venv\",\n  \"dist\",\n  \"python/review_contrib.py\"\n]\n\n[tool.ruff.format]\ndocstring-code-line-length = 80\n\n# default set and isort\n[tool.ruff.lint]\npreview = true\nselect = [\n  \"A001\",\n  \"A002\",\n  \"A003\",\n  \"ARG001\",\n  \"ARG001\",\n  \"ARG002\",\n  \"ARG003\",\n  \"ARG004\",\n  \"ARG005\",\n  \"ASYNC100\",\n  \"B002\",\n  \"B003\",\n  \"B004\",\n  \"B005\",\n  \"B006\",\n  \"B007\",\n  \"B009\",\n  \"B010\",\n  \"B011\",\n  \"B012\",\n  \"B013\",\n  \"B014\",\n  \"B015\",\n  \"B016\",\n  \"B017\",\n  \"B018\",\n  \"B019\",\n  \"B020\",\n  \"B021\",\n  \"B022\",\n  \"B023\",\n  \"B024\",\n  
\"B025\",\n  \"B026\",\n  \"B028\",\n  \"B029\",\n  \"B030\",\n  \"B031\",\n  \"B032\",\n  \"B033\",\n  \"B034\",\n  \"B035\",\n  \"B904\",\n  \"B905\",\n  \"B909\",\n  \"BLE001\",\n  \"C400\",\n  \"C401\",\n  \"C402\",\n  \"C403\",\n  \"C404\",\n  \"C405\",\n  \"C406\",\n  \"C408\",\n  \"C409\",\n  \"C410\",\n  \"C411\",\n  \"C413\",\n  \"C414\",\n  \"C415\",\n  \"C416\",\n  \"C417\",\n  \"C418\",\n  \"C419\",\n  \"COM818\",\n  \"DTZ001\",\n  \"DTZ002\",\n  \"DTZ003\",\n  \"DTZ004\",\n  \"DTZ005\",\n  \"DTZ006\",\n  \"DTZ007\",\n  \"DTZ011\",\n  \"DTZ012\",\n  \"E101\",\n  \"E112\",\n  \"E113\",\n  \"E115\",\n  \"E116\",\n  \"E201\",\n  \"E202\",\n  \"E203\",\n  \"E211\",\n  \"E221\",\n  \"E222\",\n  \"E223\",\n  \"E224\",\n  \"E225\",\n  \"E226\",\n  \"E227\",\n  \"E228\",\n  \"E231\",\n  \"E241\",\n  \"E242\",\n  \"E251\",\n  \"E252\",\n  \"E261\",\n  \"E262\",\n  \"E265\",\n  \"E266\",\n  \"E271\",\n  \"E272\",\n  \"E273\",\n  \"E274\",\n  \"E275\",\n  \"E401\",\n  \"E402\",\n  \"E502\",\n  \"E701\",\n  \"E702\",\n  \"E703\",\n  \"E711\",\n  \"E712\",\n  \"E713\",\n  \"E714\",\n  \"E721\",\n  \"E722\",\n  \"E731\",\n  \"E741\",\n  \"E742\",\n  \"E743\",\n  \"E902\",\n  \"EM101\",\n  \"EM102\",\n  \"EM103\",\n  \"EXE001\",\n  \"EXE002\",\n  \"EXE003\",\n  \"EXE004\",\n  \"EXE005\",\n  \"F401\",\n  \"F402\",\n  \"F403\",\n  \"F404\",\n  \"F405\",\n  \"F406\",\n  \"F407\",\n  \"F501\",\n  \"F502\",\n  \"F503\",\n  \"F504\",\n  \"F505\",\n  \"F506\",\n  \"F507\",\n  \"F508\",\n  \"F509\",\n  \"F521\",\n  \"F522\",\n  \"F523\",\n  \"F524\",\n  \"F525\",\n  \"F541\",\n  \"F601\",\n  \"F602\",\n  \"F621\",\n  \"F622\",\n  \"F631\",\n  \"F632\",\n  \"F633\",\n  \"F634\",\n  \"F701\",\n  \"F702\",\n  \"F704\",\n  \"F706\",\n  \"F707\",\n  \"F722\",\n  \"F811\",\n  \"F821\",\n  \"F822\",\n  \"F823\",\n  \"F841\",\n  \"F842\",\n  \"F901\",\n  \"FA100\",\n  \"FA102\",\n  \"FBT002\",\n  \"FLY002\",\n  \"FURB105\",\n  \"FURB110\",\n  \"FURB113\",\n  \"FURB116\",\n  
\"FURB118\",\n  \"FURB129\",\n  \"FURB131\",\n  \"FURB132\",\n  \"FURB136\",\n  \"FURB142\",\n  \"FURB145\",\n  \"FURB148\",\n  \"FURB152\",\n  \"FURB157\",\n  \"FURB161\",\n  \"FURB163\",\n  \"FURB164\",\n  \"FURB166\",\n  \"FURB167\",\n  \"FURB168\",\n  \"FURB169\",\n  \"FURB171\",\n  \"FURB177\",\n  \"FURB180\",\n  \"FURB181\",\n  \"FURB187\",\n  \"FURB192\",\n  \"G001\",\n  \"G002\",\n  \"G003\",\n  \"G010\",\n  \"G101\",\n  \"G201\",\n  \"G202\",\n  \"I001\",\n  \"I002\",\n  \"ICN001\",\n  \"ICN002\",\n  \"ICN003\",\n  \"INP001\",\n  \"INT001\",\n  \"INT002\",\n  \"INT003\",\n  \"ISC003\",\n  \"LOG001\",\n  \"LOG002\",\n  \"LOG007\",\n  \"LOG009\",\n  \"N801\",\n  \"N802\",\n  \"N803\",\n  \"N804\",\n  \"N805\",\n  \"N806\",\n  \"N807\",\n  \"N811\",\n  \"N812\",\n  \"N813\",\n  \"N814\",\n  \"N815\",\n  \"N816\",\n  \"N817\",\n  \"N818\",\n  \"N999\",\n  \"PERF101\",\n  \"PERF102\",\n  \"PERF401\",\n  \"PERF402\",\n  \"PERF403\",\n  \"PGH005\",\n  \"PIE790\",\n  \"PIE794\",\n  \"PIE796\",\n  \"PIE800\",\n  \"PIE804\",\n  \"PIE807\",\n  \"PIE808\",\n  \"PIE810\",\n  \"PLC0105\",\n  \"PLC0131\",\n  \"PLC0132\",\n  \"PLC0205\",\n  \"PLC0208\",\n  \"PLC0414\",\n  \"PLC1901\",\n  \"PLC2401\",\n  \"PLC2403\",\n  \"PLC2701\",\n  \"PLC2801\",\n  \"PLC3002\",\n  \"PLE0100\",\n  \"PLE0101\",\n  \"PLE0115\",\n  \"PLE0116\",\n  \"PLE0117\",\n  \"PLE0118\",\n  \"PLE0237\",\n  \"PLE0241\",\n  \"PLE0302\",\n  \"PLE0303\",\n  \"PLE0304\",\n  \"PLE0305\",\n  \"PLE0307\",\n  \"PLE0308\",\n  \"PLE0309\",\n  \"PLE0604\",\n  \"PLE0605\",\n  \"PLE0643\",\n  \"PLE0704\",\n  \"PLE1132\",\n  \"PLE1141\",\n  \"PLE1142\",\n  \"PLE1205\",\n  \"PLE1206\",\n  \"PLE1300\",\n  \"PLE1307\",\n  \"PLE1310\",\n  \"PLE1507\",\n  \"PLE1519\",\n  \"PLE1520\",\n  \"PLE1700\",\n  \"PLE2502\",\n  \"PLE2510\",\n  \"PLE2512\",\n  \"PLE2513\",\n  \"PLE2514\",\n  \"PLE2515\",\n  \"PLE4703\",\n  \"PLR0124\",\n  \"PLR0133\",\n  \"PLR0202\",\n  \"PLR0203\",\n  \"PLR0206\",\n  \"PLR0402\",\n  \"PLR1704\",\n  
\"PLR1711\",\n  \"PLR1714\",\n  \"PLR1722\",\n  \"PLR1730\",\n  \"PLR1733\",\n  \"PLR1736\",\n  \"PLR2004\",\n  \"PLR2044\",\n  \"PLR5501\",\n  \"PLR6104\",\n  \"PLR6201\",\n  \"PLR6301\",\n  \"PLW0108\",\n  \"PLW0120\",\n  \"PLW0127\",\n  \"PLW0128\",\n  \"PLW0129\",\n  \"PLW0131\",\n  \"PLW0133\",\n  \"PLW0177\",\n  \"PLW0211\",\n  \"PLW0245\",\n  \"PLW0406\",\n  \"PLW0602\",\n  \"PLW0603\",\n  \"PLW0604\",\n  \"PLW0642\",\n  \"PLW0711\",\n  \"PLW1501\",\n  \"PLW1508\",\n  \"PLW1509\",\n  \"PLW1510\",\n  \"PLW1514\",\n  \"PLW1641\",\n  \"PLW2101\",\n  \"PLW2901\",\n  \"PLW3201\",\n  \"PLW3301\",\n  \"PT001\",\n  \"PT002\",\n  \"PT003\",\n  \"PT006\",\n  \"PT007\",\n  \"PT008\",\n  \"PT009\",\n  \"PT010\",\n  \"PT011\",\n  \"PT012\",\n  \"PT013\",\n  \"PT014\",\n  \"PT015\",\n  \"PT016\",\n  \"PT017\",\n  \"PT018\",\n  \"PT019\",\n  \"PT020\",\n  \"PT021\",\n  \"PT022\",\n  \"PT023\",\n  \"PT024\",\n  \"PT025\",\n  \"PT026\",\n  \"PT027\",\n  \"PYI001\",\n  \"PYI002\",\n  \"PYI003\",\n  \"PYI004\",\n  \"PYI005\",\n  \"PYI006\",\n  \"PYI007\",\n  \"PYI008\",\n  \"PYI009\",\n  \"PYI010\",\n  \"PYI011\",\n  \"PYI012\",\n  \"PYI013\",\n  \"PYI014\",\n  \"PYI015\",\n  \"PYI016\",\n  \"PYI017\",\n  \"PYI018\",\n  \"PYI019\",\n  \"PYI020\",\n  \"PYI021\",\n  \"PYI024\",\n  \"PYI025\",\n  \"PYI026\",\n  \"PYI029\",\n  \"PYI030\",\n  \"PYI032\",\n  \"PYI033\",\n  \"PYI034\",\n  \"PYI035\",\n  \"PYI036\",\n  \"PYI041\",\n  \"PYI042\",\n  \"PYI043\",\n  \"PYI044\",\n  \"PYI045\",\n  \"PYI046\",\n  \"PYI047\",\n  \"PYI048\",\n  \"PYI049\",\n  \"PYI050\",\n  \"PYI051\",\n  \"PYI052\",\n  \"PYI053\",\n  \"PYI054\",\n  \"PYI055\",\n  \"PYI056\",\n  \"PYI058\",\n  \"PYI059\",\n  \"PYI062\",\n  \"RET503\",\n  \"RET504\",\n  \"RET505\",\n  \"RET506\",\n  \"RET507\",\n  \"RET508\",\n  \"RSE102\",\n  \"RUF001\",\n  \"RUF002\",\n  \"RUF003\",\n  \"RUF005\",\n  \"RUF006\",\n  \"RUF007\",\n  \"RUF008\",\n  \"RUF009\",\n  \"RUF010\",\n  \"RUF012\",\n  \"RUF013\",\n  \"RUF015\",\n  
\"RUF016\",\n  \"RUF017\",\n  \"RUF018\",\n  \"RUF019\",\n  \"RUF020\",\n  \"RUF021\",\n  \"RUF022\",\n  \"RUF023\",\n  \"RUF024\",\n  \"RUF026\",\n  \"RUF027\",\n  \"RUF028\",\n  \"RUF029\",\n  \"RUF100\",\n  \"RUF101\",\n  \"S101\",\n  \"S102\",\n  \"S103\",\n  \"S104\",\n  \"S105\",\n  \"S106\",\n  \"S107\",\n  \"S108\",\n  \"S110\",\n  \"S112\",\n  \"S113\",\n  \"S201\",\n  \"S202\",\n  \"S301\",\n  \"S302\",\n  \"S303\",\n  \"S304\",\n  \"S305\",\n  \"S306\",\n  \"S307\",\n  \"S308\",\n  \"S310\",\n  \"S311\",\n  \"S312\",\n  \"S313\",\n  \"S314\",\n  \"S315\",\n  \"S316\",\n  \"S317\",\n  \"S318\",\n  \"S319\",\n  \"S321\",\n  \"S323\",\n  \"S324\",\n  \"S401\",\n  \"S402\",\n  \"S403\",\n  \"S405\",\n  \"S406\",\n  \"S407\",\n  \"S408\",\n  \"S409\",\n  \"S411\",\n  \"S412\",\n  \"S413\",\n  \"S415\",\n  \"S501\",\n  \"S502\",\n  \"S503\",\n  \"S504\",\n  \"S505\",\n  \"S506\",\n  \"S507\",\n  \"S508\",\n  \"S509\",\n  \"S601\",\n  \"S602\",\n  \"S604\",\n  \"S605\",\n  \"S606\",\n  \"S607\",\n  \"S608\",\n  \"S609\",\n  \"S610\",\n  \"S611\",\n  \"S612\",\n  \"S701\",\n  \"S702\",\n  \"SIM101\",\n  \"SIM102\",\n  \"SIM103\",\n  \"SIM105\",\n  \"SIM107\",\n  \"SIM108\",\n  \"SIM109\",\n  \"SIM110\",\n  \"SIM112\",\n  \"SIM113\",\n  \"SIM114\",\n  \"SIM115\",\n  \"SIM116\",\n  \"SIM117\",\n  \"SIM118\",\n  \"SIM201\",\n  \"SIM202\",\n  \"SIM208\",\n  \"SIM210\",\n  \"SIM211\",\n  \"SIM212\",\n  \"SIM220\",\n  \"SIM221\",\n  \"SIM222\",\n  \"SIM223\",\n  \"SIM300\",\n  \"SIM910\",\n  \"SIM911\",\n  \"SLF001\",\n  \"SLOT000\",\n  \"SLOT001\",\n  \"SLOT002\",\n  \"T100\",\n  \"T201\",\n  \"T203\",\n  \"TC001\",\n  \"TC002\",\n  \"TC003\",\n  \"TC004\",\n  \"TC005\",\n  \"TC010\",\n  \"TD004\",\n  \"TD005\",\n  \"TD006\",\n  \"TD007\",\n  \"TID251\",\n  \"TID252\",\n  \"TID253\",\n  \"TRY002\",\n  \"TRY003\",\n  \"TRY004\",\n  \"TRY201\",\n  \"TRY203\",\n  \"TRY300\",\n  \"TRY301\",\n  \"TRY400\",\n  \"TRY401\",\n  \"UP001\",\n  \"UP003\",\n  \"UP004\",\n  
\"UP005\",\n  \"UP006\",\n  \"UP007\",\n  \"UP008\",\n  \"UP009\",\n  \"UP010\",\n  \"UP011\",\n  \"UP012\",\n  \"UP013\",\n  \"UP014\",\n  \"UP015\",\n  \"UP017\",\n  \"UP018\",\n  \"UP019\",\n  \"UP020\",\n  \"UP021\",\n  \"UP022\",\n  \"UP023\",\n  \"UP024\",\n  \"UP025\",\n  \"UP026\",\n  \"UP028\",\n  \"UP029\",\n  \"UP030\",\n  \"UP031\",\n  \"UP032\",\n  \"UP033\",\n  \"UP034\",\n  \"UP035\",\n  \"UP036\",\n  \"UP037\",\n  \"UP039\",\n  \"UP040\",\n  \"UP041\",\n  \"UP042\",\n  \"W291\",\n  \"W292\",\n  \"W293\",\n  \"W391\",\n  \"W505\",\n  \"W605\",\n  \"YTT101\",\n  \"YTT102\",\n  \"YTT103\",\n  \"YTT201\",\n  \"YTT202\",\n  \"YTT203\",\n  \"YTT204\",\n  \"YTT301\",\n  \"YTT302\",\n  \"YTT303\",\n]\n\n[tool.uv]\nenvironments = [\"sys_platform == 'linux'\", \"sys_platform == 'win32'\"]\n\n[tool.uv.sources]\ntorch = [{ index = \"pytorch-cu130\" }]\ntorchvision = [{ index = \"pytorch-cu130\" }]\ntorch-tensorrt = [{ index = \"pytorch-cu130\" }]\n\n[[tool.uv.index]]\nname = \"pytorch-cu130\"\nurl = \"https://download.pytorch.org/whl/cu130\"\nexplicit = true\n\n# zuban sys.path finding, for lsp\n[tool.zuban]\nmypy_path = [\"python\"]\n"
  },
  {
    "path": "python/README.md",
    "content": "# Detypify Model\n\nThis directory contains scripts for data preprocessing, model training, and asset generation for Detypify.\n\n## Project Structure\n\n- `proc_data.py`: Main data preprocessing script. Handles:\n  - Scraping symbol information from Typst documentation (downloads `typ_sym.html` if missing).\n  - Processing raw datasets (Detexify and MathWriting).\n  - Mapping LaTeX commands to Typst symbols.\n  - Creating and uploading sharded datasets to Hugging Face.\n  - Generating inference metadata (`infer.json`) and contribution metadata (`contrib.json`).\n- `train.py`: Training script using PyTorch Lightning.\n  - Supports multiple MobileNetV4 variants from the `timm` library and a custom CNN.\n  - Includes automatic batch size and learning rate finding.\n  - Exports the trained model to ONNX format.\n  - Uses TensorBoard for logging.\n- `model.py`: Neural network architectures.\n  - `TimmModel`: Wrapper for `timm` models optimized for grayscale math symbol recognition.\n  - `CNNModel`: A simple custom CNN for comparison or smaller tasks.\n- `dataset.py`: Data loading and augmentation.\n  - `MathSymbolDataModule`: Handles downloading from Hugging Face, rasterizing strokes, and applying real-time augmentations (rotation, affine transforms).\n- `review_contrib.py`: Utility to review and incorporate community-contributed symbol samples from the D1 database (Maintainer only).\n- `tex_to_typ.json`: Manual mapping overrides for LaTeX to Typst symbol names.\n- `callbacks.py`: Custom callbacks for model training:\n  - `EMAWeightAveraging`: EMA implementation with performance optimization and warmup, similar to `timm`'s EMAv3.\n  - `LogPredictCallback`: Logs sample images with their ground truth and predicted labels to TensorBoard for visual performance review.\n- `test_model.py`: Script for testing pre-trained model performance and logging wrong guesses.\n\n## Development\n\n### Prerequisites\n\nThis project uses `uv` for dependency 
management.\n\nFor training only, install dependencies with:\n\n```bash\nuv sync\n```\n\nIf you're interested in processing data:\n\n```bash\nuv sync --extra=data\n```\n\n### Data Preprocessing\n\n> [!NOTE]\n> For training the first time, at least **generate testing info** is needed.\n\nTo generate data for testing without full dataset processing:\n\n```bash\nuv run --extra=data proc_data.py --skip-convert-data\n```\n\nTo compose the dataset (Detexify + MathWriting) and upload it to Hugging Face:\n\n```bash\nuv run --extra=data proc_data.py --datasets detexify mathwriting\n```\n\nTo prepare data locally without uploading (useful for debugging):\n\n```bash\nuv run --extra=data proc_data.py --no-upload --split-parts\n```\n\nTo include the contributed dataset (requires `build/dataset.json`):\n\n```bash\nuv run --extra=data proc_data.py --include-contrib\n```\n\nSee more options with:\n\n```bash\nuv run --extra=data proc_data.py --help\n```\n\n### Model Training\n\n>[!NOTE]\n> The ema gamma and decay params are crucial things to change if you're meeting with\n> accuracy low problem.\n> By default, these options are tuned for batch size 128 as default.\n\nTo train the default models (defined in `train.py`):\n\n```bash\nuv run train.py --total-epochs 35 --image-size 224\n```\n\nYou can specify models to be trained:\n```bash\nuv run train.py --models mobilenetv4_conv_small_035 --models mobilenetv4_conv_small_050\n```\n\nThe script will:\n1. Automatically find the optimal batch size (can be disabled with `--no-find-batch-size`).\n2. Find the optimal learning rate.\n3. Train the models.\n4. 
Export checkpoints them to `build/train/{model_name}/ckpts`.\n\n**Key Options:**\n- `--out-dir`: Output directory (default: `build/train`).\n- `--ema-start-epoch`: Epoch to start EMA (default: 5).\n- `--log-pred`: Enable logging of predictions (default: True).\n\nTo view the training/test logs:\n\n```bash\nuv run tensorboard --logdir ./build/{train,test}\n```\n\nSee more tunable options with: `uv run python/train.py --help`\n\n### Model Testing\n\nTo test a trained model checkpoint:\n\n```bash\nuv run python/test_model.py path/to/checkpoint.ckpt --model-type timm --model-name mobilenetv4_conv_small_050\n```\n\nFor CNN models:\n\n```bash\nuv run python/test_model.py path/to/checkpoint.ckpt --model-type cnn\n```\n"
  },
  {
    "path": "python/__init__.py",
    "content": ""
  },
  {
    "path": "python/callbacks.py",
    "content": "\"\"\"Self Write Training Callbacks\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, override\n\nfrom lightning.pytorch.callbacks import Callback, ModelCheckpoint\nfrom lightning.pytorch.callbacks.weight_averaging import WeightAveraging\n\nif TYPE_CHECKING:\n    from typing import Literal\n\n    from lightning import LightningModule, Trainer\n    from torch import Tensor, device\n\n\nclass LogPredictCallback(Callback):\n    def __init__(\n        self,\n        classes: list[str],\n        max_batches: int = 16,\n        log_type: Literal[\"wrong\", \"right\", \"both\"] = \"both\",\n    ) -> None:\n        super().__init__()\n        self.classes = classes\n        self.max_batches = max_batches\n        self.log_type = log_type\n        self.logged_batches = 0\n\n    @override\n    def on_test_batch_end(\n        self,\n        trainer: Trainer,\n        pl_module: LightningModule,\n        outputs,\n        batch,\n        batch_idx: int,\n        dataloader_idx: int = 0,\n    ) -> None:\n        import torch\n\n        if self.logged_batches >= self.max_batches:\n            return\n\n        # Check if outputs is available (requires test_step to return pred)\n        if outputs is None:\n            return\n\n        pred_logits = outputs\n        image, label = batch[\"image\"], batch[\"label\"]\n\n        preds = torch.argmax(pred_logits, dim=1)\n\n        # Identify guesses based on log_type\n        if self.log_type == \"wrong\":\n            mask = preds != label\n        elif self.log_type == \"right\":\n            mask = preds == label\n        else:  # \"both\"\n            # Create a mask of all True with same shape as label\n            mask = torch.ones_like(label, dtype=torch.bool)\n\n        if not mask.any():\n            return\n\n        # images transformed as float32, converting back\n        selected_images: Tensor = image[mask] * 255\n        
selected_images = selected_images.to(dtype=torch.uint8)\n        selected_preds = preds[mask]\n        true_labels = label[mask]\n\n        # Limit the number of images to log per batch (safety cap at 16)\n        num_to_log = min(len(selected_images), 16)\n        selected_images = selected_images[:num_to_log]\n        selected_preds = selected_preds[:num_to_log]\n        true_labels = true_labels[:num_to_log]\n\n        from lightning.pytorch.loggers import TensorBoardLogger\n\n        if isinstance(trainer.logger, TensorBoardLogger):\n            from math import ceil\n\n            import matplotlib as mpl\n            import matplotlib.pyplot as plt\n\n            mpl.use(\"Agg\")\n\n            tensorboard = trainer.logger.experiment\n\n            # Create a grid of plots using matplotlib\n            num_images = len(selected_images)\n            cols = ceil(num_images**0.5)\n            rows = ceil(num_images / cols)\n\n            # Adjust figure size based on grid\n            fig, axes = plt.subplots(rows, cols, figsize=(cols * 3, rows * 3))\n\n            # Normalize axes to be iterable even if single plot\n            axes_flat = [axes] if num_images == 1 else axes.flatten()\n\n            for i, (img, pred_idx, true_idx) in enumerate(\n                zip(selected_images, selected_preds, true_labels, strict=True)\n            ):\n                ax = axes_flat[i]\n\n                # Image is (C, H, W), usually (1, H, W) for grayscale\n                # Convert to (H, W) numpy for imshow\n                img_np = img.cpu().numpy()\n                if img_np.shape[0] == 1:\n                    img_np = img_np.squeeze(0)\n\n                ax.imshow(img_np, cmap=\"gray\")\n\n                pred_name = self.classes[pred_idx] if pred_idx < len(self.classes) else str(pred_idx.item())\n                true_name = self.classes[true_idx] if true_idx < len(self.classes) else str(true_idx.item())\n\n                # Determine color: red if wrong, green if 
right\n                is_correct = pred_idx == true_idx\n                title_color = \"green\" if is_correct else \"red\"\n\n                ax.set_title(f\"Truth: {true_name}\\nPrediction: {pred_name}\", color=title_color)\n                ax.axis(\"off\")\n\n            # Hide unused subplots\n            for i in range(num_images, len(axes_flat)):\n                axes_flat[i].axis(\"off\")\n\n            plt.tight_layout()\n\n            # Determine tag name\n            if self.log_type == \"wrong\":\n                tag = \"wrong_predictions\"\n            elif self.log_type == \"right\":\n                tag = \"right_predictions\"\n            else:\n                tag = \"predictions\"\n\n            tensorboard.add_figure(\n                tag,\n                fig,\n                global_step=batch_idx,\n            )\n            plt.close(fig)\n\n        self.logged_batches += 1\n\n\ndef get_ema_multi_avg_fn(\n    decay: float = 0.995,\n    min_decay: float = 0.0,\n    warmup_gamma: float = 25.0,\n    warmup_power: float = 0.7,\n    *,\n    use_warmup: bool = True,\n):\n    \"\"\"\n    Get a multi_avg_fn applying EMA with Inverse Gamma warmup schedule,\n    adapted from torch lightning's and timm's implementation\n\n    Unlike the standard get_ema_avg_fn which uses a fixed decay, this version\n    calculates the decay dynamically based on the step count (num_averaged).\n    This allows you to start EMA immediately (step 0) without initialization bias\n    \"\"\"\n\n    import torch\n\n    @torch.no_grad()\n    def ema_multi_update(\n        averaged_param_list: list[Tensor],\n        current_param_list: list[Tensor],\n        num_averaged: Tensor,\n    ):\n        step = num_averaged.item()\n\n        # Warmup\n\n        # Formula: decay = 1 - (1 + step / gamma) ^ -power\n        if use_warmup:\n            cur_decay = 1 - (1 + step / warmup_gamma) ** -warmup_power\n            cur_decay = max(min(decay, cur_decay), min_decay)\n        else:\n     
       cur_decay = decay\n\n        # Optimization: Filter & Fused Update\n\n        lerp_ema_params = []\n        lerp_curr_params = []\n\n        copy_ema_params = []\n        copy_curr_params = []\n\n        for ema_p, curr_p in zip(averaged_param_list, current_param_list, strict=True):\n            if ema_p.is_floating_point() or ema_p.is_complex():\n                lerp_ema_params.append(ema_p)\n                lerp_curr_params.append(curr_p)\n            else:\n                copy_ema_params.append(ema_p)\n                copy_curr_params.append(curr_p)\n\n        # Apply Fused Update (Horizontal Fusion)\n        if lerp_ema_params:\n            torch._foreach_lerp_(lerp_ema_params, lerp_curr_params, weight=1.0 - cur_decay)\n\n        # Apply Standard Copy for integers\n        for ema_p, curr_p in zip(copy_ema_params, copy_curr_params, strict=True):\n            ema_p.copy_(curr_p)\n\n    return ema_multi_update\n\n\nclass EMAWeightAveraging(WeightAveraging):\n    \"\"\"Exponential Moving Average Weight Averaging using timm's ModelEmaV3.\n\n    This callback provides advanced EMA features over standard Lightning\n    EMAWeightAveraging:\n    - Decay warmup: Gradually increases decay factor during early training\n      for better stability\n    - Step-aware decay: Supports dynamic decay scheduling based on training\n      steps\n\n    The decay warmup feature is particularly useful for models trained for many steps.\n    With inv_gamma=1 and power=3/4, the decay factor reaches:\n    - 0.999 at ~10K steps\n    - 0.9999 at ~215.4k steps\n    Args:\n        device: Device to store the EMA model on. If None, uses the same device as the\n            training model. Use \"cpu\" to save GPU memory.\n        use_buffers: If True, also averages model buffers (e.g., BatchNorm statistics).\n            Set to False if you plan to update batch norm statistics separately.\n        decay: Base exponential decay rate. 
Higher values give more weight to past\n            parameters. Typical values: 0.999-0.9999.\n        min_decay: Minimum decay value during warmup. Usually 0.0.\n        use_warmup: Enable decay warmup. The decay factor gradually increases from\n            min_decay to decay over time, improving training stability.\n        warmup_gamma: Warmup gamma parameter (inv_gamma in literature). Controls\n            warmup speed. Default 1.0.\n        warmup_power: Warmup power parameter. Controls warmup curve shape.\n            - 3/4: Good for medium training (100K-500K steps)\n        update_every_n_steps: Update the EMA model every N optimizer steps. Default 1.\n        update_starting_at_step: Start updates after this step index (0-based).\n            If None, starts immediately.\n        update_starting_at_epoch: Start updates after this epoch index (0-based).\n            If None, epoch-based control is disabled.\n\n    Note:\n        Like WeightAveraging, this callback doesn't support sharded models and may\n        experience memory increases due to storing averaged parameters.\n    \"\"\"\n\n    def __init__(\n        self,\n        device: device | str | int | None = None,\n        decay: float = 0.9999,\n        min_decay: float = 0.0,\n        warmup_gamma: float = 25.0,\n        warmup_power: float = 3 / 4,\n        update_every_n_steps: int = 1,\n        update_starting_at_step: int | None = None,\n        update_starting_at_epoch: int | None = None,\n        *,\n        use_buffers: bool = True,\n        use_warmup: bool = True,\n    ) -> None:\n        # Initialize parent without avg_fn since we're using ModelEmaV3\n        # Note: We can't pass use_buffers to parent since ModelEmaV3\n        # handles it differently\n        super().__init__(\n            device=device,\n            use_buffers=use_buffers,\n            multi_avg_fn=get_ema_multi_avg_fn(decay, min_decay, warmup_gamma, warmup_power, use_warmup=use_warmup),\n        )\n        
self.update_every_n_steps = update_every_n_steps\n        self.update_starting_at_step = update_starting_at_step\n        self.update_starting_at_epoch = update_starting_at_epoch\n\n    @override\n    def should_update(self, step_idx: int | None = None, epoch_idx: int | None = None) -> bool:\n        \"\"\"Decide when to update the model weights.\n\n        Args:\n            step_idx: The current step index.\n            epoch_idx: The current epoch index.\n        Returns:\n            bool: True if the model weights should be updated, False otherwise.\n\n        \"\"\"\n        if step_idx is not None:\n            # Check step-based conditions only if we have a valid step_idx\n            meets_step_requirement = self.update_starting_at_step is None or step_idx >= self.update_starting_at_step\n            meets_step_frequency = self.update_every_n_steps > 0 and step_idx % self.update_every_n_steps == 0\n            if meets_step_requirement and meets_step_frequency:\n                return True\n\n        if epoch_idx is not None:\n            # Check epoch-based condition only if we specify one\n            meets_epoch_requirement = (\n                self.update_starting_at_epoch is not None and epoch_idx >= self.update_starting_at_epoch\n            )\n            if meets_epoch_requirement:\n                return True\n\n        return False\n\n\nclass ExportBestModelToONNX(Callback):\n    \"\"\"Export the best model checkpoint to ONNX format after training completes.\n\n    This callback finds the best checkpoint saved during training and exports it\n    to ONNX format, making it ready for deployment.\n\n    Args:\n        onnx_dir: Directory where ONNX file will be saved\n        model_name: Name to use for the ONNX file (without extension)\n        checkpoint_callback: The ModelCheckpoint callback used during training.\n            If None, the callback will try to find it automatically.\n        dynamo: Whether to use torch.dynamo for ONNX export 
(default: True)\n        external_data: Whether to save weights as external data (default: False)\n    \"\"\"\n\n    def __init__(\n        self,\n        save_dir: Path,\n        model_name: str,\n        checkpoint_callback: ModelCheckpoint | None = None,\n        *,\n        dynamo: bool = True,\n        external_data: bool = False,\n    ) -> None:\n        super().__init__()\n        self.save_dir = Path(save_dir)\n        self.model_name = model_name\n        self.checkpoint_callback = checkpoint_callback\n        self.dynamo = dynamo\n        self.external_data = external_data\n\n    @override\n    def on_fit_end(self, trainer: Trainer, pl_module: LightningModule) -> None:\n        \"\"\"Export the best model to ONNX when training finishes.\"\"\"\n        # Find the checkpoint callback if not provided\n        checkpoint_callback = self.checkpoint_callback\n        if checkpoint_callback is None:\n            for callback in trainer.callbacks:  # type: ignore\n                if isinstance(callback, ModelCheckpoint):\n                    checkpoint_callback = callback\n                    break\n\n        if checkpoint_callback is None:\n            logging.warning(\"No ModelCheckpoint callback found. Skipping ONNX export.\")\n            return\n\n        # Get the best model path\n        best_model_path = Path(checkpoint_callback.best_model_path)\n        if not best_model_path.exists():\n            logging.warning(\"No best model checkpoint available. 
Skipping ONNX export.\")\n            return\n\n        logging.info(\"Loading best checkpoint from: %s\", best_model_path)\n\n        # Load the best checkpoint\n        from model import TimmModel\n\n        best_model = TimmModel.load_from_checkpoint(best_model_path, model_name=self.model_name)\n        # Freeze and prepare model for export\n        best_model.freeze()\n        if hasattr(best_model, \"use_compile\"):\n            best_model.use_compile = False  # type: ignore\n\n        # Create ONNX directory\n        save_path = self.save_dir / f\"{best_model_path.stem}.onnx\"\n\n        logging.info(\"Exporting best model to ONNX: %s\", save_path)\n\n        # Export to ONNX\n        best_model.to_onnx(\n            save_path,\n            best_model.example_input_array,\n            dynamo=self.dynamo,\n            external_data=self.external_data,\n        )\n\n        logging.info(\"Successfully exported best model to: %s\", save_path)\n"
  },
  {
    "path": "python/dataset.py",
    "content": "from os import process_cpu_count\nfrom typing import override\n\nfrom datasets import load_dataset\nfrom lightning import LightningDataModule\nfrom torch.utils.data import DataLoader\n\n\nclass MathSymbolDataModule(LightningDataModule):\n    def __init__(\n        self,\n        image_size: int,\n        batch_size: int = 64,\n        num_workers: int = process_cpu_count() or 1,\n    ):\n        from proc_data import DATASET_REPO\n        from torch import float32 as t_float32\n        from torchvision.transforms import v2\n\n        super().__init__()\n        self.batch_size = batch_size\n        self.num_workers = num_workers\n        self.image_size = image_size\n        self.dataset_repo = DATASET_REPO\n\n        self.eval_transform = v2.Compose([v2.ToImage(), v2.ToDtype(dtype=t_float32, scale=True)])\n        self.train_transform = v2.Compose(\n            [\n                v2.ToImage(),\n                # augmentations\n                v2.RandomAffine(\n                    degrees=10,  # type: ignore[arg-type]\n                    translate=(0.1, 0.1),\n                    shear=10,\n                ),\n                v2.ToDtype(dtype=t_float32, scale=True),\n            ]\n        )\n\n    @override\n    def prepare_data(self):\n        load_dataset(self.dataset_repo)\n\n    @override\n    def setup(self, stage: str | None = None):\n        from datasets import Array2D\n        from proc_data import rasterize_strokes\n\n        def _rasterize_strokes_batched(batch, image_size):\n            batch[\"image\"] = [rasterize_strokes(strokes, image_size) for strokes in batch[\"strokes\"]]\n            return batch\n\n        dataset = (\n            load_dataset(self.dataset_repo)\n            .map(\n                _rasterize_strokes_batched,\n                batched=True,\n                remove_columns=[\"strokes\", \"source\"],\n                num_proc=self.num_workers,\n                fn_kwargs={\"image_size\": self.image_size},\n         
   )\n            .cast_column(\n                \"image\",\n                Array2D(shape=(self.image_size, self.image_size), dtype=\"uint8\"),\n            )\n            .with_format(\"torch\")\n        )\n\n        if stage == \"fit\":\n            self.train_dataset = dataset[\"train\"]\n            self.val_dataset = dataset[\"val\"]\n\n        if stage == \"test\" or stage is None:\n            self.test_dataset = dataset[\"test\"]\n\n    @override\n    def train_dataloader(self):\n        return DataLoader(\n            self.train_dataset,  # type: ignore\n            batch_size=self.batch_size,\n            shuffle=True,\n            num_workers=self.num_workers,\n            pin_memory=True,\n            persistent_workers=self.num_workers > 0,\n        )\n\n    @override\n    def val_dataloader(self):\n        return DataLoader(\n            self.val_dataset,  # type: ignore\n            batch_size=self.batch_size,\n            shuffle=False,\n            num_workers=self.num_workers,\n            pin_memory=True,\n            persistent_workers=self.num_workers > 0,\n        )\n\n    @override\n    def test_dataloader(self):\n        return DataLoader(\n            self.test_dataset,  # type: ignore\n            batch_size=self.batch_size,\n            shuffle=False,\n            num_workers=self.num_workers,\n            pin_memory=True,\n            persistent_workers=self.num_workers > 0,\n        )\n\n    @override\n    def on_after_batch_transfer(self, batch, dataloader_idx):\n        # when batch is not a dict, means its not from dataloader, do nothing.\n        if isinstance(batch, dict) and self.trainer:\n            from lightning.pytorch.trainer.states import RunningStage\n            from torch import uint8 as t_uint8\n\n            original_images = batch[\"image\"].to(dtype=t_uint8).unsqueeze(1)\n            match self.trainer.state.stage:\n                case RunningStage.TRAINING:\n                    batch[\"image\"] = 
self.train_transform(original_images)\n                case _:\n                    batch[\"image\"] = self.eval_transform(original_images)\n\n        return batch\n"
  },
  {
    "path": "python/model.py",
    "content": "from abc import abstractmethod\nfrom typing import override\n\nimport torch\nfrom lightning import LightningModule\nfrom timm import create_model\nfrom torch import Tensor, nn, optim\nfrom torch.optim.lr_scheduler import (\n    CosineAnnealingLR,\n    LinearLR,\n    SequentialLR,\n)\nfrom torchmetrics import Accuracy\nfrom train import ModelName\n\n\nclass BaseModel(LightningModule):\n    \"\"\"Base class for math symbol recognition models.\"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        image_size: int,\n        total_epochs: int,\n        warmup_epochs: int = 5,\n        learning_rate: float = 4e-4,\n        *,\n        use_compile: bool = False,\n    ):\n        super().__init__()\n        self.criterion = nn.CrossEntropyLoss()\n        self.acc_top1 = Accuracy(task=\"multiclass\", num_classes=num_classes, top_k=1)\n        self.acc_top3 = Accuracy(task=\"multiclass\", num_classes=num_classes, top_k=3)\n        self.use_compile = use_compile\n        self.learning_rate = learning_rate\n        self.total_epochs = total_epochs\n        self.warm_up_epochs = warmup_epochs\n        self.example_input_array: Tensor = torch.randn(1, 1, image_size, image_size)\n\n    @abstractmethod\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward pass - must be implemented by subclasses.\"\"\"\n\n    @override\n    def training_step(self, batch, batch_idx=0):\n        image, label = batch[\"image\"], batch[\"label\"]\n        pred = self.forward(image)\n        loss = self.criterion(pred, label)\n        self.log(\"train_loss\", loss)\n        self.log(\"train_acc\", self.acc_top1(pred, label))\n        return loss\n\n    @override\n    def validation_step(self, batch, batch_idx=0):\n        image, label = batch[\"image\"], batch[\"label\"]\n        pred = self.forward(image)\n        loss = self.criterion(pred, label)\n        self.log(\"val_loss\", loss, prog_bar=True)\n        self.log(\"val_acc\", 
self.acc_top1(pred, label), prog_bar=True)\n        self.log(\"val_top3\", self.acc_top3(pred, label))\n        return loss\n\n    def test_step(self, batch, batch_idx=0):  # noqa: ARG002\n        image, label = batch[\"image\"], batch[\"label\"]\n        pred = self.forward(image)\n        self.log(\"test_acc\", self.acc_top1(pred, label), prog_bar=True)\n        self.log(\"test_top3\", self.acc_top3(pred, label))\n        return pred\n\n    def configure_optimizers(self):\n        decay = []\n        no_decay = []\n        for name, param in self.named_parameters():\n            if not param.requires_grad:\n                continue\n            # Check for bias, norm, or batchnorm layers to exclude from decay\n            if param.ndim <= 1 or name.endswith(\".bias\") or \"norm\" in name or \"bn\" in name:\n                no_decay.append(param)\n            else:\n                decay.append(param)\n\n        optim_groups = [\n            {\"params\": decay, \"weight_decay\": 0.06},\n            {\"params\": no_decay, \"weight_decay\": 0.0},\n        ]\n\n        optimizer = optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.999), eps=1e-7)\n\n        warmup_scheduler = LinearLR(\n            optimizer,\n            total_iters=self.warm_up_epochs,\n        )\n\n        decay_scheduler = CosineAnnealingLR(optimizer, T_max=(self.total_epochs - self.warm_up_epochs), eta_min=1e-6)\n\n        scheduler = SequentialLR(\n            optimizer,\n            schedulers=[warmup_scheduler, decay_scheduler],\n            milestones=[self.warm_up_epochs],\n        )\n\n        return {\n            \"optimizer\": optimizer,\n            \"lr_scheduler\": {\n                \"scheduler\": scheduler,\n                \"interval\": \"epoch\",\n                \"monitor\": \"val_loss\",\n            },\n        }\n\n\nclass TimmModel(BaseModel):\n    def __init__(\n        self,\n        num_classes: int,\n        model_name: ModelName,\n        total_epochs: 
int,\n        image_size: int,\n        warmup_epochs: int = 5,\n        learning_rate: float = 0.002,\n        *,\n        use_compile: bool = False,\n        use_tensorrt: bool = True,\n    ):\n        super().__init__(\n            num_classes=num_classes,\n            image_size=image_size,\n            total_epochs=total_epochs,\n            warmup_epochs=warmup_epochs,\n            learning_rate=learning_rate,\n            use_compile=use_compile,\n        )\n        self.save_hyperparameters(\n            \"num_classes\",\n            \"model_name\",\n            \"warmup_epochs\",\n            \"total_epochs\",\n            \"image_size\",\n            \"learning_rate\",\n        )\n        model = create_model(\n            model_name,\n            num_classes=num_classes,\n            in_chans=1,\n            aa_layer=\"blurpc\",\n            drop_rate=0.15,\n            exportable=True,\n        )\n        self.model = model.to(memory_format=torch.channels_last)  # type: ignore\n\n        if use_tensorrt:\n            import torch_tensorrt  # noqa: F401\n\n        self.model_opt = torch.compile(\n            self.model,\n            backend=\"tensorrt\" if use_tensorrt else \"inductor\",\n            options={\"triton.cudagraphs\": True, \"shape_padding\": True},\n            dynamic=False,\n        )\n\n        self.model_name: str = model_name\n\n    def forward(self, x):\n        x = x.to(memory_format=torch.channels_last)\n        if self.use_compile:\n            return self.model_opt(x)\n        return self.model(x)\n"
  },
  {
    "path": "python/proc_data.py",
    "content": "\"\"\"Preprocess training datasets, helping functions and related constants/types.\"\"\"\n\nfrom __future__ import annotations\n\nfrom enum import StrEnum\nfrom functools import cache\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Literal, cast\n\nimport typer\nfrom msgspec import Struct\n\nif TYPE_CHECKING:\n    import polars as pl\n\ntype Point = tuple[float, float]\ntype Stroke = list[Point]\ntype Strokes = list[Stroke]\ntype SplitName = Literal[\"train\", \"test\", \"val\"]\n\n\n# Constants\nDATASET_ROOT = Path(\"build/dataset\")\nDATA_ROOT = Path(\"build/data\")\nEXTERNAL_DATA_PATH = Path(\"build/raw_data\")\nMATH_WRITING_DATA_PATH = EXTERNAL_DATA_PATH / \"mathwriting\"\nDETEXIFY_DATA_PATH = EXTERNAL_DATA_PATH / \"detexify\"\nCONTRIB_DATA = Path(\"build/dataset.json\")\nDATASET_REPO = \"Cloud0310/detypify-datasets\"\nTEX_TO_TYP_PATH = Path(__file__).parent / \"tex_to_typ.yaml\"\nRAW_POINT_LENGTH = 3\n\n\n# Structs\nclass TypstSymInfo(Struct, kw_only=True, omit_defaults=True):\n    char: str\n    names: list[str]\n    latex_name: str | None = None\n    markup_shorthand: str | None = None\n    math_shorthand: str | None = None\n    accent: bool = False\n    alternates: list[str] | None = None\n\n\nclass UnmappedSymbols(Struct, kw_only=True, omit_defaults=True):\n    name: str\n    unmapped: set[str] | None = None\n\n\nclass DetexifySymInfo(Struct, kw_only=True, omit_defaults=True):\n    command: str\n    # package: str | None = None\n    # mathmode: bool\n    # textmode: bool\n    id: str\n    # css_class: str\n\n\nclass MathSymbolSample(Struct):\n    label: str\n    symbol: Strokes\n\n\nclass DataSetName(StrEnum):\n    mathwriting = \"mathwriting\"\n    detexify = \"detexify\"\n    contrib = \"contrib\"\n\n\n# Helper functions\n@cache\ndef is_invisible(c: str) -> bool:\n    from unicodedata import category\n\n    return category(c) in {\"Zs\", \"Cc\", \"Cf\"}\n\n\n@cache\ndef get_logger():\n    import logging\n\n    logger = 
logging.getLogger(name=__name__)\n    logger.setLevel(logging.INFO)\n\n    return logger\n\n\n@cache\ndef get_typst_symbol_info() -> list[TypstSymInfo]:\n    \"\"\"Parses the Typst symbol page to extract symbol information.\n\n    Retrieves the HTML content from the Typst documentation (downloading it if\n    necessary) and parses it to find symbol names, characters, and their LaTeX\n    equivalents.\n\n    Returns:\n        A list of `TypstSymInfo` objects containing details for each symbol.\n    \"\"\"\n\n    import re\n    from urllib.request import urlopen\n\n    page_url = \"https://typst.app/docs/reference/symbols/sym/\"\n    with urlopen(page_url) as resp:\n        page_data = resp.read()\n\n    from bs4 import BeautifulSoup\n\n    sym_info = {}\n    if page_data:\n        soup = BeautifulSoup(page_data, \"lxml\")\n        for li in soup.find_all(\"li\", id=re.compile(\"^symbol-\")):\n            name = li[\"id\"][len(\"symbol-\") :]\n            char = li[\"data-value\"][0]\n            if is_invisible(char) or li.get(\"data-deprecation\"):\n                # We don't care about invisible chars and deprecated names.\n                continue\n            if char in sym_info:\n                # Repeated symbols. Merge names.\n                sym_info[char].names.append(name)\n            else:\n                latex_name, markup_shorthand, math_shorthand, alternates = (\n                    li.get(\"data-latex-name\"),\n                    li.get(\"data-markup-shorthand\"),\n                    li.get(\"data-math-shorthand\"),\n                    li.get(\"data-alternates\", \"\"),\n                )\n\n                # New symbols. 
Add to map.\n                sym_info[char] = TypstSymInfo(\n                    char=char,\n                    names=[cast(\"str\", name)],\n                    latex_name=cast(\"str | None\", latex_name),\n                    markup_shorthand=cast(\"str | None\", markup_shorthand),\n                    math_shorthand=cast(\"str | None\", math_shorthand),\n                    accent=li.get(\"accent\") == \"true\",\n                    alternates=cast(\"str\", alternates).split(),\n                )\n    else:\n        get_logger().warning(\"Unable to retrieve page data.\")\n\n    return list(sym_info.values())\n\n\ndef rasterize_strokes(strokes: Strokes, output_size: int):\n    \"\"\"\n    Normalizes vector strokes and rasterizes them into a binary NumPy array.\n\n    Args:\n        strokes: List of strokes, where each stroke is a list of (x, y) coordinates.\n        output_size: The width and height of the output square grid.\n\n    Returns:\n        np.ndarray: A (size, size) uint8 array.\n                    Background is 0 (black), Strokes are 255 (white).\n    \"\"\"\n\n    import cv2\n    import numpy as np\n\n    if not strokes:\n        return np.zeros((output_size, output_size), dtype=np.uint8)\n\n    stroke_arrays = [np.array(s, dtype=np.float32) for s in strokes if s]\n\n    if not stroke_arrays:\n        return np.zeros((output_size, output_size), dtype=np.uint8)\n\n    all_points = np.vstack(stroke_arrays)\n\n    min_x, min_y = all_points.min(axis=0)\n    max_x, max_y = all_points.max(axis=0)\n    padding = 10\n    target_size = output_size - (2 * padding)\n\n    width = max(max_x - min_x, max_y - min_y)\n    scale = target_size / width if width > 1e-6 else 1.0  # noqa: PLR2004\n    center_x = (min_x + max_x) / 2\n    center_y = (min_y + max_y) / 2\n\n    # In-place transformation of the big array\n    all_points = ((all_points - [center_x, center_y]) * scale) + [\n        output_size / 2,\n        output_size / 2,\n    ]\n\n    lengths = [len(a) for 
a in stroke_arrays]\n    split_indices = np.cumsum(lengths)[:-1]\n    normalized_strokes = np.split(all_points.astype(np.int32), split_indices)\n\n    canvas = np.zeros((output_size, output_size), dtype=np.uint8)\n\n    # decrease to increase the stroke thickness\n    # thicker stroke for better feature extraction\n    thickness_factor = 25\n    thickness = max(1, output_size // thickness_factor)\n    cv2.polylines(canvas, normalized_strokes, isClosed=False, color=255, thickness=thickness)\n\n    return canvas\n\n\n@cache\ndef get_dataset_classes(dataset_name: str) -> list[str]:\n    from datasets import load_dataset\n\n    classes: set[str] = set()\n    dataset = load_dataset(dataset_name)\n    for split in dataset:\n        classes.update(dataset[split].features[\"label\"].names)\n    return sorted(classes)\n\n\n@cache\ndef get_tex_typ_map() -> dict[str, TypstSymInfo]:\n    \"\"\"Creates a mapping from TeX command names to Typst symbol information.\n\n    Combines mappings from the Typst symbol page (via `get_typst_symbol_info`)\n    and a manual fallback dictionary (`TEX_TO_TYP`).\n\n    Returns:\n        A dictionary where keys are TeX commands (e.g., \"\\\\alpha\") and values\n        are `TypstSymInfo` objects.\n    \"\"\"\n    typ_sym_info = get_typst_symbol_info()\n    # mapping for symbol name to unicode char\n    tex_to_typ = {s.latex_name: s for s in typ_sym_info if s.latex_name is not None}\n    name_to_typ = {name: s for s in typ_sym_info for name in s.names}\n\n    with TEX_TO_TYP_PATH.open(\"rb\") as f:\n        from msgspec.yaml import decode\n\n        manual_mapping = decode(f.read(), type=dict[str, str])\n\n    tex_to_typ |= {k: name_to_typ[v] for k, v in manual_mapping.items()}\n    return tex_to_typ\n\n\n@cache\ndef get_xml_parser():\n    \"\"\"Cached xml parser for reuse to increase performance\"\"\"\n    from lxml import etree\n\n    return etree.XMLParser()\n\n\ndef parse_mathwriting_symbol(\n    filepath: Path,\n) -> MathSymbolSample | 
None:\n    \"\"\"Parses a single InkML file to extract the raw LaTeX label and stroke data.\n\n    This version does NOT map to Typst symbols, keeping the original LaTeX label.\n\n    Args:\n        filepath: Path to the .inkml file.\n\n    Returns:\n        A `MathSymbolSample` with latex_label as label if successful.\n        None if no label is found.\n    \"\"\"\n\n    from lxml import etree\n\n    # parsing\n    root = etree.parse(filepath, parser=get_xml_parser()).getroot()\n    namespace = {\"ink\": \"http://www.w3.org/2003/InkML\"}\n    tex_label = root.findtext(\".//ink:annotation[@type='label']\", namespaces=namespace)\n\n    # couldn't find data, return None\n    if not tex_label:\n        return None\n\n    return MathSymbolSample(\n        tex_label,\n        [\n            [\n                (float(x), float(y))\n                for x, y, _ in (\n                    # keep only x,y, discard time\n                    point_str.split()\n                    for point_str in trace.text.split(\",\")\n                    if len(point_str.split()) == RAW_POINT_LENGTH\n                )\n            ]\n            for trace in root.iterfind(\".//ink:trace\", namespaces=namespace)\n            if trace.text\n        ],\n    )\n\n\n# Raw dataset functions\ndef collect_mathwriting_raw():\n    \"\"\"Collects raw MathWriting data with LaTeX labels (not mapped to Typst).\n\n    Parses InkML files in parallel to extract strokes and original LaTeX labels.\n\n    Returns:\n        A Polars LazyFrame with columns:\n            - latex_label: Original LaTeX command string\n            - symbol: List of strokes as arrays of (x, y) coordinates\n    \"\"\"\n\n    from concurrent.futures import ProcessPoolExecutor\n\n    import polars as pl\n\n    label_acc = []\n    data_acc = []\n\n    with ProcessPoolExecutor() as executor:\n        results = executor.map(\n            parse_mathwriting_symbol,\n            MATH_WRITING_DATA_PATH.glob(\"*.inkml\"),\n            
chunksize=500,\n        )\n        for result in results:\n            if result is None:\n                continue\n            label_acc.append(result.label)\n            data_acc.append(result.symbol)\n\n    del results\n    pl_schema = {\n        \"latex_label\": pl.String,\n        \"symbol\": pl.List(pl.List(pl.Array(pl.Float32, 2))),\n    }\n\n    return pl.DataFrame({\"latex_label\": label_acc, \"symbol\": data_acc}, schema=pl_schema).lazy()\n\n\ndef collect_detexify_raw():\n    \"\"\"Collects raw Detexify data with original command labels (not mapped to Typst).\n\n    Reads the raw JSON data and formats strokes without Typst symbol mapping.\n\n    Returns:\n        A Polars LazyFrame with columns:\n            - latex_label: Original LaTeX command string\n            - symbol: List of strokes as arrays of (x, y) coordinates\n    \"\"\"\n\n    import polars as pl\n    from msgspec import json\n\n    pl.Config.set_engine_affinity(\"streaming\")\n\n    with (DETEXIFY_DATA_PATH / \"symbols.json\").open(\"rb\") as f:\n        tex_sym_info = json.decode(f.read(), type=list[DetexifySymInfo])\n\n    # Create mapping from key to command (latex label)\n    key_to_command = {x.id: x.command for x in tex_sym_info}\n\n    with (DETEXIFY_DATA_PATH / \"detexify.json\").open(\"rb\") as f:\n        # Schema: list of (key, strokes)\n        data = json.decode(f.read(), type=list[tuple[str, list[list[tuple[float, float, float]]]]])\n\n    raw_data_schema = {\n        \"key\": pl.String,\n        \"strokes\": pl.List(pl.List(pl.Array(pl.Float32, 3))),\n    }\n\n    raw_lf = pl.DataFrame(data, schema=raw_data_schema, orient=\"row\").lazy()\n    del data\n\n    # Prepare Mapping\n    mapping_lf = pl.DataFrame(\n        {\n            \"key\": list(key_to_command.keys()),\n            \"latex_label\": list(key_to_command.values()),\n        }\n    ).lazy()\n\n    processed_lf = raw_lf.join(mapping_lf, on=\"key\", how=\"left\")\n\n    return (\n        
processed_lf.filter(pl.col(\"latex_label\").is_not_null())\n        .select(\n            [\n                pl.col(\"latex_label\"),\n                pl.col(\"strokes\")\n                # Drop time (keep only x, y)\n                .list.eval(pl.element().list.eval(pl.element().arr.head(2).list.to_array(2)))\n                .alias(\"symbol\"),\n            ]\n        )\n        # Drop empty samples\n        .filter(pl.col(\"symbol\").list.len() > 0)\n    )\n\n\ndef collect_contrib_raw():\n    \"\"\"Collects raw contributed data with symbol names (not mapped to Typst).\n\n    Reads the contrib JSON and decodes strokes without Typst symbol mapping.\n\n    Returns:\n        A Polars LazyFrame with columns:\n            - latex_label: Original symbol name string\n            - symbol: List of strokes as arrays of (x, y) coordinates\n    \"\"\"\n\n    import polars as pl\n    from msgspec import json\n\n    # Load Data\n    with CONTRIB_DATA.open(\"rb\") as f:\n        # Schema: list of dicts {sym: str, strokes: json_string}\n        data = json.decode(f.read(), type=list[dict[str, str]])\n\n    # Decode strokes and rename sym to latex_label\n    processed_lf = (\n        pl.DataFrame(data)\n        .lazy()\n        .rename({\"sym\": \"latex_label\"})\n        # Decode strokes\n        .with_columns(\n            pl.col(\"strokes\")\n            .map_elements(\n                json.decode,\n                return_dtype=pl.List(pl.List(pl.Array(pl.Float32, 2))),\n            )\n            .alias(\"symbol\")\n        )\n        .drop(\"strokes\")\n    )\n\n    return (\n        processed_lf.filter(pl.col(\"latex_label\").is_not_null())\n        .select(\n            [\n                pl.col(\"latex_label\"),\n                pl.col(\"symbol\"),\n            ]\n        )\n        # Drop empty samples\n        .filter(pl.col(\"symbol\").list.len() > 0)\n    )\n\n\ndef create_raw_dataset(\n    dataset_names: list[DataSetName],\n) -> pl.DataFrame:\n    \"\"\"Creates and 
uploads raw dataset with original LaTeX/command labels.\n\n    This dataset is intended for CI/CD processing pipelines. It contains\n    the original labels before Typst symbol mapping.\n\n    Args:\n        dataset_names: List of dataset names to include (\"mathwriting\", \"detexify\", \"contrib\")\n    \"\"\"\n\n    from os import process_cpu_count\n\n    import polars as pl\n    from datasets import (\n        Dataset,\n        DatasetInfo,\n        Features,\n        List,\n        Sequence,\n        Value,\n    )\n\n    logger = get_logger()\n\n    logger.info(f\"--- Creating Raw Dataset: {','.join(dataset_names)} ---\")\n\n    lfs = []\n    for dataset_name in dataset_names:\n        match dataset_name:\n            case \"mathwriting\":\n                math_writing_lf = collect_mathwriting_raw()\n                # Add source column\n                math_writing_lf = math_writing_lf.with_columns(pl.lit(\"mathwriting\").alias(\"source\"))\n                lfs.append(math_writing_lf)\n            case \"detexify\":\n                detexify_lf = collect_detexify_raw()\n                # Add source column\n                detexify_lf = detexify_lf.with_columns(pl.lit(\"detexify\").alias(\"source\"))\n                lfs.append(detexify_lf)\n            case \"contrib\":\n                contrib_lf = collect_contrib_raw()\n                # Add source column\n                contrib_lf = contrib_lf.with_columns(pl.lit(\"contrib\").alias(\"source\"))\n                lfs.append(contrib_lf)\n\n    # Concatenate all datasets\n    lf = pl.concat(lfs)\n\n    # Collect and shuffle\n    df = lf.collect().sample(fraction=1.0, shuffle=True, seed=114514)\n\n    features: Features = Features(\n        {\n            \"latex_label\": Value(\"string\"),\n            \"symbol\": List(List(Sequence(Value(\"float32\"), length=2))),\n            \"source\": Value(\"string\"),\n        }\n    )  # type: ignore\n\n    description = (\n        \"Raw detypify dataset with original 
LaTeX labels, \"\n        \"composed by mathwriting, detexify and contributed datasets. \"\n        \"Intended for CI/CD processing pipelines.\"\n    )\n\n    dataset_info = DatasetInfo(description=description, features=features)\n    dataset = Dataset.from_polars(df, info=dataset_info)\n\n    logger.info(\"  -> Uploading raw dataset to %s as 'raw_data'...\", DATASET_REPO)\n    dataset.push_to_hub(repo_id=DATASET_REPO, config_name=\"raw\", split=\"data\", num_proc=process_cpu_count() or 1)\n    logger.info(\"--- Done. Raw dataset uploaded to %s (config: raw) ---\", DATASET_REPO)\n\n    # Save locally\n    raw_dataset_path = DATASET_ROOT / \"raw\"\n    raw_dataset_path.mkdir(parents=True, exist_ok=True)\n\n    logger.info(\"  -> Saving raw dataset locally to %s...\", raw_dataset_path)\n    df.write_parquet(\n        raw_dataset_path / \"data.parquet\",\n        compression=\"zstd\",\n    )\n    logger.info(\"--- Done. Raw dataset saved to %s ---\", raw_dataset_path)\n    return df\n\n\ndef load_raw_dataset(dataset_names: list[DataSetName]) -> pl.DataFrame:\n    \"\"\"Load raw dataset from HF config 'raw'.\n\n    Raises:\n        ValueError: If raw dataset not found on HF with helpful message.\n    \"\"\"\n    import polars as pl\n    from datasets import Dataset, load_dataset\n\n    df = Dataset.to_polars(load_dataset(DATASET_REPO, name=\"raw\", split=\"data\"))\n    if not isinstance(df, pl.DataFrame):\n        err_msg = \"Raw data is not pl.DataFrame\"\n        raise TypeError(err_msg)\n\n    # Filter by source if dataset_names provided\n    if dataset_names:\n        df = df.filter(pl.col(\"source\").is_in(dataset_names))\n\n    return df\n\n\ndef remap_from_raw(\n    dataset_names: list[DataSetName],\n    data: pl.DataFrame | None = None,\n) -> tuple[pl.LazyFrame, dict[DataSetName, set[str]]]:\n    \"\"\"High-level function: Load raw data and apply fresh mapping.\n\n    When Typst reference changes, reload raw data and remap.\n    \"\"\"\n    import polars as 
pl\n\n    logger = get_logger()\n\n    if data is None:\n        logger.info(\"  -> Loading raw dataset from HuggingFace...\")\n        data = load_raw_dataset(dataset_names)\n\n    logger.info(f\"  -> Applying LaTeX→Typst mapping to {len(data)} samples...\")\n\n    tex_to_typ = get_tex_typ_map()\n\n    tex_to_char = {k: v.char for k, v in tex_to_typ.items()}\n\n    # Apply mapping\n    mapped_df = data.with_columns(\n        [\n            pl.col(\"latex_label\").replace_strict(tex_to_char, default=None).alias(\"label\"),\n        ]\n    )\n\n    # Track unmapped per source\n    unmapped_df = mapped_df.filter(pl.col(\"label\").is_null()).group_by(\"source\").agg(pl.col(\"latex_label\").unique())\n    unmapped = {row[\"source\"]: set(row[\"latex_label\"]) for row in unmapped_df.to_dicts()}\n\n    # Filter out unmapped, select final columns, convert strokes format\n    result_df = (\n        mapped_df.filter(pl.col(\"label\").is_not_null())\n        .select(\n            [\n                pl.col(\"label\"),\n                pl.col(\"symbol\").alias(\"strokes\"),  # Rename to match processed format\n                pl.col(\"source\"),\n            ]\n        )\n        .filter(pl.col(\"strokes\").list.len() > 0)  # Drop empty\n    )\n\n    return result_df.lazy(), unmapped\n\n\ndef generate_data_info(classes: list[str]) -> None:\n    \"\"\"Generate the infer and contrib, unmapped data info\n\n    Args:\n        classes: Set of character classes to generate infer.json for.\n    \"\"\"\n    from msgspec import json\n\n    DATA_ROOT.mkdir(exist_ok=True, parents=True)\n\n    infer_path = DATA_ROOT / \"infer.json\"\n    contrib_path = DATA_ROOT / \"contrib.json\"\n    unmapped_path = DATA_ROOT / \"unmapped_latex_symbols.json\"\n\n    # generate infer.json and contrib.json files\n    typ_sym_info = get_typst_symbol_info()\n    infer = []\n    contrib = {n: s.char for s in typ_sym_info for n in s.names}\n    chr_to_sym = {s.char: s for s in typ_sym_info}\n    for c in 
classes:\n        if c not in chr_to_sym:\n            continue\n        sym = chr_to_sym[c]\n        info = {\"char\": sym.char, \"names\": sym.names}\n        if sym.markup_shorthand and sym.math_shorthand:\n            info[\"shorthand\"] = sym.markup_shorthand\n        elif sym.markup_shorthand:\n            info[\"markupShorthand\"] = sym.markup_shorthand\n        elif sym.math_shorthand:\n            info[\"mathShorthand\"] = sym.math_shorthand\n        infer.append(info)\n    for path, info_data in [(infer_path, infer), (contrib_path, contrib)]:\n        with path.open(\"wb\") as f:\n            f.write(json.encode(info_data))\n        info = f\"Generated data at {path}\"\n        get_logger().info(info)\n\n    _, unmapped = remap_from_raw(dataset_names=[DataSetName.mathwriting, DataSetName.detexify])\n    with (unmapped_path).open(\"wb\") as f:\n        f.write(json.format(json.encode(unmapped)))\n\n\ndef create_dataset(\n    dataset_names: list[DataSetName],\n    raw_data: pl.DataFrame | None = None,\n    split_ratio: tuple[float, float, float] = (0.8, 0.1, 0.1),\n) -> None:\n    \"\"\"Orchestrates the creation of a math symbol dataset.\n\n    1. Loads symbol mappings using the specific project logic.\n    2. Dispatches data loading to specific construct_* functions.\n    3. Performs a stratified train/test/val split.\n    4. Saves data as sharded files (Parquet/Vortex) and writes metadata.\n\n    Args:\n        dataset_names: The names of the dataset to use.\n        raw_data: The dataframe. 
Optional, if nothing, load from huggingface\n        split_ratio: A tuple defining the ratio for (train, test, val) splits.\n    \"\"\"\n\n    import polars as pl\n    from datasets import (\n        ClassLabel,\n        Dataset,\n        DatasetInfo,\n        Features,\n        LargeList,\n        List,\n        Value,\n    )\n\n    logger = get_logger()\n\n    logger.info(f\"--- Creating Datasets: {','.join(dataset_names)} ---\")\n\n    # load from raw data\n    lf, unmapped = remap_from_raw(dataset_names, raw_data)\n\n    dataset_path = DATASET_ROOT\n    split_names: list[SplitName] = [\"train\", \"test\", \"val\"]\n\n    from shutil import rmtree as rmdir\n\n    if dataset_path.exists():\n        rmdir(dataset_path)\n    dataset_path.mkdir(parents=True, exist_ok=True)\n\n    train_r, test_r, _ = split_ratio\n    t1 = train_r\n    t2 = train_r + test_r\n\n    logger.info(\"  -> Shuffling and splitting data...\")\n\n    # Add Stratified Indices\n    # Casting label to Utf8 ensures consistency across datasets.\n    base_lf = (\n        lf.with_columns(pl.col(\"label\").cast(pl.Utf8))\n        .collect()\n        .sample(fraction=1.0, shuffle=True, seed=114514)\n        .lazy()\n        .with_columns(\n            [\n                pl.len().over(\"label\").alias(\"n\"),\n                pl.int_range(0, pl.len()).over(\"label\").alias(\"idx\"),\n            ]\n        )\n    )\n    logger.info(\"  -> Generating metadata...\")\n    # Use base_lf (materialized view logic) for fast stats\n    stats_df = base_lf.select(pl.col(\"label\")).collect().get_column(\"label\").value_counts().sort(\"label\")\n\n    # Split and shuffle data\n    train_lf = base_lf.filter(pl.col(\"idx\") < (pl.col(\"n\") * t1)).drop([\"n\", \"idx\"]).sort(\"label\").collect()\n    test_lf = (\n        base_lf.filter((pl.col(\"idx\") >= (pl.col(\"n\") * t1)) & (pl.col(\"idx\") < (pl.col(\"n\") * t2)))\n        .drop([\"n\", \"idx\"])\n        .sort(\"label\")\n        .collect()\n    )\n    val_lf 
= base_lf.filter(pl.col(\"idx\") >= (pl.col(\"n\") * t2)).drop([\"n\", \"idx\"]).sort(\"label\").collect()\n\n    global_features: Features = Features(\n        {\n            \"label\": ClassLabel(names=stats_df[\"label\"].to_list()),\n            \"strokes\": LargeList(LargeList(List(Value(\"float32\")))),\n            \"source\": Value(\"string\"),\n        }\n    )  # type: ignore\n\n    for df, split in zip([train_lf, test_lf, val_lf], split_names, strict=True):\n        logger.info(\"  -> Uploading split: %s... to huggingface.\", split)\n        from os import process_cpu_count\n\n        def encode_labels(batch):\n            class_feature = global_features[\"label\"]\n            batch[\"label\"] = [class_feature.str2int(label) for label in batch[\"label\"]]\n            return batch\n\n        description = \"Detypify dataset, composed by mathwriting, detexify and contributed datasets\"\n        dataset_info = DatasetInfo(description=description)\n        dataset = cast(\n            Dataset,\n            Dataset.from_polars(df, info=dataset_info).map(encode_labels, batched=True).cast(features=global_features),\n        )\n        dataset.push_to_hub(repo_id=DATASET_REPO, num_proc=process_cpu_count() or 1, split=split, set_default=True)\n\n    logger.info(\"--- Done. 
Dataset uploaded to %s ---\", DATASET_REPO)\n\n\napp = typer.Typer(pretty_exceptions_show_locals=False)\n\n\n@app.command()\ndef main(\n    datasets: list[DataSetName] = typer.Option(\n        [DataSetName.detexify, DataSetName.mathwriting],\n        \"--datasets\",\n        \"-d\",\n        help=\"Datasets to process when converting data.\",\n    ),\n    convert_data: bool = typer.Option(False, help=\"Construct or upload local datasets.\"),\n    gen_info: bool = typer.Option(False, help=\"Writing symbol metadata and infer data files.\"),\n    split_ratio: tuple[float, float, float] = typer.Option(\n        (0.8, 0.1, 0.1),\n        help=\"Train/test/val split ratios for the processed dataset.\",\n    ),\n    create_raw: bool = typer.Option(\n        False,\n        help=\"Create and upload raw dataset with original LaTeX labels.\",\n    ),\n):\n    \"\"\"\n    Preprocess datasets, generate metadata, and upload results.\n    \"\"\"\n\n    dataset_names = list(set(datasets))\n\n    raw_data = None\n    if create_raw:\n        raw_data = create_raw_dataset(\n            dataset_names=dataset_names,\n        )\n\n    if convert_data:\n        create_dataset(\n            dataset_names=dataset_names,\n            raw_data=raw_data,\n            split_ratio=split_ratio,\n        )\n\n    if gen_info:\n        generate_data_info(classes=get_dataset_classes(DATASET_REPO))\n\n\nif __name__ == \"__main__\":\n    app()\n"
  },
  {
    "path": "python/review_contrib.py",
    "content": "\"\"\"Preprocess contribution from the webpage.\"\"\"\n\nimport logging\nimport shutil\nfrom pathlib import Path\n\nimport cv2\nfrom msgspec import json\nfrom PIL import Image, ImageDraw, ImageFont\nfrom proc_data import get_typst_symbol_info, rasterize_strokes\n\nOUT_DIR = Path(\"build/contrib\")\nREF_SIZE = 100  # px\n\n\ndef bold(s: str) -> str:\n    return \"\\033[1m\" + s + \"\\033[0m\"\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n    shutil.rmtree(OUT_DIR, ignore_errors=True)\n    OUT_DIR.mkdir(exist_ok=True)\n    img_size = 256\n\n    cmd = \"bunx wrangler d1 execute detypify --remote \\\n                --command='SELECT * FROM samples' --json > build/dataset.json\"\n    print(\"### Run this command to fetch data:\")\n    print(f\"### $ {bold(cmd)}\")\n    while input(\">>> Input 'done' to proceed: \") != \"done\":\n        pass\n    with Path(\"build/dataset.json\").open(\"rb\") as f:\n        samples = json.decode(f.read())[0][\"results\"]\n\n    logging.info(\"\\n### Generating images...\")\n    name_to_chr = {x.names[0]: x.char for x in get_typst_symbol_info()}\n    for s in samples:\n        id_, token, sym, strokes = s[\"id\"], s[\"token\"], s[\"sym\"], s[\"strokes\"]\n        img = rasterize_strokes(json.decode(strokes), img_size)\n        cv2.imwrite(f\"{OUT_DIR}/{sym}-{id_}-{token}.png\", img)\n\n        if not Path(f\"{OUT_DIR}/{sym}-0-0.png\").exists():\n            text = name_to_chr[sym]\n            img = Image.new(\"1\", (100, 100), \"white\")\n            draw = ImageDraw.Draw(img)\n            font = ImageFont.truetype(\"external/NewCMMath-Regular.otf\", size=80)\n            _, _, w, h = draw.textbbox((0, 0), text, font=font)\n            draw.text(((REF_SIZE - w) / 2, (REF_SIZE - h) / 2), text, font=font)\n            img.save(f\"{OUT_DIR}/{sym}-0-0.png\")\n\n    print(f\"\\n### Go through {bold(str(OUT_DIR))} and delete unwanted images\")\n    while input(\">>> 
Input 'done' to proceed: \") != \"done\":\n        pass\n\n    logging.info(\"\\n### Collecting wanted samples...\")\n    id_to_strokes = {s[\"id\"]: s[\"strokes\"] for s in samples}\n    for filename in OUT_DIR.iterdir():\n        sym, id_, _ = str(filename).rsplit(\".\", 1)[0].split(\"-\")\n        if id_ != \"0\":\n            strokes = id_to_strokes[str(id_)]\n            with Path(f\"data/dataset/{sym}.txt\").open(\"ab\") as f:\n                f.write(strokes + \"\\n\")\n\n    cmd = \"bunx wrangler d1 execute detypify --remote \\\n                --command='DELETE FROM samples WHERE id <= n'\"\n    print(\"\\n### Run this command to clean data:\")\n    print(f\"### $ {bold(cmd)}\")\n"
  },
  {
    "path": "python/tex_to_typ.yaml",
    "content": "# Alphabet\n# Double Struck Capital Letters\n\\mathds{A}: AA\n\\mathds{B}: BB\n\\mathds{C}: CC\n\\mathds{D}: DD\n\\mathds{E}: EE\n\\mathds{F}: FF\n\\mathds{G}: GG\n\\mathds{H}: HH\n\\mathds{I}: II\n\\mathds{J}: JJ\n\\mathds{K}: KK\n\\mathds{L}: LL\n\\mathds{M}: MM\n\\mathds{N}: NN\n\\mathds{O}: OO\n\\mathds{P}: PP\n\\mathds{Q}: QQ\n\\mathds{R}: RR\n\\mathds{S}: SS\n\\mathds{T}: TT\n\\mathds{U}: UU\n\\mathds{V}: VV\n\\mathds{W}: WW\n\\mathds{X}: XX\n\\mathds{Y}: YY\n\\mathds{Z}: ZZ\nAA: AA\nBB: BB\nCC: CC\nDD: DD\nEE: EE\nFF: FF\nGG: GG\nHH: HH\nII: II\nJJ: JJ\nKK: KK\nLL: LL\nMM: MM\nNN: NN\nOO: OO\nPP: PP\nQQ: QQ\nRR: RR\nSS: SS\nTT: TT\nUU: UU\nVV: VV\nWW: WW\nXX: XX\nYY: YY\nZZ: ZZ\n# Greek Capital Letters\n\\Alpha: Alpha\n\\Beta: Beta\n\\Gamma: Gamma\n\\Delta: Delta\n\\Epsilon: Epsilon\n\\Zeta: Zeta\n\\Eta: Eta\n\\Theta: Theta # TODO: no theta.alt\n\\Iota: Iota\n\\Kappa: Kappa\n\\Lambda: Lambda\n\\Mu: Mu\n\\Nu: Nu\n\\Xi: Xi\n\\Omicron: Omicron\n\\Pi: Pi\n\\Rho: Rho\n\\Sigma: Sigma\n\\Tau: Tau\n\\Upsilon: Upsilon\n\\Phi: Phi\n\\Chi: Chi\n\\Psi: Psi\n\\Omega: Omega\n# Greek Small Letters\n\\alpha: alpha\n\\beta: beta\n\\gamma: gamma\n\\delta: delta\n\\varepsilon: epsilon\n\\epsilon: epsilon.alt\n\\zeta: zeta\n\\eta: eta\n\\theta: theta\n\\vartheta: theta.alt\n\\iota: iota\n\\kappa: kappa\n\\varkappa: kappa.alt\n\\lambda: lambda\n\\mu: mu\n\\nu: nu\n\\xi: xi\n\\omicron: omicron\n\\pi: pi\n\\varpi: pi.alt\n\\rho: rho\n\\varrho: rho.alt\n\\sigma: sigma\n\\varsigma: sigma.alt\n\\tau: tau\n\\upsilon: upsilon\n\\varphi: phi\n\\phi: phi.alt\n\\chi: chi\n\\psi: psi\n\\omega: omega\n\\upalpha: alpha\n\\upbeta: beta\n\\upgamma: gamma\n\\updelta: delta\n\\upvarepsilon: epsilon\n\\upepsilon: epsilon.alt\n\\upzeta: zeta\n\\upeta: eta\n\\uptheta: theta\n\\upvartheta: theta.alt\n\\upiota: iota\n\\upkappa: kappa\n\\upvarkappa: kappa.alt\n\\uplambda: lambda\n\\upmu: mu\n\\upnu: nu\n\\upxi: xi\n\\upomicron: omicron\n\\uppi: pi\n\\upvarpi: pi.alt\n\\uprho: 
rho\n\\upvarrho: rho.alt\n\\upsigma: sigma\n\\upvarsigma: sigma.alt\n\\uptau: tau\n\\upupsilon: upsilon\n\\upvarphi: phi\n\\upphi: phi.alt\n\\upchi: chi\n\\uppsi: psi\n\\upomega: omega\n\\Upalpha: alpha\n\\Upbeta: beta\n\\Upgamma: gamma\n\\Updelta: delta\n\\Upvarepsilon: epsilon\n\\Upepsilon: epsilon.alt\n\\Upzeta: zeta\n\\Upeta: eta\n\\Uptheta: theta\n\\Upvartheta: theta.alt\n\\Upiota: iota\n\\Upkappa: kappa\n\\Upvarkappa: kappa.alt\n\\Uplambda: lambda\n\\Upmu: mu\n\\Upnu: nu\n\\Upxi: xi\n\\Upomicron: omicron\n\\Uppi: pi\n\\Upvarpi: pi.alt\n\\Uprho: rho\n\\Upvarrho: rho.alt\n\\Upsigma: sigma\n\\Upvarsigma: sigma.alt\n\\Uptau: tau\n\\Upupsilon: upsilon\n\\Upvarphi: phi\n\\Upphi: phi.alt\n\\Upchi: chi\n\\Uppsi: psi\n\\Upomega: omega\n# Hebrew Letters\n\\aleph: aleph # TODO: no beth, daleth, gimel\n# Others\n\\&: amp\n\\#: hash\n\\%: percent\n\\{: brace.l\n\\}: brace.r\n\\--: dash.en\n\\---: dash.em\n\\colon: colon\n\\degree: degree\n\\copyright: copyright\n\\textcircledP: copyright.sound\n\\textreferencemark: refmark\n\\textperthousand: permille\n\\simeq: tilde.eq\n\\circlearrowleft: arrow.ccw\n\\circlearrowright: arrow.cw\n\\dashleftarrow: arrow.l.dashed\n\\dashrightarrow: arrow.r.dashed\n\\lightning: arrow.zigzag\n\\circ: compose\n\\bowtie: join\n\\MVAt: at\n\\EUR: euro\n\\blacksquare: qed\n\\emptyset: emptyset\n\\|: bar.v.double\n\\iff: arrow.l.r.double.long\n\\bullet: bullet.op\n\\diamond: diamond.stroked\n\\earth: earth\n\\Earth: earth\n\\triangle: triangle.stroked.t\n\\smiley: smile\n# Text symbols\n\\textregistered: trademark.registered\n\\texttrademark: trademark\n\\textsection: section\n\\textparagraph: pilcrow\n\\textbullet: bullet\n\\textquestiondown: quest.inv\n\\textdollar: dollar\n\\textasciitilde: tilde.basic\n\\textasciicircum: caret\n# Currency / marks\n\\textcent: cent\n\\textyen: yen\n\\textwon: won\n\\textdong: dong\n\\textsterling: pound\n\\checkmark: checkmark\n\\maltese: maltese\n# Text punctuation / typography\n\\textdagger: 
dagger\n\\textdaggerdbl: dagger.double\n\\textemdash: dash.em\n\\textendash: dash.en\n\\textellipsis: dots.h\n# quotes\n\\textquotedbl: quote.double\n\\textquotedblleft: quote.l.double\n\\textquotedblright: quote.r.double\n\\guillemotleft: quote.chevron.l.double\n\\guillemotright: quote.chevron.r.double\n\\guilsinglleft: quote.chevron.l.single\n\\guilsinglright: quote.chevron.r.single\n"
  },
  {
    "path": "python/train.py",
    "content": "\"\"\"Train the model.\"\"\"\n\nimport logging\nfrom enum import StrEnum\n\nimport typer\n\nCUDA_AMPERE_VERSION = 8\n\n\nclass ModelName(StrEnum):\n    conv_small_035 = \"mobilenetv4_conv_small_035\"\n    conv_small_050 = \"mobilenetv4_conv_small_050\"\n    conv_small_full = \"mobilenetv4_conv_small\"\n    conv_medium = \"mobilenetv4_conv_medium\"\n    hybrid_medium_075 = \"mobilenetv4_hybrid_medium_075\"\n    hybrid_medium = \"mobilenetv4_hybrid_medium\"\n\n\nif __name__ == \"__main__\":\n    logger = logging.getLogger()\n    logger.setLevel(logging.INFO)\n    app = typer.Typer(pretty_exceptions_show_locals=False)\n\n    @app.command()\n    def main(\n        out_dir: str = typer.Option(\"build/train\", help=\"Output directory\"),\n        debug: bool = typer.Option(False, help=\"Enable debug mode\"),\n        profiling: bool = typer.Option(False, help=\"Enable performance profiler.\"),\n        dev_run: bool = typer.Option(False, help=\"Fast dev run (valid only when debug is True)\"),\n        log_pred: bool = typer.Option(True, help=\"Logging predictions to logger for review.\"),\n        init_batch_size: int = typer.Option(128, help=\"Initial batch size\"),\n        warmup_epochs: int = typer.Option(3, help=\"Number of warmup epochs\"),\n        total_epochs: int = typer.Option(40, help=\"Total number of epochs\"),\n        image_size: int = typer.Option(224, help=\"Image size (e.g., 128, 224, 256)\"),\n        find_batch_size: bool = typer.Option(False, help=\"Enable/Disable automatic batch size finding\"),\n        use_ema: bool = typer.Option(True, \"--ema/--no-ema\", help=\"Enable/Disable EMA weight averaging\"),\n        ema_decay: float = typer.Option(0.995, help=\"EMA decay rate\"),\n        ema_start_epoch: int = typer.Option(5, help=\"Epoch to start EMA\"),\n        ema_warmup: bool = typer.Option(True, \"--ema-warmup/--no-ema-warmup\", help=\"Enable/Disable EMA warmup.\"),\n        ema_warmup_gamma: float = typer.Option(25.0, 
help=\"EMA warmup gamma.\"),\n        ema_warmup_power: float = typer.Option(0.7, help=\"EMA warmup power.\"),\n        amp_precision: str = typer.Option(\"bf16-mixed\", help=\"Precision: 64, 32, 16-mixed, bf16-mixed\"),\n        use_tensorrt: bool = typer.Option(True, help=\"Use pytorch tensorrt compile backend\"),\n        models: list[ModelName] = typer.Option(\n            [\n                \"conv_small_035\",\n            ],\n            \"--models\",\n            help=\"List of models to train\",\n        ),\n    ):\n        \"\"\"Train the model.\"\"\"\n        # Collect all input arguments\n        args_dict = {\n            \"out_dir\": out_dir,\n            \"debug\": debug,\n            \"profiling\": profiling,\n            \"dev_run\": dev_run,\n            \"log_pred\": log_pred,\n            \"init_batch_size\": init_batch_size,\n            \"warmup_epochs\": warmup_epochs,\n            \"total_epochs\": total_epochs,\n            \"image_size\": image_size,\n            \"find_batch_size\": find_batch_size,\n            \"use_ema\": use_ema,\n            \"ema_decay\": ema_decay,\n            \"ema_start_epoch\": ema_start_epoch,\n            \"ema_warmup\": ema_warmup,\n            \"ema_warmup_gamma\": ema_warmup_gamma,\n            \"ema_warmup_power\": ema_warmup_power,\n            \"amp_precision\": amp_precision,\n            \"models\": models,\n        }\n\n        # Lazy import\n        from pathlib import Path\n\n        from dataset import MathSymbolDataModule\n        from lightning import Trainer\n        from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint\n        from lightning.pytorch.loggers import TensorBoardLogger\n        from lightning.pytorch.tuner.tuning import Tuner\n        from model import TimmModel\n        from msgspec import yaml\n        from proc_data import DATASET_REPO, get_dataset_classes\n        from torch import set_float32_matmul_precision\n        from torch.cuda import 
get_device_properties, is_available\n\n        out_dir_path = Path(out_dir)\n\n        classes = get_dataset_classes(DATASET_REPO)\n        model_instances: list[TimmModel] = [\n            TimmModel(\n                num_classes=len(classes),\n                model_name=model,\n                warmup_epochs=warmup_epochs,\n                total_epochs=total_epochs,\n                image_size=image_size,\n                use_tensorrt=use_tensorrt,\n            )\n            for model in models\n        ]\n\n        # define data module\n        dm = MathSymbolDataModule(\n            batch_size=init_batch_size,\n            image_size=image_size,\n        )\n\n        # for Ampere or later NVIDIA graphics only\n        if is_available():\n            if get_device_properties(0).major >= CUDA_AMPERE_VERSION:\n                # set matmul precision for speed\n                set_float32_matmul_precision(\"medium\")\n            # 20 series graphics don't support bf16 format precision\n            elif amp_precision == \"bf16-mixed\":\n                amp_precision = \"16-mixed\"\n                args_dict[\"amp_precision\"] = amp_precision\n        for model in model_instances:\n            model_name_str = (\n                model.__class__.__name__ if model.__class__.__name__ != \"TimmModel\" else str(model.model_name)\n            )\n            tb_logger = TensorBoardLogger(save_dir=out_dir_path, name=model_name_str, default_hp_metric=False)  # type: ignore\n\n            final_output_dir = Path(tb_logger.log_dir)\n            checkpoints_dir = final_output_dir / \"ckpts\"\n            train_args_path = final_output_dir / \"training_args.yaml\"\n            train_args_path.parent.mkdir(parents=True, exist_ok=True)\n\n            current_args = args_dict.copy()\n            current_args.update(\n                {\n                    \"model_name\": model_name_str,\n                    \"num_classes\": len(classes),\n                }\n            )\n\n          
  if not debug:\n                with train_args_path.open(\"wb\") as f:\n                    f.write(yaml.encode(current_args))\n\n            callbacks: list = [LearningRateMonitor(logging_interval=\"epoch\")]\n\n            # Lazy import callbacks only when needed\n            if log_pred:\n                from callbacks import LogPredictCallback\n\n                callbacks.append(LogPredictCallback(sorted(classes)))\n\n            if use_ema:\n                from callbacks import EMAWeightAveraging\n\n                callbacks.append(\n                    EMAWeightAveraging(\n                        decay=ema_decay,\n                        update_starting_at_epoch=ema_start_epoch,\n                        use_warmup=ema_warmup,\n                        warmup_gamma=ema_warmup_gamma,\n                        warmup_power=ema_warmup_power,\n                    )\n                )\n\n            # Add checkpoint callback to save best model\n            checkpoint_callback = ModelCheckpoint(\n                dirpath=checkpoints_dir,\n                save_weights_only=True,\n                filename=\"best-{epoch:02d}-{val_acc:.4f}\",\n                monitor=\"val_acc\",\n                mode=\"max\",\n                save_top_k=1,\n                save_last=True,\n            )\n            callbacks.append(checkpoint_callback)\n\n            # Add ONNX export callback for best model\n            if not debug:\n                from callbacks import ExportBestModelToONNX\n\n                callbacks.append(\n                    ExportBestModelToONNX(\n                        save_dir=checkpoints_dir,\n                        model_name=model_name_str,\n                        checkpoint_callback=checkpoint_callback,\n                    )\n                )\n\n            trainer = Trainer(\n                max_epochs=total_epochs,\n                default_root_dir=out_dir_path,\n                logger=tb_logger,\n                fast_dev_run=debug and 
dev_run,\n                precision=amp_precision,  # type: ignore\n                profiler=\"simple\" if profiling else None,\n                callbacks=callbacks,\n            )\n\n            # finetune learning rate and batch size\n            tuner = Tuner(trainer)\n            # disable compiling as it required fixed batch size\n            model.use_compile = False\n            # NOTE: don't use fast_dev_run=True with scale batch and lr finder\n            batch_size = init_batch_size\n            if not debug and trainer.num_devices == 1 and find_batch_size:\n                suggested_batch_size = tuner.scale_batch_size(model, datamodule=dm, init_val=init_batch_size)\n                batch_size = suggested_batch_size or init_batch_size\n            logger.info(\"The final batch size is %s.\", batch_size)\n            if not debug and not dev_run:\n                lr_finder = tuner.lr_find(model, datamodule=dm, min_lr=1e-4, max_lr=1e-3)\n                fig = lr_finder.plot(suggest=True)  # type: ignore\n                save_path = final_output_dir / f\"lr_{batch_size}_{image_size}.svg\"\n                save_path.parent.mkdir(parents=True, exist_ok=True)\n                fig.savefig(save_path)  # type: ignore\n                model.hparams.learning_rate = lr_finder.suggestion()  # type: ignore\n\n            current_args[\"final_batch_size\"] = batch_size\n            lr = model.hparams.get(\"learning_rate\")\n            if lr is not None:\n                current_args[\"suggested_learning_rate\"] = lr\n\n            with train_args_path.open(\"wb\") as f:\n                f.write(yaml.encode(current_args))\n\n            # training\n            model.use_compile = True\n            trainer.fit(model, datamodule=dm)\n            trainer.test(model, datamodule=dm)\n\n    app()\n"
  }
]