[
  {
    "path": ".dockerignore",
    "content": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\nlerna-debug.log*\n.pnpm-debug.log*\n\n# Diagnostic reports (https://nodejs.org/api/report.html)\nreport.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json\n\n# Runtime data\npids\n*.pid\n*.seed\n*.pid.lock\n\n# Directory for instrumented libs generated by jscoverage/JSCover\nlib-cov\n\n# Coverage directory used by tools like istanbul\ncoverage\n*.lcov\n\n# nyc test coverage\n.nyc_output\n\n# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)\n.grunt\n\n# Bower dependency directory (https://bower.io/)\nbower_components\n\n# node-waf configuration\n.lock-wscript\n\n# Compiled binary addons (https://nodejs.org/api/addons.html)\nbuild/Release\n\n# Dependency directories\nnode_modules/\njspm_packages/\n\n# Snowpack dependency directory (https://snowpack.dev/)\nweb_modules/\n\n# TypeScript cache\n*.tsbuildinfo\n\n# Optional npm cache directory\n.npm\n\n# Optional eslint cache\n.eslintcache\n\n# Optional stylelint cache\n.stylelintcache\n\n# Microbundle cache\n.rpt2_cache/\n.rts2_cache_cjs/\n.rts2_cache_es/\n.rts2_cache_umd/\n\n# Optional REPL history\n.node_repl_history\n\n# Output of 'npm pack'\n*.tgz\n\n# Yarn Integrity file\n.yarn-integrity\n\n# dotenv environment variable files\n.env\n.env.development.local\n.env.test.local\n.env.production.local\n.env.local\n\n# parcel-bundler cache (https://parceljs.org/)\n.cache\n.parcel-cache\n\n# Next.js build output\n.next\nout\n\n# Nuxt.js build / generate output\n.nuxt\ndist\n\n# Gatsby files\n.cache/\n# Comment in the public line in if your project uses Gatsby and not Next.js\n# https://nextjs.org/blog/next-9-1#public-directory-support\n# public\n\n# vuepress build output\n.vuepress/dist\n\n# vuepress v2.x temp and cache directory\n.temp\n.cache\n\n# Docusaurus cache and generated files\n.docusaurus\n\n# Serverless directories\n.serverless/\n\n# FuseBox cache\n.fusebox/\n\n# DynamoDB Local files\n.dynamodb/\n\n# TernJS port 
file\n.tern-port\n\n# Stores VSCode versions used for testing VSCode extensions\n.vscode-test\n\n# yarn v2\n.yarn/cache\n.yarn/unplugged\n.yarn/build-state.yml\n.yarn/install-state.gz\n.pnp.*\n\n\n# Misc\n.DS_Store\n"
  },
  {
    "path": ".eslintrc.json",
    "content": "{\n  \"extends\": [\n    \"eslint:recommended\",\n    \"plugin:@typescript-eslint/recommended\",\n    \"prettier\"\n  ],\n  \"plugins\": [\"@typescript-eslint\"],\n  \"parser\": \"@typescript-eslint/parser\",\n  \"root\": true\n}\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/1.bug_report.yml",
    "content": "name: Bug Report\ndescription: File a bug report\ntitle: \"[Bug]: \"\n\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thanks for taking the time to fill out this bug report!\n        \n  - type: checkboxes\n    attributes:\n      label: Not a duplicate issue\n      options:\n        - label: I have searched the existing issues and confirmed this issue is not a duplicate.\n          required: true\n\n  - type: checkboxes\n    attributes:\n      label: Verify ChatGPT service is operational\n      options:\n        - label: I have checked [status.openai.com](https://status.openai.com) and ChatGPT is operational.\n\n  - type: input\n    attributes:\n      label: Bot version\n      description: Please enter the version of the bot you are using.\n      placeholder: v2.0.0\n    validations:\n      required: true\n      \n  - type: dropdown\n    attributes:\n      label: API type\n      description: Please select the type(s) of API you are using.\n      multiple: true\n      options:\n        - official\n        - unofficial\n        - browser\n\n  - type: textarea\n    attributes:\n      label: Environment details\n      description: Please enter your Node.js version, operating system, and whether you are using Docker.\n    validations:\n      required: true\n      \n  - type: textarea\n    attributes:\n      label: Describe the Bug\n      description: A clear and concise description of what the bug is, and the expected behavior.\n    validations:\n      required: true\n\n  - type: textarea\n    attributes:\n      label: To Reproduce\n      description: Steps to reproduce the behavior.\n\n  - type: textarea\n    attributes:\n      label: Logs\n      description: |\n        **Be careful not to leak your account information!**\n        If applicable, please provide logs to help diagnose your problem. You may need to set the `debug` level to 2 or higher. 
If the log is very long, please provide a link to [pastebin](https://pastebin.com) or a similar service. \n\n  - type: textarea\n    attributes:\n      label: Additional context\n      description: Add any other context about the problem here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/2.feature_request.yml",
    "content": "name: Feature Request\ndescription: Suggest an idea for this project\ntitle: \"[Feature Request]: \"\n\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thanks for suggesting an idea for this project!\n        \n  - type: checkboxes\n    attributes:\n      label: Not a duplicate feature request\n      options:\n        - label: I have searched the existing issues and confirmed this feature request is not a duplicate.\n          required: true\n\n  - type: checkboxes\n    attributes:\n      label: Supported by the upstream API\n      description: This project relies on this [API](https://github.com/transitive-bullshit/chatgpt-api) to use ChatGPT models. If the feature is not supported yet, please file a feature request in the API repo instead.\n      options:\n        - label: This feature can be implemented using the upstream API.\n\n  - type: textarea\n    attributes:\n      label: Describe the feature\n      description: Please describe the feature you would like to see implemented.\n    validations:\n      required: true\n  \n  - type: textarea\n    attributes:\n      label: Additional context\n      description: Please provide any additional context about the feature request (e.g. proposed implementation).\n"
  },
  {
    "path": ".github/workflows/docker.yml",
    "content": "name: Publish Docker image\n\non:\n  release:\n    types: [published]\n  workflow_dispatch:\n\njobs:\n  docker:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v3\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@v2\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v2\n      - name: Login to Docker Hub\n        uses: docker/login-action@v2\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n      \n      # Browserless\n      - name: Extract metadata for Docker (browserless)\n        id: meta\n        uses: docker/metadata-action@v4\n        with:\n          images: RainEggplant/chatgpt-telegram-bot\n          tags: |\n            type=semver,pattern={{version}}\n      - name: Build and push (browserless)\n        uses: docker/build-push-action@v4\n        with:\n          context: .\n          file: ./Dockerfile\n          platforms: linux/amd64,linux/arm64\n          push: true\n          tags: ${{ steps.meta.outputs.tags }}\n          labels: ${{ steps.meta.outputs.labels }}\n\n      # Browser-based\n      - name: Extract metadata for Docker (browser-based)\n        id: meta_browser\n        uses: docker/metadata-action@v4\n        with:\n          images: RainEggplant/chatgpt-telegram-bot\n          flavor: |\n            suffix=-browser,onlatest=true\n          tags: |\n            type=semver,pattern={{version}}\n      - name: Build and push (browser-based)\n        uses: docker/build-push-action@v4\n        with:\n          context: .\n          file: ./Dockerfile.browser\n          platforms: linux/amd64,linux/arm64\n          push: true\n          tags: ${{ steps.meta_browser.outputs.tags }}\n          labels: ${{ steps.meta_browser.outputs.labels }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# IDE files\n.idea\n\n# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\nlerna-debug.log*\n.pnpm-debug.log*\n\n# Diagnostic reports (https://nodejs.org/api/report.html)\nreport.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json\n\n# Runtime data\npids\n*.pid\n*.seed\n*.pid.lock\n\n# Directory for instrumented libs generated by jscoverage/JSCover\nlib-cov\n\n# Coverage directory used by tools like istanbul\ncoverage\n*.lcov\n\n# nyc test coverage\n.nyc_output\n\n# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)\n.grunt\n\n# Bower dependency directory (https://bower.io/)\nbower_components\n\n# node-waf configuration\n.lock-wscript\n\n# Compiled binary addons (https://nodejs.org/api/addons.html)\nbuild/Release\n\n# Dependency directories\nnode_modules/\njspm_packages/\n\n# Snowpack dependency directory (https://snowpack.dev/)\nweb_modules/\n\n# TypeScript cache\n*.tsbuildinfo\n\n# Optional npm cache directory\n.npm\n\n# Optional eslint cache\n.eslintcache\n\n# Optional stylelint cache\n.stylelintcache\n\n# Microbundle cache\n.rpt2_cache/\n.rts2_cache_cjs/\n.rts2_cache_es/\n.rts2_cache_umd/\n\n# Optional REPL history\n.node_repl_history\n\n# Output of 'npm pack'\n*.tgz\n\n# Yarn Integrity file\n.yarn-integrity\n\n# dotenv environment variable files\n.env\n.env.development.local\n.env.test.local\n.env.production.local\n.env.local\n\n# parcel-bundler cache (https://parceljs.org/)\n.cache\n.parcel-cache\n\n# Next.js build output\n.next\nout\n\n# Nuxt.js build / generate output\n.nuxt\ndist\n\n# Gatsby files\n.cache/\n# Comment in the public line in if your project uses Gatsby and not Next.js\n# https://nextjs.org/blog/next-9-1#public-directory-support\n# public\n\n# vuepress build output\n.vuepress/dist\n\n# vuepress v2.x temp and cache directory\n.temp\n.cache\n\n# Docusaurus cache and generated files\n.docusaurus\n\n# Serverless directories\n.serverless/\n\n# FuseBox cache\n.fusebox/\n\n# DynamoDB Local 
files\n.dynamodb/\n\n# TernJS port file\n.tern-port\n\n# Stores VSCode versions used for testing VSCode extensions\n.vscode-test\n\n# yarn v2\n.yarn/cache\n.yarn/unplugged\n.yarn/build-state.yml\n.yarn/install-state.gz\n.pnp.*\n\n\n# Misc\n.DS_Store\n\n# Config files\nconfig/local*.json\n\n# Database files\ndatabase\n"
  },
  {
    "path": ".husky/pre-commit",
    "content": "#!/usr/bin/env sh\n. \"$(dirname -- \"$0\")/_/husky.sh\"\n\n# Temporary workaround for excessive output\n# See https://github.com/okonet/lint-staged/issues/1164#issuecomment-1177561594\n# Check if /dev/tty works: https://stackoverflow.com/a/69088164\nif sh -c \": >/dev/tty\" >/dev/null 2>/dev/null; then exec >/dev/tty 2>&1; fi\n\npnpm lint-staged\n\n# Skip typechecking until tsc can work with lint-staged\n# https://github.com/microsoft/TypeScript/issues/27379\n"
  },
  {
    "path": ".prettierrc",
    "content": "{\n  \"printWidth\": 80,\n  \"tabWidth\": 2,\n  \"useTabs\": false,\n  \"semi\": true,\n  \"singleQuote\": true,\n  \"trailingComma\": \"es5\",\n  \"bracketSpacing\": false,\n  \"arrowParens\": \"always\"\n}\n"
  },
  {
    "path": ".vscode/launch.json",
    "content": "{\n  // Use IntelliSense to learn about possible attributes.\n  // Hover to view descriptions of existing attributes.\n  // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n  \"version\": \"0.2.0\",\n  \"configurations\": [\n    {\n      \"type\": \"node\",\n      \"runtimeArgs\": [\"--experimental-loader=extensionless\"],\n      \"request\": \"launch\",\n      \"name\": \"Debug\",\n      \"skipFiles\": [\"<node_internals>/**\"],\n      \"program\": \"${workspaceFolder}/src/index.ts\",\n      \"preLaunchTask\": \"Build for Debugging\",\n      \"outFiles\": [\"${workspaceFolder}/dist/**/*.js\"]\n    }\n  ]\n}\n"
  },
  {
    "path": ".vscode/tasks.json",
    "content": "{\n  \"version\": \"2.0.0\",\n  \"tasks\": [\n    {\n      \"type\": \"npm\",\n      \"script\": \"build:debug\",\n      \"group\": \"build\",\n      \"problemMatcher\": [\"$tsc\"],\n      \"label\": \"Build for Debugging\",\n      \"detail\": \"tsc --build && tsc --sourceMap\"\n    }\n  ]\n}\n"
  },
  {
    "path": "Dockerfile",
    "content": "# Builder stage\nFROM node:lts-alpine AS builder\n\nRUN npm install -g pnpm\n\nWORKDIR /app\n\nCOPY config /app/config\nCOPY package.json pnpm-lock.yaml ./\nRUN pnpm install --frozen-lockfile --ignore-scripts\n\nCOPY . .\nRUN pnpm build\n\n\n# Runner stage\nFROM node:lts-alpine\n\nRUN npm install -g pnpm\n\nWORKDIR /app\n\nCOPY config /app/config\nCOPY package.json pnpm-lock.yaml ./\nRUN pnpm install --frozen-lockfile --ignore-scripts --prod --no-optional\n\nCOPY --from=builder /app/dist ./dist\n\nCMD pnpm start\n"
  },
  {
    "path": "Dockerfile.browser",
    "content": "# Builder stage\nFROM node:lts-alpine AS builder\n\nRUN npm install -g pnpm\n\nWORKDIR /app\n\nCOPY package.json pnpm-lock.yaml ./\nRUN pnpm install --frozen-lockfile --ignore-scripts \n\nCOPY . .\nRUN pnpm build\n\n\n# Runner stage\nFROM node:lts\n\nRUN apt update && apt install -y chromium xvfb\nRUN npm install -g pnpm\nENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium\n\nWORKDIR /app\n\nCOPY package.json pnpm-lock.yaml ./\nRUN pnpm install --frozen-lockfile --ignore-scripts --prod\n\n# Install prebuilt lmdb binary\nRUN arch_out=$(uname -m) \\\n && if [ \"${arch_out}\" = \"x86_64\" ]; then \\\n        ARCH=x64; \\\n    elif [ \"${arch_out}\" = \"aarch64\" ]; then \\\n        ARCH=arm64; \\\n    elif echo \"${arch_out}\" | grep -q \"armv\"; then \\\n        ARCH=arm; \\\n    else \\\n        ARCH=${arch_out}; \\\n    fi \\\n && LMDB_VER=$(pnpm list lmdb | grep lmdb | awk '{print $2}') \\\n && pnpm add @lmdb/lmdb-linux-$ARCH@$LMDB_VER --ignore-scripts --prod --no-optional\n# We can also build it ourselves:\n# RUN npm explore lmdb -- npm run install\n\nCOPY --from=builder /app/dist ./dist\n\nCMD xvfb-run -a --server-args=\"-screen 0 1280x800x24 -ac -nolisten tcp -dpi 96 +extension RANDR\" pnpm start\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022 RainEggplant\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# ChatGPT Telegram Bot\n\n![badge:version](https://img.shields.io/github/v/release/RainEggplant/chatgpt-telegram-bot?color=brightgreen)\n![docker image size](https://img.shields.io/docker/image-size/raineggplant/chatgpt-telegram-bot/latest?label=docker%20image%20size)\n![license](https://img.shields.io/badge/license-MIT-green)\n\nEnglish ｜ [中文](README_zh-CN.md)\n\nA ChatGPT bot for Telegram based on Node.js. Support both browserless and browser-based APIs.\n\n## Updates\n\n<strong>🎉 v2 has been released!</strong>\n<details open>\n  <summary><b>🔔 Apr. 3, 2023 (v2.5.0)</b></summary>\n\n  > - Support per-chat conversation by @Vadko.\n  > - Support setting message queue as optional by @Vadko.  \n</details>\n\n<details>\n<summary><strong>Previous Updates</strong></summary>\n<details>\n  <summary><b>🔔 Mar. 10, 2023 (v2.4.0)</b></summary>\n\n  > - Add a timeout mechanism to prevent the message queue from getting stuck. You can override the default timeout in the config file.\n</details>\n\n<details>\n  <summary><b>🔔 Mar. 07, 2023 (v2.3.2)</b></summary>\n\n  > - You can pull the [pre-built Docker image](https://hub.docker.com/r/raineggplant/chatgpt-telegram-bot) from Docker Hub now!\n</details>\n\n<details>\n  <summary><b>🔔 Mar. 02, 2023 (v2.3.0)</b></summary>\n\n  > - Support the [official OpenAI chat completions API](https://platform.openai.com/docs/guides/chat).\n  > - Support proxy by using a custom fetch function.\n\n  We strongly advise you to use the `official` API. There are rumors that OpenAI may ban your account if you continue to use the `unofficial` and `browser` API.\n</details>\n\n<details>\n  <summary><b>🔔 Feb. 28, 2023 (v2.2.0)</b></summary>\n\n  > - Support message queue to avoid rate limit.\n  > - Improve Markdown parsing.\n</details>\n\n<details>\n  <summary><b>🔔 Feb. 
22, 2023 (v2.1.1)</b></summary>\n\n  > - Support custom prompt prefix and suffix (allowing you to customize the bot's identity and behavior).\n  > - Support Node.js v19.\n</details>\n\n<details>\n  <summary><b>🔔 Feb. 19, 2023 (v2.1.0)</b></summary>\n\n  > We have added support for the unofficial proxy API by @acheong08. This API uses a proxy server that allows users to bypass Cloudflare protection and use the real ChatGPT. Please see [Usage](#usage) for more details.\n  >\n  > For previous users, we've updated our API options. `api.version` is now `api.type`, with options `browser` (previously `v3`), `official` (previously `v4`), and `unofficial`. Please update your config file accordingly.\n</details>\n\n<details>\n  <summary><b>🔔 Feb. 17, 2023</b></summary>\n\n  > According to [one of the maintainers](https://github.com/waylaidwanderer/node-chatgpt-api#updates) of the reverse proxy servers, OpenAI has patched this method. So you have to either use the browserless Official API with official models (which costs money), or use the browser-based solution.\n</details>\n\n<details>\n  <summary><b>🔔 Feb. 15, 2023</b></summary>\n\n  > We have released v2.0.0 of this bot, which supports both [browserless](https://github.com/transitive-bullshit/chatgpt-api) and [browser-based](https://github.com/transitive-bullshit/chatgpt-api/tree/v3) APIs. You can switch between the two APIs at any time using the config file. 
Additionally, we have refactored the codebase to make it more maintainable and easier to extend.\n  >\n  > For old users, you will need to switch from the `.env` file to json files under the `config/` folder.\n</details>\n\n</details>\n\n## Features\n\n<table>\n  <tr>\n    <th>Private Chat</th>\n    <th>Group Chat</th>\n  </tr>\n  <tr>\n    <td><img src=\"./assets/private_chat.jpg\" /></td>\n    <td><img src=\"./assets/group_chat.jpg\" /></td>\n  </tr>\n</table>\n\n- Support for both browserless (official, unofficial) and browser-based APIs\n- Support for both private and group chats\n- Work in privacy mode (the bot can only see specific messages)\n- Bot access control based on user and group IDs\n- Reset chat thread and refresh session with command\n- Queue messages to avoid rate limit\n- Typing indicator, Markdown formatting, ...\n- Cloudflare bypassing and CAPTCHA automation (for the browser-based API)\n- Customize bot identity and behavior (by setting `api.official.systemMessage`)\n- User-friendly logging\n\n## Usage\n\n### Differences between the three types of APIs\n\n> Thank @transitive-bullshit for making this easy-to-understand table!\n\n| Type         | Free?  | Robust?  | Quality?                
|\n| -------------| ------ | -------- | ----------------------- |\n| `official`   | ❌ No  | ✅ Yes   | ✅ Real ChatGPT models |\n| `unofficial` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT         |\n| `browser`    | ✅ Yes | ❌ No    | ✅ Real ChatGPT         |\n\n- `official`: Uses the `gpt-3.5-turbo` model by default with the official OpenAI chat completions API (official, robust approach, but it's not free)\n- `unofficial`: Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)\n- `browser` (not recommended): Uses Puppeteer to access the official ChatGPT webapp (uses the real ChatGPT, but very flaky, heavyweight, and error prone)\n\n> **Warning**\n>\n> There are rumors that OpenAI may ban your account if you continue to use the `unofficial` and `browser` API. Use it at your own risk.\n\n### Start the server\n\n#### Option 1: Node\nTo get started, follow these steps:\n\n1. Clone this project.\n2. Create `local.json` under the `config/` folder. You can copy the `config/default.json` as a template.\n3. Modify the `local.json` following the instructions in the file. The settings in `local.json` will override the default settings in `default.json`.\n  - Set `api.type` to `official` if you want to use the browserless official API. Then provide your [OpenAI API Key](https://platform.openai.com/overview) and other settings. You can refer to [this](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptapi) for more details. Note that this will cost your credits.\n  - Set `api.type` to `unofficial` if you want to use the browserless unofficial API. Then provide your OpenAI access token ([how to get your access token?](https://github.com/transitive-bullshit/chatgpt-api#access-token)) and other settings. 
You can refer to [this](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptunofficialproxyapi) for more details.\n  - Set `api.type` to `browser` if you want to use the browser-based API (not recommended). Then provide the OpenAI / Google / Microsoft credentials and other settings. You can refer to [this](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#authentication) and [this](https://github.com/transitive-bullshit/chatgpt-api/blob/v3/docs/classes/ChatGPTAPIBrowser.md#parameters) for more details. Make sure you have a Chromium-based browser installed.\n\nThen you can start the bot with:\n\n```shell\npnpm install\npnpm build && pnpm start\n```\n\n#### Option 2: Docker\n\nTo get started, follow these steps:\n\n1. Create a folder named `config` and create a `local.json` file in it. You can follow the instructions in the \"Option 1: Node\" section to customize the settings.\n2. Run the following command to start the bot:\n\n    ```shell\n    docker run -d -v ./config:/app/config raineggplant/chatgpt-telegram-bot:latest\n    ```\n\n    This will pull the latest image that only supports the browserless API. 
If you want to use the browser-based API, you can add a `-browser` suffix to the tag, e.g., `raineggplant/chatgpt-telegram-bot:latest-browser`.\n\n\n### Chat with the bot in Telegram\n\nTo chat with the bot in Telegram, you can:\n\n- Send direct messages to the bot (this is not supported in groups)\n- Send messages that start with the specified command (e.g., `/chat` or the command you specified in the json config file)\n- Reply to the bot's last message\n\n> **Note** Make sure you have enabled the privacy mode of your bot before adding it to a group, or it will reply to every message in the group.\n\nThe bot also has several commands.\n\n- `/help`: Show help information.\n- `/reset`: Reset the current chat thread and start a new one.\n- `/reload` (admin required, browser-based API only): Refresh the ChatGPT session.\n\n> **Note** When using a command in a group, make sure to include a mention after the command, like `/help@chatgpt_bot`.\n\n\n## Advanced\n\n### Running the bot using browser-based API on a headless server\n\nYou can use [Xvfb](https://www.x.org/releases/X11R7.6/doc/man/man1/Xvfb.1.xhtml) to create a virtual framebuffer on a headless server and run this program:\n\n```shell\nxvfb-run -a --server-args=\"-screen 0 1280x800x24 -nolisten tcp -dpi 96 +extension RANDR\" pnpm start\n```\n\nWe recommend you use Google auth to avoid the complicated login Recaptchas. If you use an OpenAI account, you may have to use nopecha or 2captcha or manually solve the Recaptcha (by connecting to the display server using x11vnc). 
For more details about CAPTCHA solving, please refer to [the api repository](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#captchas).\n\n## Credits\n\n- [ChatGPT API](https://github.com/transitive-bullshit/chatgpt-api): Node.js client for the unofficial ChatGPT API.\n- [ChatGPT](https://github.com/acheong08/ChatGPT): ChatGPT API for Python.\n- [Node.js Telegram Bot API](https://github.com/yagop/node-telegram-bot-api): Telegram Bot API for NodeJS.\n- [🤖️ chatbot-telegram](https://github.com/Ciyou/chatbot-telegram): Yet another telegram ChatGPT bot.\n\n## LICENSE\n\n[MIT License](LICENSE).\n\n**Leave a star ⭐ if you find this project useful.**\n"
  },
  {
    "path": "README_zh-CN.md",
    "content": "# ChatGPT Telegram 机器人\n\n![badge:version](https://img.shields.io/github/v/release/RainEggplant/chatgpt-telegram-bot?color=brightgreen)\n![docker image size](https://img.shields.io/docker/image-size/raineggplant/chatgpt-telegram-bot/latest?label=docker%20image%20size)\n![license](https://img.shields.io/badge/license-MIT-green)\n\n[English](README.md) | 中文\n\n一个基于 Node.js 的 ChatGPT 电报机器人。支持非浏览器和基于浏览器的 API。\n\n> Fun fact: 这篇中文文档是 ChatGPT 翻译的。\n\n## 更新\n\n<strong>🎉 v2 已发布！</strong>\n<details open>\n  <summary><b>🔔 2023 年 4 月 3 日 (v2.5.0)</b></summary>\n\n  > - 支持不同用户/群组的多个会话 by @Vadko.\n  > - 支持将消息队列设置为可选 by @Vadko.  \n</details>\n\n<details>\n<summary><strong>历史更新</strong></summary>\n<details>\n  <summary><b>🔔 2023 年 3 月 10 日 (v2.4.0)</b></summary>\n\n  > - 添加超时机制以防止消息队列被卡住。您可以在配置文件中覆盖默认超时时间。\n</details>\n\n<details>\n  <summary><b>🔔 2023 年 3 月 7 日 (v2.3.2)</b></summary>\n\n  > - 您现在可以从 Docker Hub 拉取 [预构建的 Docker 镜像](https://hub.docker.com/r/raineggplant/chatgpt-telegram-bot) 了！\n</details>\n\n<details>\n  <summary><b>🔔 2023 年 3 月 2 日 (v2.3.0)</b></summary>\n\n  > - 支持 [OpenAI 官方 chat completions API](https://platform.openai.com/docs/guides/chat)。\n  > - 支持使用自定义的 fetch 函数进行代理。\n\n  我们强烈建议您使用 `official` API。有传言称如果您继续使用 `unofficial` 和 `browser` API，OpenAI 可能会禁止您的账户。\n</details>\n\n<details>\n  <summary><b>🔔 2023 年 2 月 28 日 (v2.2.0)</b></summary>\n\n  > - 支持消息队列以避免速率限制。\n  > - 改善 Markdown 解析。\n</details>\n\n<details>\n  <summary><b>🔔 2023 年 2 月 22 日 (v2.1.1)</b></summary>\n\n  > - 支持自定义提示前缀和后缀（允许您自定义机器人的身份和行为）。\n  > - 支持 Node.js v19。\n</details>\n\n<details>\n  <summary><b>🔔 2023 年 2 月 19 日 (v2.1.0)</b></summary>\n\n  > 我们已经添加了对 @acheong08 的非官方代理 API 的支持。此 API 使用代理服务器，允许用户绕过 Cloudflare 保护并使用真正的 ChatGPT。请参阅 [使用方法](#使用方法) 获取更多详情。\n  >\n  > 对于老用户，我们已更新我们的 API 选项。`api.version` 现在变为了 `api.type`，可选项为 `browser`（之前的 `v3`）、`official`（之前的 `v4`）和 `unofficial`。请相应地更新您的配置文件。\n</details>\n\n<details>\n  <summary><b>🔔 2023 年 2 月 17 日</b></summary>\n\n  > 
根据[维护者之一](https://github.com/waylaidwanderer/node-chatgpt-api#updates)的说法，OpenAI 已经修补了这种方法。所以你要么使用带官方模型的无浏览器 Official API（需要付费），要么使用基于浏览器的解决方案。\n</details>\n\n<details>\n  <summary><b>🔔 2023 年 2 月 15 日</b></summary>\n\n  > 我们发布了 v2.0.0 版本的这个机器人，支持 [非浏览器](https://github.com/transitive-bullshit/chatgpt-api) 和 [基于浏览器](https://github.com/transitive-bullshit/chatgpt-api/tree/v3) 的 API。您可以随时使用配置文件在两个 API 之间切换。此外，我们已经重构了代码库，使其更易于维护和扩展。\n  >\n  > 对于老用户，您需要从 `.env` 文件切换到 `config/` 文件夹下的 json 文件。\n</details>\n\n</details>\n\n## 特点\n\n<table>\n  <tr>\n    <th>私聊</th>\n    <th>群聊</th>\n  </tr>\n  <tr>\n    <td><img src=\"./assets/private_chat.jpg\" /></td>\n    <td><img src=\"./assets/group_chat.jpg\" /></td>\n  </tr>\n</table>\n\n- 支持非浏览器 (官方，非官方) 和基于浏览器的 API\n- 支持私聊和群聊\n- 在隐私模式下工作 (机器人只能查看特定的消息)\n- 基于用户和群组 ID 的机器人访问控制\n- 使用命令重置聊天线程并刷新会话\n- 使用消息队列以避免速率限制\n- “正在输入”提示，Markdown 格式化等\n- 绕过 Cloudflare 并自动完成验证码 (适用于基于浏览器的 API)\n- 自定义机器人身份和行为 (通过设置 `api.official.systemMessage`)\n- 用户友好的日志记录\n\n## 使用方法\n\n### 三种 API 类型之间的区别\n\n> 感谢 @transitive-bullshit 制作了这个易于理解的表格！\n\n| 类型         | 免费？ | 稳定？ | 质量？              |\n| ------------ | ------ | ------ | ------------------- |\n| `official`   | ❌ 否   | ✅ 是   | ✅ 真实 ChatGPT 模型 |\n| `unofficial` | ✅ 是   | ☑️ 可能 | ✅ 真实 ChatGPT      |\n| `browser`    | ✅ 是   | ❌ 否   | ✅ 真实 ChatGPT      |\n\n- `official`: 使用 OpenAI 官方的 chat completions API，默认基于 `gpt-3.5-turbo` 模型 (官方、稳定，但不免费)\n- `unofficial`: 使用一个非官方的代理服务器以绕过 Cloudflare 访问 ChatGPT 网页版的后端 API (使用真实的 ChatGPT，非常轻量级，但依赖第三方服务器并且受速率限制)\n- `browser` (不推荐使用): 使用 Puppeteer 访问官方的 ChatGPT 网页版 (使用真实的ChatGPT，但非常不稳定、耗费资源，并且容易出错)\n\n> **Warning**\n>\n> 有传言称，如果您继续使用 `unofficial` 和 `browser` API，OpenAI 可能会封禁您的账户。请自行决定是否使用。\n\n### 启动服务器\n\n#### 选项 1: Node\n要开始使用，请按照以下步骤操作：\n\n1. 克隆这个项目。\n2. 在 `config/` 文件夹下创建 `local.json`。可以将 `config/default.json` 复制为模板。\n3. 
按照文件中的说明修改 `local.json`。`local.json` 中的设置将覆盖 `default.json` 中的默认设置。\n  - 如果您想使用非浏览器的官方 API，请将 `api.type` 设置为 `official`。然后提供您的[OpenAI API密钥](https://platform.openai.com/overview)和其他设置。您可以参考[这里](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptapi)获取更多详细信息。请注意，这将消耗您的账户余额。\n  - 如果您想使用非浏览器的非官方 API，请将 `api.type` 设置为 `unofficial`。然后提供您的 OpenAI 访问令牌 ([如何获取访问令牌?](https://github.com/transitive-bullshit/chatgpt-api#access-token)) 和其他设置。您可以参考[这里](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptunofficialproxyapi)获取更多详细信息。\n  - 如果您想使用基于浏览器的 API (不推荐)，请将 `api.type` 设置为 `browser`。然后提供 OpenAI / Google / Microsoft 凭证和其他设置。您可以参考[这里](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#authentication)和[这里](https://github.com/transitive-bullshit/chatgpt-api/blob/v3/docs/classes/ChatGPTAPIBrowser.md#parameters)获取更多详细信息。请确保您已安装基于 Chromium 的浏览器。\n\n然后，您可以使用以下命令启动机器人：\n\n```shell\npnpm install\npnpm build && pnpm start\n```\n\n#### 选项 2: Docker\n\n请按以下步骤开始：\n\n1. 创建一个名为 `config` 的文件夹，并在其中创建一个 `local.json` 文件。您可以按照“选项 1: Node”中的说明自定义设置。\n2. 
运行以下命令启动机器人：\n\n    ```shell\n    docker run -d -v ./config:/app/config raineggplant/chatgpt-telegram-bot:latest\n    ```\n\n    这将拉取最新的只支持无浏览器 API 的镜像。如果您想使用基于浏览器的 API，则可以将标签后缀添加为 `-browser`，例如 `raineggplant/chatgpt-telegram-bot:latest-browser`。\n\n\n### 在 Telegram 中与机器人聊天\n\n要在 Telegram 中与机器人聊天，您可以：\n\n- 向机器人直接发送消息（不支持在群组中使用）\n- 发送以指定命令开头的消息（例如 `/chat` 或您在 JSON 配置文件中指定的命令）\n- 回复机器人的最后一条消息\n\n> **Note** 在将机器人添加到群组之前，请确保已启用机器人的隐私模式，否则它将回复群组中的每条消息。\n\n该机器人还有几个命令。\n\n- `/help`：显示帮助信息。\n- `/reset`：重置当前聊天线程并开始新线程。\n- `/reload`（需要管理员权限，只适用于基于浏览器的 API）：刷新 ChatGPT 会话。\n\n> **Note** 在群组中使用命令时，请确保在命令之后包含提及，例如`/help@chatgpt_bot`。\n\n\n## 高级选项\n\n### 在无头服务器上运行基于浏览器 API 的机器人\n\n您可以使用 [Xvfb](https://www.x.org/releases/X11R7.6/doc/man/man1/Xvfb.1.xhtml) 在无头服务器上创建虚拟帧缓冲区，并运行以下程序：\n\n```shell\nxvfb-run -a --server-args=\"-screen 0 1280x800x24 -nolisten tcp -dpi 96 +extension RANDR\" pnpm start\n```\n\n我们建议您使用 Google 验证，以避免复杂的登录 Recaptchas。如果您使用 OpenAI 帐户，则可能需要使用 nopecha 或 2captcha 或手动解决Recaptcha（通过使用 x11vnc 连接到显示服务器）。有关 CAPTCHA 解决的更多详细信息，请参阅[API存储库](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#captchas)。\n\n## 鸣谢\n\n- [ChatGPT API](https://github.com/transitive-bullshit/chatgpt-api)：ChatGPT API 的 Node.js 客户端。\n- [ChatGPT](https://github.com/acheong08/ChatGPT)：Python 版本的 ChatGPT API。\n- [Node.js Telegram Bot API](https://github.com/yagop/node-telegram-bot-api)：NodeJS 的 Telegram Bot API。\n- [🤖️ chatbot-telegram](https://github.com/Ciyou/chatbot-telegram)：另一个 Telegram ChatGPT 机器人。\n\n## 许可证\n\n[MIT License](LICENSE).\n\n**如果您觉得这个项目有用，请给它一个 star ⭐。**\n"
  },
  {
    "path": "config/default.json",
    "content": "{\n  \"debug\": 1, // debug level: 0 - no debug, 1 - debug, 2 - verbose debug\n  \"bot\": {\n    \"token\": \"TELEGRAM_BOT_TOKEN\",\n    \"groupIds\": [], // allowed group ids, leave empty to allow all\n    \"userIds\": [], // allowed user ids, leave empty to allow all\n    \"chatCmd\": \"/chat\",\n    \"queue\": true,\n    \"redisUri\": \"\"\n  },\n  \"api\": {\n    \"type\": \"official\", // \"browser\", \"official\", \"unofficial\": the type of the chatgpt api to use\n    \"browser\": {\n      // Please refer to \"https://github.com/transitive-bullshit/chatgpt-api/blob/v3/docs/classes/ChatGPTAPIBrowser.md#parameters\"\n      \"email\": \"ACCOUNT_EMAIL\",\n      \"password\": \"ACCOUNT_PASSWORD\",\n      \"isGoogleLogin\": false,\n      \"isProAccount\": false,\n      \"executablePath\": \"\",\n      \"nopechaKey\": \"\",\n      \"captchaToken\": \"\",\n      \"userDataDir\": \"\",\n      \"timeoutMs\": 120000 // set to 0 to disable\n    },\n    \"official\": {\n      // Please refer to \"https://github.com/transitive-bullshit/chatgpt-api/blob/main/docs/classes/ChatGPTAPI.md#parameters\"\n      \"apiKey\": \"API_KEY\",\n      \"apiBaseUrl\": \"\",\n      \"completionParams\": {},\n      \"systemMessage\": \"\",\n      \"maxModelTokens\": 0, // set to 0 to use default\n      \"maxResponseTokens\": 0, // set to 0 to use default\n      \"timeoutMs\": 60000 // set to 0 to disable\n    },\n    \"unofficial\": {\n      // Please refer to \"https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptunofficialproxyapi\"\n      \"accessToken\": \"ACCESS_TOKEN\",\n      \"apiReverseProxyUrl\": \"\",\n      \"model\": \"\",\n      \"timeoutMs\": 120000 // set to 0 to disable\n    }\n  },\n  \"proxy\": \"\" // You can also specify the proxy using the environment variable \"HTTP_PROXY\"\n}\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"chatgpt-telegram-bot\",\n  \"version\": \"2.5.0\",\n  \"description\": \"A ChatGPT bot for Telegram.\",\n  \"main\": \"index.js\",\n  \"type\": \"module\",\n  \"scripts\": {\n    \"build\": \"tsc --build\",\n    \"build:debug\": \"tsc --build && tsc --sourceMap\",\n    \"dev\": \"tsc-watch --onSuccess \\\"node --experimental-loader=extensionless dist/index.js\\\"\",\n    \"start\": \"node --experimental-loader=extensionless dist/index.js\",\n    \"lint\": \"eslint src/**/*.{js,ts}\",\n    \"lint:fix\": \"eslint --fix src/**/*.{js,ts}\",\n    \"format\": \"prettier --check src/**/*.{js,ts}\",\n    \"format:fix\": \"prettier --write src/**/*.{js,ts}\",\n    \"typecheck\": \"tsc --noEmit --noUnusedLocals\",\n    \"prepare\": \"husky install\"\n  },\n  \"lint-staged\": {\n    \"src/**/*.{js,ts}\": [\n      \"eslint\",\n      \"prettier --check\"\n    ]\n  },\n  \"keywords\": [\n    \"ChatGPT\",\n    \"Telegram\",\n    \"Bot\"\n  ],\n  \"author\": \"RainEggplant\",\n  \"license\": \"MIT\",\n  \"devDependencies\": {\n    \"@types/config\": \"^3.3.0\",\n    \"@types/lodash\": \"^4.14.192\",\n    \"@types/node\": \"^18.15.11\",\n    \"@types/node-telegram-bot-api\": \"^0.57.7\",\n    \"@types/promise-queue\": \"^2.2.0\",\n    \"@typescript-eslint/eslint-plugin\": \"^5.57.0\",\n    \"@typescript-eslint/parser\": \"^5.57.0\",\n    \"eslint\": \"^8.37.0\",\n    \"eslint-config-prettier\": \"^8.8.0\",\n    \"husky\": \"^8.0.3\",\n    \"lint-staged\": \"^13.2.0\",\n    \"prettier\": \"^2.8.7\",\n    \"tsc-watch\": \"^6.0.0\",\n    \"typescript\": \"^4.9.5\"\n  },\n  \"dependencies\": {\n    \"@keyv/redis\": \"^2.5.7\",\n    \"chatgpt\": \"^5.1.4\",\n    \"config\": \"^3.3.9\",\n    \"dotenv\": \"^16.0.3\",\n    \"extensionless\": \"^1.3.4\",\n    \"https-proxy-agent\": \"^5.0.1\",\n    \"ioredis\": \"^5.3.2\",\n    \"keyv\": \"^4.5.2\",\n    \"lodash\": \"^4.17.21\",\n    \"node-fetch\": \"^3.3.1\",\n    \"node-telegram-bot-api\": \"^0.60.0\",\n 
   \"promise-queue\": \"^2.2.5\",\n    \"telegramify-markdown\": \"^1.1.0\"\n  },\n  \"optionalDependencies\": {\n    \"chatgpt-v3\": \"npm:chatgpt@3.5.1\",\n    \"puppeteer\": \"^19.8.2\"\n  }\n}\n"
  },
  {
    "path": "src/@types/telegramify-markdown/index.d.ts",
    "content": "declare module 'telegramify-markdown' {\n  // eslint-disable-next-line @typescript-eslint/no-explicit-any\n  function telegramifyMarkdown(markdown: string, options?: any): string;\n  export = telegramifyMarkdown;\n}\n"
  },
  {
    "path": "src/api.ts",
    "content": "import type {\n  ChatGPTAPI,\n  ChatGPTUnofficialProxyAPI,\n  ChatMessage as ChatResponseV4,\n} from 'chatgpt';\nimport type {\n  ChatGPTAPIBrowser,\n  ChatResponse as ChatResponseV3,\n} from 'chatgpt-v3';\nimport {\n  APIBrowserOptions,\n  APIOfficialOptions,\n  APIOptions,\n  APIUnofficialOptions,\n} from './types';\nimport {logWithTime} from './utils';\nimport {DB} from './db';\n\nclass ChatGPT {\n  debug: number;\n  readonly apiType: string;\n  protected _opts: APIOptions;\n  protected _api:\n    | ChatGPTAPI\n    | ChatGPTAPIBrowser\n    | ChatGPTUnofficialProxyAPI\n    | undefined;\n  protected _apiBrowser: ChatGPTAPIBrowser | undefined;\n  protected _apiOfficial: ChatGPTAPI | undefined;\n  protected _apiUnofficialProxy: ChatGPTUnofficialProxyAPI | undefined;\n  protected _timeoutMs: number | undefined;\n  protected _db: DB;\n\n  constructor(apiOpts: APIOptions, db: DB, debug = 1) {\n    this.debug = debug;\n    this.apiType = apiOpts.type;\n    this._opts = apiOpts;\n    this._timeoutMs = undefined;\n    this._db = db;\n  }\n\n  init = async () => {\n    if (this._opts.type == 'browser') {\n      const {ChatGPTAPIBrowser} = await import('chatgpt-v3');\n      this._apiBrowser = new ChatGPTAPIBrowser(\n        this._opts.browser as APIBrowserOptions\n      );\n      await this._apiBrowser.initSession();\n      this._api = this._apiBrowser;\n      this._timeoutMs = this._opts.browser?.timeoutMs;\n    } else if (this._opts.type == 'official') {\n      const {ChatGPTAPI} = await import('chatgpt');\n      this._apiOfficial = new ChatGPTAPI({\n        ...(this._opts.official as APIOfficialOptions),\n        getMessageById: async (id) => {\n          const message = await this._db.messageStore.get(id);\n          return message as ChatResponseV4;\n        },\n        upsertMessage: async (message) => {\n          await this._db.messageStore.set(message.id, message);\n        },\n      });\n      this._api = this._apiOfficial;\n      this._timeoutMs = 
this._opts.official?.timeoutMs;\n    } else if (this._opts.type == 'unofficial') {\n      const {ChatGPTUnofficialProxyAPI} = await import('chatgpt');\n      this._apiUnofficialProxy = new ChatGPTUnofficialProxyAPI(\n        this._opts.unofficial as APIUnofficialOptions\n      );\n      this._api = this._apiUnofficialProxy;\n      this._timeoutMs = this._opts.unofficial?.timeoutMs;\n    } else {\n      throw new RangeError('Invalid API type');\n    }\n    logWithTime('🔮 ChatGPT API has started...');\n  };\n\n  sendMessage = async (\n    text: string,\n    chatId: number,\n    messageId: string,\n    replyId?: string,\n    onProgress?: (res: ChatResponseV3 | ChatResponseV4) => void\n  ) => {\n    if (!this._api) return;\n\n    const contextDB = await this._db.getContext(chatId);\n    const parentIdFromReply = await this._db.getReplyId(replyId);\n\n    const context = {\n      conversationId: contextDB?.conversationId,\n      parentMessageId: parentIdFromReply ?? contextDB?.parentMessageId,\n    };\n\n    let res: ChatResponseV3 | ChatResponseV4;\n    if (this.apiType == 'official') {\n      if (!this._apiOfficial) return;\n      res = await this._apiOfficial.sendMessage(text, {\n        ...context,\n        onProgress,\n        messageId,\n        timeoutMs: this._timeoutMs,\n      });\n    } else {\n      res = await this._api.sendMessage(text, {\n        ...context,\n        onProgress,\n        messageId,\n        timeoutMs: this._timeoutMs,\n      });\n    }\n\n    const parentMessageId =\n      this.apiType == 'browser'\n        ? 
(res as ChatResponseV3).messageId\n        : (res as ChatResponseV4).id;\n\n    await this._db.updateContext(chatId, {\n      conversationId: res.conversationId,\n      parentMessageId,\n    });\n\n    return res;\n  };\n\n  resetThread = async (chatId: number) => {\n    if (this._apiBrowser) {\n      await this._apiBrowser.resetThread();\n    }\n    await this._db.clearContext(chatId);\n  };\n\n  refreshSession = async () => {\n    if (this._apiBrowser) {\n      await this._apiBrowser.refreshSession();\n    }\n  };\n}\n\nexport {ChatGPT};\n"
  },
  {
    "path": "src/db.ts",
    "content": "import KeyvRedis from '@keyv/redis';\nimport Keyv from 'keyv';\nimport {ChatMessage as ChatResponseV4} from 'chatgpt';\nimport {BotOptions} from './types';\nimport Redis from 'ioredis';\nimport {logWithTime} from './utils';\n\ninterface ContextObject {\n  conversationId?: string;\n  parentMessageId?: string;\n}\n\ntype Context = ContextObject | undefined;\n\nexport class DB {\n  protected _store: KeyvRedis | undefined;\n  protected _redis: Redis | undefined;\n  public messageStore: Keyv<ChatResponseV4>;\n  private _usersStore: Keyv<ContextObject>;\n\n  constructor(botOps: BotOptions) {\n    if (botOps.redisUri) {\n      this._redis = new Redis(botOps.redisUri, {family: 6});\n      this._redis.on('ready', async () => {\n        logWithTime('📚 Redis has started...');\n        const response = await this._redis?.ping();\n        logWithTime(`🏓 Redis ping result: ${response}`);\n      });\n      this._store = new KeyvRedis(this._redis);\n    }\n    this.messageStore = new Keyv({\n      store: this._store,\n      namespace: 'messages',\n    });\n    this._usersStore = new Keyv({\n      store: this._store,\n      namespace: 'users',\n    });\n  }\n\n  getContext = (chatId: number): Promise<Context> => {\n    return this._usersStore.get(chatId.toString());\n  };\n  updateContext = async (\n    chatId: number,\n    newContext: Pick<ContextObject, 'conversationId'> &\n      Required<Pick<ContextObject, 'parentMessageId'>>\n  ) => {\n    await this._usersStore.set(chatId.toString(), newContext);\n  };\n  clearContext = async (chatId: number) => {\n    await this._usersStore.delete(chatId.toString());\n  };\n  getReplyId = async (replyId: string | undefined) => {\n    if (!replyId) return undefined;\n    const reply = await this.messageStore.get(replyId);\n    return reply?.id;\n  };\n}\n"
  },
  {
    "path": "src/handlers/authentication.ts",
    "content": "import type TelegramBot from 'node-telegram-bot-api';\nimport type {Message} from 'node-telegram-bot-api';\nimport {BotOptions} from '../types';\nimport {logWithTime} from '../utils';\n\nclass Authenticator {\n  debug: number;\n  protected _bot: TelegramBot;\n  protected _opts: BotOptions;\n\n  constructor(bot: TelegramBot, botOpts: BotOptions, debug = 1) {\n    this.debug = debug;\n    this._bot = bot;\n    this._opts = botOpts;\n  }\n\n  authenticate = async (msg: Message) => {\n    if (msg.chat.type === 'private') {\n      if (\n        this._opts.userIds.length != 0 &&\n        this._opts.userIds.indexOf(msg.chat.id) == -1\n      ) {\n        logWithTime(\n          '⚠️ Authentication failed for user ' +\n            `@${msg.from?.username ?? ''} (${msg.from?.id}).`\n        );\n        await this._bot.sendMessage(\n          msg.chat.id,\n          '⛔️ Sorry, you are not my owner. I cannot chat with you or execute your command.'\n        );\n        return false;\n      }\n    } else {\n      if (\n        this._opts.groupIds.length != 0 &&\n        this._opts.groupIds.indexOf(msg.chat.id) == -1\n      ) {\n        logWithTime(\n          `⚠️ Authentication failed for group ${msg.chat.title} (${msg.chat.id}).`\n        );\n        await this._bot.sendMessage(\n          msg.chat.id,\n          \"⛔️ Sorry, I'm not supposed to work here. Please remove me from the group.\"\n        );\n        return false;\n      }\n    }\n    return true;\n  };\n}\n\nexport {Authenticator};\n"
  },
  {
    "path": "src/handlers/chat.ts",
    "content": "import type {ChatMessage as ChatResponseV4} from 'chatgpt';\nimport type {ChatResponse as ChatResponseV3} from 'chatgpt-v3';\nimport _ from 'lodash';\nimport type TelegramBot from 'node-telegram-bot-api';\nimport telegramifyMarkdown from 'telegramify-markdown';\nimport type {ChatGPT} from '../api';\nimport {BotOptions} from '../types';\nimport {generateIdFromMessage, logWithTime} from '../utils';\nimport Queue from 'promise-queue';\nimport {DB} from '../db';\n\nclass ChatHandler {\n  debug: number;\n  protected _opts: BotOptions;\n  protected _bot: TelegramBot;\n  protected _api: ChatGPT;\n  protected _n_queued = 0;\n  protected _n_pending = 0;\n  protected _apiRequestsQueue = new Queue(1, Infinity);\n  protected _positionInQueue: Record<string, number> = {};\n  protected _updatePositionQueue = new Queue(20, Infinity);\n  protected _db: DB;\n\n  constructor(\n    bot: TelegramBot,\n    api: ChatGPT,\n    botOpts: BotOptions,\n    db: DB,\n    debug = 1\n  ) {\n    this.debug = debug;\n    this._bot = bot;\n    this._api = api;\n    this._opts = botOpts;\n    this._db = db;\n  }\n\n  handle = async (msg: TelegramBot.Message, text: string) => {\n    if (!text) return;\n\n    const chatId = msg.chat.id;\n    if (this.debug >= 1) {\n      const userInfo = `@${msg.from?.username ?? ''} (${msg.from?.id})`;\n      const chatInfo =\n        msg.chat.type == 'private'\n          ? 'private chat'\n          : `group ${msg.chat.title} (${msg.chat.id})`;\n      logWithTime(`📩 Message from ${userInfo} in ${chatInfo}:\\n${text}`);\n    }\n\n    // Send a message to the chat acknowledging receipt of their message\n    const reply = await this._bot.sendMessage(\n      chatId,\n      this._opts.queue ? 
'⌛' : '🤔',\n      {\n        reply_to_message_id: msg.message_id,\n      }\n    );\n\n    const sendToGpt = async () => {\n      await this._sendToGpt(\n        text,\n        chatId,\n        reply,\n        generateIdFromMessage(reply),\n        generateIdFromMessage(msg.reply_to_message)\n      );\n    };\n\n    if (!this._opts.queue) {\n      await sendToGpt();\n    } else {\n      // add to sequence queue due to chatGPT processes only one request at a time\n      const requestPromise = this._apiRequestsQueue.add(() => {\n        return sendToGpt();\n      });\n      if (this._n_pending == 0) this._n_pending++;\n      else this._n_queued++;\n      this._positionInQueue[this._getQueueKey(chatId, reply.message_id)] =\n        this._n_queued;\n\n      await this._bot.editMessageText(\n        this._n_queued > 0 ? `⌛: You are #${this._n_queued} in line.` : '🤔',\n        {\n          chat_id: chatId,\n          message_id: reply.message_id,\n        }\n      );\n      await requestPromise;\n    }\n  };\n\n  protected _sendToGpt = async (\n    text: string,\n    chatId: number,\n    originalReply: TelegramBot.Message,\n    messageId: string,\n    replyId?: string\n  ) => {\n    let reply = originalReply;\n    await this._bot.sendChatAction(chatId, 'typing');\n\n    // Send message to ChatGPT\n    try {\n      const res = await this._api.sendMessage(\n        text,\n        chatId,\n        messageId,\n        replyId,\n        _.throttle(\n          async (partialResponse: ChatResponseV3 | ChatResponseV4) => {\n            const resText =\n              this._api.apiType == 'browser'\n                ? 
(partialResponse as ChatResponseV3).response\n                : (partialResponse as ChatResponseV4).text;\n            reply = await this._editMessage(reply, resText);\n            await this._bot.sendChatAction(chatId, 'typing');\n          },\n          3000,\n          {leading: true, trailing: false}\n        )\n      );\n      const resText =\n        this._api.apiType == 'browser'\n          ? (res as ChatResponseV3).response\n          : (res as ChatResponseV4).text;\n      await this._editMessage(reply, resText);\n\n      if (this.debug >= 1) logWithTime(`📨 Response:\\n${resText}`);\n    } catch (err) {\n      logWithTime('⛔️ ChatGPT API error:', (err as Error).message);\n      await this._db.clearContext(chatId);\n      this._bot.sendMessage(\n        chatId,\n        \"⚠️ Sorry, I'm having trouble connecting to the server, please try again later.\"\n      );\n    }\n\n    // Update queue order after finishing current request\n    await this._updateQueue(chatId, reply.message_id);\n  };\n\n  // Edit telegram message\n  protected _editMessage = async (\n    msg: TelegramBot.Message,\n    text: string,\n    needParse = true\n  ) => {\n    if (text.trim() == '' || msg.text == text) {\n      return msg;\n    }\n    try {\n      text = telegramifyMarkdown(text, 'escape');\n      const res = await this._bot.editMessageText(text, {\n        chat_id: msg.chat.id,\n        message_id: msg.message_id,\n        parse_mode: needParse ? 
'MarkdownV2' : undefined,\n      });\n      // type of res is boolean | Message\n      if (typeof res === 'object') {\n        // return a Message type instance if res is a Message type\n        return res as TelegramBot.Message;\n      } else {\n        // return the original message if res is a boolean type\n        return msg;\n      }\n    } catch (err) {\n      logWithTime('⛔️ Edit message error:', (err as Error).message);\n      if (this.debug >= 2) logWithTime('⛔️ Message text:', text);\n      return msg;\n    }\n  };\n\n  protected _getQueueKey = (chatId: number, messageId: number) =>\n    `${chatId}:${messageId}`;\n\n  protected _parseQueueKey = (key: string) => {\n    const [chat_id, message_id] = key.split(':');\n\n    return {chat_id, message_id};\n  };\n\n  protected _updateQueue = async (chatId: number, messageId: number) => {\n    // delete value for current request\n    delete this._positionInQueue[this._getQueueKey(chatId, messageId)];\n    if (this._n_queued > 0) this._n_queued--;\n    else this._n_pending--;\n\n    for (const key in this._positionInQueue) {\n      const {chat_id, message_id} = this._parseQueueKey(key);\n      this._positionInQueue[key]--;\n      this._updatePositionQueue.add(() => {\n        return this._bot.editMessageText(\n          this._positionInQueue[key] > 0\n            ? `⌛: You are #${this._positionInQueue[key]} in line.`\n            : '🤔',\n          {\n            chat_id,\n            message_id: Number(message_id),\n          }\n        );\n      });\n    }\n  };\n}\n\nexport {ChatHandler};\n"
  },
  {
    "path": "src/handlers/command.ts",
    "content": "import type TelegramBot from 'node-telegram-bot-api';\nimport type {ChatGPT} from '../api';\nimport {BotOptions} from '../types';\nimport {logWithTime} from '../utils';\n\nclass CommandHandler {\n  debug: number;\n  protected _opts: BotOptions;\n  protected _bot: TelegramBot;\n  protected _api: ChatGPT;\n\n  constructor(bot: TelegramBot, api: ChatGPT, botOpts: BotOptions, debug = 1) {\n    this.debug = debug;\n    this._bot = bot;\n    this._api = api;\n    this._opts = botOpts;\n  }\n\n  handle = async (\n    msg: TelegramBot.Message,\n    command: string,\n    isMentioned: boolean,\n    botUsername: string\n  ) => {\n    const userInfo = `@${msg.from?.username ?? ''} (${msg.from?.id})`;\n    const chatInfo =\n      msg.chat.type == 'private'\n        ? 'private chat'\n        : `group ${msg.chat.title} (${msg.chat.id})`;\n    if (this.debug >= 1) {\n      logWithTime(\n        `👨‍💻️ User ${userInfo} issued command \"${command}\" in ${chatInfo} (isMentioned=${isMentioned}).`\n      );\n    }\n\n    // Ignore commands without mention in groups.\n    if (msg.chat.type != 'private' && !isMentioned) return;\n\n    switch (command) {\n      case '/help':\n        await this._bot.sendMessage(\n          msg.chat.id,\n          'To chat with me, you can:\\n' +\n            '  • send messages directly (not supported in groups)\\n' +\n            `  • send messages that start with ${this._opts.chatCmd}\\n` +\n            '  • reply to my last message\\n\\n' +\n            'Command list:\\n' +\n            `(When using a command in a group, make sure to include a mention after the command, like /help@${botUsername}).\\n` +\n            '  • /help Show help information.\\n' +\n            '  • /reset Reset the current chat thread and start a new one.\\n' +\n            '  • /reload (admin required) Refresh the ChatGPT session.'\n        );\n        break;\n\n      case '/reset':\n        await this._bot.sendChatAction(msg.chat.id, 'typing');\n        await 
this._api.resetThread(msg.chat.id);\n        await this._bot.sendMessage(\n          msg.chat.id,\n          '🔄 The chat thread has been reset. New chat thread started.'\n        );\n        logWithTime(`🔄 Chat thread reset by ${userInfo}.`);\n        break;\n\n      case '/reload':\n        if (this._opts.userIds.indexOf(msg.from?.id ?? 0) == -1) {\n          await this._bot.sendMessage(\n            msg.chat.id,\n            '⛔️ Sorry, you do not have the permission to run this command.'\n          );\n          logWithTime(\n            `⚠️ Permission denied for \"${command}\" from ${userInfo}.`\n          );\n        } else {\n          await this._bot.sendChatAction(msg.chat.id, 'typing');\n          await this._api.refreshSession();\n          await this._bot.sendMessage(msg.chat.id, '🔄 Session refreshed.');\n          logWithTime(`🔄 Session refreshed by ${userInfo}.`);\n        }\n        break;\n\n      default:\n        await this._bot.sendMessage(\n          msg.chat.id,\n          '⚠️ Unsupported command. Run /help to see the usage.'\n        );\n        break;\n    }\n  };\n}\n\nexport {CommandHandler};\n"
  },
  {
    "path": "src/handlers/message.ts",
    "content": "import type TelegramBot from 'node-telegram-bot-api';\nimport type {ChatGPT} from '../api';\nimport {BotOptions} from '../types';\nimport {logWithTime} from '../utils';\nimport {Authenticator} from './authentication';\nimport {ChatHandler} from './chat';\nimport {CommandHandler} from './command';\nimport {DB} from '../db';\n\nclass MessageHandler {\n  debug: number;\n  protected _opts: BotOptions;\n  protected _bot: TelegramBot;\n  protected _botUsername = '';\n  protected _botId: number | undefined;\n  protected _api: ChatGPT;\n  protected _authenticator: Authenticator;\n  protected _commandHandler: CommandHandler;\n  protected _chatHandler: ChatHandler;\n\n  constructor(\n    bot: TelegramBot,\n    api: ChatGPT,\n    botOpts: BotOptions,\n    db: DB,\n    debug = 1\n  ) {\n    this.debug = debug;\n    this._bot = bot;\n    this._api = api;\n    this._opts = botOpts;\n    this._authenticator = new Authenticator(bot, botOpts, debug);\n    this._commandHandler = new CommandHandler(bot, api, botOpts, debug);\n    this._chatHandler = new ChatHandler(bot, api, botOpts, db, debug);\n  }\n\n  init = async () => {\n    this._botUsername = (await this._bot.getMe()).username ?? 
'';\n    this._botId = (await this._bot.getMe()).id;\n    logWithTime(`🤖 Bot @${this._botUsername} has started...`);\n  };\n\n  handle = async (msg: TelegramBot.Message) => {\n    if (this.debug >= 2) logWithTime(msg);\n\n    // Authentication.\n    if (!(await this._authenticator.authenticate(msg))) return;\n\n    // Parse message.\n    const {text, command, isMentioned} = this._parseMessage(msg);\n    if (command != '' && command != this._opts.chatCmd) {\n      // For commands except `${chatCmd}`, pass the request to commandHandler.\n      await this._commandHandler.handle(\n        msg,\n        command,\n        isMentioned,\n        this._botUsername\n      );\n    } else {\n      // Handles:\n      // - direct messages in private chats\n      // - replied messages in both private chats and group chats\n      // - messages that start with `chatCmd` in private chats and group chats\n      if (\n        command == this._opts.chatCmd ||\n        msg.chat.type == 'private' ||\n        msg.reply_to_message?.from?.id === this._botId\n      ) {\n        await this._chatHandler.handle(msg, text);\n      }\n    }\n  };\n\n  protected _parseMessage = (msg: TelegramBot.Message) => {\n    let text = msg.text ?? '';\n    let command = '';\n    let isMentioned = false;\n    if ('entities' in msg) {\n      // May have bot commands.\n      const regMention = new RegExp(`@${this._botUsername}$`);\n      for (const entity of msg.entities ?? []) {\n        if (entity.type == 'bot_command' && entity.offset == 0) {\n          text = msg.text?.slice(entity.length).trim() ?? '';\n          command = msg.text?.slice(0, entity.length) ?? '';\n          isMentioned = regMention.test(command);\n          command = command.replace(regMention, ''); // Remove the mention.\n          break;\n        }\n      }\n    }\n    return {text, command, isMentioned};\n  };\n}\n\nexport {MessageHandler};\n"
  },
  {
    "path": "src/index.ts",
    "content": "import TelegramBot from 'node-telegram-bot-api';\nimport {ChatGPT} from './api';\nimport {MessageHandler} from './handlers/message';\nimport {loadConfig} from './utils';\nimport {DB} from './db';\n\nasync function main() {\n  const opts = loadConfig();\n  const db = new DB(opts.bot);\n\n  // Initialize ChatGPT API.\n  const api = new ChatGPT(opts.api, db);\n  await api.init();\n\n  // Initialize Telegram Bot and message handler.\n  const bot = new TelegramBot(opts.bot.token, {\n    polling: true,\n    // eslint-disable-next-line @typescript-eslint/no-explicit-any\n    request: {proxy: opts.proxy} as any,\n  });\n  const messageHandler = new MessageHandler(bot, api, opts.bot, db, opts.debug);\n  await messageHandler.init();\n\n  bot.on('message', messageHandler.handle);\n}\n\nmain().catch((err) => {\n  console.error(err);\n  process.exit(1);\n});\n"
  },
  {
    "path": "src/types.d.ts",
    "content": "import type {openai, FetchFn} from 'chatgpt';\n\nexport interface BotOptions {\n  token: string;\n  userIds: number[];\n  groupIds: number[];\n  chatCmd: string;\n  queue: boolean;\n  redisUri?: string;\n}\n\nexport interface APIBrowserOptions {\n  email: string;\n  password: string;\n  isGoogleLogin?: boolean;\n  isProAccount?: boolean;\n  executablePath?: string;\n  proxyServer?: string;\n  nopechaKey?: string;\n  captchaToken?: string;\n  userDataDir?: string;\n  timeoutMs?: number;\n  debug?: boolean;\n}\n\nexport interface APIOfficialOptions {\n  apiKey: string;\n  apiBaseUrl?: string;\n  completionParams?: Partial<\n    Omit<openai.CreateChatCompletionRequest, 'messages' | 'n'>\n  >;\n  systemMessage?: string;\n  maxModelTokens?: number;\n  maxResponseTokens?: number;\n  timeoutMs?: number;\n  fetch?: FetchFn;\n  debug?: boolean;\n}\n\nexport interface APIUnofficialOptions {\n  accessToken: string;\n  apiReverseProxyUrl?: string;\n  model?: string;\n  timeoutMs?: number;\n  fetch?: FetchFn;\n  debug?: boolean;\n}\n\nexport interface APIOptions {\n  type: 'browser' | 'official' | 'unofficial';\n  browser?: APIBrowserOptions;\n  official?: APIOfficialOptions;\n  unofficial?: APIUnofficialOptions;\n}\n\nexport interface Config {\n  debug: number;\n  bot: BotOptions;\n  api: APIOptions;\n  proxy?: string;\n}\n"
  },
  {
    "path": "src/utils.ts",
    "content": "import type {FetchFn, openai} from 'chatgpt';\nimport config from 'config';\nimport pkg from 'https-proxy-agent';\nimport fetch, {type RequestInfo, type RequestInit} from 'node-fetch';\nimport {\n  Config,\n  APIBrowserOptions,\n  APIOfficialOptions,\n  APIUnofficialOptions,\n} from './types';\nimport {Message} from 'node-telegram-bot-api';\nconst {HttpsProxyAgent} = pkg;\n\nfunction loadConfig(): Config {\n  function tryGet<T>(key: string): T | undefined {\n    if (!config.has(key)) {\n      return undefined;\n    } else {\n      return config.get<T>(key);\n    }\n  }\n\n  let fetchFn: FetchFn | undefined = undefined;\n  const proxy = tryGet<string>('proxy') || process.env.http_proxy;\n  if (proxy) {\n    const proxyAgent = new HttpsProxyAgent(proxy);\n    fetchFn = ((url, opts) =>\n      fetch(\n        url as RequestInfo,\n        {...opts, agent: proxyAgent} as RequestInit\n      )) as FetchFn;\n  }\n\n  const apiType = config.get<'browser' | 'official' | 'unofficial'>('api.type');\n  let apiBrowserCfg: APIBrowserOptions | undefined;\n  let apiOfficialCfg: APIOfficialOptions | undefined;\n  let apiUnofficialCfg: APIUnofficialOptions | undefined;\n  if (apiType == 'browser') {\n    apiBrowserCfg = {\n      email: config.get<string>('api.browser.email'),\n      password: config.get<string>('api.browser.password'),\n      isGoogleLogin: tryGet<boolean>('api.browser.isGoogleLogin') || false,\n      isProAccount: tryGet<boolean>('api.browser.isProAccount') || false,\n      executablePath:\n        tryGet<string>('api.browser.executablePath') ||\n        process.env.PUPPETEER_EXECUTABLE_PATH ||\n        undefined,\n      proxyServer: tryGet<string>('proxy') || undefined,\n      nopechaKey: tryGet<string>('api.browser.nopechaKey') || undefined,\n      captchaToken: tryGet<string>('api.browser.captchaToken') || undefined,\n      userDataDir: tryGet<string>('api.browser.userDataDir') || undefined,\n      timeoutMs: tryGet<number>('api.browser.timeoutMs') 
|| undefined,\n      debug: config.get<number>('debug') >= 2,\n    };\n  } else if (apiType == 'official') {\n    apiOfficialCfg = {\n      apiKey: config.get<string>('api.official.apiKey'),\n      apiBaseUrl: tryGet<string>('api.official.apiBaseUrl') || undefined,\n      completionParams:\n        tryGet<\n          Partial<Omit<openai.CreateChatCompletionRequest, 'messages' | 'n'>>\n        >('api.official.completionParams') || undefined,\n      systemMessage: tryGet<string>('api.official.systemMessage') || undefined,\n      maxModelTokens:\n        tryGet<number>('api.official.maxModelTokens') || undefined,\n      maxResponseTokens:\n        tryGet<number>('api.official.maxResponseTokens') || undefined,\n      timeoutMs: tryGet<number>('api.official.timeoutMs') || undefined,\n      fetch: fetchFn,\n      debug: config.get<number>('debug') >= 2,\n    };\n  } else if (apiType == 'unofficial') {\n    apiUnofficialCfg = {\n      accessToken: config.get<string>('api.unofficial.accessToken'),\n      apiReverseProxyUrl:\n        tryGet<string>('api.unofficial.apiReverseProxyUrl') || undefined,\n      model: tryGet<string>('api.unofficial.model') || undefined,\n      timeoutMs: tryGet<number>('api.unofficial.timeoutMs') || undefined,\n      fetch: fetchFn,\n      debug: config.get<number>('debug') >= 2,\n    };\n  } else {\n    throw new RangeError('Invalid API type');\n  }\n\n  const cfg = {\n    debug: tryGet<number>('debug') || 1,\n    bot: {\n      token: config.get<string>('bot.token'),\n      userIds: tryGet<number[]>('bot.userIds') || [],\n      groupIds: tryGet<number[]>('bot.groupIds') || [],\n      chatCmd: tryGet<string>('bot.chatCmd') || '/chat',\n      queue: config.get<boolean>('bot.queue') ?? 
true,\n      redisUri: config.get<string>('bot.redisUri'),\n    },\n    api: {\n      type: apiType,\n      browser: apiBrowserCfg,\n      official: apiOfficialCfg,\n      unofficial: apiUnofficialCfg,\n    },\n    proxy: proxy,\n  };\n\n  return cfg;\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nfunction logWithTime(...args: any[]) {\n  console.log(new Date().toLocaleString(), ...args);\n}\n\nfunction generateIdFromMessage<T extends Message | undefined = undefined>(\n  msg?: T\n): T extends Message ? string : undefined {\n  return (\n    msg\n      ? `${msg.chat.id}_${msg.message_id}_${msg.from?.id}_${msg.date}`\n      : undefined\n  ) as T extends Message ? string : undefined;\n}\n\nexport {loadConfig, logWithTime, generateIdFromMessage};\n"
  },
  {
    "path": "tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"ESNext\",\n    \"module\": \"ESNext\",\n    \"moduleResolution\": \"node\",\n    \"strict\": true,\n    \"esModuleInterop\": true,\n    \"skipLibCheck\": true,\n    \"forceConsistentCasingInFileNames\": true,\n    \"outDir\": \"dist\",\n    \"typeRoots\": [\"node_modules/@types\", \"src/@types\"]\n  },\n  \"include\": [\"src\"],\n  \"exclude\": [\"node_modules\"]\n}\n"
  }
]