Repository: RainEggplant/chatgpt-telegram-bot Branch: master Commit: 50afe60220b7 Files: 28 Total size: 57.9 KB Directory structure: gitextract_iwt_as7g/ ├── .dockerignore ├── .eslintrc.json ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── 1.bug_report.yml │ │ └── 2.feature_request.yml │ └── workflows/ │ └── docker.yml ├── .gitignore ├── .husky/ │ └── pre-commit ├── .prettierrc ├── .vscode/ │ ├── launch.json │ └── tasks.json ├── Dockerfile ├── Dockerfile.browser ├── LICENSE ├── README.md ├── README_zh-CN.md ├── config/ │ └── default.json ├── package.json ├── src/ │ ├── @types/ │ │ └── telegramify-markdown/ │ │ └── index.d.ts │ ├── api.ts │ ├── db.ts │ ├── handlers/ │ │ ├── authentication.ts │ │ ├── chat.ts │ │ ├── command.ts │ │ └── message.ts │ ├── index.ts │ ├── types.d.ts │ └── utils.ts └── tsconfig.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ # Logs logs *.log npm-debug.log* yarn-debug.log* yarn-error.log* lerna-debug.log* .pnpm-debug.log* # Diagnostic reports (https://nodejs.org/api/report.html) report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json # Runtime data pids *.pid *.seed *.pid.lock # Directory for instrumented libs generated by jscoverage/JSCover lib-cov # Coverage directory used by tools like istanbul coverage *.lcov # nyc test coverage .nyc_output # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) .grunt # Bower dependency directory (https://bower.io/) bower_components # node-waf configuration .lock-wscript # Compiled binary addons (https://nodejs.org/api/addons.html) build/Release # Dependency directories node_modules/ jspm_packages/ # Snowpack dependency directory (https://snowpack.dev/) web_modules/ # TypeScript cache *.tsbuildinfo # Optional npm cache directory .npm # Optional eslint cache .eslintcache # Optional stylelint cache 
.stylelintcache # Microbundle cache .rpt2_cache/ .rts2_cache_cjs/ .rts2_cache_es/ .rts2_cache_umd/ # Optional REPL history .node_repl_history # Output of 'npm pack' *.tgz # Yarn Integrity file .yarn-integrity # dotenv environment variable files .env .env.development.local .env.test.local .env.production.local .env.local # parcel-bundler cache (https://parceljs.org/) .cache .parcel-cache # Next.js build output .next out # Nuxt.js build / generate output .nuxt dist # Gatsby files .cache/ # Comment in the public line in if your project uses Gatsby and not Next.js # https://nextjs.org/blog/next-9-1#public-directory-support # public # vuepress build output .vuepress/dist # vuepress v2.x temp and cache directory .temp .cache # Docusaurus cache and generated files .docusaurus # Serverless directories .serverless/ # FuseBox cache .fusebox/ # DynamoDB Local files .dynamodb/ # TernJS port file .tern-port # Stores VSCode versions used for testing VSCode extensions .vscode-test # yarn v2 .yarn/cache .yarn/unplugged .yarn/build-state.yml .yarn/install-state.gz .pnp.* # Misc .DS_Store ================================================ FILE: .eslintrc.json ================================================ { "extends": [ "eslint:recommended", "plugin:@typescript-eslint/recommended", "prettier" ], "plugins": ["@typescript-eslint"], "parser": "@typescript-eslint/parser", "root": true } ================================================ FILE: .github/ISSUE_TEMPLATE/1.bug_report.yml ================================================ name: Bug Report description: File a bug report title: "[Bug]: " body: - type: markdown attributes: value: | Thanks for taking the time to fill out this bug report! - type: checkboxes attributes: label: Not a duplicate issue options: - label: I have searched the existing issues and confirmed this issue is not a duplicate. 
required: true - type: checkboxes attributes: label: Verify ChatGPT service is operational options: - label: I have checked [status.openai.com](https://status.openai.com) and ChatGPT is operational. - type: input attributes: label: Bot version description: Please enter the version of the bot you are using. placeholder: v2.0.0 validations: required: true - type: dropdown attributes: label: API type description: Please select the type(s) of API you are using. multiple: true options: - official - unofficial - browser - type: textarea attributes: label: Environment details description: Please enter your Node.js version, operating system, and whether you are using Docker. validations: required: true - type: textarea attributes: label: Describe the Bug description: A clear and concise description of what the bug is, and the expected behavior. validations: required: true - type: textarea attributes: label: To Reproduce description: Steps to reproduce the behavior. - type: textarea attributes: label: Logs description: | **Be careful not to leak your account information!** If applicable, please provide logs to help diagnose your problem. You may need to set the `debug` level to 2 or higher. If the log is very long, please provide a link to [pastebin](https://pastebin.com) or a similar service. - type: textarea attributes: label: Additional context description: Add any other context about the problem here. ================================================ FILE: .github/ISSUE_TEMPLATE/2.feature_request.yml ================================================ name: Feature Request description: Suggest an idea for this project title: "[Feature Request]: " body: - type: markdown attributes: value: | Thanks for suggesting an idea for this project! - type: checkboxes attributes: label: Not a duplicate feature request options: - label: I have searched the existing issues and confirmed this feature request is not a duplicate. 
required: true - type: checkboxes attributes: label: Supported by the upstream API description: This project relies on this [API](https://github.com/transitive-bullshit/chatgpt-api) to use ChatGPT models. If the feature is not supported yet, please file a feature request in the API repo instead. options: - label: This feature can be implemented using the upstream API. - type: textarea attributes: label: Describe the feature description: Please describe the feature you would like to see implemented. validations: required: true - type: textarea attributes: label: Additional context description: Please provide any additional context about the feature request (e.g. proposed implementation). ================================================ FILE: .github/workflows/docker.yml ================================================ name: Publish Docker image on: release: types: [published] workflow_dispatch: jobs: docker: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - name: Login to Docker Hub uses: docker/login-action@v2 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} # Browserless - name: Extract metadata for Docker (browserless) id: meta uses: docker/metadata-action@v4 with: images: RainEggplant/chatgpt-telegram-bot tags: | type=semver,pattern={{version}} - name: Build and push (browserless) uses: docker/build-push-action@v4 with: context: . 
file: ./Dockerfile platforms: linux/amd64,linux/arm64 push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} # Browser-based - name: Extract metadata for Docker (browser-based) id: meta_browser uses: docker/metadata-action@v4 with: images: RainEggplant/chatgpt-telegram-bot flavor: | suffix=-browser,onlatest=true tags: | type=semver,pattern={{version}} - name: Build and push (browser-based) uses: docker/build-push-action@v4 with: context: . file: ./Dockerfile.browser platforms: linux/amd64,linux/arm64 push: true tags: ${{ steps.meta_browser.outputs.tags }} labels: ${{ steps.meta_browser.outputs.labels }} ================================================ FILE: .gitignore ================================================ # IDE files .idea # Logs logs *.log npm-debug.log* yarn-debug.log* yarn-error.log* lerna-debug.log* .pnpm-debug.log* # Diagnostic reports (https://nodejs.org/api/report.html) report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json # Runtime data pids *.pid *.seed *.pid.lock # Directory for instrumented libs generated by jscoverage/JSCover lib-cov # Coverage directory used by tools like istanbul coverage *.lcov # nyc test coverage .nyc_output # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) .grunt # Bower dependency directory (https://bower.io/) bower_components # node-waf configuration .lock-wscript # Compiled binary addons (https://nodejs.org/api/addons.html) build/Release # Dependency directories node_modules/ jspm_packages/ # Snowpack dependency directory (https://snowpack.dev/) web_modules/ # TypeScript cache *.tsbuildinfo # Optional npm cache directory .npm # Optional eslint cache .eslintcache # Optional stylelint cache .stylelintcache # Microbundle cache .rpt2_cache/ .rts2_cache_cjs/ .rts2_cache_es/ .rts2_cache_umd/ # Optional REPL history .node_repl_history # Output of 'npm pack' *.tgz # Yarn Integrity file .yarn-integrity # dotenv environment variable files .env .env.development.local 
.env.test.local .env.production.local .env.local # parcel-bundler cache (https://parceljs.org/) .cache .parcel-cache # Next.js build output .next out # Nuxt.js build / generate output .nuxt dist # Gatsby files .cache/ # Comment in the public line in if your project uses Gatsby and not Next.js # https://nextjs.org/blog/next-9-1#public-directory-support # public # vuepress build output .vuepress/dist # vuepress v2.x temp and cache directory .temp .cache # Docusaurus cache and generated files .docusaurus # Serverless directories .serverless/ # FuseBox cache .fusebox/ # DynamoDB Local files .dynamodb/ # TernJS port file .tern-port # Stores VSCode versions used for testing VSCode extensions .vscode-test # yarn v2 .yarn/cache .yarn/unplugged .yarn/build-state.yml .yarn/install-state.gz .pnp.* # Misc .DS_Store # Config files config/local*.json # Database files database ================================================ FILE: .husky/pre-commit ================================================ #!/usr/bin/env sh . "$(dirname -- "$0")/_/husky.sh" # Temporary workaround for excessive output # See https://github.com/okonet/lint-staged/issues/1164#issuecomment-1177561594 # Check if /dev/tty works: https://stackoverflow.com/a/69088164 if sh -c ": >/dev/tty" >/dev/null 2>/dev/null; then exec >/dev/tty 2>&1; fi pnpm lint-staged # Skip typechecking before tsc can work with lint-stage # https://github.com/microsoft/TypeScript/issues/27379 ================================================ FILE: .prettierrc ================================================ { "printWidth": 80, "tabWidth": 2, "useTabs": false, "semi": true, "singleQuote": true, "trailingComma": "es5", "bracketSpacing": false, "arrowParens": "always" } ================================================ FILE: .vscode/launch.json ================================================ { // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. 
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ { "type": "node", "runtimeArgs": ["--experimental-loader=extensionless"], "request": "launch", "name": "Debug", "skipFiles": ["/**"], "program": "${workspaceFolder}/src/index.ts", "preLaunchTask": "Build for Debugging", "outFiles": ["${workspaceFolder}/dist/**/*.js"] } ] } ================================================ FILE: .vscode/tasks.json ================================================ { "version": "2.0.0", "tasks": [ { "type": "npm", "script": "build:debug", "group": "build", "problemMatcher": ["$tsc"], "label": "Build for Debugging", "detail": "tsc --build && tsc --sourceMap" } ] } ================================================ FILE: Dockerfile ================================================ # Builder stage FROM node:lts-alpine AS builder RUN npm install -g pnpm WORKDIR /app COPY config /app/config COPY package.json pnpm-lock.yaml ./ RUN pnpm install --frozen-lockfile --ignore-scripts COPY . . RUN pnpm build # Runner stage FROM node:lts-alpine RUN npm install -g pnpm WORKDIR /app COPY config /app/config COPY package.json pnpm-lock.yaml ./ RUN pnpm install --frozen-lockfile --ignore-scripts --prod --no-optional COPY --from=builder /app/dist ./dist CMD pnpm start ================================================ FILE: Dockerfile.browser ================================================ # Builder stage FROM node:lts-alpine AS builder RUN npm install -g pnpm WORKDIR /app COPY package.json pnpm-lock.yaml ./ RUN pnpm install --frozen-lockfile --ignore-scripts COPY . . 
RUN pnpm build # Runner stage FROM node:lts RUN apt update && apt install -y chromium xvfb RUN npm install -g pnpm ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium WORKDIR /app COPY package.json pnpm-lock.yaml ./ RUN pnpm install --frozen-lockfile --ignore-scripts --prod # Install prebuilt lmdb binary RUN arch_out=$(uname -m) \ && if [ "${arch_out}" = "x86_64" ]; then \ ARCH=x64; \ elif [ "${arch_out}" = "aarch64" ]; then \ ARCH=arm64; \ elif echo "${arch_out}" | grep -q "armv"; then \ ARCH=arm; \ else \ ARCH=${arch_out}; \ fi \ && LMDB_VER=$(pnpm list lmdb | grep lmdb | awk '{print $2}') \ && pnpm add @lmdb/lmdb-linux-$ARCH@$LMDB_VER --ignore-scripts --prod --no-optional # We can also build it ourselves: # RUN npm explore lmdb -- npm run install COPY --from=builder /app/dist ./dist CMD xvfb-run -a --server-args="-screen 0 1280x800x24 -ac -nolisten tcp -dpi 96 +extension RANDR" pnpm start ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2022 RainEggplant Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ # ChatGPT Telegram Bot ![badge:version](https://img.shields.io/github/v/release/RainEggplant/chatgpt-telegram-bot?color=brightgreen) ![docker image size](https://img.shields.io/docker/image-size/raineggplant/chatgpt-telegram-bot/latest?label=docker%20image%20size) ![license](https://img.shields.io/badge/license-MIT-green) English | [中文](README_zh-CN.md) A ChatGPT bot for Telegram based on Node.js. Support both browserless and browser-based APIs. ## Updates 🎉 v2 has been released!
🔔 Apr. 3, 2023 (v2.5.0) > - Support per-chat conversation by @Vadko. > - Support setting message queue as optional by @Vadko.
Previous Updates
🔔 Mar. 10, 2023 (v2.4.0) > - Add a timeout mechanism to prevent the message queue from getting stuck. You can override the default timeout in the config file.
🔔 Mar. 07, 2023 (v2.3.2) > - You can pull the [pre-built Docker image](https://hub.docker.com/r/raineggplant/chatgpt-telegram-bot) from Docker Hub now!
🔔 Mar. 02, 2023 (v2.3.0) > - Support the [official OpenAI chat completions API](https://platform.openai.com/docs/guides/chat). > - Support proxy by using a custom fetch function. We strongly advise you to use the `official` API. There are rumors that OpenAI may ban your account if you continue to use the `unofficial` and `browser` APIs.
🔔 Feb. 28, 2023 (v2.2.0) > - Support message queue to avoid rate limit. > - Improve Markdown parsing.
🔔 Feb. 22, 2023 (v2.1.1) > - Support custom prompt prefix and suffix (allowing you to customize the bot's identity and behavior). > - Support Node.js v19.
🔔 Feb. 19, 2023 (v2.1.0) > We have added support for the unofficial proxy API by @acheong08. This API uses a proxy server that allows users to bypass Cloudflare protection and use the real ChatGPT. Please see [Usage](#usage) for more details. > > For previous users, we've updated our API options. `api.version` is now `api.type`, with options `browser` (previously `v3`), `official` (previously `v4`), and `unofficial`. Please update your config file accordingly.
🔔 Feb. 17, 2023 > According to [one of the maintainers](https://github.com/waylaidwanderer/node-chatgpt-api#updates) of the reverse proxy servers, OpenAI has patched this method. So you have to either use the browserless Official API with official models (which costs money), or use the browser-based solution.
🔔 Feb. 15, 2023 > We have released v2.0.0 of this bot, which supports both [browserless](https://github.com/transitive-bullshit/chatgpt-api) and [browser-based](https://github.com/transitive-bullshit/chatgpt-api/tree/v3) APIs. You can switch between the two APIs at any time using the config file. Additionally, we have refactored the codebase to make it more maintainable and easier to extend. > > For old users, you will need to switch from the `.env` file to JSON files under the `config/` folder.
## Features
Private Chat Group Chat
- Support for both browserless (official, unofficial) and browser-based APIs - Support for both private and group chats - Work in privacy mode (the bot can only see specific messages) - Bot access control based on user and group IDs - Reset chat thread and refresh session with command - Queue messages to avoid rate limit - Typing indicator, Markdown formatting, ... - Cloudflare bypassing and CAPTCHA automation (for the browser-based API) - Customize bot identity and behavior (by setting `api.official.systemMessage`) - User-friendly logging ## Usage ### Differences between the three types of APIs > Thank @transitive-bullshit for making this easy-to-understand table! | Type | Free? | Robust? | Quality? | | -------------| ------ | -------- | ----------------------- | | `official` | ❌ No | ✅ Yes | ✅ Real ChatGPT models | | `unofficial` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT | | `browser` | ✅ Yes | ❌ No | ✅ Real ChatGPT | - `official`: Uses the `gpt-3.5-turbo` model by default with the official OpenAI chat completions API (official, robust approach, but it's not free) - `unofficial`: Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited) - `browser` (not recommended): Uses Puppeteer to access the official ChatGPT webapp (uses the real ChatGPT, but very flaky, heavyweight, and error prone) > **Warning** > > There are rumors that OpenAI may ban your account if you continue to use the `unofficial` and `browser` API. Use it at your own risk. ### Start the server #### Option 1: Node To get started, follow these steps: 1. Clone this project. 2. Create `local.json` under the `config/` folder. You can copy the `config/default.json` as a template. 3. Modify the `local.json` following the instructions in the file. The settings in `local.json` will override the default settings in `default.json`. 
- Set `api.type` to `official` if you want to use the browserless official API. Then provide your [OpenAI API Key](https://platform.openai.com/overview) and other settings. You can refer to [this](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptapi) for more details. Note that this will cost your credits. - Set `api.type` to `unofficial` if you want to use the browserless unofficial API. Then provide your OpenAI access token ([how to get your access token?](https://github.com/transitive-bullshit/chatgpt-api#access-token)) and other settings. You can refer to [this](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptunofficialproxyapi) for more details. - Set `api.type` to `browser` if you want to use the browser-based API (not recommended). Then provide the OpenAI / Google / Microsoft credentials and other settings. You can refer to [this](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#authentication) and [this](https://github.com/transitive-bullshit/chatgpt-api/blob/v3/docs/classes/ChatGPTAPIBrowser.md#parameters) for more details. Make sure you have a Chromium-based browser installed. Then you can start the bot with: ```shell pnpm install pnpm build && pnpm start ``` #### Option 2: Docker To get started, follow these steps: 1. Create a folder named `config` and create a `local.json` file in it. You can follow the instructions in the "Choice #1: Node" section to customize the settings. 2. Run the following command to start the bot: ```shell docker run -d -v ./config:/app/config raineggplant/chatgpt-telegram-bot:latest ``` This will pull the latest image that only supports the browserless API. If you want to use the browser-based API, you can add a `-browser` suffix to the tag, e.g., `raineggplant/chatgpt-telegram-bot:latest-browser`. 
### Chat with the bot in Telegram To chat with the bot in Telegram, you can: - Send direct messages to the bot (this is not supported in groups) - Send messages that start with the specified command (e.g., `/chat` or the command you specified in the json config file) - Reply to the bot's last message > **Note** Make sure you have enabled the privacy mode of your bot before adding it to a group, or it will reply to every message in the group. The bot also has several commands. - `/help`: Show help information. - `/reset`: Reset the current chat thread and start a new one. - `/reload` (admin required, browser-based API only): Refresh the ChatGPT session. > **Note** When using a command in a group, make sure to include a mention after the command, like `/help@chatgpt_bot`. ## Advanced ### Running the bot using browser-based API on a headless server You can use [Xvfb](https://www.x.org/releases/X11R7.6/doc/man/man1/Xvfb.1.xhtml) to create a virtual framebuffer on a headless server and run this program: ```shell xvfb-run -a --server-args="-screen 0 1280x800x24 -nolisten tcp -dpi 96 +extension RANDR" pnpm start ``` We recommend you to use Google auth to avoid the complicated login Recaptchas. If you use a OpenAI account, you may have to use nopecha or 2captcha or manually solve the Recaptcha (by connecting to the display server using x11vnc). For more details about CAPTCHA solving, please refer to [the api repository](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#captchas). ## Credits - [ChatGPT API](https://github.com/transitive-bullshit/chatgpt-api): Node.js client for the unofficial ChatGPT API. - [ChatGPT](https://github.com/acheong08/ChatGPT): ChatGPT API for Python. - [Node.js Telegram Bot API](https://github.com/yagop/node-telegram-bot-api): Telegram Bot API for NodeJS. - [🤖️ chatbot-telegram](https://github.com/Ciyou/chatbot-telegram): Yet another telegram ChatGPT bot. ## LICENSE [MIT License](LICENSE). 
**Leave a star ⭐ if you find this project useful.** ================================================ FILE: README_zh-CN.md ================================================ # ChatGPT Telegram 机器人 ![badge:version](https://img.shields.io/github/v/release/RainEggplant/chatgpt-telegram-bot?color=brightgreen) ![docker image size](https://img.shields.io/docker/image-size/raineggplant/chatgpt-telegram-bot/latest?label=docker%20image%20size) ![license](https://img.shields.io/badge/license-MIT-green) [English](README.md) | 中文 一个基于 Node.js 的 ChatGPT 电报机器人。支持非浏览器和基于浏览器的 API。 > Fun fact: 这篇中文文档是 ChatGPT 翻译的。 ## 更新 🎉 v2 已发布!
🔔 2023 年 4 月 3 日 (v2.5.0) > - 支持不同用户/群组的多个会话 by @Vadko. > - 支持将消息队列设置为可选 by @Vadko.
历史更新
🔔 2023 年 3 月 10 日 (v2.4.0) > - 添加超时机制以防止消息队列被卡住。您可以在配置文件中覆盖默认超时时间。
🔔 2023 年 3 月 7 日 (v2.3.2) > - 您现在可以从 Docker Hub 拉取 [预构建的 Docker 镜像](https://hub.docker.com/r/raineggplant/chatgpt-telegram-bot) 了!
🔔 2023 年 3 月 2 日 (v2.3.0) > - 支持 [OpenAI 官方 chat completions API](https://platform.openai.com/docs/guides/chat)。 > - 支持使用自定义的 fetch 函数进行代理。 我们强烈建议您使用 `official` API。有传言称如果您继续使用 `unofficial` 和 `browser` API,OpenAI 可能会禁止您的账户。
🔔 2023 年 2 月 28 日 (v2.2.0) > - 支持消息队列以避免速率限制。 > - 改善 Markdown 解析。
🔔 2023 年 2 月 22 日 (v2.1.1) > - 支持自定义提示前缀和后缀(允许您自定义机器人的身份和行为)。 > - 支持 Node.js v19。
🔔 2023 年 2 月 19 日 (v2.1.0) > 我们已经添加了对 @acheong08 的非官方代理 API 的支持。此 API 使用代理服务器,允许用户绕过 Cloudflare 保护并使用真正的 ChatGPT。请参阅 [使用方法](#使用方法) 获取更多详情。 > > 对于老用户,我们已更新我们的 API 选项。`api.version` 现在变为了 `api.type`,可选项为 `browser`(之前的 `v3`)、`official`(之前的 `v4`)和 `unofficial`。请相应地更新您的配置文件。
🔔 2023 年 2 月 17 日 > 根据[维护者之一](https://github.com/waylaidwanderer/node-chatgpt-api#updates)的说法,OpenAI 已经修补了这种方法。所以你要么使用带官方模型的无浏览器 Official API(需要付费),要么使用基于浏览器的解决方案。
🔔 2023 年 2 月 15 日 > 我们发布了 v2.0.0 版本的这个机器人,支持 [非浏览器](https://github.com/transitive-bullshit/chatgpt-api) 和 [基于浏览器](https://github.com/transitive-bullshit/chatgpt-api/tree/v3) 的 API。您可以随时使用配置文件在两个 API 之间切换。此外,我们已经重构了代码库,使其更易于维护和扩展。 > > 对于老用户,您需要从 `.env` 文件切换到 `config/` 文件夹下的 json 文件。
## 特点
私聊 群聊
- 支持非浏览器 (官方,非官方) 和基于浏览器的 API - 支持私聊和群聊 - 在隐私模式下工作 (机器人只能查看特定的消息) - 基于用户和群组 ID 的机器人访问控制 - 使用命令重置聊天线程并刷新会话 - 使用消息队列以避免速率限制 - “正在输入”提示,Markdown 格式化等 - 绕过 Cloudflare 并自动完成验证码 (适用于基于浏览器的 API) - 自定义机器人身份和行为 (通过设置 `api.official.systemMessage`) - 用户友好的日志记录 ## 使用方法 ### 三种 API 类型之间的区别 > 感谢 @transitive-bullshit 制作了这个易于理解的表格! | 类型 | 免费? | 稳定? | 质量? | | ------------ | ------ | ------ | ------------------- | | `official` | ❌ 否 | ✅ 是 | ✅ 真实 ChatGPT 模型 | | `unofficial` | ✅ 是 | ☑️ 可能 | ✅ 真实 ChatGPT | | `browser` | ✅ 是 | ❌ 否 | ✅ 真实 ChatGPT | - `official`: 使用 OpenAI 官方的 chat completions API,默认基于 `gpt-3.5-turbo` 模型 (官方、稳定,但不免费) - `unofficial`: 使用一个非官方的代理服务器以绕过 Cloudflare 访问 ChatGPT 网页版的后端 API (使用真实的 ChatGPT,非常轻量级,但依赖第三方服务器并且受速率限制) - `browser` (不推荐使用): 使用 Puppeteer 访问官方的 ChatGPT 网页版 (使用真实的ChatGPT,但非常不稳定、耗费资源,并且容易出错) > **Warning** > > 有传言称,如果您继续使用 `unofficial` 和 `browser` API,OpenAI 可能会封禁您的账户。请自行决定是否使用。 ### 启动服务器 #### 选项 1: Node 要开始使用,请按照以下步骤操作: 1. 克隆这个项目。 2. 在 `config/` 文件夹下创建 `local.json`。可以将 `config/default.json` 复制为模板。 3. 按照文件中的说明修改 `local.json`。`local.json` 中的设置将覆盖 `default.json` 中的默认设置。 - 如果您想使用非浏览器的官方 API,请将 `api.type` 设置为 `official`。然后提供您的[OpenAI API密钥](https://platform.openai.com/overview)和其他设置。您可以参考[这里](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptapi)获取更多详细信息。请注意,这将消耗您的账户余额。 - 如果您想使用非浏览器的非官方 API,请将 `api.type` 设置为 `unofficial`。然后提供您的 OpenAI 访问令牌 ([如何获取访问令牌?](https://github.com/transitive-bullshit/chatgpt-api#access-token)) 和其他设置。您可以参考[这里](https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptunofficialproxyapi)获取更多详细信息。 - 如果您想使用基于浏览器的 API (不推荐),请将 `api.type` 设置为 `browser`。然后提供 OpenAI / Google / Microsoft 凭证和其他设置。您可以参考[这里](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#authentication)和[这里](https://github.com/transitive-bullshit/chatgpt-api/blob/v3/docs/classes/ChatGPTAPIBrowser.md#parameters)获取更多详细信息。请确保您已安装基于 Chromium 的浏览器。 然后,您可以使用以下命令启动机器人: ```shell pnpm install pnpm build && pnpm start ``` #### 选项 2: Docker 请按以下步骤开始: 1. 
创建一个名为 `config` 的文件夹,并在其中创建一个 `local.json` 文件。您可以按照“选项 1: Node”中的说明自定义设置。 2. 运行以下命令启动机器人: ```shell docker run -d -v ./config:/app/config raineggplant/chatgpt-telegram-bot:latest ``` 这将拉取最新的只支持无浏览器 API 的镜像。如果您想使用基于浏览器的 API,则可以将标签后缀添加为 `-browser`,例如 `raineggplant/chatgpt-telegram-bot:latest-browser`。 ### 在 Telegram 中与机器人聊天 要在 Telegram 中与机器人聊天,您可以: - 向机器人直接发送消息(不支持在群组中使用) - 发送以指定命令开头的消息(例如 `/chat` 或您在 JSON 配置文件中指定的命令) - 回复机器人的最后一条消息 > **Note** 在将机器人添加到群组之前,请确保已启用机器人的隐私模式,否则它将回复群组中的每条消息。 该机器人还有几个命令。 - `/help`:显示帮助信息。 - `/reset`:重置当前聊天线程并开始新线程。 - `/reload`(需要管理员权限,只适用于基于浏览器的 API):刷新 ChatGPT 会话。 > **Note** 在群组中使用命令时,请确保在命令之后包含提及,例如`/help@chatgpt_bot`。 ## 高级选项 ### 在无头服务器上运行基于浏览器 API 的机器人 您可以使用 [Xvfb](https://www.x.org/releases/X11R7.6/doc/man/man1/Xvfb.1.xhtml) 在无头服务器上创建虚拟帧缓冲区,并运行以下程序: ```shell xvfb-run -a --server-args="-screen 0 1280x800x24 -nolisten tcp -dpi 96 +extension RANDR" pnpm start ``` 我们建议您使用 Google 验证,以避免复杂的登录 Recaptchas。如果您使用 OpenAI 帐户,则可能需要使用 nopecha 或 2captcha 或手动解决Recaptcha(通过使用 x11vnc 连接到显示服务器)。有关 CAPTCHA 解决的更多详细信息,请参阅[API存储库](https://github.com/transitive-bullshit/chatgpt-api/tree/v3#captchas)。 ## 鸣谢 - [ChatGPT API](https://github.com/transitive-bullshit/chatgpt-api):ChatGPT API 的 Node.js 客户端。 - [ChatGPT](https://github.com/acheong08/ChatGPT):Python 版本的 ChatGPT API。 - [Node.js Telegram Bot API](https://github.com/yagop/node-telegram-bot-api):NodeJS 的 Telegram Bot API。 - [🤖️ chatbot-telegram](https://github.com/Ciyou/chatbot-telegram):另一个 Telegram ChatGPT 机器人。 ## 许可证 [MIT License](LICENSE). 
**如果您觉得这个项目有用,请给它一个 star ⭐。** ================================================ FILE: config/default.json ================================================ { "debug": 1, // debug level: 0 - no debug, 1 - debug, 2 - verbose debug "bot": { "token": "TELEGRAM_BOT_TOKEN", "groupIds": [], // allowed group ids, leave empty to allow all "userIds": [], // allowed user ids, leave empty to allow all "chatCmd": "/chat", "queue": true, "redisUri": "" }, "api": { "type": "official", // "browser", "official", "unofficial": the type of the chatgpt api to use "browser": { // Please refer to "https://github.com/transitive-bullshit/chatgpt-api/blob/v3/docs/classes/ChatGPTAPIBrowser.md#parameters" "email": "ACCOUNT_EMAIL", "password": "ACCOUNT_PASSWORD", "isGoogleLogin": false, "isProAccount": false, "executablePath": "", "nopechaKey": "", "captchaToken": "", "userDataDir": "", "timeoutMs": 120000 // set to 0 to disable }, "official": { // Please refer to "https://github.com/transitive-bullshit/chatgpt-api/blob/main/docs/classes/ChatGPTAPI.md#parameters" "apiKey": "API_KEY", "apiBaseUrl": "", "completionParams": {}, "systemMessage": "", "maxModelTokens": 0, // set to 0 to use default "maxResponseTokens": 0, // set to 0 to use default "timeoutMs": 60000 // set to 0 to disable }, "unofficial": { // Please refer to "https://github.com/transitive-bullshit/chatgpt-api#usage---chatgptunofficialproxyapi" "accessToken": "ACCESS_TOKEN", "apiReverseProxyUrl": "", "model": "", "timeoutMs": 120000 // set to 0 to disable } }, "proxy": "" // You can also specify the proxy using the environment variable "HTTP_PROXY" } ================================================ FILE: package.json ================================================ { "name": "chatgpt-telegram-bot", "version": "2.5.0", "description": "A ChatGPT bot for Telegram.", "main": "index.js", "type": "module", "scripts": { "build": "tsc --build", "build:debug": "tsc --build && tsc --sourceMap", "dev": "tsc-watch --onSuccess \"node 
--experimental-loader=extensionless dist/index.js\"", "start": "node --experimental-loader=extensionless dist/index.js", "lint": "eslint src/**/*.{js,ts}", "lint:fix": "eslint --fix src/**/*.{js,ts}", "format": "prettier --check src/**/*.{js,ts}", "format:fix": "prettier --write src/**/*.{js,ts}", "typecheck": "tsc --noEmit --noUnusedLocals", "prepare": "husky install" }, "lint-staged": { "src/**/*.{js,ts}": [ "eslint", "prettier --check" ] }, "keywords": [ "ChatGPT", "Telegram", "Bot" ], "author": "RainEggplant", "license": "MIT", "devDependencies": { "@types/config": "^3.3.0", "@types/lodash": "^4.14.192", "@types/node": "^18.15.11", "@types/node-telegram-bot-api": "^0.57.7", "@types/promise-queue": "^2.2.0", "@typescript-eslint/eslint-plugin": "^5.57.0", "@typescript-eslint/parser": "^5.57.0", "eslint": "^8.37.0", "eslint-config-prettier": "^8.8.0", "husky": "^8.0.3", "lint-staged": "^13.2.0", "prettier": "^2.8.7", "tsc-watch": "^6.0.0", "typescript": "^4.9.5" }, "dependencies": { "@keyv/redis": "^2.5.7", "chatgpt": "^5.1.4", "config": "^3.3.9", "dotenv": "^16.0.3", "extensionless": "^1.3.4", "https-proxy-agent": "^5.0.1", "ioredis": "^5.3.2", "keyv": "^4.5.2", "lodash": "^4.17.21", "node-fetch": "^3.3.1", "node-telegram-bot-api": "^0.60.0", "promise-queue": "^2.2.5", "telegramify-markdown": "^1.1.0" }, "optionalDependencies": { "chatgpt-v3": "npm:chatgpt@3.5.1", "puppeteer": "^19.8.2" } } ================================================ FILE: src/@types/telegramify-markdown/index.d.ts ================================================ declare module 'telegramify-markdown' { // eslint-disable-next-line @typescript-eslint/no-explicit-any function telegramifyMarkdown(markdown: string, options?: any): string; export = telegramifyMarkdown; } ================================================ FILE: src/api.ts ================================================ import type { ChatGPTAPI, ChatGPTUnofficialProxyAPI, ChatMessage as ChatResponseV4, } from 'chatgpt'; import type { 
  ChatGPTAPIBrowser,
  ChatResponse as ChatResponseV3,
} from 'chatgpt-v3';
import {
  APIBrowserOptions,
  APIOfficialOptions,
  APIOptions,
  APIUnofficialOptions,
} from './types';
import {logWithTime} from './utils';
import {DB} from './db';

// Wrapper that unifies the three ChatGPT backends (browser / official API /
// unofficial proxy) behind a single sendMessage/resetThread interface.
class ChatGPT {
  debug: number;
  readonly apiType: string;
  protected _opts: APIOptions;
  // Union of all backend clients; set by init().
  protected _api:
    | ChatGPTAPI
    | ChatGPTAPIBrowser
    | ChatGPTUnofficialProxyAPI
    | undefined;
  protected _apiBrowser: ChatGPTAPIBrowser | undefined;
  protected _apiOfficial: ChatGPTAPI | undefined;
  protected _apiUnofficialProxy: ChatGPTUnofficialProxyAPI | undefined;
  protected _timeoutMs: number | undefined;
  protected _db: DB;

  constructor(apiOpts: APIOptions, db: DB, debug = 1) {
    this.debug = debug;
    this.apiType = apiOpts.type;
    this._opts = apiOpts;
    this._timeoutMs = undefined;
    this._db = db;
  }

  // Dynamically imports the selected backend package and constructs the
  // client. Must be awaited before sendMessage() is usable.
  // Throws RangeError for an unknown `type`.
  init = async () => {
    if (this._opts.type == 'browser') {
      const {ChatGPTAPIBrowser} = await import('chatgpt-v3');
      this._apiBrowser = new ChatGPTAPIBrowser(
        this._opts.browser as APIBrowserOptions
      );
      await this._apiBrowser.initSession();
      this._api = this._apiBrowser;
      this._timeoutMs = this._opts.browser?.timeoutMs;
    } else if (this._opts.type == 'official') {
      const {ChatGPTAPI} = await import('chatgpt');
      this._apiOfficial = new ChatGPTAPI({
        ...(this._opts.official as APIOfficialOptions),
        // Persist conversation messages in the shared DB so chat context
        // can be looked up by message id later.
        getMessageById: async (id) => {
          const message = await this._db.messageStore.get(id);
          return message as ChatResponseV4;
        },
        upsertMessage: async (message) => {
          await this._db.messageStore.set(message.id, message);
        },
      });
      this._api = this._apiOfficial;
      this._timeoutMs = this._opts.official?.timeoutMs;
    } else if (this._opts.type == 'unofficial') {
      const {ChatGPTUnofficialProxyAPI} = await import('chatgpt');
      this._apiUnofficialProxy = new ChatGPTUnofficialProxyAPI(
        this._opts.unofficial as APIUnofficialOptions
      );
      this._api = this._apiUnofficialProxy;
      this._timeoutMs = this._opts.unofficial?.timeoutMs;
    } else {
      throw new RangeError('Invalid API type');
    }
    logWithTime('🔮 ChatGPT API has started...');
  };

  // Sends `text` for chat `chatId`. Conversation context comes from the DB;
  // a reply target (replyId) takes precedence over the stored thread head.
  // Resolves with the backend response, or undefined when init() has not run.
  sendMessage = async (
    text: string,
    chatId: number,
    messageId: string,
    replyId?: string,
    onProgress?: (res: ChatResponseV3 | ChatResponseV4) => void
  ) => {
    if (!this._api) return;

    const contextDB = await this._db.getContext(chatId);
    const parentIdFromReply = await this._db.getReplyId(replyId);
    const context = {
      conversationId: contextDB?.conversationId,
      // Replying to a specific bot message forks the thread from there.
      parentMessageId: parentIdFromReply ?? contextDB?.parentMessageId,
    };

    let res: ChatResponseV3 | ChatResponseV4;
    if (this.apiType == 'official') {
      if (!this._apiOfficial) return;
      res = await this._apiOfficial.sendMessage(text, {
        ...context,
        onProgress,
        messageId,
        timeoutMs: this._timeoutMs,
      });
    } else {
      res = await this._api.sendMessage(text, {
        ...context,
        onProgress,
        messageId,
        timeoutMs: this._timeoutMs,
      });
    }

    // v3 (browser) responses expose `messageId`; v4 responses expose `id`.
    const parentMessageId =
      this.apiType == 'browser'
        ? (res as ChatResponseV3).messageId
        : (res as ChatResponseV4).id;

    await this._db.updateContext(chatId, {
      conversationId: res.conversationId,
      parentMessageId,
    });

    return res;
  };

  // Clears the stored context for the chat; the browser backend also resets
  // its (single, global) thread.
  resetThread = async (chatId: number) => {
    if (this._apiBrowser) {
      await this._apiBrowser.resetThread();
    }
    await this._db.clearContext(chatId);
  };

  // Browser backend only: re-authenticates the session; no-op otherwise.
  refreshSession = async () => {
    if (this._apiBrowser) {
      await this._apiBrowser.refreshSession();
    }
  };
}

export {ChatGPT};

================================================ FILE: src/db.ts ================================================
import KeyvRedis from '@keyv/redis';
import Keyv from 'keyv';
import {ChatMessage as ChatResponseV4} from 'chatgpt';
import {BotOptions} from './types';
import Redis from 'ioredis';
import {logWithTime} from './utils';

// Conversation pointers stored per chat.
interface ContextObject {
  conversationId?: string;
  parentMessageId?: string;
}

type Context = ContextObject | undefined;

// Persistence layer: Redis-backed when a URI is configured, otherwise
// Keyv's default in-memory store.
export class DB {
  protected _store: KeyvRedis | undefined;
  protected _redis: Redis | undefined;
  public messageStore: Keyv;
  private _usersStore: Keyv;

  constructor(botOps: BotOptions) {
    if (botOps.redisUri) {
      // family: 6 — connect over IPv6.
      this._redis = new Redis(botOps.redisUri, {family: 6});
this._redis.on('ready', async () => { logWithTime('📚 Redis has started...'); const response = await this._redis?.ping(); logWithTime(`🏓 Redis ping result: ${response}`); }); this._store = new KeyvRedis(this._redis); } this.messageStore = new Keyv({ store: this._store, namespace: 'messages', }); this._usersStore = new Keyv({ store: this._store, namespace: 'users', }); } getContext = (chatId: number): Promise => { return this._usersStore.get(chatId.toString()); }; updateContext = async ( chatId: number, newContext: Pick & Required> ) => { await this._usersStore.set(chatId.toString(), newContext); }; clearContext = async (chatId: number) => { await this._usersStore.delete(chatId.toString()); }; getReplyId = async (replyId: string | undefined) => { if (!replyId) return undefined; const reply = await this.messageStore.get(replyId); return reply?.id; }; } ================================================ FILE: src/handlers/authentication.ts ================================================ import type TelegramBot from 'node-telegram-bot-api'; import type {Message} from 'node-telegram-bot-api'; import {BotOptions} from '../types'; import {logWithTime} from '../utils'; class Authenticator { debug: number; protected _bot: TelegramBot; protected _opts: BotOptions; constructor(bot: TelegramBot, botOpts: BotOptions, debug = 1) { this.debug = debug; this._bot = bot; this._opts = botOpts; } authenticate = async (msg: Message) => { if (msg.chat.type === 'private') { if ( this._opts.userIds.length != 0 && this._opts.userIds.indexOf(msg.chat.id) == -1 ) { logWithTime( '⚠️ Authentication failed for user ' + `@${msg.from?.username ?? ''} (${msg.from?.id}).` ); await this._bot.sendMessage( msg.chat.id, '⛔️ Sorry, you are not my owner. I cannot chat with you or execute your command.' 
); return false; } } else { if ( this._opts.groupIds.length != 0 && this._opts.groupIds.indexOf(msg.chat.id) == -1 ) { logWithTime( `⚠️ Authentication failed for group ${msg.chat.title} (${msg.chat.id}).` ); await this._bot.sendMessage( msg.chat.id, "⛔️ Sorry, I'm not supposed to work here. Please remove me from the group." ); return false; } } return true; }; } export {Authenticator}; ================================================ FILE: src/handlers/chat.ts ================================================ import type {ChatMessage as ChatResponseV4} from 'chatgpt'; import type {ChatResponse as ChatResponseV3} from 'chatgpt-v3'; import _ from 'lodash'; import type TelegramBot from 'node-telegram-bot-api'; import telegramifyMarkdown from 'telegramify-markdown'; import type {ChatGPT} from '../api'; import {BotOptions} from '../types'; import {generateIdFromMessage, logWithTime} from '../utils'; import Queue from 'promise-queue'; import {DB} from '../db'; class ChatHandler { debug: number; protected _opts: BotOptions; protected _bot: TelegramBot; protected _api: ChatGPT; protected _n_queued = 0; protected _n_pending = 0; protected _apiRequestsQueue = new Queue(1, Infinity); protected _positionInQueue: Record = {}; protected _updatePositionQueue = new Queue(20, Infinity); protected _db: DB; constructor( bot: TelegramBot, api: ChatGPT, botOpts: BotOptions, db: DB, debug = 1 ) { this.debug = debug; this._bot = bot; this._api = api; this._opts = botOpts; this._db = db; } handle = async (msg: TelegramBot.Message, text: string) => { if (!text) return; const chatId = msg.chat.id; if (this.debug >= 1) { const userInfo = `@${msg.from?.username ?? ''} (${msg.from?.id})`; const chatInfo = msg.chat.type == 'private' ? 
        'private chat' :
        `group ${msg.chat.title} (${msg.chat.id})`;
      logWithTime(`📩 Message from ${userInfo} in ${chatInfo}:\n${text}`);
    }

    // Send a message to the chat acknowledging receipt of their message
    const reply = await this._bot.sendMessage(
      chatId,
      this._opts.queue ? '⌛' : '🤔',
      {
        reply_to_message_id: msg.message_id,
      }
    );

    const sendToGpt = async () => {
      await this._sendToGpt(
        text,
        chatId,
        reply,
        generateIdFromMessage(reply),
        generateIdFromMessage(msg.reply_to_message)
      );
    };

    if (!this._opts.queue) {
      await sendToGpt();
    } else {
      // add to sequence queue due to chatGPT processes only one request at a time
      const requestPromise = this._apiRequestsQueue.add(() => {
        return sendToGpt();
      });
      // Track whether this request runs immediately (pending) or waits.
      if (this._n_pending == 0) this._n_pending++;
      else this._n_queued++;
      this._positionInQueue[this._getQueueKey(chatId, reply.message_id)] =
        this._n_queued;

      // Replace the placeholder with the caller's queue position.
      await this._bot.editMessageText(
        this._n_queued > 0 ? `⌛: You are #${this._n_queued} in line.` : '🤔',
        {
          chat_id: chatId,
          message_id: reply.message_id,
        }
      );
      await requestPromise;
    }
  };

  // Forwards `text` to ChatGPT and live-edits `originalReply` with throttled
  // partial responses; on API error the chat context is cleared.
  protected _sendToGpt = async (
    text: string,
    chatId: number,
    originalReply: TelegramBot.Message,
    messageId: string,
    replyId?: string
  ) => {
    let reply = originalReply;
    await this._bot.sendChatAction(chatId, 'typing');

    // Send message to ChatGPT
    try {
      const res = await this._api.sendMessage(
        text,
        chatId,
        messageId,
        replyId,
        // Throttle streaming edits (Telegram rate-limits editMessageText).
        _.throttle(
          async (partialResponse: ChatResponseV3 | ChatResponseV4) => {
            // v3 (browser) responses carry `response`; v4 carry `text`.
            const resText =
              this._api.apiType == 'browser'
                ? (partialResponse as ChatResponseV3).response
                : (partialResponse as ChatResponseV4).text;
            reply = await this._editMessage(reply, resText);
            await this._bot.sendChatAction(chatId, 'typing');
          },
          3000,
          {leading: true, trailing: false}
        )
      );
      const resText =
        this._api.apiType == 'browser'
          ? (res as ChatResponseV3).response
          : (res as ChatResponseV4).text;
      await this._editMessage(reply, resText);

      if (this.debug >= 1) logWithTime(`📨 Response:\n${resText}`);
    } catch (err) {
      logWithTime('⛔️ ChatGPT API error:', (err as Error).message);
      // Drop the (possibly corrupt) context so the next message starts clean.
      await this._db.clearContext(chatId);
      this._bot.sendMessage(
        chatId,
        "⚠️ Sorry, I'm having trouble connecting to the server, please try again later."
      );
    }

    // Update queue order after finishing current request
    await this._updateQueue(chatId, reply.message_id);
  };

  // Edit telegram message
  protected _editMessage = async (
    msg: TelegramBot.Message,
    text: string,
    needParse = true
  ) => {
    // Skip no-op edits (Telegram rejects editing to identical text).
    if (text.trim() == '' || msg.text == text) {
      return msg;
    }
    try {
      text = telegramifyMarkdown(text, 'escape');
      const res = await this._bot.editMessageText(text, {
        chat_id: msg.chat.id,
        message_id: msg.message_id,
        parse_mode: needParse ? 'MarkdownV2' : undefined,
      });
      // type of res is boolean | Message
      if (typeof res === 'object') {
        // return a Message type instance if res is a Message type
        return res as TelegramBot.Message;
      } else {
        // return the original message if res is a boolean type
        return msg;
      }
    } catch (err) {
      logWithTime('⛔️ Edit message error:', (err as Error).message);
      if (this.debug >= 2) logWithTime('⛔️ Message text:', text);
      return msg;
    }
  };

  protected _getQueueKey = (chatId: number, messageId: number) =>
    `${chatId}:${messageId}`;

  protected _parseQueueKey = (key: string) => {
    const [chat_id, message_id] = key.split(':');
    return {chat_id, message_id};
  };

  // Removes the finished request from the queue bookkeeping and moves every
  // waiting request up one slot, updating each placeholder message.
  protected _updateQueue = async (chatId: number, messageId: number) => {
    // delete value for current request
    delete this._positionInQueue[this._getQueueKey(chatId, messageId)];
    if (this._n_queued > 0) this._n_queued--;
    else this._n_pending--;

    for (const key in this._positionInQueue) {
      const {chat_id, message_id} = this._parseQueueKey(key);
      this._positionInQueue[key]--;
      // Edits are funneled through a concurrency-20 queue to bound load.
      this._updatePositionQueue.add(() => {
        return this._bot.editMessageText(
          this._positionInQueue[key] > 0 ?
          `⌛: You are #${this._positionInQueue[key]} in line.` : '🤔',
          {
            chat_id,
            message_id: Number(message_id),
          }
        );
      });
    }
  };
}

export {ChatHandler};

================================================ FILE: src/handlers/command.ts ================================================
import type TelegramBot from 'node-telegram-bot-api';
import type {ChatGPT} from '../api';
import {BotOptions} from '../types';
import {logWithTime} from '../utils';

// Handles slash commands (/help, /reset, /reload); plain chat messages go
// to ChatHandler instead.
class CommandHandler {
  debug: number;
  protected _opts: BotOptions;
  protected _bot: TelegramBot;
  protected _api: ChatGPT;

  constructor(bot: TelegramBot, api: ChatGPT, botOpts: BotOptions, debug = 1) {
    this.debug = debug;
    this._bot = bot;
    this._api = api;
    this._opts = botOpts;
  }

  handle = async (
    msg: TelegramBot.Message,
    command: string,
    isMentioned: boolean,
    botUsername: string
  ) => {
    const userInfo = `@${msg.from?.username ?? ''} (${msg.from?.id})`;
    const chatInfo =
      msg.chat.type == 'private'
        ? 'private chat'
        : `group ${msg.chat.title} (${msg.chat.id})`;
    if (this.debug >= 1) {
      logWithTime(
        `👨‍💻️ User ${userInfo} issued command "${command}" in ${chatInfo} (isMentioned=${isMentioned}).`
      );
    }

    // Ignore commands without mention in groups.
    if (msg.chat.type != 'private' && !isMentioned) return;

    switch (command) {
      case '/help':
        await this._bot.sendMessage(
          msg.chat.id,
          'To chat with me, you can:\n' +
            ' • send messages directly (not supported in groups)\n' +
            ` • send messages that start with ${this._opts.chatCmd}\n` +
            ' • reply to my last message\n\n' +
            'Command list:\n' +
            `(When using a command in a group, make sure to include a mention after the command, like /help@${botUsername}).\n` +
            ' • /help Show help information.\n' +
            ' • /reset Reset the current chat thread and start a new one.\n' +
            ' • /reload (admin required) Refresh the ChatGPT session.'
        );
        break;

      case '/reset':
        await this._bot.sendChatAction(msg.chat.id, 'typing');
        await this._api.resetThread(msg.chat.id);
        await this._bot.sendMessage(
          msg.chat.id,
          '🔄 The chat thread has been reset. New chat thread started.'
        );
        logWithTime(`🔄 Chat thread reset by ${userInfo}.`);
        break;

      case '/reload':
        // Only allow-listed user ids may refresh the session.
        if (this._opts.userIds.indexOf(msg.from?.id ?? 0) == -1) {
          await this._bot.sendMessage(
            msg.chat.id,
            '⛔️ Sorry, you do not have the permission to run this command.'
          );
          logWithTime(
            `⚠️ Permission denied for "${command}" from ${userInfo}.`
          );
        } else {
          await this._bot.sendChatAction(msg.chat.id, 'typing');
          await this._api.refreshSession();
          await this._bot.sendMessage(msg.chat.id, '🔄 Session refreshed.');
          logWithTime(`🔄 Session refreshed by ${userInfo}.`);
        }
        break;

      default:
        await this._bot.sendMessage(
          msg.chat.id,
          '⚠️ Unsupported command. Run /help to see the usage.'
        );
        break;
    }
  };
}

export {CommandHandler};

================================================ FILE: src/handlers/message.ts ================================================
import type TelegramBot from 'node-telegram-bot-api';
import type {ChatGPT} from '../api';
import {BotOptions} from '../types';
import {logWithTime} from '../utils';
import {Authenticator} from './authentication';
import {ChatHandler} from './chat';
import {CommandHandler} from './command';
import {DB} from '../db';

// Entry point for every incoming Telegram message: authenticates the
// sender, then routes to CommandHandler or ChatHandler.
class MessageHandler {
  debug: number;
  protected _opts: BotOptions;
  protected _bot: TelegramBot;
  protected _botUsername = '';
  protected _botId: number | undefined;
  protected _api: ChatGPT;
  protected _authenticator: Authenticator;
  protected _commandHandler: CommandHandler;
  protected _chatHandler: ChatHandler;

  constructor(
    bot: TelegramBot,
    api: ChatGPT,
    botOpts: BotOptions,
    db: DB,
    debug = 1
  ) {
    this.debug = debug;
    this._bot = bot;
    this._api = api;
    this._opts = botOpts;
    this._authenticator = new Authenticator(bot, botOpts, debug);
    this._commandHandler = new CommandHandler(bot, api, botOpts, debug);
    this._chatHandler = new ChatHandler(bot, api, botOpts, db, debug);
  }

  // Caches the bot's own username/id (used for mention stripping and
  // reply detection).
  init = async () => {
    this._botUsername = (await this._bot.getMe()).username ??
      '';
    // NOTE(review): getMe() is awaited again here although the username was
    // fetched from it on the previous line — could be called once and reused.
    this._botId = (await this._bot.getMe()).id;
    logWithTime(`🤖 Bot @${this._botUsername} has started...`);
  };

  handle = async (msg: TelegramBot.Message) => {
    if (this.debug >= 2) logWithTime(msg);

    // Authentication.
    if (!(await this._authenticator.authenticate(msg))) return;

    // Parse message.
    const {text, command, isMentioned} = this._parseMessage(msg);
    if (command != '' && command != this._opts.chatCmd) {
      // For commands except `${chatCmd}`, pass the request to commandHandler.
      await this._commandHandler.handle(
        msg,
        command,
        isMentioned,
        this._botUsername
      );
    } else {
      // Handles:
      // - direct messages in private chats
      // - replied messages in both private chats and group chats
      // - messages that start with `chatCmd` in private chats and group chats
      if (
        command == this._opts.chatCmd ||
        msg.chat.type == 'private' ||
        msg.reply_to_message?.from?.id === this._botId
      ) {
        await this._chatHandler.handle(msg, text);
      }
    }
  };

  // Extracts a leading bot command (entity at offset 0), strips an optional
  // @botname mention from it, and returns the remaining text.
  protected _parseMessage = (msg: TelegramBot.Message) => {
    let text = msg.text ?? '';
    let command = '';
    let isMentioned = false;
    if ('entities' in msg) {
      // May have bot commands.
      const regMention = new RegExp(`@${this._botUsername}$`);
      for (const entity of msg.entities ?? []) {
        if (entity.type == 'bot_command' && entity.offset == 0) {
          text = msg.text?.slice(entity.length).trim() ?? '';
          command = msg.text?.slice(0, entity.length) ?? '';
          isMentioned = regMention.test(command);
          command = command.replace(regMention, ''); // Remove the mention.
          break;
        }
      }
    }
    return {text, command, isMentioned};
  };
}

export {MessageHandler};

================================================ FILE: src/index.ts ================================================
import TelegramBot from 'node-telegram-bot-api';
import {ChatGPT} from './api';
import {MessageHandler} from './handlers/message';
import {loadConfig} from './utils';
import {DB} from './db';

// Wires config, DB, ChatGPT client and Telegram bot together.
async function main() {
  const opts = loadConfig();
  const db = new DB(opts.bot);

  // Initialize ChatGPT API.
const api = new ChatGPT(opts.api, db); await api.init(); // Initialize Telegram Bot and message handler. const bot = new TelegramBot(opts.bot.token, { polling: true, // eslint-disable-next-line @typescript-eslint/no-explicit-any request: {proxy: opts.proxy} as any, }); const messageHandler = new MessageHandler(bot, api, opts.bot, db, opts.debug); await messageHandler.init(); bot.on('message', messageHandler.handle); } main().catch((err) => { console.error(err); process.exit(1); }); ================================================ FILE: src/types.d.ts ================================================ import type {openai, FetchFn} from 'chatgpt'; export interface BotOptions { token: string; userIds: number[]; groupIds: number[]; chatCmd: string; queue: boolean; redisUri?: string; } export interface APIBrowserOptions { email: string; password: string; isGoogleLogin?: boolean; isProAccount?: boolean; executablePath?: string; proxyServer?: string; nopechaKey?: string; captchaToken?: string; userDataDir?: string; timeoutMs?: number; debug?: boolean; } export interface APIOfficialOptions { apiKey: string; apiBaseUrl?: string; completionParams?: Partial< Omit >; systemMessage?: string; maxModelTokens?: number; maxResponseTokens?: number; timeoutMs?: number; fetch?: FetchFn; debug?: boolean; } export interface APIUnofficialOptions { accessToken: string; apiReverseProxyUrl?: string; model?: string; timeoutMs?: number; fetch?: FetchFn; debug?: boolean; } export interface APIOptions { type: 'browser' | 'official' | 'unofficial'; browser?: APIBrowserOptions; official?: APIOfficialOptions; unofficial?: APIUnofficialOptions; } export interface Config { debug: number; bot: BotOptions; api: APIOptions; proxy?: string; } ================================================ FILE: src/utils.ts ================================================ import type {FetchFn, openai} from 'chatgpt'; import config from 'config'; import pkg from 'https-proxy-agent'; import fetch, {type RequestInfo, type 
RequestInit} from 'node-fetch';
import {
  Config,
  APIBrowserOptions,
  APIOfficialOptions,
  APIUnofficialOptions,
} from './types';
import {Message} from 'node-telegram-bot-api';

const {HttpsProxyAgent} = pkg;

// Builds the typed Config object from node-config plus the environment.
// NOTE(review): explicit type arguments (tryGet<T>, config.get<number>, …)
// were lost in extraction and have been restored — verify against the
// original file.
function loadConfig(): Config {
  // Returns the config value at `key`, or undefined when the key is absent
  // (config.get would throw in that case).
  function tryGet<T>(key: string): T | undefined {
    if (!config.has(key)) {
      return undefined;
    } else {
      return config.get<T>(key);
    }
  }

  let fetchFn: FetchFn | undefined = undefined;
  const proxy = tryGet<string>('proxy') || process.env.http_proxy;
  if (proxy) {
    // Route all outgoing API traffic through the HTTPS proxy.
    const proxyAgent = new HttpsProxyAgent(proxy);
    fetchFn = ((url, opts) =>
      fetch(
        url as RequestInfo,
        {...opts, agent: proxyAgent} as RequestInit
      )) as FetchFn;
  }

  const apiType = config.get<'browser' | 'official' | 'unofficial'>('api.type');
  let apiBrowserCfg: APIBrowserOptions | undefined;
  let apiOfficialCfg: APIOfficialOptions | undefined;
  let apiUnofficialCfg: APIUnofficialOptions | undefined;
  if (apiType == 'browser') {
    apiBrowserCfg = {
      email: config.get('api.browser.email'),
      password: config.get('api.browser.password'),
      isGoogleLogin: tryGet<boolean>('api.browser.isGoogleLogin') || false,
      isProAccount: tryGet<boolean>('api.browser.isProAccount') || false,
      executablePath:
        tryGet<string>('api.browser.executablePath') ||
        process.env.PUPPETEER_EXECUTABLE_PATH ||
        undefined,
      proxyServer: tryGet<string>('proxy') || undefined,
      nopechaKey: tryGet<string>('api.browser.nopechaKey') || undefined,
      captchaToken: tryGet<string>('api.browser.captchaToken') || undefined,
      userDataDir: tryGet<string>('api.browser.userDataDir') || undefined,
      timeoutMs: tryGet<number>('api.browser.timeoutMs') || undefined,
      debug: config.get<number>('debug') >= 2,
    };
  } else if (apiType == 'official') {
    apiOfficialCfg = {
      apiKey: config.get('api.official.apiKey'),
      apiBaseUrl: tryGet<string>('api.official.apiBaseUrl') || undefined,
      completionParams:
        tryGet<
          Partial<
            Omit<
              openai.CreateChatCompletionRequest,
              'messages' | 'n' | 'stream'
            >
          >
        >('api.official.completionParams') || undefined,
      systemMessage: tryGet<string>('api.official.systemMessage') || undefined,
      maxModelTokens: tryGet<number>('api.official.maxModelTokens') || undefined,
      maxResponseTokens:
        tryGet<number>('api.official.maxResponseTokens') ||
undefined, timeoutMs: tryGet('api.official.timeoutMs') || undefined, fetch: fetchFn, debug: config.get('debug') >= 2, }; } else if (apiType == 'unofficial') { apiUnofficialCfg = { accessToken: config.get('api.unofficial.accessToken'), apiReverseProxyUrl: tryGet('api.unofficial.apiReverseProxyUrl') || undefined, model: tryGet('api.unofficial.model') || undefined, timeoutMs: tryGet('api.unofficial.timeoutMs') || undefined, fetch: fetchFn, debug: config.get('debug') >= 2, }; } else { throw new RangeError('Invalid API type'); } const cfg = { debug: tryGet('debug') || 1, bot: { token: config.get('bot.token'), userIds: tryGet('bot.userIds') || [], groupIds: tryGet('bot.groupIds') || [], chatCmd: tryGet('bot.chatCmd') || '/chat', queue: config.get('bot.queue') ?? true, redisUri: config.get('bot.redisUri'), }, api: { type: apiType, browser: apiBrowserCfg, official: apiOfficialCfg, unofficial: apiUnofficialCfg, }, proxy: proxy, }; return cfg; } // eslint-disable-next-line @typescript-eslint/no-explicit-any function logWithTime(...args: any[]) { console.log(new Date().toLocaleString(), ...args); } function generateIdFromMessage( msg?: T ): T extends Message ? string : undefined { return ( msg ? `${msg.chat.id}_${msg.message_id}_${msg.from?.id}_${msg.date}` : undefined ) as T extends Message ? string : undefined; } export {loadConfig, logWithTime, generateIdFromMessage}; ================================================ FILE: tsconfig.json ================================================ { "compilerOptions": { "target": "ESNext", "module": "ESNext", "moduleResolution": "node", "strict": true, "esModuleInterop": true, "skipLibCheck": true, "forceConsistentCasingInFileNames": true, "outDir": "dist", "typeRoots": ["node_modules/@types", "src/@types"] }, "include": ["src"], "exclude": ["node_modules"] }