Full Code of ChatGPTNextWeb/NextChat for AI

main c3b8c1587c04 cached
255 files
1.5 MB
451.5k tokens
870 symbols
2 requests
Download .txt
Showing preview only (1,661K chars total). Download the full file or copy to clipboard to get everything.
Repository: ChatGPTNextWeb/NextChat
Branch: main
Commit: c3b8c1587c04
Files: 255
Total size: 1.5 MB

Directory structure:
gitextract_6fj54yj_/

├── .babelrc
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github/
│   ├── ISSUE_TEMPLATE/
│   │   ├── 1_bug_report.yml
│   │   ├── 1_bug_report_cn.yml
│   │   ├── 2_feature_request.yml
│   │   └── 2_feature_request_cn.yml
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── dependabot.yml
│   └── workflows/
│       ├── app.yml
│       ├── deploy_preview.yml
│       ├── docker.yml
│       ├── issue-translator.yml
│       ├── remove_deploy_preview.yml
│       ├── sync.yml
│       └── test.yml
├── .gitignore
├── .gitpod.yml
├── .husky/
│   └── pre-commit
├── .lintstagedrc.json
├── .prettierrc.js
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── LICENSE
├── README.md
├── README_CN.md
├── README_JA.md
├── README_KO.md
├── app/
│   ├── api/
│   │   ├── 302ai.ts
│   │   ├── [provider]/
│   │   │   └── [...path]/
│   │   │       └── route.ts
│   │   ├── alibaba.ts
│   │   ├── anthropic.ts
│   │   ├── artifacts/
│   │   │   └── route.ts
│   │   ├── auth.ts
│   │   ├── azure.ts
│   │   ├── baidu.ts
│   │   ├── bytedance.ts
│   │   ├── common.ts
│   │   ├── config/
│   │   │   └── route.ts
│   │   ├── deepseek.ts
│   │   ├── glm.ts
│   │   ├── google.ts
│   │   ├── iflytek.ts
│   │   ├── moonshot.ts
│   │   ├── openai.ts
│   │   ├── proxy.ts
│   │   ├── siliconflow.ts
│   │   ├── stability.ts
│   │   ├── tencent/
│   │   │   └── route.ts
│   │   ├── upstash/
│   │   │   └── [action]/
│   │   │       └── [...key]/
│   │   │           └── route.ts
│   │   ├── webdav/
│   │   │   └── [...path]/
│   │   │       └── route.ts
│   │   └── xai.ts
│   ├── client/
│   │   ├── api.ts
│   │   ├── controller.ts
│   │   └── platforms/
│   │       ├── ai302.ts
│   │       ├── alibaba.ts
│   │       ├── anthropic.ts
│   │       ├── baidu.ts
│   │       ├── bytedance.ts
│   │       ├── deepseek.ts
│   │       ├── glm.ts
│   │       ├── google.ts
│   │       ├── iflytek.ts
│   │       ├── moonshot.ts
│   │       ├── openai.ts
│   │       ├── siliconflow.ts
│   │       ├── tencent.ts
│   │       └── xai.ts
│   ├── command.ts
│   ├── components/
│   │   ├── artifacts.module.scss
│   │   ├── artifacts.tsx
│   │   ├── auth.module.scss
│   │   ├── auth.tsx
│   │   ├── button.module.scss
│   │   ├── button.tsx
│   │   ├── chat-list.tsx
│   │   ├── chat.module.scss
│   │   ├── chat.tsx
│   │   ├── emoji.tsx
│   │   ├── error.tsx
│   │   ├── exporter.module.scss
│   │   ├── exporter.tsx
│   │   ├── home.module.scss
│   │   ├── home.tsx
│   │   ├── input-range.module.scss
│   │   ├── input-range.tsx
│   │   ├── markdown.tsx
│   │   ├── mask.module.scss
│   │   ├── mask.tsx
│   │   ├── mcp-market.module.scss
│   │   ├── mcp-market.tsx
│   │   ├── message-selector.module.scss
│   │   ├── message-selector.tsx
│   │   ├── model-config.module.scss
│   │   ├── model-config.tsx
│   │   ├── new-chat.module.scss
│   │   ├── new-chat.tsx
│   │   ├── plugin.module.scss
│   │   ├── plugin.tsx
│   │   ├── realtime-chat/
│   │   │   ├── index.ts
│   │   │   ├── realtime-chat.module.scss
│   │   │   ├── realtime-chat.tsx
│   │   │   └── realtime-config.tsx
│   │   ├── sd/
│   │   │   ├── index.tsx
│   │   │   ├── sd-panel.module.scss
│   │   │   ├── sd-panel.tsx
│   │   │   ├── sd-sidebar.tsx
│   │   │   ├── sd.module.scss
│   │   │   └── sd.tsx
│   │   ├── search-chat.tsx
│   │   ├── settings.module.scss
│   │   ├── settings.tsx
│   │   ├── sidebar.tsx
│   │   ├── tts-config.tsx
│   │   ├── tts.module.scss
│   │   ├── ui-lib.module.scss
│   │   ├── ui-lib.tsx
│   │   └── voice-print/
│   │       ├── index.ts
│   │       ├── voice-print.module.scss
│   │       └── voice-print.tsx
│   ├── config/
│   │   ├── build.ts
│   │   ├── client.ts
│   │   └── server.ts
│   ├── constant.ts
│   ├── global.d.ts
│   ├── layout.tsx
│   ├── lib/
│   │   └── audio.ts
│   ├── locales/
│   │   ├── ar.ts
│   │   ├── bn.ts
│   │   ├── cn.ts
│   │   ├── cs.ts
│   │   ├── da.ts
│   │   ├── de.ts
│   │   ├── en.ts
│   │   ├── es.ts
│   │   ├── fr.ts
│   │   ├── id.ts
│   │   ├── index.ts
│   │   ├── it.ts
│   │   ├── jp.ts
│   │   ├── ko.ts
│   │   ├── no.ts
│   │   ├── pt.ts
│   │   ├── ru.ts
│   │   ├── sk.ts
│   │   ├── tr.ts
│   │   ├── tw.ts
│   │   └── vi.ts
│   ├── masks/
│   │   ├── build.ts
│   │   ├── cn.ts
│   │   ├── en.ts
│   │   ├── index.ts
│   │   ├── tw.ts
│   │   └── typing.ts
│   ├── mcp/
│   │   ├── actions.ts
│   │   ├── client.ts
│   │   ├── logger.ts
│   │   ├── mcp_config.default.json
│   │   ├── types.ts
│   │   └── utils.ts
│   ├── page.tsx
│   ├── polyfill.ts
│   ├── store/
│   │   ├── access.ts
│   │   ├── chat.ts
│   │   ├── config.ts
│   │   ├── index.ts
│   │   ├── mask.ts
│   │   ├── plugin.ts
│   │   ├── prompt.ts
│   │   ├── sd.ts
│   │   ├── sync.ts
│   │   └── update.ts
│   ├── styles/
│   │   ├── animation.scss
│   │   ├── globals.scss
│   │   ├── highlight.scss
│   │   ├── markdown.scss
│   │   └── window.scss
│   ├── typing.ts
│   ├── utils/
│   │   ├── audio.ts
│   │   ├── auth-settings-events.ts
│   │   ├── baidu.ts
│   │   ├── chat.ts
│   │   ├── clone.ts
│   │   ├── cloud/
│   │   │   ├── index.ts
│   │   │   ├── upstash.ts
│   │   │   └── webdav.ts
│   │   ├── cloudflare.ts
│   │   ├── format.ts
│   │   ├── hmac.ts
│   │   ├── hooks.ts
│   │   ├── indexedDB-storage.ts
│   │   ├── merge.ts
│   │   ├── model.ts
│   │   ├── ms_edge_tts.ts
│   │   ├── object.ts
│   │   ├── store.ts
│   │   ├── stream.ts
│   │   ├── sync.ts
│   │   ├── tencent.ts
│   │   └── token.ts
│   └── utils.ts
├── docker-compose.yml
├── docs/
│   ├── bt-cn.md
│   ├── cloudflare-pages-cn.md
│   ├── cloudflare-pages-en.md
│   ├── cloudflare-pages-es.md
│   ├── cloudflare-pages-ja.md
│   ├── cloudflare-pages-ko.md
│   ├── faq-cn.md
│   ├── faq-en.md
│   ├── faq-es.md
│   ├── faq-ja.md
│   ├── faq-ko.md
│   ├── synchronise-chat-logs-cn.md
│   ├── synchronise-chat-logs-en.md
│   ├── synchronise-chat-logs-es.md
│   ├── synchronise-chat-logs-ja.md
│   ├── synchronise-chat-logs-ko.md
│   ├── translation.md
│   ├── user-manual-cn.md
│   ├── vercel-cn.md
│   ├── vercel-es.md
│   ├── vercel-ja.md
│   └── vercel-ko.md
├── jest.config.ts
├── jest.setup.ts
├── next.config.mjs
├── package.json
├── public/
│   ├── audio-processor.js
│   ├── plugins.json
│   ├── prompts.json
│   ├── robots.txt
│   ├── serviceWorker.js
│   ├── serviceWorkerRegister.js
│   └── site.webmanifest
├── scripts/
│   ├── .gitignore
│   ├── delete-deployment-preview.sh
│   ├── fetch-prompts.mjs
│   ├── init-proxy.sh
│   ├── proxychains.template.conf
│   └── setup.sh
├── src-tauri/
│   ├── .gitignore
│   ├── Cargo.toml
│   ├── build.rs
│   ├── icons/
│   │   └── icon.icns
│   ├── src/
│   │   ├── main.rs
│   │   └── stream.rs
│   └── tauri.conf.json
├── test/
│   ├── model-available.test.ts
│   ├── model-provider.test.ts
│   ├── sum-module.test.ts
│   └── vision-model-checker.test.ts
├── tsconfig.json
└── vercel.json

================================================
FILE CONTENTS
================================================

================================================
FILE: .babelrc
================================================
{
  "presets": [
    [
      "next/babel",
      {
        "preset-env": {
          "targets": {
            "browsers": ["> 0.25%, not dead"]
          }
        }
      }
    ]
  ]
}


================================================
FILE: .dockerignore
================================================
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Node.js dependencies
/node_modules
/jspm_packages

# TypeScript v1 declaration files
typings

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.test

# local env files
.env*.local

# Next.js build output
.next
out

# Nuxt.js build output
.nuxt
dist

# Gatsby files
.cache/


# Vuepress build output
.vuepress/dist

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# Temporary folders
tmp
temp

# IDE and editor directories
.idea
.vscode
*.swp
*.swo
*~

# OS generated files
.DS_Store
Thumbs.db

# secret key
*.key
*.key.pub


================================================
FILE: .eslintignore
================================================
public/serviceWorker.js
app/mcp/mcp_config.json
app/mcp/mcp_config.default.json

================================================
FILE: .eslintrc.json
================================================
{
  "extends": "next/core-web-vitals",
  "plugins": ["prettier", "unused-imports"],
  "rules": {
    "unused-imports/no-unused-imports": "warn"
  }
}


================================================
FILE: .github/ISSUE_TEMPLATE/1_bug_report.yml
================================================
name: '🐛 Bug Report'
description: 'Report a bug'
title: '[Bug] '
labels: ['bug']
body:
  - type: dropdown
    attributes:
      label: '📦 Deployment Method'
      multiple: true
      options:
        - 'Official installation package'
        - 'Vercel'
        - 'Zeabur'
        - 'Sealos'
        - 'Netlify'
        - 'Docker'
        - 'Other'
    validations:
      required: true
  - type: input
    attributes:
      label: '📌 Version'
    validations:
      required: true
  
  - type: dropdown
    attributes:
      label: '💻 Operating System'
      multiple: true
      options:
        - 'Windows'
        - 'macOS'
        - 'Ubuntu'
        - 'Other Linux'
        - 'iOS'
        - 'iPad OS'
        - 'Android'
        - 'Other'
    validations:
      required: true
  - type: input
    attributes:
      label: '📌 System Version'
    validations:
      required: true
  - type: dropdown
    attributes:
      label: '🌐 Browser'
      multiple: true
      options:
        - 'Chrome'
        - 'Edge'
        - 'Safari'
        - 'Firefox'
        - 'Other'
    validations:
      required: true
  - type: input
    attributes:
      label: '📌 Browser Version'
    validations:
      required: true
  - type: textarea
    attributes:
      label: '🐛 Bug Description'
      description: A clear and concise description of the bug, if the above option is `Other`, please also explain in detail.
    validations:
      required: true
  - type: textarea
    attributes:
      label: '📷 Recurrence Steps'
      description: A clear and concise description of how to reproduce the issue.
  - type: textarea
    attributes:
      label: '🚦 Expected Behavior'
      description: A clear and concise description of what you expected to happen.
  - type: textarea
    attributes:
      label: '📝 Additional Information'
      description: If your problem needs further explanation, or if the issue you're seeing cannot be reproduced in a gist, please add more information here.

================================================
FILE: .github/ISSUE_TEMPLATE/1_bug_report_cn.yml
================================================
name: '🐛 反馈缺陷'
description: '反馈一个问题/缺陷'
title: '[Bug] '
labels: ['bug']
body:
  - type: dropdown
    attributes:
      label: '📦 部署方式'
      multiple: true
      options:
        - '官方安装包'
        - 'Vercel'
        - 'Zeabur'
        - 'Sealos'
        - 'Netlify'
        - 'Docker'
        - 'Other'
    validations:
      required: true
  - type: input
    attributes:
      label: '📌 软件版本'
    validations:
      required: true

  - type: dropdown
    attributes:
      label: '💻 系统环境'
      multiple: true
      options:
        - 'Windows'
        - 'macOS'
        - 'Ubuntu'
        - 'Other Linux'
        - 'iOS'
        - 'iPad OS'
        - 'Android'
        - 'Other'
    validations:
      required: true
  - type: input
    attributes:
      label: '📌 系统版本'
    validations:
      required: true
  - type: dropdown
    attributes:
      label: '🌐 浏览器'
      multiple: true
      options:
        - 'Chrome'
        - 'Edge'
        - 'Safari'
        - 'Firefox'
        - 'Other'
    validations:
      required: true
  - type: input
    attributes:
      label: '📌 浏览器版本'
    validations:
      required: true
  - type: textarea
    attributes:
      label: '🐛 问题描述'
      description: 请提供一个清晰且简洁的问题描述,若上述选项为`Other`,也请详细说明。
    validations:
      required: true
  - type: textarea
    attributes:
      label: '📷 复现步骤'
      description: 请提供一个清晰且简洁的描述,说明如何复现问题。
  - type: textarea
    attributes:
      label: '🚦 期望结果'
      description: 请提供一个清晰且简洁的描述,说明您期望发生什么。
  - type: textarea
    attributes:
      label: '📝 补充信息'
      description: 如果您的问题需要进一步说明,或者您遇到的问题无法在一个简单的示例中复现,请在这里添加更多信息。

================================================
FILE: .github/ISSUE_TEMPLATE/2_feature_request.yml
================================================
name: '🌠 Feature Request'
description: 'Suggest an idea'
title: '[Feature Request] '
labels: ['enhancement']
body:
  - type: textarea
    attributes:
      label: '🥰 Feature Description'
      description: Please add a clear and concise description of the problem you are seeking to solve with this feature request.
    validations:
      required: true
  - type: textarea
    attributes:
      label: '🧐 Proposed Solution'
      description: Describe the solution you'd like in a clear and concise manner.
    validations:
      required: true
  - type: textarea
    attributes:
      label: '📝 Additional Information'
      description: Add any other context about the problem here.

================================================
FILE: .github/ISSUE_TEMPLATE/2_feature_request_cn.yml
================================================
name: '🌠 功能需求'
description: '提出需求或建议'
title: '[Feature Request] '
labels: ['enhancement']
body:
  - type: textarea
    attributes:
      label: '🥰 需求描述'
      description: 请添加一个清晰且简洁的问题描述,阐述您希望通过这个功能需求解决的问题。
    validations:
      required: true
  - type: textarea
    attributes:
      label: '🧐 解决方案'
      description: 请清晰且简洁地描述您想要的解决方案。
    validations:
      required: true
  - type: textarea
    attributes:
      label: '📝 补充信息'
      description: 在这里添加关于问题的任何其他背景信息。

================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
#### 💻 变更类型 | Change Type

<!-- For change type, change [ ] to [x]. -->

- [ ] feat    <!-- 引入新功能 | Introduce new features -->
- [ ] fix    <!-- 修复 Bug | Fix a bug -->
- [ ] refactor    <!-- 重构代码(既不修复 Bug 也不添加新功能) | Refactor code that neither fixes a bug nor adds a feature -->
- [ ] perf    <!-- 提升性能的代码变更 | A code change that improves performance -->
- [ ] style    <!-- 添加或更新不影响代码含义的样式文件 | Add or update style files that do not affect the meaning of the code -->
- [ ] test    <!-- 添加缺失的测试或纠正现有的测试 | Adding missing tests or correcting existing tests -->
- [ ] docs    <!-- 仅文档更新 | Documentation only changes -->
- [ ] ci    <!-- 修改持续集成配置文件和脚本 | Changes to our CI configuration files and scripts -->
- [ ] chore    <!-- 其他不修改 src 或 test 文件的变更 | Other changes that don’t modify src or test files -->
- [ ] build    <!-- 进行架构变更 | Make architectural changes -->

#### 🔀 变更说明 | Description of Change

<!-- 
感谢您的 Pull Request ,请提供此 Pull Request 的变更说明
Thank you for your Pull Request. Please provide a description above.
-->

#### 📝 补充信息 | Additional Information

<!-- 
请添加与此 Pull Request 相关的补充信息
Add any other context about the Pull Request here.
-->


================================================
FILE: .github/dependabot.yml
================================================
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates

version: 2
updates:
  - package-ecosystem: "npm" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"


================================================
FILE: .github/workflows/app.yml
================================================
name: Release App

on:
  workflow_dispatch:
  release:
    types: [published]

jobs:
  create-release:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    outputs:
      release_id: ${{ steps.create-release.outputs.result }}

    steps:
      - uses: actions/checkout@v3
      - name: setup node
        uses: actions/setup-node@v3
        with:
          node-version: 18
      - name: get version
        run: echo "PACKAGE_VERSION=$(node -p "require('./src-tauri/tauri.conf.json').package.version")" >> $GITHUB_ENV
      - name: create release
        id: create-release
        uses: actions/github-script@v6
        with:
          script: |
            const { data } = await github.rest.repos.getLatestRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
            })
            return data.id

  build-tauri:
    needs: create-release
    permissions:
      contents: write
    strategy:
      fail-fast: false
      matrix:
        config:
          - os: ubuntu-latest
            arch: x86_64
            rust_target: x86_64-unknown-linux-gnu
          - os: macos-latest
            arch: aarch64
            rust_target: x86_64-apple-darwin,aarch64-apple-darwin
          - os: windows-latest
            arch: x86_64
            rust_target: x86_64-pc-windows-msvc

    runs-on: ${{ matrix.config.os }}
    steps:
      - uses: actions/checkout@v3
      - name: setup node
        uses: actions/setup-node@v3
        with:
          node-version: 18
          cache: 'yarn'
      - name: install Rust stable
        uses: dtolnay/rust-toolchain@stable
        with:
          targets: ${{ matrix.config.rust_target }}
      - uses: Swatinem/rust-cache@v2
        with:
          key: ${{ matrix.config.os }}
      - name: install dependencies (ubuntu only)
        if: matrix.config.os == 'ubuntu-latest'
        run: |
          sudo apt-get update
          sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.0-dev libappindicator3-dev librsvg2-dev patchelf
      - name: install frontend dependencies
        run: yarn install # change this to npm or pnpm depending on which one you use
      - uses: tauri-apps/tauri-action@v0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
          TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
          APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }}
          APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
          APPLE_SIGNING_IDENTITY: ${{ secrets.APPLE_SIGNING_IDENTITY }}
          APPLE_ID: ${{ secrets.APPLE_ID }}
          APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }}
          APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
        with:
          releaseId: ${{ needs.create-release.outputs.release_id }}
          args: ${{ matrix.config.os == 'macos-latest' && '--target universal-apple-darwin' || '' }}

  publish-release:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    needs: [create-release, build-tauri]

    steps:
      - name: publish release
        id: publish-release
        uses: actions/github-script@v6
        env:
          release_id: ${{ needs.create-release.outputs.release_id }}
        with:
          script: |
            github.rest.repos.updateRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
              release_id: process.env.release_id,
              draft: false,
              prerelease: false
            })


================================================
FILE: .github/workflows/deploy_preview.yml
================================================
name: VercelPreviewDeployment

on:
  pull_request_target:
    types:
      - review_requested

env:
  VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }}
  VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }}
  VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }}
  VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }}
  VERCEL_PR_DOMAIN_SUFFIX: ${{ secrets.VERCEL_PR_DOMAIN_SUFFIX }}

permissions:
  contents: read
  statuses: write
  pull-requests: write

jobs:
  deploy-preview:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Extract branch name
        shell: bash
        run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_OUTPUT"
        id: extract_branch

      - name: Hash branch name
        uses: pplanel/hash-calculator-action@v1.3.1
        id: hash_branch
        with:
          input: ${{ steps.extract_branch.outputs.branch }}
          method: MD5

      - name: Set Environment Variables
        id: set_env
        if: github.event_name == 'pull_request_target'
        run: |
          echo "VERCEL_ALIAS_DOMAIN=${{ github.event.pull_request.number }}-${{ github.workflow }}.${VERCEL_PR_DOMAIN_SUFFIX}" >> $GITHUB_OUTPUT

      - name: Install Vercel CLI
        run: npm install --global vercel@latest

      - name: Cache dependencies
        uses: actions/cache@v4
        id: cache-npm
        with:
          path: ~/.npm
          key: npm-${{ hashFiles('package-lock.json') }}
          restore-keys: npm-

      - name: Pull Vercel Environment Information
        run: vercel pull --yes --environment=preview --token=${VERCEL_TOKEN}

      - name: Deploy Project Artifacts to Vercel
        id: vercel
        env:
          META_TAG: ${{ steps.hash_branch.outputs.digest }}-${{ github.run_number }}-${{ github.run_attempt}}
        run: |
          set -e
          vercel pull --yes --environment=preview --token=${VERCEL_TOKEN}
          vercel build --token=${VERCEL_TOKEN}
          vercel deploy --prebuilt --archive=tgz --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }}

          DEFAULT_URL=$(vercel ls --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }})
          ALIAS_URL=$(vercel alias set ${DEFAULT_URL} ${{ steps.set_env.outputs.VERCEL_ALIAS_DOMAIN }} --token=${VERCEL_TOKEN} --scope ${VERCEL_TEAM}| awk '{print $3}')

          echo "New preview URL: ${DEFAULT_URL}"
          echo "New alias URL: ${ALIAS_URL}"
          echo "VERCEL_URL=${ALIAS_URL}" >> "$GITHUB_OUTPUT"

      - uses: mshick/add-pr-comment@v2
        with:
          message: |
            Your build has completed!

            [Preview deployment](${{ steps.vercel.outputs.VERCEL_URL }})


================================================
FILE: .github/workflows/docker.yml
================================================
name: Publish Docker image

on:
  workflow_dispatch:
  release:
    types: [published]

jobs:
  push_to_registry:
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
    steps:
      -
        name: Check out the repo
        uses: actions/checkout@v3
      -
        name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      
      - 
        name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: yidadaa/chatgpt-next-web
          tags: |
            type=raw,value=latest
            type=ref,event=tag
      
      - 
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - 
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      
      - 
        name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
            


================================================
FILE: .github/workflows/issue-translator.yml
================================================
name: Issue Translator
on: 
  issue_comment: 
    types: [created]
  issues: 
    types: [opened]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: usthe/issues-translate-action@v2.7
        with:
          IS_MODIFY_TITLE: false
          CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translating it automatically.


================================================
FILE: .github/workflows/remove_deploy_preview.yml
================================================
name: Removedeploypreview

permissions:
  contents: read
  statuses: write
  pull-requests: write

env:
  VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }}
  VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }}
  VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }}

on:
  pull_request_target:
    types:
      - closed

jobs:
  delete-deployments:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Extract branch name
        shell: bash
        run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
        id: extract_branch

      - name: Hash branch name
        uses: pplanel/hash-calculator-action@v1.3.1
        id: hash_branch
        with:
          input: ${{ steps.extract_branch.outputs.branch }}
          method: MD5

      - name: Call the delete-deployment-preview.sh script
        env:
          META_TAG: ${{ steps.hash_branch.outputs.digest }}
        run: |
          bash ./scripts/delete-deployment-preview.sh


================================================
FILE: .github/workflows/sync.yml
================================================
name: Upstream Sync

permissions:
  contents: write

on:
  schedule:
    - cron: "0 0 * * *" # every day
  workflow_dispatch:

jobs:
  sync_latest_from_upstream:
    name: Sync latest commits from upstream repo
    runs-on: ubuntu-latest
    if: ${{ github.event.repository.fork }}

    steps:
      # Step 1: run a standard checkout action
      - name: Checkout target repo
        uses: actions/checkout@v3

      # Step 2: run the sync action
      - name: Sync upstream changes
        id: sync
        uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
        with:
          upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web
          upstream_sync_branch: main
          target_sync_branch: main
          target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set

          # Set test_mode true to run tests instead of the true action!!
          test_mode: false

      - name: Sync check
        if: failure()
        run: |
          echo "[Error] 由于上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,详细教程请查看:https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E6%89%93%E5%BC%80%E8%87%AA%E5%8A%A8%E6%9B%B4%E6%96%B0"
          echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed tutorial for instructions: https://github.com/Yidadaa/ChatGPT-Next-Web#enable-automatic-updates"
          exit 1


================================================
FILE: .github/workflows/test.yml
================================================
name: Run Tests

on:
  push:
    branches:
      - main
    tags:
      - "!*"
  pull_request:
    types:
      - review_requested

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: 18
          cache: "yarn"

      - name: Cache node_modules
        uses: actions/cache@v4
        with:
          path: node_modules
          key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-node_modules-

      - name: Install dependencies
        run: yarn install

      - name: Run Jest tests
        run: yarn test:ci


================================================
FILE: .gitignore
================================================
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# next.js
/.next/
/out/

# production
/build

# misc
.DS_Store
*.pem

# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*

# local env files
.env*.local

# vercel
.vercel

# typescript
*.tsbuildinfo
next-env.d.ts
dev

.vscode
.idea

# docker-compose env files
.env

*.key
*.key.pub

masks.json

# mcp config
app/mcp/mcp_config.json


================================================
FILE: .gitpod.yml
================================================
# This configuration file was automatically generated by Gitpod.
# Please adjust to your needs (see https://www.gitpod.io/docs/introduction/learn-gitpod/gitpod-yaml)
# and commit this file to your remote git repository to share the goodness with others.

# Learn more from ready-to-use templates: https://www.gitpod.io/docs/introduction/getting-started/quickstart

tasks:
  - init: yarn install && yarn run dev
    command: yarn run dev




================================================
FILE: .husky/pre-commit
================================================
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

npx lint-staged

================================================
FILE: .lintstagedrc.json
================================================
{
  "./app/**/*.{js,ts,jsx,tsx,json,html,css,md}": [
    "eslint --fix",
    "prettier --write"
  ]
}


================================================
FILE: .prettierrc.js
================================================
module.exports = {
  printWidth: 80,
  tabWidth: 2,
  useTabs: false,
  semi: true,
  singleQuote: false,
  trailingComma: 'all',
  bracketSpacing: true,
  arrowParens: 'always',
};


================================================
FILE: CODE_OF_CONDUCT.md
================================================
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
flynn.zhang@foxmail.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.


================================================
FILE: Dockerfile
================================================
# Multi-stage build: deps -> builder -> runner, all derived from node:18-alpine.
FROM node:18-alpine AS base

# Stage 1 (deps): install node_modules only, so this layer is cached unless
# package.json / yarn.lock change.
FROM base AS deps

# libc6-compat: glibc shim required by some prebuilt Node binaries on Alpine (musl).
RUN apk add --no-cache libc6-compat

WORKDIR /app

COPY package.json yarn.lock ./

# NOTE(review): npmmirror is a mainland-China registry mirror for faster installs
# there — confirm this is intended for all build environments.
RUN yarn config set registry 'https://registry.npmmirror.com/'
RUN yarn install

# Stage 2 (builder): compile the Next.js app.
FROM base AS builder

RUN apk update && apk add --no-cache git

# Build-time placeholders so `yarn build` does not fail on missing env vars;
# the real values are supplied when the container is run.
ENV OPENAI_API_KEY=""
ENV GOOGLE_API_KEY=""
ENV CODE=""

WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .

RUN yarn build

# Stage 3 (runner): minimal runtime image using the Next.js standalone output.
FROM base AS runner
WORKDIR /app

# proxychains-ng lets the Node server route outbound traffic through PROXY_URL.
RUN apk add proxychains-ng

# Runtime configuration knobs (all optional; empty by default).
ENV PROXY_URL=""
ENV OPENAI_API_KEY=""
ENV GOOGLE_API_KEY=""
ENV CODE=""
ENV ENABLE_MCP=""

COPY --from=builder /app/public ./public
COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/.next/server ./.next/server

# MCP config directory is made world-writable so the runtime process can update
# it; seeded with the default configuration shipped in the repo.
RUN mkdir -p /app/app/mcp && chmod 777 /app/app/mcp
COPY --from=builder /app/app/mcp/mcp_config.default.json /app/app/mcp/mcp_config.json

EXPOSE 3000

# If PROXY_URL is set: parse protocol/host/port out of the URL, write a
# proxychains config from them, and launch the server through proxychains.
# Otherwise start the server directly.
CMD if [ -n "$PROXY_URL" ]; then \
    export HOSTNAME="0.0.0.0"; \
    protocol=$(echo $PROXY_URL | cut -d: -f1); \
    host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \
    port=$(echo $PROXY_URL | cut -d: -f3); \
    conf=/etc/proxychains.conf; \
    echo "strict_chain" > $conf; \
    echo "proxy_dns" >> $conf; \
    echo "remote_dns_subnet 224" >> $conf; \
    echo "tcp_read_time_out 15000" >> $conf; \
    echo "tcp_connect_time_out 8000" >> $conf; \
    echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \
    echo "localnet ::1/128" >> $conf; \
    echo "[ProxyList]" >> $conf; \
    echo "$protocol $host $port" >> $conf; \
    cat /etc/proxychains.conf; \
    proxychains -f $conf node server.js; \
    else \
    node server.js; \
    fi


================================================
FILE: LICENSE
================================================
MIT License

Copyright (c) 2023-2025 NextChat

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
<div align="center">

<a href='https://nextchat.club'>
  <img src="https://github.com/user-attachments/assets/83bdcc07-ae5e-4954-a53a-ac151ba6ccf3" width="1000" alt="icon"/>
</a>

<h1 align="center">NextChat</h1>

English / [简体中文](./README_CN.md)

<a href="https://trendshift.io/repositories/5973" target="_blank"><img src="https://trendshift.io/api/badge/repositories/5973" alt="ChatGPTNextWeb%2FChatGPT-Next-Web | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>

✨ Light and Fast AI Assistant, with Claude, DeepSeek, GPT4 & Gemini Pro support.

[![Saas][Saas-image]][saas-url]
[![Web][Web-image]][web-url]
[![Windows][Windows-image]][download-url]
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]

[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.club) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)

[saas-url]: https://nextchat.club?utm_source=readme
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
[web-url]: https://app.nextchat.club/
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu

[<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://vercel.com/button" alt="Deploy on Vercel" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/ChatGPTNextWeb/NextChat)

[<img src="https://github.com/user-attachments/assets/903482d4-3e87-4134-9af1-f2588fa90659" height="50" width="" >](https://monica.im/?utm=nxcrp)

</div>

## ❤️ Sponsor AI API

<a href='https://302.ai/'>
  <img src="https://github.com/user-attachments/assets/a03edf82-2031-4f23-bdb8-bfc0bfd168a4" width="100%" alt="icon"/>
</a>

[302.AI](https://302.ai/) is a pay-as-you-go AI application platform that offers the most comprehensive AI APIs and online applications available.

## 🥳 Cheer for NextChat iOS Version Online!

> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)

> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)

![Github iOS Image](https://github.com/user-attachments/assets/e0aa334f-4c13-4dc9-8310-e3b09fa4b9f3)

## 🫣 NextChat Support MCP !

> Before build, please set env ENABLE_MCP=true

<img src="https://github.com/user-attachments/assets/d8851f40-4e36-4335-b1a4-ec1e11488c7e"/>

## Enterprise Edition

Meeting Your Company's Privatization and Customization Deployment Requirements:

- **Brand Customization**: Tailored VI/UI to seamlessly align with your corporate brand image.
- **Resource Integration**: Unified configuration and management of dozens of AI resources by company administrators, ready for use by team members.
- **Permission Control**: Clearly defined member permissions, resource permissions, and knowledge base permissions, all controlled via a corporate-grade Admin Panel.
- **Knowledge Integration**: Combining your internal knowledge base with AI capabilities, making it more relevant to your company's specific business needs compared to general AI.
- **Security Auditing**: Automatically intercept sensitive inquiries and trace all historical conversation records, ensuring AI adherence to corporate information security standards.
- **Private Deployment**: Enterprise-level private deployment supporting various mainstream private cloud solutions, ensuring data security and privacy protection.
- **Continuous Updates**: Ongoing updates and upgrades in cutting-edge capabilities like multimodal AI, ensuring consistent innovation and advancement.

For enterprise inquiries, please contact: **business@nextchat.dev**

## Screenshots

![Settings](./docs/images/settings.png)

![More](./docs/images/more.png)

## Features

- **Deploy for free with one-click** on Vercel in under 1 minute
- Compact client (~5MB) on Linux/Windows/MacOS, [download it now](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
- Fully compatible with self-deployed LLMs, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI)
- Privacy first, all data is stored locally in the browser
- Markdown support: LaTeX, mermaid, code highlight, etc.
- Responsive design, dark mode and PWA
- Fast first screen loading speed (~100kb), support streaming response
- New in v2: create, share and debug your chat tools with prompt templates (mask)
- Awesome prompts powered by [awesome-chatgpt-prompts-zh](https://github.com/PlexPt/awesome-chatgpt-prompts-zh) and [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts)
- Automatically compresses chat history to support long conversations while also saving your tokens
- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia

<div align="center">
   
![主界面](./docs/images/cover.png)

</div>

## Roadmap

- [x] System Prompt: pin a user defined prompt as system prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)
- [x] User Prompt: user can edit and save custom prompts to prompt list
- [x] Prompt Template: create a new chat with pre-defined in-context prompts [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993)
- [x] Share as image, share to ShareGPT [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741)
- [x] Desktop App with tauri
- [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc.
- [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
  - [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
- [x] Supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- [ ] local knowledge base

## What's New

- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
- 🚀 v2.14.0 Now supports Artifacts & SD
- 🚀 v2.10.1 support Google Gemini Pro model.
- 🚀 v2.9.11 you can use azure endpoint now.
- 🚀 v2.8 now we have a client that runs across all platforms!
- 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).

## Get Started

1. Get [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. Click
   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web), remember that `CODE` is your page password;
3. Enjoy :)

## FAQ

[English > FAQ](./docs/faq-en.md)

## Keep Updated

If you have deployed your own project with just one click following the steps above, you may encounter the issue of "Updates Available" constantly showing up. This is because Vercel will create a new project for you by default instead of forking this project, resulting in the inability to detect updates correctly.

We recommend that you follow the steps below to re-deploy:

- Delete the original repository;
- Use the fork button in the upper right corner of the page to fork this project;
- Choose and deploy in Vercel again, [please see the detailed tutorial](./docs/vercel-cn.md).

### Enable Automatic Updates

> If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code).

After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour:

![Automatic Updates](./docs/images/enable-actions.jpg)

![Enable Automatic Updates](./docs/images/enable-actions-sync.jpg)

### Manually Updating Code

If you want to update instantly, you can check out the [GitHub documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork) to learn how to synchronize a forked project with upstream code.

You can star or watch this project or follow author to get release notifications in time.

## Access Password

This project provides limited access control. Please add an environment variable named `CODE` on the vercel environment variables page. The value should be passwords separated by comma like this:

```
code1,code2,code3
```

After adding or modifying this environment variable, please redeploy the project for the changes to take effect.

## Environment Variables

### `CODE` (optional)

Access password, separated by comma.

### `OPENAI_API_KEY` (required)

Your openai api key, join multiple api keys with comma.

### `BASE_URL` (optional)

> Default: `https://api.openai.com`

> Examples: `http://your-openai-proxy.com`

Override openai api request base url.

### `OPENAI_ORG_ID` (optional)

Specify OpenAI organization ID.

### `AZURE_URL` (optional)

> Example: https://{azure-resource-url}/openai

Azure deploy url.

### `AZURE_API_KEY` (optional)

Azure Api Key.

### `AZURE_API_VERSION` (optional)

Azure Api Version, find it at [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions).

### `GOOGLE_API_KEY` (optional)

Google Gemini Pro Api Key.

### `GOOGLE_URL` (optional)

Google Gemini Pro Api Url.

### `ANTHROPIC_API_KEY` (optional)

anthropic claude Api Key.

### `ANTHROPIC_API_VERSION` (optional)

anthropic claude Api version.

### `ANTHROPIC_URL` (optional)

anthropic claude Api Url.

### `BAIDU_API_KEY` (optional)

Baidu Api Key.

### `BAIDU_SECRET_KEY` (optional)

Baidu Secret Key.

### `BAIDU_URL` (optional)

Baidu Api Url.

### `BYTEDANCE_API_KEY` (optional)

ByteDance Api Key.

### `BYTEDANCE_URL` (optional)

ByteDance Api Url.

### `ALIBABA_API_KEY` (optional)

Alibaba Cloud Api Key.

### `ALIBABA_URL` (optional)

Alibaba Cloud Api Url.

### `IFLYTEK_URL` (Optional)

iflytek Api Url.

### `IFLYTEK_API_KEY` (Optional)

iflytek Api Key.

### `IFLYTEK_API_SECRET` (Optional)

iflytek Api Secret.

### `CHATGLM_API_KEY` (optional)

ChatGLM Api Key.

### `CHATGLM_URL` (optional)

ChatGLM Api Url.

### `DEEPSEEK_API_KEY` (optional)

DeepSeek Api Key.

### `DEEPSEEK_URL` (optional)

DeepSeek Api Url.

### `HIDE_USER_API_KEY` (optional)

> Default: Empty

If you do not want users to input their own API key, set this value to 1.

### `DISABLE_GPT4` (optional)

> Default: Empty

If you do not want users to use GPT-4, set this value to 1.

### `ENABLE_BALANCE_QUERY` (optional)

> Default: Empty

If you do want users to query balance, set this value to 1.

### `DISABLE_FAST_LINK` (optional)

> Default: Empty

If you want to disable parse settings from url, set this to 1.

### `CUSTOM_MODELS` (optional)

> Default: Empty
> Example: `+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` means add `llama, claude-2` to model list, and remove `gpt-3.5-turbo` from list, and display `gpt-4-1106-preview` as `gpt-4-turbo`.

To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma.

Use `-all` to disable all default models, `+all` to enable all default models.

For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.

> Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list.
> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.

For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.

> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.

### `DEFAULT_MODEL` (optional)

Change default model

### `VISION_MODELS` (optional)

> Default: Empty
> Example: `gpt-4-vision,claude-3-opus,my-custom-model` means add vision capabilities to these models in addition to the default pattern matches (which detect models containing keywords like "vision", "claude-3", "gemini-1.5", etc).

Add additional models to have vision capabilities, beyond the default pattern matching. Multiple models should be separated by commas.

### `WHITE_WEBDAV_ENDPOINTS` (optional)

You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format:

- Each address must be a complete endpoint
  > `https://xxxx/yyy`
- Multiple addresses are connected by ', '

### `DEFAULT_INPUT_TEMPLATE` (optional)

Customize the default template used to initialize the User Input Preprocessing configuration item in Settings.

### `STABILITY_API_KEY` (optional)

Stability API key.

### `STABILITY_URL` (optional)

Customize Stability API url.

### `ENABLE_MCP` (optional)

Enable the MCP (Model Context Protocol) feature.

### `SILICONFLOW_API_KEY` (optional)

SiliconFlow API Key.

### `SILICONFLOW_URL` (optional)

SiliconFlow API URL.

### `AI302_API_KEY` (optional)

302.AI API Key.

### `AI302_URL` (optional)

302.AI API URL.

## Requirements

NodeJS >= 18, Docker >= 20

## Development

[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

Before starting development, you must create a new `.env.local` file at project root, and place your api key into it:

```
OPENAI_API_KEY=<your api key here>

# if you are not able to access openai service, use this BASE_URL
BASE_URL=https://chatgpt1.nextweb.fun/api/proxy
```

### Local Development

```shell
# 1. install nodejs and yarn first
# 2. config local env vars in `.env.local`
# 3. run
yarn install
yarn dev
```

## Deployment

### Docker (Recommended)

```shell
docker pull yidadaa/chatgpt-next-web

docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=your-password \
   yidadaa/chatgpt-next-web
```

You can start service behind a proxy:

```shell
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=your-password \
   -e PROXY_URL=http://localhost:7890 \
   yidadaa/chatgpt-next-web
```

If your proxy needs password, use:

```shell
-e PROXY_URL="http://127.0.0.1:7890 user pass"
```

If enable MCP, use:

```
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=your-password \
   -e ENABLE_MCP=true \
   yidadaa/chatgpt-next-web
```

### Shell

```shell
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh)
```

## Synchronizing Chat Records (UpStash)

| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Español](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md)

## Documentation

> Please go to the [docs](./docs) directory for more documentation instructions.

- [Deploy with cloudflare (Deprecated)](./docs/cloudflare-pages-en.md)
- [Frequent Ask Questions](./docs/faq-en.md)
- [How to add a new translation](./docs/translation.md)
- [How to use Vercel (No English)](./docs/vercel-cn.md)
- [User Manual (Only Chinese, WIP)](./docs/user-manual-cn.md)

## Translation

If you want to add a new translation, read this [document](./docs/translation.md).

## Donation

[Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa)

## Special Thanks

### Contributors

<a href="https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=ChatGPTNextWeb/ChatGPT-Next-Web" />
</a>

## LICENSE

[MIT](https://opensource.org/license/mit/)


================================================
FILE: README_CN.md
================================================
<div align="center">

<a href='#企业版'>
  <img src="./docs/images/ent.svg" alt="icon"/>
</a>

<h1 align="center">NextChat</h1>

一键免费部署你的私人 ChatGPT 网页应用,支持 Claude, GPT4 & Gemini Pro 模型。

[NextChatAI](https://nextchat.club?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)

[<img src="https://vercel.com/button" alt="Deploy on Vercel" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

</div>

## Sponsor AI API

<a href='https://302.ai/'>
  <img src="https://github.com/user-attachments/assets/d8c0c513-1e18-4d3b-a2a9-ff3696aec0d4" width="100%" alt="icon"/>
</a>

[302.AI](https://302.ai/) 是一个按需付费的AI应用平台,提供市面上最全的AI API和AI在线应用。

## 企业版

满足您公司私有化部署和定制需求

- **品牌定制**:企业量身定制 VI/UI,与企业品牌形象无缝契合
- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用
- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制
- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求
- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范
- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护
- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进

企业版咨询: **business@nextchat.dev**

<img width="300" src="https://github.com/user-attachments/assets/bb29a11d-ff75-48a8-b1f8-d2d7238cf987">

## 开始使用

1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. 点击右侧按钮开始部署:
   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
3. 部署完毕后,即可开始使用;
4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。

<div align="center">
   
![主界面](./docs/images/cover.png)

</div>

## 保持更新

如果你按照上述步骤一键部署了自己的项目,可能会发现总是提示“存在更新”的问题,这是由于 Vercel 会默认为你创建一个新项目而不是 fork 本项目,这会导致无法正确地检测更新。
推荐你按照下列步骤重新部署:

- 删除掉原先的仓库;
- 使用页面右上角的 fork 按钮,fork 本项目;
- 在 Vercel 重新选择并部署,[请查看详细教程](./docs/vercel-cn.md#如何新建项目)。

### 打开自动更新

> 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)!

当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows,并启用 Upstream Sync Action,启用之后即可开启每小时定时自动更新:

![自动更新](./docs/images/enable-actions.jpg)

![启用自动更新](./docs/images/enable-actions-sync.jpg)

### 手动更新代码

如果你想让手动立即更新,可以查看 [Github 的文档](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork) 了解如何让 fork 的项目与上游代码同步。

你可以 star/watch 本项目或者 follow 作者来及时获得新功能更新通知。

## 配置页面访问密码

> 配置密码后,用户需要在设置页手动填写访问码才可以正常聊天,否则会通过消息提示未授权状态。

> **警告**:请务必将密码的位数设置得足够长,最好 7 位以上,否则[会被爆破](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)。

本项目提供有限的权限控制功能,请在 Vercel 项目控制面板的环境变量页增加名为 `CODE` 的环境变量,值为用英文逗号分隔的自定义密码:

```
code1,code2,code3
```

增加或修改该环境变量后,请**重新部署**项目使改动生效。

## 环境变量

> 本项目大多数配置项都通过环境变量来设置,教程:[如何修改 Vercel 环境变量](./docs/vercel-cn.md)。

### `OPENAI_API_KEY` (必填项)

OpenAI 密钥,你在 openai 账户页面申请的 api key,使用英文逗号隔开多个 key,这样可以随机轮询这些 key。

### `CODE` (可选)

访问密码,可选,可以使用逗号隔开多个密码。

**警告**:如果不填写此项,则任何人都可以直接使用你部署后的网站,可能会导致你的 token 被急速消耗完毕,建议填写此选项。

### `BASE_URL` (可选)

> Default: `https://api.openai.com`

> Examples: `http://your-openai-proxy.com`

OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填写此选项。

> 如果遇到 ssl 证书问题,请将 `BASE_URL` 的协议设置为 http。

### `OPENAI_ORG_ID` (可选)

指定 OpenAI 中的组织 ID。

### `AZURE_URL` (可选)

> 形如:https://{azure-resource-url}/openai

Azure 部署地址。

### `AZURE_API_KEY` (可选)

Azure 密钥。

### `AZURE_API_VERSION` (可选)

Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。

### `GOOGLE_API_KEY` (可选)

Google Gemini Pro 密钥.

### `GOOGLE_URL` (可选)

Google Gemini Pro Api Url.

### `ANTHROPIC_API_KEY` (可选)

anthropic claude Api Key.

### `ANTHROPIC_API_VERSION` (可选)

anthropic claude Api version.

### `ANTHROPIC_URL` (可选)

anthropic claude Api Url.

### `BAIDU_API_KEY` (可选)

Baidu Api Key.

### `BAIDU_SECRET_KEY` (可选)

Baidu Secret Key.

### `BAIDU_URL` (可选)

Baidu Api Url.

### `BYTEDANCE_API_KEY` (可选)

ByteDance Api Key.

### `BYTEDANCE_URL` (可选)

ByteDance Api Url.

### `ALIBABA_API_KEY` (可选)

阿里云(千问)Api Key.

### `ALIBABA_URL` (可选)

阿里云(千问)Api Url.

### `IFLYTEK_URL` (可选)

讯飞星火Api Url.

### `IFLYTEK_API_KEY` (可选)

讯飞星火Api Key.

### `IFLYTEK_API_SECRET` (可选)

讯飞星火Api Secret.

### `CHATGLM_API_KEY` (可选)

ChatGLM Api Key.

### `CHATGLM_URL` (可选)

ChatGLM Api Url.

### `DEEPSEEK_API_KEY` (可选)

DeepSeek Api Key.

### `DEEPSEEK_URL` (可选)

DeepSeek Api Url.

### `HIDE_USER_API_KEY` (可选)

如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。

### `DISABLE_GPT4` (可选)

如果你不想让用户使用 GPT-4,将此环境变量设置为 1 即可。

### `ENABLE_BALANCE_QUERY` (可选)

如果你想启用余额查询功能,将此环境变量设置为 1 即可。

### `DISABLE_FAST_LINK` (可选)

如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。

### `WHITE_WEBDAV_ENDPOINTS` (可选)

如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求:

- 每一个地址必须是一个完整的 endpoint
  > `https://xxxx/xxx`
- 多个地址以`,`相连

### `CUSTOM_MODELS` (可选)

> 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`。
> 如果你想先禁用所有模型,再启用指定模型,可以使用 `-all,+gpt-3.5-turbo`,则表示仅启用 `gpt-3.5-turbo`

用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。

在Azure的模式下,支持使用`modelName@Azure=deploymentName`的方式配置模型名称和部署名称(deploy-name)

> 示例:`+gpt-3.5-turbo@Azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。
> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@Azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)`

在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name)

> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项

### `DEFAULT_MODEL` (可选)

更改默认模型

### `VISION_MODELS` (可选)

> 默认值:空
> 示例:`gpt-4-vision,claude-3-opus,my-custom-model` 表示为这些模型添加视觉能力,作为对默认模式匹配的补充(默认会检测包含"vision"、"claude-3"、"gemini-1.5"等关键词的模型)。

在默认模式匹配之外,添加更多具有视觉能力的模型。多个模型用逗号分隔。

### `DEFAULT_INPUT_TEMPLATE` (可选)

自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项

### `STABILITY_API_KEY` (optional)

Stability API密钥

### `STABILITY_URL` (optional)

自定义的Stability API请求地址

### `ENABLE_MCP` (optional)

启用MCP(Model Context Protocol)功能

### `SILICONFLOW_API_KEY` (optional)

SiliconFlow API Key.

### `SILICONFLOW_URL` (optional)

SiliconFlow API URL.

### `AI302_API_KEY` (optional)

302.AI API Key.

### `AI302_URL` (optional)

302.AI API URL.

## 开发

点击下方按钮,开始二次开发:

[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

在开始写代码之前,需要在项目根目录新建一个 `.env.local` 文件,里面填入环境变量:

```
OPENAI_API_KEY=<your api key here>

# 中国大陆用户,可以使用本项目自带的代理进行开发,你也可以自由选择其他代理地址
BASE_URL=https://b.nextweb.fun/api/proxy
```

### 本地开发

1. 安装 nodejs 18 和 yarn,具体细节请询问 ChatGPT;
2. 执行 `yarn install && yarn dev` 即可。⚠️ 注意:此命令仅用于本地开发,不要用于部署!
3. 如果你想本地部署,请使用 `yarn install && yarn build && yarn start` 命令,你可以配合 pm2 来守护进程,防止被杀死,详情询问 ChatGPT。

## 部署

### 宝塔面板部署

> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)

### 容器部署 (推荐)

> Docker 版本需要在 20 及其以上,否则会提示找不到镜像。

> ⚠️ 注意:docker 版本在大多数时间都会落后最新的版本 1 到 2 天,所以部署后会持续出现“存在更新”的提示,属于正常现象。

```shell
docker pull yidadaa/chatgpt-next-web

docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=页面访问密码 \
   yidadaa/chatgpt-next-web
```

你也可以指定 proxy:

```shell
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=页面访问密码 \
   --net=host \
   -e PROXY_URL=http://127.0.0.1:7890 \
   yidadaa/chatgpt-next-web
```

如需启用 MCP 功能,可以使用:

```shell
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=页面访问密码 \
   -e ENABLE_MCP=true \
   yidadaa/chatgpt-next-web
```

如果你的本地代理需要账号密码,可以使用:

```shell
-e PROXY_URL="http://127.0.0.1:7890 user password"
```

如果你需要指定其他环境变量,请自行在上述命令中增加 `-e 环境变量=环境变量值` 来指定。

### 本地部署

在控制台运行下方命令:

```shell
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh)
```

⚠️ 注意:如果你安装过程中遇到了问题,请使用 docker 部署。

## 鸣谢

### 捐赠者

> 见英文版。

### 贡献者

[见项目贡献者列表](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)

### 相关项目

- [one-api](https://github.com/songquanpeng/one-api): 一站式大模型额度管理平台,支持市面上所有主流大语言模型

## 开源协议

[MIT](https://opensource.org/license/mit/)


================================================
FILE: README_JA.md
================================================
<div align="center">
<img src="./docs/images/ent.svg" alt="プレビュー"/>

<h1 align="center">NextChat</h1>

ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。

[NextChatAI](https://nextchat.club?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)

[<img src="https://vercel.com/button" alt="Vercelでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

</div>

## Sponsor AI API

<a href='https://302.ai/'>
  <img src="https://github.com/user-attachments/assets/6cf24233-1010-43e0-9a83-a11159866175" width="100%" alt="icon"/>
</a>

[302.AI](https://302.ai/) は、オンデマンドで支払うAIアプリケーションプラットフォームで、最も安全なAI APIとAIオンラインアプリケーションを提供します。

## 企業版

あなたの会社のプライベートデプロイとカスタマイズのニーズに応える

- **ブランドカスタマイズ**:企業向けに特別に設計された VI/UI、企業ブランドイメージとシームレスにマッチ
- **リソース統合**:企業管理者が数十種類のAIリソースを統一管理、チームメンバーはすぐに使用可能
- **権限管理**:メンバーの権限、リソースの権限、ナレッジベースの権限を明確にし、企業レベルのAdmin Panelで統一管理
- **知識の統合**:企業内部のナレッジベースとAI機能を結びつけ、汎用AIよりも企業自身の業務ニーズに近づける
- **セキュリティ監査**:機密質問を自動的にブロックし、すべての履歴対話を追跡可能にし、AIも企業の情報セキュリティ基準に従わせる
- **プライベートデプロイ**:企業レベルのプライベートデプロイ、主要なプライベートクラウドデプロイをサポートし、データのセキュリティとプライバシーを保護
- **継続的な更新**:マルチモーダル、エージェントなどの最先端機能を継続的に更新し、常に最新であり続ける

企業版のお問い合わせ: **business@nextchat.dev**

## 始めに

1. [OpenAI API Key](https://platform.openai.com/account/api-keys)を準備する;
2. 右側のボタンをクリックしてデプロイを開始:
   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) 、GitHubアカウントで直接ログインし、環境変数ページにAPI Keyと[ページアクセスパスワード](#設定ページアクセスパスワード) CODEを入力してください;
3. デプロイが完了したら、すぐに使用を開始できます;
4. (オプション)[カスタムドメインをバインド](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercelが割り当てたドメインDNSは一部の地域で汚染されているため、カスタムドメインをバインドすると直接接続できます。

<div align="center">
   
![メインインターフェース](./docs/images/cover.png)

</div>

## 更新を維持する

もし上記の手順に従ってワンクリックでプロジェクトをデプロイした場合、「更新があります」というメッセージが常に表示されることがあります。これは、Vercel がデフォルトで新しいプロジェクトを作成するためで、本プロジェクトを fork していないことが原因です。そのため、正しく更新を検出できません。

以下の手順に従って再デプロイすることをお勧めします:

- 元のリポジトリを削除する
- ページ右上の fork ボタンを使って、本プロジェクトを fork する
- Vercel で再度選択してデプロイする、[詳細な手順はこちらを参照してください](./docs/vercel-ja.md)。

### 自動更新を開く

> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください!

プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります:

![自動更新](./docs/images/enable-actions.jpg)

![自動更新を有効にする](./docs/images/enable-actions-sync.jpg)

### 手動でコードを更新する

手動で即座に更新したい場合は、[GitHub のドキュメント](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork)を参照して、fork したプロジェクトを上流のコードと同期する方法を確認してください。

このプロジェクトをスターまたはウォッチしたり、作者をフォローすることで、新機能の更新通知をすぐに受け取ることができます。

## ページアクセスパスワードを設定する

> パスワードを設定すると、ユーザーは設定ページでアクセスコードを手動で入力しない限り、通常のチャットができず、未承認の状態であることを示すメッセージが表示されます。

> **警告**:パスワードの桁数は十分に長く設定してください。7桁以上が望ましいです。さもないと、[ブルートフォース攻撃を受ける可能性があります](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)。

このプロジェクトは限られた権限管理機能を提供しています。Vercel プロジェクトのコントロールパネルで、環境変数ページに `CODE` という名前の環境変数を追加し、値をカンマで区切ったカスタムパスワードに設定してください:

```
code1,code2,code3
```

この環境変数を追加または変更した後、**プロジェクトを再デプロイ**して変更を有効にしてください。

## 環境変数

> 本プロジェクトのほとんどの設定は環境変数で行います。チュートリアル:[Vercel の環境変数を変更する方法](./docs/vercel-ja.md)。

### `OPENAI_API_KEY` (必須)

OpenAI の API キー。OpenAI アカウントページで申請したキーをカンマで区切って複数設定できます。これにより、ランダムにキーが選択されます。

### `CODE` (オプション)

アクセスパスワード。カンマで区切って複数設定可能。

**警告**:この項目を設定しないと、誰でもデプロイしたウェブサイトを利用でき、トークンが急速に消耗する可能性があるため、設定をお勧めします。

### `BASE_URL` (オプション)

> デフォルト: `https://api.openai.com`

> 例: `http://your-openai-proxy.com`

OpenAI API のプロキシ URL。手動で OpenAI API のプロキシを設定している場合はこのオプションを設定してください。

> SSL 証明書の問題がある場合は、`BASE_URL` のプロトコルを http に設定してください。

### `OPENAI_ORG_ID` (オプション)

OpenAI の組織 ID を指定します。

### `AZURE_URL` (オプション)

> 形式: https://{azure-resource-url}/openai/deployments/{deploy-name}
> `CUSTOM_MODELS` で `displayName` 形式で {deploy-name} を設定した場合、`AZURE_URL` から {deploy-name} を省略できます。

Azure のデプロイ URL。

### `AZURE_API_KEY` (オプション)

Azure の API キー。

### `AZURE_API_VERSION` (オプション)

Azure API バージョン。[Azure ドキュメント](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)で確認できます。

### `GOOGLE_API_KEY` (オプション)

Google Gemini Pro API キー。

### `GOOGLE_URL` (オプション)

Google Gemini Pro API の URL。

### `ANTHROPIC_API_KEY` (オプション)

Anthropic Claude API キー。

### `ANTHROPIC_API_VERSION` (オプション)

Anthropic Claude API バージョン。

### `ANTHROPIC_URL` (オプション)

Anthropic Claude API の URL。

### `BAIDU_API_KEY` (オプション)

Baidu API キー。

### `BAIDU_SECRET_KEY` (オプション)

Baidu シークレットキー。

### `BAIDU_URL` (オプション)

Baidu API の URL。

### `BYTEDANCE_API_KEY` (オプション)

ByteDance API キー。

### `BYTEDANCE_URL` (オプション)

ByteDance API の URL。

### `ALIBABA_API_KEY` (オプション)

アリババ(千问)API キー。

### `ALIBABA_URL` (オプション)

アリババ(千问)API の URL。

### `HIDE_USER_API_KEY` (オプション)

ユーザーが API キーを入力できないようにしたい場合は、この環境変数を 1 に設定します。

### `DISABLE_GPT4` (オプション)

ユーザーが GPT-4 を使用できないようにしたい場合は、この環境変数を 1 に設定します。

### `ENABLE_BALANCE_QUERY` (オプション)

バランスクエリ機能を有効にしたい場合は、この環境変数を 1 に設定します。

### `DISABLE_FAST_LINK` (オプション)

リンクからのプリセット設定解析を無効にしたい場合は、この環境変数を 1 に設定します。

### `WHITE_WEBDAV_ENDPOINTS` (オプション)

アクセス許可を与える WebDAV サービスのアドレスを追加したい場合、このオプションを使用します。フォーマット要件:

- 各アドレスは完全なエンドポイントでなければなりません。
  > `https://xxxx/xxx`
- 複数のアドレスは `,` で接続します。

### `CUSTOM_MODELS` (オプション)

> 例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` は `qwen-7b-chat` と `glm-6b` をモデルリストに追加し、`gpt-3.5-turbo` を削除し、`gpt-4-1106-preview` のモデル名を `gpt-4-turbo` として表示します。
> すべてのモデルを無効にし、特定のモデルを有効にしたい場合は、`-all,+gpt-3.5-turbo` を使用します。これは `gpt-3.5-turbo` のみを有効にすることを意味します。

モデルリストを管理します。`+` でモデルを追加し、`-` でモデルを非表示にし、`モデル名=表示名` でモデルの表示名をカスタマイズし、カンマで区切ります。

Azure モードでは、`modelName@Azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。

> 例:`+gpt-3.5-turbo@Azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。

ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。

> 例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` この設定でモデルリストに `Doubao-lite-4k(ByteDance)` のオプションが表示されます。

### `DEFAULT_MODEL` (オプション)

デフォルトのモデルを変更します。

### `VISION_MODELS` (オプション)

> デフォルト:空
> 例:`gpt-4-vision,claude-3-opus,my-custom-model` は、これらのモデルにビジョン機能を追加します。これはデフォルトのパターンマッチング("vision"、"claude-3"、"gemini-1.5"などのキーワードを含むモデルを検出)に加えて適用されます。

デフォルトのパターンマッチングに加えて、追加のモデルにビジョン機能を付与します。複数のモデルはカンマで区切ります。

### `DEFAULT_INPUT_TEMPLATE` (オプション)

『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。

### `AI302_API_KEY` (オプション)

302.AI API キー。

### `AI302_URL` (オプション)

302.AI API の URL。

## 開発

下のボタンをクリックして二次開発を開始してください:

[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

コードを書く前に、プロジェクトのルートディレクトリに `.env.local` ファイルを新規作成し、環境変数を記入します:

```
OPENAI_API_KEY=<your api key here>
```

### ローカル開発

1. Node.js 18 と Yarn をインストールします。具体的な方法は ChatGPT にお尋ねください。
2. `yarn install && yarn dev` を実行します。⚠️ 注意:このコマンドはローカル開発用であり、デプロイには使用しないでください。
3. ローカルでデプロイしたい場合は、`yarn install && yarn build && yarn start` コマンドを使用してください。プロセスを守るために pm2 を使用することもできます。詳細は ChatGPT にお尋ねください。

## デプロイ

### コンテナデプロイ(推奨)

> Docker バージョンは 20 以上が必要です。それ以下だとイメージが見つからないというエラーが出ます。

> ⚠️ 注意:Docker バージョンは最新バージョンより 1~2 日遅れることが多いため、デプロイ後に「更新があります」の通知が出続けることがありますが、正常です。

```shell
docker pull yidadaa/chatgpt-next-web

docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=ページアクセスパスワード \
   yidadaa/chatgpt-next-web
```

プロキシを指定することもできます:

```shell
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=ページアクセスパスワード \
   --net=host \
   -e PROXY_URL=http://127.0.0.1:7890 \
   yidadaa/chatgpt-next-web
```

ローカルプロキシがアカウントとパスワードを必要とする場合は、以下を使用できます:

```shell
-e PROXY_URL="http://127.0.0.1:7890 user password"
```

他の環境変数を指定する必要がある場合は、上記のコマンドに `-e 環境変数=環境変数値` を追加して指定してください。

### ローカルデプロイ

コンソールで以下のコマンドを実行します:

```shell
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh)
```

⚠️ 注意:インストール中に問題が発生した場合は、Docker を使用してデプロイしてください。

## 謝辞

### 寄付者

> 英語版をご覧ください。

### 貢献者

[プロジェクトの貢献者リストはこちら](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)

### 関連プロジェクト

- [one-api](https://github.com/songquanpeng/one-api): 一つのプラットフォームで大規模モデルのクォータ管理を提供し、市場に出回っているすべての主要な大規模言語モデルをサポートします。

## オープンソースライセンス

[MIT](https://opensource.org/license/mit/)


================================================
FILE: README_KO.md
================================================
<div align="center">

<a href='https://nextchat.club'>
  <img src="https://github.com/user-attachments/assets/83bdcc07-ae5e-4954-a53a-ac151ba6ccf3" width="1000" alt="icon"/>
</a>

<h1 align="center">NextChat</h1>

[English](./README.md) / [简体中文](./README_CN.md)

<a href="https://trendshift.io/repositories/5973" target="_blank">
  <img src="https://trendshift.io/api/badge/repositories/5973" alt="ChatGPTNextWeb%2FChatGPT-Next-Web | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/>
</a>

✨ 빠르고 가벼운 AI 어시스턴트, Claude, DeepSeek, GPT-4, Gemini Pro 지원

[![Saas][Saas-image]][saas-url]
[![Web][Web-image]][web-url]
[![Windows][Windows-image]][download-url]
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]

[NextChatAI 웹사이트](https://nextchat.club?utm_source=readme) / [iOS 앱](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [웹 데모](https://app.nextchat.club) / [데스크톱 앱](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [엔터프라이즈 버전](#enterprise-edition)

[saas-url]: https://nextchat.club?utm_source=readme
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
[web-url]: https://app.nextchat.club/
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu

[<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://vercel.com/button" alt="Deploy on Vercel" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/ChatGPTNextWeb/NextChat)

[<img src="https://github.com/user-attachments/assets/903482d4-3e87-4134-9af1-f2588fa90659" height="50" width="" >](https://monica.im/?utm=nxcrp)

</div>

## ❤️ AI API 후원사

<a href='https://302.ai/'>
  <img src="https://github.com/user-attachments/assets/a03edf82-2031-4f23-bdb8-bfc0bfd168a4" width="100%" alt="icon"/>
</a>

[302.AI](https://302.ai/)는 사용한 만큼만 비용을 지불하는 AI 애플리케이션 플랫폼으로, 다양한 AI API 및 온라인 애플리케이션을 제공합니다.

## 🥳 NextChat iOS 버전 출시!

> 👉 [지금 설치하기](https://apps.apple.com/us/app/nextchat-ai/id6743085599)

> ❤️ [소스 코드 곧 공개 예정](https://github.com/ChatGPTNextWeb/NextChat-iOS)

![Github iOS Image](https://github.com/user-attachments/assets/e0aa334f-4c13-4dc9-8310-e3b09fa4b9f3)

## 🫣 NextChat, MCP 지원!

> 빌드 전 환경 변수(env) `ENABLE_MCP=true` 설정 필요

<img src="https://github.com/user-attachments/assets/d8851f40-4e36-4335-b1a4-ec1e11488c7e" />

## 엔터프라이즈 버전

회사 내부 시스템에 맞춘 프라이빗 배포 및 맞춤형 커스터마이징 지원:

- **브랜드 커스터마이징**: 기업 이미지에 맞는 UI/UX 테마 적용
- **리소스 통합 관리**: 다양한 AI 모델을 통합하여 팀원이 손쉽게 사용 가능
- **권한 제어**: 관리자 패널을 통한 멤버·리소스·지식 베이스 권한 설정
- **지식 통합**: 사내 문서 및 데이터와 AI를 결합한 맞춤형 답변 제공
- **보안 감사**: 민감한 질문 차단 및 모든 기록 추적 가능
- **프라이빗 배포 지원**: 주요 클라우드 서비스에 맞춘 배포 옵션
- **지속적 업데이트**: 멀티모달 등 최신 AI 기능 지속 반영

엔터프라이즈 문의: **business@nextchat.dev**

## 🖼️ 스크린샷

![설정](./docs/images/settings.png)
![기타](./docs/images/more.png)

## 주요 기능 소개

- Vercel에서 원클릭 무료 배포 (1분 내 완성)
- 모든 OS(Linux/Windows/MacOS)에서 사용 가능한 클라이언트 (~5MB) [지금 다운 받기](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
- 자체 LLM 서버와 완벽 호환. [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) 또는 [LocalAI](https://github.com/go-skynet/LocalAI)와 함께 사용하는 것을 추천
- 개인 정보 보호: 모든 대화 기록은 브라우저에만 저장
- Markdown 지원: LaTex, Mermaid, 코드 하이라이팅 등
- 반응형 디자인, 다크 모드, PWA 지원
- 빠른 초기 로딩 속도 (~100kb), 스트리밍 응답
- 프롬프트 템플릿 생성/공유/디버깅 지원 (v2)
- v2: 프롬프트 템플릿 기반 도구 생성, 공유, 디버깅 가능
- 고급 프롬프트 내장 [awesome-chatgpt-prompts-zh](https://github.com/PlexPt/awesome-chatgpt-prompts-zh) and [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts)
- 긴 대화 내용 자동 압축 저장으로 토큰 절약
- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia

<div align="center">
   
![主界面](./docs/images/cover.png)

</div>

## 개발 로드맵

- [x] 시스템 프롬프트: 사용자가 정의한 프롬프트를 시스템 프롬프트로 고정하기 [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)
- [x] 사용자 프롬프트: 사용자 정의 프롬프트를 편집 및 저장하여 리스트로 관리 가능
- [x] 프롬프트 템플릿: 사전 정의된 인컨텍스트 프롬프트로 새 채팅 생성 [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993)
- [x] 이미지로 공유하거나 ShareGPT로 공유 [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741)
- [x] Tauri 기반 데스크톱 앱
- [x] 자체 모델 호스팅: [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), [LocalAI](https://github.com/go-skynet/LocalAI) 등 서버 배포 모델들과 완벽 호환 (llama, gpt4all, rwkv, vicuna, koala, gpt4all-j, cerebras, falcon, dolly 등)
- [x] 아티팩트: 생성된 콘텐츠 및 웹페이지를 별도 창으로 미리보기, 복사, 공유 가능 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
- [x] 플러그인: 웹 검색, 계산기, 기타 외부 API 기능 지원 [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
- [x] 실시간 채팅 지원 [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- [ ] 로컬 지식 베이스 지원 예정

## 🚀 최근 업데이트

- 🚀 v2.15.8 실시간 채팅 지원 [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- 🚀 v2.15.4 Tauri 기반 LLM API 호출 기능 추가 → 보안 강화 [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 플러그인 기능 추가 → [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
- 🚀 v2.14.0 아티팩트 및 Stable Diffusion 기능 추가
- 🚀 v2.10.1 Google Gemini Pro 모델 지원
- 🚀 v2.9.11 Azure Endpoint 사용 가능
- 🚀 v2.8 모든 플랫폼에서 실행 가능한 클라이언트 출시
- 🚀 v2.7 대화 내용을 이미지로, 또는 ShareGPT로 공유 가능
- 🚀 v2.0 릴리즈: 프롬프트 템플릿 생성 및 아이디어 구현 가능! → [ChatGPT Prompt Engineering Tips](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/)

## 시작하기

1. [OpenAI API 키](https://platform.openai.com/account/api-keys)를 발급받습니다.
2. 
   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) 버튼을 클릭해 Vercel에 배포합니다. `CODE`는 페이지 비밀번호라는 점을 기억하세요.

3. Enjoy :)

## FAQ

[FAQ](./docs/faq-ko.md)

## 최신 상태 유지 (Keep Updated)

Vercel로 배포한 경우, "Updates Available" 메시지가 계속 나타날 수 있습니다. 이는 프로젝트를 포크하지 않고 새로 생성했기 때문입니다.

다음 절차에 따라 다시 배포를 권장합니다:

1. 기존 레포 삭제
2. 우측 상단 "Fork" 버튼 클릭 → 포크 생성
3. 포크된 프로젝트를 다시 Vercel에 배포  
   → [자세한 튜토리얼 보기](./docs/vercel-ko.md)

### 자동 업데이트 활성화 (Enable Automatic Updates)

> Upstream Sync 오류 발생 시, [수동으로 코드 업데이트](./README_KO.md#manually-updating-code)하세요.

프로젝트 포크 후에는 GitHub의 제약으로 인해 Actions 페이지에서 아래 항목들을 수동으로 활성화해야 합니다:

- `Workflows`
- `Upstream Sync Action`

이후 매 시간 자동으로 업데이트됩니다:

![자동 업데이트 활성화](./docs/images/enable-actions.jpg)  
![업스트림 동기화 활성화](./docs/images/enable-actions-sync.jpg)

### 수동 업데이트 방법 (Manually Updating Code)

즉시 업데이트가 필요한 경우, [깃헙 문서](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork)를 참고해 포크된 프로젝트를 upstream code와 동기화하세요.

릴리스 알림을 원하시면 star 또는 watch를 눌러주세요.

## 접근 비밀번호 설정 (Access Password)

이 프로젝트는 제한된 접근 제어를 제공합니다.  
Vercel 환경 변수에 `CODE`를 다음 형식으로 추가하세요. value는 ,를 통해 구분된 비밀번호여야 합니다.:

```
code1,code2,code3
```

수정 후 반드시 다시 배포해야 적용됩니다.

## 환경 변수 (Environment Variables)

### `CODE` (선택 사항)

접속 비밀번호. 쉼표로 구분합니다.

### `OPENAI_API_KEY` (필수)

당신의 OpenAI API 키, 여러 개를 사용하려면 쉼표로 연결합니다.

### `BASE_URL` (선택 사항)

> 기본값: `https://api.openai.com`

> 예시: `http://your-openai-proxy.com`

OpenAI API 요청의 기본 URL을 재정의합니다.

### `OPENAI_ORG_ID` (선택 사항)

OpenAI organization ID를 지정합니다.

### `AZURE_URL` (선택 사항)

> 예시: https://{azure-resource-url}/openai

Azure 배포 URL입니다.

### `AZURE_API_KEY` (선택 사항)

Azure API 키입니다.

### `AZURE_API_VERSION` (선택 사항)

Azure API 버전입니다. [Azure 문서](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)에서 확인할 수 있습니다.

### `GOOGLE_API_KEY` (선택 사항)

Google Gemini Pro API 키입니다.

### `GOOGLE_URL` (선택 사항)

Google Gemini Pro API URL입니다.

### `ANTHROPIC_API_KEY` (선택 사항)

Anthropic Claude API 키입니다.

### `ANTHROPIC_API_VERSION` (선택 사항)

Anthropic Claude API 버전입니다.

### `ANTHROPIC_URL` (선택 사항)

Anthropic Claude API URL입니다.

### `BAIDU_API_KEY` (선택 사항)

Baidu API 키입니다.

### `BAIDU_SECRET_KEY` (선택 사항)

Baidu Secret 키입니다.

### `BAIDU_URL` (선택 사항)

Baidu API URL입니다.

### `BYTEDANCE_API_KEY` (선택 사항)

ByteDance API 키입니다.

### `BYTEDANCE_URL` (선택 사항)

ByteDance API URL입니다.

### `ALIBABA_API_KEY` (선택 사항)

Alibaba Cloud API 키입니다.

### `ALIBABA_URL` (선택 사항)

Alibaba Cloud API URL입니다.

### `IFLYTEK_URL` (선택 사항)

iflytek API URL입니다.

### `IFLYTEK_API_KEY` (선택 사항)

iflytek API 키입니다.

### `IFLYTEK_API_SECRET` (선택 사항)

iflytek API 시크릿입니다.

### `CHATGLM_API_KEY` (선택 사항)

ChatGLM API 키입니다.

### `CHATGLM_URL` (선택 사항)

ChatGLM API URL입니다.

### `DEEPSEEK_API_KEY` (선택 사항)

DeepSeek API 키입니다.

### `DEEPSEEK_URL` (선택 사항)

DeepSeek API URL입니다.

### `HIDE_USER_API_KEY` (선택 사항)

> 기본값: 비어 있음

사용자가 자신의 API 키를 입력하지 못하게 하려면 이 값을 1로 설정하세요.

### `DISABLE_GPT4` (선택 사항)

> 기본값: 비어 있음

사용자가 GPT-4를 사용하지 못하게 하려면 이 값을 1로 설정하세요.

### `ENABLE_BALANCE_QUERY` (선택 사항)

> 기본값: 비어 있음

사용자가 쿼리 잔액을 조회할 수 있도록 하려면 이 값을 1로 설정하세요.

### `DISABLE_FAST_LINK` (선택 사항)

> 기본값: 비어 있음

URL에서 설정을 파싱하는 기능을 비활성화하려면 이 값을 1로 설정하세요.

### `CUSTOM_MODELS` (선택 사항)

> 기본값: 비어 있음  
> 예시: `+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo`  
이는 `llama`, `claude-2`를 모델 리스트에 추가하고, `gpt-3.5-turbo`를 제거하며, `gpt-4-1106-preview`를 `gpt-4-turbo`로 표시합니다.

사용자 지정 모델 제어 시 `+`는 추가, `-`는 제거, `이름=표시이름`은 모델명 커스터마이징을 의미합니다. 쉼표로 구분하세요.

- `-all`은 기본 모델을 모두 비활성화  
- `+all`은 기본 모델을 모두 활성화

Azure 용법 예시: `modelName@Azure=deploymentName` → 배포 이름을 커스터마이징 가능  
> 예시: `+gpt-3.5-turbo@Azure=gpt35` → 리스트에 `gpt35(Azure)` 표시됨  
> Azure 모델만 사용할 경우: `-all,+gpt-3.5-turbo@Azure=gpt35`

ByteDance 용법 예시: `modelName@bytedance=deploymentName`  
> 예시: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` → `Doubao-lite-4k(ByteDance)`로 표시됨

### `DEFAULT_MODEL` (선택 사항)

기본 모델을 변경합니다.

### `VISION_MODELS` (선택 사항)

> 기본값: 비어 있음  
> 예시: `gpt-4-vision,claude-3-opus,my-custom-model`  
위의 모델들에 시각 기능을 부여합니다 (기본적으로 `"vision"`, `"claude-3"`, `"gemini-1.5"` 키워드를 포함한 모델은 자동 인식됨). 기본 모델 외에도 모델을 추가할 수 있습니다. 쉼표로 구분하세요.

### `WHITE_WEBDAV_ENDPOINTS` (선택 사항)

접속 허용할 WebDAV 서비스 주소를 늘리고자 할 때 사용합니다.

- 각 주소는 완전한 endpoint 여야 함: `https://xxxx/yyy`  
- 여러 주소는 `,`로 구분

### `DEFAULT_INPUT_TEMPLATE` (선택 사항)

설정 메뉴의 사용자 입력 전처리 구성 항목 초기화 시 사용할 기본 템플릿을 설정합니다.

### `STABILITY_API_KEY` (선택 사항)

Stability API 키입니다.

### `STABILITY_URL` (선택 사항)

Stability API URL을 커스터마이징합니다.

### `ENABLE_MCP` (선택 사항)

MCP (Model Context Protocol) 기능을 활성화합니다.

### `SILICONFLOW_API_KEY` (선택 사항)

SiliconFlow API 키입니다.

### `SILICONFLOW_URL` (선택 사항)

SiliconFlow API URL입니다.

### `AI302_API_KEY` (선택 사항)

302.AI API 키입니다.

### `AI302_URL` (선택 사항)

302.AI API URL입니다.

## 요구 사항 (Requirements)

NodeJS >= 18, Docker >= 20

## 개발 (Development)

[![Gitpod에서 열기](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

개발을 시작하기 전에 프로젝트 루트에 `.env.local` 파일을 만들고, 아래와 같이 API 키를 입력하세요:

```
OPENAI_API_KEY=<여기에 API 키 입력>

# OpenAI 서비스를 사용할 수 없는 경우 아래 BASE_URL 사용
BASE_URL=https://chatgpt1.nextweb.fun/api/proxy
```

### 로컬 개발 실행

```shell
# 1. Node.js와 Yarn을 먼저 설치
# 2. `.env.local` 파일에 환경 변수 설정
# 3. 실행
yarn install
yarn dev
```

## 배포 (Deployment)

### Docker (권장)

```shell
docker pull yidadaa/chatgpt-next-web

docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=your-password \
   yidadaa/chatgpt-next-web
```

서비스에 프록시를 사용하려면:

```shell
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=your-password \
   -e PROXY_URL=http://localhost:7890 \
   yidadaa/chatgpt-next-web
```

프록시에 인증이 필요한 경우:

```shell
-e PROXY_URL="http://127.0.0.1:7890 user pass"
```

MCP를 활성화하려면:

```
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=your-password \
   -e ENABLE_MCP=true \
   yidadaa/chatgpt-next-web
```

### 로컬 배포

콘솔에서 다음 명령을 실행하세요.

```shell
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh)
```

⚠️ 참고: 설치 중에 문제가 발생하면 Docker 배포를 사용하세요.

## 채팅 기록 동기화 (UpStash)

| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Español](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md)

## 문서 (Documentation)

> 더 많은 문서는 [docs](./docs) 디렉토리를 참고하세요.

- [Cloudflare 배포 가이드 (폐기됨)](./docs/cloudflare-pages-ko.md)
- [자주 묻는 질문](./docs/faq-ko.md)
- [새 번역 추가 방법](./docs/translation.md)
- [Vercel 사용법 (중문)](./docs/vercel-cn.md)
- [사용자 매뉴얼 (중문, 작성 중)](./docs/user-manual-cn.md)

## 번역 (Translation)

새로운 번역을 추가하고 싶다면, [이 문서](./docs/translation.md)를 읽어보세요.

## 후원 (Donation)

[Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa)

## 특별 감사 (Special Thanks)

### 기여자 (Contributors)

<a href="https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=ChatGPTNextWeb/ChatGPT-Next-Web" />
</a>

## 라이선스 (LICENSE)

[MIT](https://opensource.org/license/mit/)


================================================
FILE: app/api/302ai.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  AI302_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

// Server-side configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Edge route handler for 302.AI requests.
 *
 * Answers CORS preflight immediately, rejects unauthenticated callers
 * with 401, then proxies everything else to the 302.AI upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[302.AI Route] params ", params);

  // CORS preflight — acknowledge without doing any work.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Access-code / API-key validation.
  const authResult = auth(req, ModelProvider["302.AI"]);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[302.AI] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the incoming request to the 302.AI upstream API.
 *
 * Builds the upstream URL from the configured base URL (falling back to
 * AI302_BASE_URL), forwards the method, body and Authorization header,
 * optionally rejects models disallowed by CUSTOM_MODELS, and streams the
 * upstream response back. The upstream call is aborted after 10 minutes.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local API prefix ("/api/302ai") to get the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath["302.AI"], "");

  let baseUrl = serverConfig.ai302Url || AI302_BASE_URL;

  // Normalize the base URL: ensure a scheme, drop a trailing slash.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Hard upper bound of 10 minutes on the upstream request.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  // When CUSTOM_MODELS is configured, parse the JSON body and reject
  // requests for models disabled for this provider with a 403.
  if (serverConfig.customModels && req.body) {
    try {
      // Reading req.text() consumes the stream already attached to
      // fetchOptions.body, so re-assign the buffered string afterwards.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // Reject only when the model is explicitly unavailable.
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider["302.AI"] as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: on parse failure, forward the request as-is.
      console.error(`[302.AI] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    // Stream the upstream body straight through with adjusted headers.
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/[provider]/[...path]/route.ts
================================================
import { ApiPath } from "@/app/constant";
import { NextRequest } from "next/server";
import { handle as openaiHandler } from "../../openai";
import { handle as azureHandler } from "../../azure";
import { handle as googleHandler } from "../../google";
import { handle as anthropicHandler } from "../../anthropic";
import { handle as baiduHandler } from "../../baidu";
import { handle as bytedanceHandler } from "../../bytedance";
import { handle as alibabaHandler } from "../../alibaba";
import { handle as moonshotHandler } from "../../moonshot";
import { handle as stabilityHandler } from "../../stability";
import { handle as iflytekHandler } from "../../iflytek";
import { handle as deepseekHandler } from "../../deepseek";
import { handle as siliconflowHandler } from "../../siliconflow";
import { handle as xaiHandler } from "../../xai";
import { handle as chatglmHandler } from "../../glm";
import { handle as proxyHandler } from "../../proxy";
import { handle as ai302Handler } from "../../302ai";

/**
 * Dispatches an incoming /api/{provider}/... request to the matching
 * provider handler; unknown providers fall through to the generic proxy.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { provider: string; path: string[] } },
) {
  const apiPath = `/api/${params.provider}`;
  console.log(`[${params.provider} Route] params `, params);

  // Dispatch table mapping each known API path to its handler.
  // ApiPath.Tencent is intentionally absent: it is served by "/api/tencent".
  const providerHandlers: Record<
    string,
    (
      req: NextRequest,
      ctx: { params: { provider: string; path: string[] } },
    ) => Promise<Response>
  > = {
    [ApiPath.Azure]: azureHandler,
    [ApiPath.Google]: googleHandler,
    [ApiPath.Anthropic]: anthropicHandler,
    [ApiPath.Baidu]: baiduHandler,
    [ApiPath.ByteDance]: bytedanceHandler,
    [ApiPath.Alibaba]: alibabaHandler,
    [ApiPath.Moonshot]: moonshotHandler,
    [ApiPath.Stability]: stabilityHandler,
    [ApiPath.Iflytek]: iflytekHandler,
    [ApiPath.DeepSeek]: deepseekHandler,
    [ApiPath.XAI]: xaiHandler,
    [ApiPath.ChatGLM]: chatglmHandler,
    [ApiPath.SiliconFlow]: siliconflowHandler,
    [ApiPath.OpenAI]: openaiHandler,
    [ApiPath["302.AI"]]: ai302Handler,
  };

  const dispatch = providerHandlers[apiPath] ?? proxyHandler;
  return dispatch(req, { params });
}

// Next.js route-handler entry points: GET and POST share one dispatcher.
export const GET = handle;
export const POST = handle;

// Execute this route on the Edge runtime.
export const runtime = "edge";
// Preferred Vercel edge regions for this route.
export const preferredRegion = [
  "arn1",
  "bom1",
  "cdg1",
  "cle1",
  "cpt1",
  "dub1",
  "fra1",
  "gru1",
  "hnd1",
  "iad1",
  "icn1",
  "kix1",
  "lhr1",
  "pdx1",
  "sfo1",
  "sin1",
  "syd1",
];


================================================
FILE: app/api/alibaba.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  ALIBABA_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

// Server-side configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Edge route handler for Alibaba (Qwen) requests.
 *
 * Answers CORS preflight immediately, rejects unauthenticated callers
 * with 401, then proxies everything else to the Alibaba upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Alibaba Route] params ", params);

  // CORS preflight — acknowledge without doing any work.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Access-code / API-key validation.
  const authResult = auth(req, ModelProvider.Qwen);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Alibaba] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the incoming request to the Alibaba (DashScope) upstream API.
 *
 * Builds the upstream URL from the configured base URL (falling back to
 * ALIBABA_BASE_URL), forwards the method, body, Authorization and
 * X-DashScope-SSE headers, optionally rejects models disallowed by
 * CUSTOM_MODELS, and streams the upstream response back. The upstream
 * call is aborted after 10 minutes.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local API prefix ("/api/alibaba") to get the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Alibaba, "");

  let baseUrl = serverConfig.alibabaUrl || ALIBABA_BASE_URL;

  // Normalize the base URL: ensure a scheme, drop a trailing slash.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Hard upper bound of 10 minutes on the upstream request.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
      // SSE streaming toggle understood by DashScope; default to disabled.
      "X-DashScope-SSE": req.headers.get("X-DashScope-SSE") ?? "disable",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  // When CUSTOM_MODELS is configured, parse the JSON body and reject
  // requests for models disabled for this provider with a 403.
  if (serverConfig.customModels && req.body) {
    try {
      // Reading req.text() consumes the stream already attached to
      // fetchOptions.body, so re-assign the buffered string afterwards.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // Reject only when the model is explicitly unavailable.
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Alibaba as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: on parse failure, forward the request as-is.
      console.error(`[Alibaba] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    // Stream the upstream body straight through with adjusted headers.
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/anthropic.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  ANTHROPIC_BASE_URL,
  Anthropic,
  ApiPath,
  ServiceProvider,
  ModelProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";

// Allow-list of subpaths this route will proxy to Anthropic.
// NOTE(review): "ALLOWD" is a misspelling of "ALLOWED"; kept as-is so
// existing references stay valid.
const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);

/**
 * Edge route handler for Anthropic Claude requests.
 *
 * Answers CORS preflight, restricts requests to the allow-listed chat
 * paths (403 otherwise), validates auth (401 on failure), then proxies
 * to the Anthropic upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Anthropic Route] params ", params);

  // CORS preflight — acknowledge without doing any work.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Only the allow-listed chat endpoints may be requested through here.
  const subpath = params.path.join("/");
  if (!ALLOWD_PATH.has(subpath)) {
    console.log("[Anthropic Route] forbidden path ", subpath);
    return NextResponse.json(
      { error: true, msg: "you are not allowed to request " + subpath },
      { status: 403 },
    );
  }

  // Access-code / API-key validation.
  const authResult = auth(req, ModelProvider.Claude);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Anthropic] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

// Server-side configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Proxies a request to the Anthropic API (optionally via Cloudflare AI
 * Gateway), enforcing the server-side custom-model allow/deny list.
 *
 * Auth resolution order: explicit `x-api-key` header, then a Bearer
 * `Authorization` header, then the server-configured Anthropic key.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // These never change after initialization — declare as const.
  const authHeaderName = "x-api-key";
  const authValue =
    req.headers.get(authHeaderName) ||
    req.headers.get("Authorization")?.replaceAll("Bearer ", "").trim() ||
    serverConfig.anthropicApiKey ||
    "";

  const path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Anthropic, "");

  let baseUrl =
    serverConfig.anthropicUrl || serverConfig.baseUrl || ANTHROPIC_BASE_URL;

  // Normalize the base url: require a scheme, strip a trailing slash.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort long-running upstream calls after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  // try rebuild url, when using cloudflare ai gateway in server
  const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}${path}`);

  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      "Cache-Control": "no-store",
      "anthropic-dangerous-direct-browser-access": "true",
      [authHeaderName]: authValue,
      "anthropic-version":
        req.headers.get("anthropic-version") ||
        serverConfig.anthropicApiVersion ||
        Anthropic.Vision,
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // The request body stream is one-shot: read it fully as text so it
      // can be inspected here and still be re-sent upstream.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Anthropic as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON (or unreadable): log and proxy as-is.
      console.error(`[Anthropic] filter`, e);
    }
  }

  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/artifacts/route.ts
================================================
import md5 from "spark-md5";
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "@/app/config/server";

/**
 * Artifact store backed by Cloudflare Workers KV.
 *
 * POST: saves the raw request body under its md5 hash, optionally with a
 *       TTL (only applied when > 60s, KV's minimum); returns the hash id.
 * GET:  streams back the value stored under `?id=...`.
 */
async function handle(req: NextRequest, res: NextResponse) {
  const serverConfig = getServerSideConfig();
  const storeUrl = () =>
    `https://api.cloudflare.com/client/v4/accounts/${serverConfig.cloudflareAccountId}/storage/kv/namespaces/${serverConfig.cloudflareKVNamespaceId}`;
  const storeHeaders = () => ({
    Authorization: `Bearer ${serverConfig.cloudflareKVApiKey}`,
  });
  if (req.method === "POST") {
    const clonedBody = await req.text();
    // Content-addressed key: identical payloads share one entry.
    const hashedCode = md5.hash(clonedBody).trim();
    const body: {
      key: string;
      value: string;
      expiration_ttl?: number;
    } = {
      key: hashedCode,
      value: clonedBody,
    };
    // parseInt never throws, so the previous try/catch was dead code;
    // guard against NaN (unset/invalid TTL) explicitly instead.
    const ttl = parseInt(serverConfig.cloudflareKVTTL as string, 10);
    if (Number.isFinite(ttl) && ttl > 60) {
      body["expiration_ttl"] = ttl;
    }
    const res = await fetch(`${storeUrl()}/bulk`, {
      headers: {
        ...storeHeaders(),
        "Content-Type": "application/json",
      },
      method: "PUT",
      body: JSON.stringify([body]),
    });
    const result = await res.json();
    console.log("save data", result);
    if (result?.success) {
      return NextResponse.json(
        { code: 0, id: hashedCode, result },
        { status: res.status },
      );
    }
    return NextResponse.json(
      { error: true, msg: "Save data error" },
      { status: 400 },
    );
  }
  if (req.method === "GET") {
    const id = req?.nextUrl?.searchParams?.get("id");
    const res = await fetch(`${storeUrl()}/values/${id}`, {
      headers: storeHeaders(),
      method: "GET",
    });
    // Stream the stored value straight through, preserving upstream status.
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: res.headers,
    });
  }
  return NextResponse.json(
    { error: true, msg: "Invalid request" },
    { status: 400 },
  );
}

// Both GET (read) and POST (save) are served by the same handler.
export const POST = handle;
export const GET = handle;

// Run on the Edge runtime.
export const runtime = "edge";


================================================
FILE: app/api/auth.ts
================================================
import { NextRequest } from "next/server";
import { getServerSideConfig } from "../config/server";
import md5 from "spark-md5";
import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant";

/**
 * Best-effort client IP: prefer the platform-provided address, then the
 * `x-real-ip` header, then the first hop of `x-forwarded-for`.
 */
function getIP(req: NextRequest) {
  const direct = req.ip ?? req.headers.get("x-real-ip");
  if (direct) {
    return direct;
  }

  const forwardedFor = req.headers.get("x-forwarded-for");
  if (forwardedFor) {
    // x-forwarded-for is a comma-separated chain; the first entry is the
    // original client.
    return forwardedFor.split(",").at(0) ?? "";
  }

  return direct;
}

/**
 * Splits an Authorization header value into either a site access code
 * (recognized by its ACCESS_CODE_PREFIX) or a user-supplied API key.
 * Exactly one of the returned fields is non-empty (or both empty for "").
 */
function parseApiKey(bearToken: string) {
  const token = bearToken.trim().replaceAll("Bearer ", "").trim();

  if (token.startsWith(ACCESS_CODE_PREFIX)) {
    // Access-code mode: strip the prefix; no user key present.
    return { accessCode: token.slice(ACCESS_CODE_PREFIX.length), apiKey: "" };
  }

  // Plain API key supplied by the user.
  return { accessCode: "", apiKey: token };
}

/**
 * Authenticates an incoming API request.
 *
 * The Authorization header carries either a site access code (prefixed
 * with ACCESS_CODE_PREFIX) or the user's own API key. When no user key is
 * supplied (and the access code passes), the provider-specific server key
 * is injected into the Authorization header for downstream proxy handlers.
 *
 * @returns `{ error: false }` on success, or `{ error: true, msg }` when
 *          the access code is missing/wrong or user keys are disallowed.
 */
export function auth(req: NextRequest, modelProvider: ModelProvider) {
  const authToken = req.headers.get("Authorization") ?? "";

  // check if it is openai api key or user token
  const { accessCode, apiKey } = parseApiKey(authToken);

  const hashedCode = md5.hash(accessCode ?? "").trim();

  const serverConfig = getServerSideConfig();
  // NOTE(review): these logs write access codes and their hashes to server
  // logs — consider redacting in production.
  console.log("[Auth] allowed hashed codes: ", [...serverConfig.codes]);
  console.log("[Auth] got access code:", accessCode);
  console.log("[Auth] hashed access code:", hashedCode);
  console.log("[User IP] ", getIP(req));
  console.log("[Time] ", new Date().toLocaleString());

  if (serverConfig.needCode && !serverConfig.codes.has(hashedCode) && !apiKey) {
    return {
      error: true,
      msg: !accessCode ? "empty access code" : "wrong access code",
    };
  }

  if (serverConfig.hideUserApiKey && !!apiKey) {
    return {
      error: true,
      msg: "you are not allowed to access with your own api key",
    };
  }

  // if user does not provide an api key, inject system api key
  // (the redundant second getServerSideConfig() call that shadowed the
  // config above has been removed)
  if (!apiKey) {
    let systemApiKey: string | undefined;

    switch (modelProvider) {
      case ModelProvider.Stability:
        systemApiKey = serverConfig.stabilityApiKey;
        break;
      case ModelProvider.GeminiPro:
        systemApiKey = serverConfig.googleApiKey;
        break;
      case ModelProvider.Claude:
        systemApiKey = serverConfig.anthropicApiKey;
        break;
      case ModelProvider.Doubao:
        systemApiKey = serverConfig.bytedanceApiKey;
        break;
      case ModelProvider.Ernie:
        systemApiKey = serverConfig.baiduApiKey;
        break;
      case ModelProvider.Qwen:
        systemApiKey = serverConfig.alibabaApiKey;
        break;
      case ModelProvider.Moonshot:
        systemApiKey = serverConfig.moonshotApiKey;
        break;
      case ModelProvider.Iflytek:
        // iFlytek requires both halves of its credential pair.
        systemApiKey =
          serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret;
        break;
      case ModelProvider.DeepSeek:
        systemApiKey = serverConfig.deepseekApiKey;
        break;
      case ModelProvider.XAI:
        systemApiKey = serverConfig.xaiApiKey;
        break;
      case ModelProvider.ChatGLM:
        systemApiKey = serverConfig.chatglmApiKey;
        break;
      case ModelProvider.SiliconFlow:
        systemApiKey = serverConfig.siliconFlowApiKey;
        break;
      case ModelProvider.GPT:
      default:
        // GPT requests may actually target an Azure deployment.
        if (req.nextUrl.pathname.includes("azure/deployments")) {
          systemApiKey = serverConfig.azureApiKey;
        } else {
          systemApiKey = serverConfig.apiKey;
        }
    }

    if (systemApiKey) {
      console.log("[Auth] use system api key");
      req.headers.set("Authorization", `Bearer ${systemApiKey}`);
    } else {
      console.log("[Auth] admin did not provide an api key");
    }
  } else {
    console.log("[Auth] use user api key");
  }

  return {
    error: false,
  };
}


================================================
FILE: app/api/azure.ts
================================================
import { ModelProvider } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { requestOpenai } from "./common";

/**
 * Route handler for Azure OpenAI requests: answers CORS preflight,
 * authenticates, then delegates to the shared OpenAI proxy (which performs
 * the Azure deployment path rewriting).
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Azure Route] params ", params);

  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // (removed: an unused `subpath` local that was computed but never read)

  const authResult = auth(req, ModelProvider.GPT);
  if (authResult.error) {
    return NextResponse.json(authResult, {
      status: 401,
    });
  }

  try {
    return await requestOpenai(req);
  } catch (e) {
    console.error("[Azure] ", e);
    return NextResponse.json(prettyObject(e));
  }
}


================================================
FILE: app/api/baidu.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  BAIDU_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
import { getAccessToken } from "@/app/utils/baidu";

// Server-side (env-derived) configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Route handler for Baidu (ERNIE) proxy requests: answers CORS preflight,
 * authenticates, verifies the server credential pair, then forwards.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Baidu Route] params ", params);

  // CORS preflight — reply immediately, no auth required.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authResult = auth(req, ModelProvider.Ernie);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  // Baidu's token exchange needs both halves of the credential pair.
  if (!serverConfig.baiduApiKey || !serverConfig.baiduSecretKey) {
    return NextResponse.json(
      {
        error: true,
        message: `missing BAIDU_API_KEY or BAIDU_SECRET_KEY in server env vars`,
      },
      { status: 401 },
    );
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Baidu] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the request to the Baidu (ERNIE) API. Baidu authenticates via an
 * access_token obtained from the configured API key/secret pair and passed
 * as a query parameter rather than a header.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local API prefix to obtain the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Baidu, "");

  let baseUrl = serverConfig.baiduUrl || BAIDU_BASE_URL;

  // Normalize the base url: require a scheme, strip a trailing slash.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort long-running upstream calls after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  // Exchange the server key/secret pair for an access token.
  const { access_token } = await getAccessToken(
    serverConfig.baiduApiKey as string,
    serverConfig.baiduSecretKey as string,
  );
  const fetchUrl = `${baseUrl}${path}?access_token=${access_token}`;

  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Body stream is one-shot: read it as text so it can be inspected
      // here and still be re-sent upstream.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Baidu as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON (or unreadable): log and proxy as-is.
      console.error(`[Baidu] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/bytedance.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  BYTEDANCE_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

// Server-side (env-derived) configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Route handler for ByteDance (Doubao) proxy requests: answers CORS
 * preflight, authenticates, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[ByteDance Route] params ", params);

  // CORS preflight — reply immediately, no auth required.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authResult = auth(req, ModelProvider.Doubao);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[ByteDance] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the request to the ByteDance (Doubao) API, enforcing the
 * server-side custom-model allow/deny list before forwarding, and
 * streaming the upstream response back with sanitized headers.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local API prefix to obtain the upstream path.
  const proxyPath = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ByteDance, "");

  // Normalize the upstream base: require a scheme, strip a trailing slash.
  let upstreamBase = serverConfig.bytedanceUrl || BYTEDANCE_BASE_URL;
  if (!upstreamBase.startsWith("http")) {
    upstreamBase = `https://${upstreamBase}`;
  }
  if (upstreamBase.endsWith("/")) {
    upstreamBase = upstreamBase.slice(0, -1);
  }

  console.log("[Proxy] ", proxyPath);
  console.log("[Base Url]", upstreamBase);

  // Give the upstream call at most ten minutes before aborting.
  const abortTimer = setTimeout(() => controller.abort(), 10 * 60 * 1000);

  const fetchUrl = `${upstreamBase}${proxyPath}`;

  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Body stream is one-shot: buffer it so it can be both inspected
      // and re-sent upstream.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.ByteDance as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Non-JSON body: log and proxy unmodified.
      console.error(`[ByteDance] filter`, e);
    }
  }

  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(abortTimer);
  }
}


================================================
FILE: app/api/common.ts
================================================
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
import { getModelProvider, isModelNotavailableInServer } from "../utils/model";

// Server-side (env-derived) configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Proxies a request to OpenAI or Azure OpenAI (optionally via Cloudflare
 * AI Gateway), rewriting the path and auth header for Azure deployments
 * and enforcing the server-side custom-model allow/deny list.
 */
export async function requestOpenai(req: NextRequest) {
  const controller = new AbortController();

  // Azure requests are recognized by their deployment-style path.
  const isAzure = req.nextUrl.pathname.includes("azure/deployments");

  // Azure uses an `api-key` header with the bare key; OpenAI uses a
  // standard `Authorization: Bearer ...` header.
  // (was: `var authValue, authHeaderName = "";` — authValue implicitly
  // untyped and uninitialized)
  let authValue: string;
  let authHeaderName: string;
  if (isAzure) {
    authValue =
      req.headers
        .get("Authorization")
        ?.trim()
        .replaceAll("Bearer ", "")
        .trim() ?? "";

    authHeaderName = "api-key";
  } else {
    authValue = req.headers.get("Authorization") ?? "";
    authHeaderName = "Authorization";
  }

  let path = `${req.nextUrl.pathname}`.replaceAll("/api/openai/", "");

  let baseUrl =
    (isAzure ? serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL;

  // Normalize the base url: require a scheme, strip a trailing slash.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort long-running upstream calls after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  if (isAzure) {
    const azureApiVersion =
      req?.nextUrl?.searchParams?.get("api-version") ||
      serverConfig.azureApiVersion;
    baseUrl = baseUrl.split("/deployments").shift() as string;
    path = `${req.nextUrl.pathname.replaceAll(
      "/api/azure/",
      "",
    )}?api-version=${azureApiVersion}`;

    // Forward compatibility:
    // if display_name(deployment_name) not set, and '{deploy-id}' in AZURE_URL
    // then using default '{deploy-id}'
    if (serverConfig.customModels && serverConfig.azureUrl) {
      const modelName = path.split("/")[1];
      let realDeployName = "";
      serverConfig.customModels
        .split(",")
        .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName))
        .forEach((m) => {
          const [fullName, displayName] = m.split("=");
          const [_, providerName] = getModelProvider(fullName);
          if (providerName === "azure" && !displayName) {
            const [_, deployId] = (serverConfig?.azureUrl ?? "").split(
              "deployments/",
            );
            if (deployId) {
              realDeployName = deployId;
            }
          }
        });
      if (realDeployName) {
        console.log("[Replace with DeployId", realDeployName);
        path = path.replaceAll(modelName, realDeployName);
      }
    }
  }

  const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`);
  console.log("fetchUrl", fetchUrl);
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      "Cache-Control": "no-store",
      [authHeaderName]: authValue,
      ...(serverConfig.openaiOrgId && {
        "OpenAI-Organization": serverConfig.openaiOrgId,
      }),
    },
    method: req.method,
    body: req.body,
    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse gpt4 request
  if (serverConfig.customModels && req.body) {
    try {
      // Body stream is one-shot: buffer it so it can be both inspected
      // and re-sent upstream.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          [
            ServiceProvider.OpenAI,
            ServiceProvider.Azure,
            jsonBody?.model as string, // support provider-unspecified model
          ],
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      console.error("[OpenAI] gpt4 filter", e);
    }
  }

  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // Extract the OpenAI-Organization header from the response
    const openaiOrganizationHeader = res.headers.get("OpenAI-Organization");

    // Check if serverConfig.openaiOrgId is defined and not an empty string
    if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") {
      // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present
      console.log("[Org ID]", openaiOrganizationHeader);
    } else {
      console.log("[Org ID] is not set up.");
    }

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV)
    // Also, this is to prevent the header from being sent to the client
    if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") {
      newHeaders.delete("OpenAI-Organization");
    }

    // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
    // So if the streaming is disabled, we need to remove the content-encoding header
    // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header
    // The browser will try to decode the response with brotli and fail
    newHeaders.delete("content-encoding");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/config/route.ts
================================================
import { NextResponse } from "next/server";

import { getServerSideConfig } from "../../config/server";

// Server-side (env-derived) configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

// Danger! Do not hard code any secret value here!
// Warning! Do not write any sensitive information here!
const DANGER_CONFIG = {
  // Client-safe subset of server configuration: feature flags and model
  // lists only — never keys or secrets.
  needCode: serverConfig.needCode,
  hideUserApiKey: serverConfig.hideUserApiKey,
  disableGPT4: serverConfig.disableGPT4,
  hideBalanceQuery: serverConfig.hideBalanceQuery,
  disableFastLink: serverConfig.disableFastLink,
  customModels: serverConfig.customModels,
  defaultModel: serverConfig.defaultModel,
  visionModels: serverConfig.visionModels,
};

// Expose the config shape globally so client code can type the response.
declare global {
  type DangerConfig = typeof DANGER_CONFIG;
}

// Returns the client-safe configuration subset.
async function handle() {
  return NextResponse.json(DANGER_CONFIG);
}

export const GET = handle;
export const POST = handle;

// Run on the Edge runtime.
export const runtime = "edge";


================================================
FILE: app/api/deepseek.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  DEEPSEEK_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

// Server-side (env-derived) configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Route handler for DeepSeek proxy requests: answers CORS preflight,
 * authenticates, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[DeepSeek Route] params ", params);

  // CORS preflight — reply immediately, no auth required.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authResult = auth(req, ModelProvider.DeepSeek);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[DeepSeek] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the request to the DeepSeek API, enforcing the server-side
 * custom-model allow/deny list before forwarding.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local API prefix to obtain the upstream path.
  // (the previous comment mentioned "alibaba" — copy-paste leftover)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.DeepSeek, "");

  let baseUrl = serverConfig.deepseekUrl || DEEPSEEK_BASE_URL;

  // Normalize the base url: require a scheme, strip a trailing slash.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort long-running upstream calls after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Body stream is one-shot: read it as text so it can be inspected
      // here and still be re-sent upstream.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.DeepSeek as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON (or unreadable): log and proxy as-is.
      console.error(`[DeepSeek] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/glm.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  CHATGLM_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

// Server-side (env-derived) configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Route handler for Zhipu ChatGLM proxy requests: answers CORS preflight,
 * authenticates, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[GLM Route] params ", params);

  // CORS preflight — reply immediately, no auth required.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authResult = auth(req, ModelProvider.ChatGLM);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[GLM] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the request to the Zhipu ChatGLM API, enforcing the server-side
 * custom-model allow/deny list before forwarding.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local API prefix to obtain the upstream path.
  // (the previous comment mentioned "alibaba" — copy-paste leftover)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, "");

  let baseUrl = serverConfig.chatglmUrl || CHATGLM_BASE_URL;

  // Normalize the base url: require a scheme, strip a trailing slash.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort long-running upstream calls after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  console.log("[Fetch Url] ", fetchUrl);
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Body stream is one-shot: read it as text so it can be inspected
      // here and still be re-sent upstream.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.ChatGLM as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON (or unreadable): log and proxy as-is.
      console.error(`[GLM] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/google.ts
================================================
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { getServerSideConfig } from "@/app/config/server";
import { ApiPath, GEMINI_BASE_URL, ModelProvider } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";

// Server-side (env-derived) configuration, resolved once at module load.
const serverConfig = getServerSideConfig();

/**
 * Route handler for Google Gemini proxy requests: answers CORS preflight,
 * authenticates, resolves an API key, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { provider: string; path: string[] } },
) {
  console.log("[Google Route] params ", params);

  // CORS preflight — reply immediately, no auth required.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authResult = auth(req, ModelProvider.GeminiPro);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  // Key resolution: explicit x-goog-api-key header, then a Bearer token,
  // then the server-configured key.
  const bearToken =
    req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || "";
  const token = bearToken.trim().replaceAll("Bearer ", "").trim();
  const apiKey = token || serverConfig.googleApiKey;

  if (!apiKey) {
    return NextResponse.json(
      {
        error: true,
        message: `missing GOOGLE_API_KEY in server env vars`,
      },
      { status: 401 },
    );
  }

  try {
    return await request(req, apiKey);
  } catch (e) {
    console.error("[Google] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

// Both verbs are served by the same proxy handler.
export const GET = handle;
export const POST = handle;

// Run on the Edge runtime; pin to regions from which the Gemini API is
// reachable (it is geo-restricted in some locations).
export const runtime = "edge";
export const preferredRegion = [
  "bom1",
  "cle1",
  "cpt1",
  "gru1",
  "hnd1",
  "iad1",
  "icn1",
  "kix1",
  "pdx1",
  "sfo1",
  "sin1",
  "syd1",
];

/**
 * Proxies the incoming request to the Google Gemini API.
 *
 * Strips the local ApiPath.Google prefix, normalizes the configured base
 * URL, preserves the `alt=sse` streaming flag, and streams the upstream
 * response back with proxy-friendly headers.
 *
 * @param req    incoming Next.js request
 * @param apiKey resolved Gemini key (client-supplied or the server-side
 *   GOOGLE_API_KEY fallback computed by `handle`). Fix: the original body
 *   ignored this parameter and re-read the client headers, which dropped
 *   the server-side key fallback entirely; it is now forwarded upstream.
 */
async function request(req: NextRequest, apiKey: string) {
  const controller = new AbortController();

  let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL;

  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Google, "");

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort long-running upstream calls after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );
  // Preserve the "alt=sse" query flag so streaming responses stay streaming.
  const fetchUrl = `${baseUrl}${path}${
    req?.nextUrl?.searchParams?.get("alt") === "sse" ? "?alt=sse" : ""
  }`;

  console.log("[Fetch Url] ", fetchUrl);
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      "Cache-Control": "no-store",
      // Forward the key resolved by `handle` (previously the client headers
      // were re-read here, silently ignoring the server-side key fallback).
      "x-goog-api-key": apiKey,
    },
    method: req.method,
    body: req.body,
    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/iflytek.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  IFLYTEK_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
// iflytek

const serverConfig = getServerSideConfig();

/**
 * Route entry point for iFlytek Spark requests: CORS preflight, access
 * gate, then delegate to the proxying `request` helper.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Iflytek Route] params ", params);

  // Answer CORS preflight without touching auth.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const gate = auth(req, ModelProvider.Iflytek);
  if (gate.error) {
    return NextResponse.json(gate, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Iflytek] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the incoming request to the iFlytek Spark API.
 *
 * Strips the local ApiPath.Iflytek prefix, normalizes the configured base
 * URL, optionally rejects models disallowed by the CUSTOM_MODELS server
 * setting, and streams the upstream response back.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // iflytek use base url or just remove the path
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Iflytek, "");

  let baseUrl = serverConfig.iflytekUrl || IFLYTEK_BASE_URL;

  // Accept host-only config values by defaulting the scheme to https.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream call if it runs longer than 10 minutes;
  // always cleared in the finally block below.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body here consumes the stream handed to fetchOptions
      // above, so the decoded text must replace fetchOptions.body.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Iflytek as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: on parse failure, forward the request as-is.
      console.error(`[Iflytek] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/moonshot.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  MOONSHOT_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

const serverConfig = getServerSideConfig();

/**
 * Route entry point for Moonshot requests: CORS preflight, access gate,
 * then delegate to the proxying `request` helper.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Moonshot Route] params ", params);

  // Answer CORS preflight without touching auth.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const gate = auth(req, ModelProvider.Moonshot);
  if (gate.error) {
    return NextResponse.json(gate, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Moonshot] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the incoming request to the Moonshot API.
 *
 * Strips the local ApiPath.Moonshot prefix, normalizes the configured base
 * URL, optionally rejects models disallowed by CUSTOM_MODELS, and streams
 * the upstream response back.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // strip the local Moonshot API prefix (comment previously said
  // "alibaba" — copy-pasted from the Alibaba route)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Moonshot, "");

  let baseUrl = serverConfig.moonshotUrl || MOONSHOT_BASE_URL;

  // Accept host-only config values by defaulting the scheme to https.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream call if it runs longer than 10 minutes;
  // always cleared in the finally block below.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body here consumes the stream handed to fetchOptions
      // above, so the decoded text must replace fetchOptions.body.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Moonshot as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: on parse failure, forward the request as-is.
      console.error(`[Moonshot] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/openai.ts
================================================
import { type OpenAIListModelResponse } from "@/app/client/platforms/openai";
import { getServerSideConfig } from "@/app/config/server";
import { ModelProvider, OpenaiPath } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { requestOpenai } from "./common";

const ALLOWED_PATH = new Set(Object.values(OpenaiPath));

/**
 * Filters the upstream model list according to server config.
 *
 * When DISABLE_GPT4 is set, removes GPT-4-class models (ids starting with
 * gpt-4, chatgpt-4o, o1, or o3) while still allowing gpt-4o-mini.
 * Mutates and returns the same response object.
 */
function getModels(remoteModelRes: OpenAIListModelResponse) {
  const config = getServerSideConfig();

  if (config.disableGPT4) {
    const blockedPrefixes = ["gpt-4", "chatgpt-4o", "o1", "o3"];
    remoteModelRes.data = remoteModelRes.data.filter((m) => {
      const blocked = blockedPrefixes.some((p) => m.id.startsWith(p));
      // gpt-4o-mini is explicitly exempted from the GPT-4 block.
      return !blocked || m.id.startsWith("gpt-4o-mini");
    });
  }

  return remoteModelRes;
}

/**
 * Route entry point for OpenAI-compatible requests.
 *
 * Rejects paths outside the OpenaiPath allow-list, enforces the access
 * gate, proxies via requestOpenai, and post-filters the model list
 * endpoint according to server config.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[OpenAI Route] params ", params);

  // CORS preflight: answer immediately, no auth required.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const subpath = params.path.join("/");

  // Only whitelisted OpenAI endpoints may be proxied.
  if (!ALLOWED_PATH.has(subpath)) {
    console.log("[OpenAI Route] forbidden path ", subpath);
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + subpath,
      },
      {
        status: 403,
      },
    );
  }

  const authResult = auth(req, ModelProvider.GPT);
  if (authResult.error) {
    return NextResponse.json(authResult, {
      status: 401,
    });
  }

  try {
    const response = await requestOpenai(req);

    // list models
    // Re-serialize the model list so disabled models can be filtered out.
    if (subpath === OpenaiPath.ListModelPath && response.status === 200) {
      const resJson = (await response.json()) as OpenAIListModelResponse;
      const availableModels = getModels(resJson);
      return NextResponse.json(availableModels, {
        status: response.status,
      });
    }

    return response;
  } catch (e) {
    console.error("[OpenAI] ", e);
    return NextResponse.json(prettyObject(e));
  }
}


================================================
FILE: app/api/proxy.ts
================================================
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "@/app/config/server";

/**
 * Generic pass-through proxy: forwards the request to the URL given in the
 * `x-base-url` header, sanitizing headers on the way in and out. Used e.g.
 * for fetching DALL-E images, injecting the server-side OpenAI key when
 * the target is api.openai.com.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Proxy Route] params ", params);

  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }
  const serverConfig = getServerSideConfig();

  // remove path params from searchParams
  req.nextUrl.searchParams.delete("path");
  req.nextUrl.searchParams.delete("provider");

  const subpath = params.path.join("/");
  const fetchUrl = `${req.headers.get(
    "x-base-url",
  )}/${subpath}?${req.nextUrl.searchParams.toString()}`;

  // Drop hop-by-hop / browser-managed headers. Note: this intentionally
  // skips any header name *containing* "x-" or "sec-".
  const skipHeaders = ["connection", "host", "origin", "referer", "cookie"];
  const headers = new Headers(
    Array.from(req.headers.entries()).filter(
      ([name]) =>
        !name.includes("x-") &&
        !name.includes("sec-") &&
        !skipHeaders.includes(name),
    ),
  );

  // if dalle3 use openai api key
  const baseUrl = req.headers.get("x-base-url");
  if (baseUrl?.includes("api.openai.com")) {
    if (!serverConfig.apiKey) {
      return NextResponse.json(
        { error: "OpenAI API key not configured" },
        { status: 500 },
      );
    }
    headers.set("Authorization", `Bearer ${serverConfig.apiKey}`);
  }

  const controller = new AbortController();
  const fetchOptions: RequestInit = {
    headers,
    method: req.method,
    body: req.body,
    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // Give the upstream at most 10 minutes before aborting.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
    // So if the streaming is disabled, we need to remove the content-encoding header
    // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header
    // The browser will try to decode the response with brotli and fail
    newHeaders.delete("content-encoding");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/siliconflow.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  SILICONFLOW_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

const serverConfig = getServerSideConfig();

/**
 * Route entry point for SiliconFlow requests: CORS preflight, access gate,
 * then delegate to the proxying `request` helper.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[SiliconFlow Route] params ", params);

  // Answer CORS preflight without touching auth.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const gate = auth(req, ModelProvider.SiliconFlow);
  if (gate.error) {
    return NextResponse.json(gate, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[SiliconFlow] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the incoming request to the SiliconFlow API.
 *
 * Strips the local ApiPath.SiliconFlow prefix, normalizes the configured
 * base URL, optionally rejects models disallowed by CUSTOM_MODELS, and
 * streams the upstream response back.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // strip the local SiliconFlow API prefix (comment previously said
  // "alibaba" — copy-pasted from the Alibaba route)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.SiliconFlow, "");

  let baseUrl = serverConfig.siliconFlowUrl || SILICONFLOW_BASE_URL;

  // Accept host-only config values by defaulting the scheme to https.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream call if it runs longer than 10 minutes;
  // always cleared in the finally block below.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body here consumes the stream handed to fetchOptions
      // above, so the decoded text must replace fetchOptions.body.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.SiliconFlow as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: on parse failure, forward the request as-is.
      console.error(`[SiliconFlow] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/stability.ts
================================================
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "@/app/config/server";
import { ModelProvider, STABILITY_BASE_URL } from "@/app/constant";
import { auth } from "@/app/api/auth";

/**
 * Route handler for Stability AI image-generation requests.
 *
 * Validates access, resolves the Stability API key (client Bearer token or
 * server-side STABILITY_API_KEY), then proxies the request to the
 * configured Stability endpoint.
 *
 * Fix: the 10-minute abort timer used to be armed *before* the auth and
 * key checks, so the early 401 returns leaked a pending timer (and a later
 * spurious controller.abort()). It is now armed only once validation has
 * passed, just before the fetch, and is always cleared in `finally`.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Stability] params ", params);

  // CORS preflight: answer immediately, no auth required.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const controller = new AbortController();

  const serverConfig = getServerSideConfig();

  let baseUrl = serverConfig.stabilityUrl || STABILITY_BASE_URL;

  // Accept host-only config values by defaulting the scheme to https.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  let path = `${req.nextUrl.pathname}`.replaceAll("/api/stability/", "");

  console.log("[Stability Proxy] ", path);
  console.log("[Stability Base Url]", baseUrl);

  const authResult = auth(req, ModelProvider.Stability);

  if (authResult.error) {
    return NextResponse.json(authResult, {
      status: 401,
    });
  }

  // Prefer a client-supplied Bearer token; fall back to the server key.
  const bearToken = req.headers.get("Authorization") ?? "";
  const token = bearToken.trim().replaceAll("Bearer ", "").trim();

  const key = token ? token : serverConfig.stabilityApiKey;

  if (!key) {
    return NextResponse.json(
      {
        error: true,
        message: `missing STABILITY_API_KEY in server env vars`,
      },
      {
        status: 401,
      },
    );
  }

  // Arm the 10-minute abort timer only after validation succeeded, so the
  // early returns above cannot leak it.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}/${path}`;
  console.log("[Stability Url] ", fetchUrl);
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": req.headers.get("Content-Type") || "multipart/form-data",
      Accept: req.headers.get("Accept") || "application/json",
      Authorization: `Bearer ${key}`,
    },
    method: req.method,
    body: req.body,
    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/tencent/route.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import { TENCENT_BASE_URL, ModelProvider } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { getHeader } from "@/app/utils/tencent";

const serverConfig = getServerSideConfig();

/**
 * Route entry point for Tencent Hunyuan requests: CORS preflight, access
 * gate, then delegate to the signing/proxying `request` helper.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Tencent Route] params ", params);

  // Answer CORS preflight without touching auth.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const gate = auth(req, ModelProvider.Hunyuan);
  if (gate.error) {
    return NextResponse.json(gate, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Tencent] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

// Both verbs are served by the same proxy handler.
export const GET = handle;
export const POST = handle;

// Run on the Edge runtime across a broad set of regions.
export const runtime = "edge";
export const preferredRegion = [
  "arn1",
  "bom1",
  "cdg1",
  "cle1",
  "cpt1",
  "dub1",
  "fra1",
  "gru1",
  "hnd1",
  "iad1",
  "icn1",
  "kix1",
  "lhr1",
  "pdx1",
  "sfo1",
  "sin1",
  "syd1",
];

/**
 * Proxies the incoming request to the Tencent Hunyuan API.
 *
 * Unlike the other providers, Tencent requires request signing: the body
 * is read up front and passed to getHeader, which builds the signed
 * headers from the server-side secret id/key (presumably the TC3 signature
 * scheme — confirm in app/utils/tencent).
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  let baseUrl = serverConfig.tencentUrl || TENCENT_BASE_URL;

  // Accept host-only config values by defaulting the scheme to https.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Base Url]", baseUrl);

  // Abort the upstream call if it runs longer than 10 minutes;
  // always cleared in the finally block below.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  // Tencent requests always target the base URL; no sub-path is appended.
  const fetchUrl = baseUrl;

  // The body must be fully read here because the signature covers it.
  const body = await req.text();
  const headers = await getHeader(
    body,
    serverConfig.tencentSecretId as string,
    serverConfig.tencentSecretKey as string,
  );
  const fetchOptions: RequestInit = {
    headers,
    method: req.method,
    body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/api/upstash/[action]/[...key]/route.ts
================================================
import { NextRequest, NextResponse } from "next/server";

/**
 * Proxy for Upstash Redis REST calls used by cloud sync.
 *
 * Only `get`/`set` actions against `*.upstash.io` endpoints are permitted.
 *
 * Fixes: a malformed `endpoint` query parameter used to make
 * `new URL(endpoint)` throw, producing an unhandled 500 instead of the
 * intended 403; the unused `const [...key]` local is removed.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { action: string; key: string[] } },
) {
  const requestUrl = new URL(req.url);
  const endpoint = requestUrl.searchParams.get("endpoint");

  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // only allow to request to *.upstash.io
  let endpointHostname = "";
  if (endpoint) {
    try {
      endpointHostname = new URL(endpoint).hostname;
    } catch {
      // malformed endpoint URL: leave hostname empty so the 403 below fires
      // instead of letting the URL constructor crash the handler
    }
  }
  if (!endpoint || !endpointHostname.endsWith(".upstash.io")) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + params.key.join("/"),
      },
      {
        status: 403,
      },
    );
  }

  // only allow upstash get and set method
  if (params.action !== "get" && params.action !== "set") {
    console.log("[Upstash Route] forbidden action ", params.action);
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + params.action,
      },
      {
        status: 403,
      },
    );
  }

  const targetUrl = `${endpoint}/${params.action}/${params.key.join("/")}`;

  const method = req.method;
  // GET/HEAD requests must not carry a body per the fetch spec.
  const shouldNotHaveBody = ["get", "head"].includes(
    method?.toLowerCase() ?? "",
  );

  const fetchOptions: RequestInit = {
    headers: {
      authorization: req.headers.get("authorization") ?? "",
    },
    body: shouldNotHaveBody ? null : req.body,
    method,
    // @ts-ignore
    duplex: "half",
  };

  console.log("[Upstash Proxy]", targetUrl, fetchOptions);
  const fetchResult = await fetch(targetUrl, fetchOptions);

  console.log("[Any Proxy]", targetUrl, {
    status: fetchResult.status,
    statusText: fetchResult.statusText,
  });

  return fetchResult;
}

// All verbs (including preflight) are served by the same proxy handler.
export const POST = handle;
export const GET = handle;
export const OPTIONS = handle;

// Run on the Edge runtime.
export const runtime = "edge";


================================================
FILE: app/api/webdav/[...path]/route.ts
================================================
import { NextRequest, NextResponse } from "next/server";
import { STORAGE_KEY, internalAllowedWebDavEndpoints } from "../../../constant";
import { getServerSideConfig } from "@/app/config/server";

const config = getServerSideConfig();

// SSRF allow-list: built-in WebDAV endpoints plus any configured via
// ALLOWED_WEBDAV_ENDPOINTS, with blank entries filtered out.
const mergedAllowedWebDavEndpoints = [
  ...internalAllowedWebDavEndpoints,
  ...config.allowedWebDavEndpoints,
].filter((domain) => Boolean(domain.trim()));

// Parses a URL string, returning null (instead of throwing) when invalid.
const normalizeUrl = (url: string) => {
  try {
    return new URL(url);
  } catch (err) {
    return null;
  }
};

/**
 * WebDAV proxy used for cloud backup of chat data.
 *
 * Validates the target endpoint against the allow-list (SSRF protection),
 * then restricts operations to:
 *   - MKCOL of the backup folder,
 *   - GET/PUT of `${folder}/backup.json`.
 *
 * Fix: the GET and PUT restriction branches were byte-for-byte duplicates;
 * they are merged into one check with identical behavior and messages.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }
  const folder = STORAGE_KEY;
  const fileName = `${folder}/backup.json`;

  const requestUrl = new URL(req.url);
  let endpoint = requestUrl.searchParams.get("endpoint");
  // The real WebDAV verb travels in ?proxy_method because browsers/CDNs
  // may not pass MKCOL through directly.
  let proxy_method = requestUrl.searchParams.get("proxy_method") || req.method;

  // Validate the endpoint to prevent potential SSRF attacks
  if (
    !endpoint ||
    !mergedAllowedWebDavEndpoints.some((allowedEndpoint) => {
      const normalizedAllowedEndpoint = normalizeUrl(allowedEndpoint);
      const normalizedEndpoint = normalizeUrl(endpoint as string);

      // Hostname must match exactly and the path must stay under the
      // allowed endpoint's path prefix.
      return (
        normalizedEndpoint &&
        normalizedEndpoint.hostname === normalizedAllowedEndpoint?.hostname &&
        normalizedEndpoint.pathname.startsWith(
          normalizedAllowedEndpoint.pathname,
        )
      );
    })
  ) {
    return NextResponse.json(
      {
        error: true,
        msg: "Invalid endpoint",
      },
      {
        status: 400,
      },
    );
  }

  if (!endpoint?.endsWith("/")) {
    endpoint += "/";
  }

  const endpointPath = params.path.join("/");
  const targetPath = `${endpoint}${endpointPath}`;

  // only allow MKCOL, GET, PUT
  if (
    proxy_method !== "MKCOL" &&
    proxy_method !== "GET" &&
    proxy_method !== "PUT"
  ) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + targetPath,
      },
      {
        status: 403,
      },
    );
  }

  // for MKCOL request, only allow request ${folder}
  if (proxy_method === "MKCOL" && !targetPath.endsWith(folder)) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + targetPath,
      },
      {
        status: 403,
      },
    );
  }

  // for GET and PUT requests, only allow request ending with fileName
  // (previously two identical branches; merged with unchanged behavior)
  if (
    (proxy_method === "GET" || proxy_method === "PUT") &&
    !targetPath.endsWith(fileName)
  ) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + targetPath,
      },
      {
        status: 403,
      },
    );
  }

  const targetUrl = targetPath;

  const method = proxy_method || req.method;
  // GET/HEAD requests must not carry a body per the fetch spec.
  const shouldNotHaveBody = ["get", "head"].includes(
    method?.toLowerCase() ?? "",
  );

  const fetchOptions: RequestInit = {
    headers: {
      authorization: req.headers.get("authorization") ?? "",
    },
    body: shouldNotHaveBody ? null : req.body,
    redirect: "manual",
    method,
    // @ts-ignore
    duplex: "half",
  };

  let fetchResult;

  try {
    fetchResult = await fetch(targetUrl, fetchOptions);
  } finally {
    // Log the outcome even when fetch throws (status fields stay undefined).
    console.log(
      "[Any Proxy]",
      targetUrl,
      {
        method: method,
      },
      {
        status: fetchResult?.status,
        statusText: fetchResult?.statusText,
      },
    );
  }

  return fetchResult;
}

export const PUT = handle;
export const GET = handle;
export const OPTIONS = handle;

export const runtime = "edge";


================================================
FILE: app/api/xai.ts
================================================
import { getServerSideConfig } from "@/app/config/server";
import {
  XAI_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

const serverConfig = getServerSideConfig();

/**
 * Route entry point for xAI (Grok) requests: CORS preflight, access gate,
 * then delegate to the proxying `request` helper.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[XAI Route] params ", params);

  // Answer CORS preflight without touching auth.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const gate = auth(req, ModelProvider.XAI);
  if (gate.error) {
    return NextResponse.json(gate, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[XAI] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

/**
 * Proxies the incoming request to the xAI API.
 *
 * Strips the local ApiPath.XAI prefix, normalizes the configured base URL,
 * optionally rejects models disallowed by CUSTOM_MODELS, and streams the
 * upstream response back.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // strip the local XAI API prefix (comment previously said "alibaba" —
  // copy-pasted from the Alibaba route)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, "");

  let baseUrl = serverConfig.xaiUrl || XAI_BASE_URL;

  // Accept host-only config values by defaulting the scheme to https.
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream call if it runs longer than 10 minutes;
  // always cleared in the finally block below.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body here consumes the stream handed to fetchOptions
      // above, so the decoded text must replace fetchOptions.body.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.XAI as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: on parse failure, forward the request as-is.
      console.error(`[XAI] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}


================================================
FILE: app/client/api.ts
================================================
import { getClientConfig } from "../config/client";
import {
  ACCESS_CODE_PREFIX,
  ModelProvider,
  ServiceProvider,
} from "../constant";
import {
  ChatMessageTool,
  ChatMessage,
  ModelType,
  useAccessStore,
  useChatStore,
} from "../store";
import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai";
import { GeminiProApi } from "./platforms/google";
import { ClaudeApi } from "./platforms/anthropic";
import { ErnieApi } from "./platforms/baidu";
import { DoubaoApi } from "./platforms/bytedance";
import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { MoonshotApi } from "./platforms/moonshot";
import { SparkApi } from "./platforms/iflytek";
import { DeepSeekApi } from "./platforms/deepseek";
import { XAIApi } from "./platforms/xai";
import { ChatGLMApi } from "./platforms/glm";
import { SiliconflowApi } from "./platforms/siliconflow";
import { Ai302Api } from "./platforms/ai302";

// Chat roles accepted across providers; MessageRole is derived from this tuple.
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];

// Baseline OpenAI chat models and the available TTS models.
export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
export const TTSModels = ["tts-1", "tts-1-hd"] as const;
export type ChatModel = ModelType;

/** One part of a multimodal message: plain text or an image URL. */
export interface MultimodalContent {
  type: "text" | "image_url";
  text?: string;
  image_url?: {
    url: string;
  };
}

/** Alibaba DashScope variant: bare text/image fields instead of typed parts. */
export interface MultimodalContentForAlibaba {
  text?: string;
  image?: string;
}

/** A single chat message as sent to a provider client. */
export interface RequestMessage {
  role: MessageRole;
  content: string | MultimodalContent[];
}

/** Per-request model settings shared by all provider clients. */
export interface LLMConfig {
  model: string;
  providerName?: string;
  temperature?: number;
  top_p?: number;
  stream?: boolean;
  presence_penalty?: number;
  frequency_penalty?: number;
  // DALL·E image-generation options (only meaningful for OpenAI image models).
  size?: DalleRequestPayload["size"];
  quality?: DalleRequestPayload["quality"];
  style?: DalleRequestPayload["style"];
}

/** Arguments for a text-to-speech request. */
export interface SpeechOptions {
  model: string;
  input: string;
  voice: string;
  response_format?: string;
  speed?: number;
  // Receives the AbortController so the caller can cancel the request.
  onController?: (controller: AbortController) => void;
}

/** Arguments and lifecycle callbacks for a single chat request. */
export interface ChatOptions {
  messages: RequestMessage[];
  config: LLMConfig;

  // Streaming progress callback; receives the message text and latest chunk.
  onUpdate?: (message: string, chunk: string) => void;
  // Called once with the final message text and the underlying Response.
  onFinish: (message: string, responseRes: Response) => void;
  onError?: (err: Error) => void;
  // Receives the AbortController so the caller can cancel the request.
  onController?: (controller: AbortController) => void;
  // Hooks fired around plugin tool-call execution.
  onBeforeTool?: (tool: ChatMessageTool) => void;
  onAfterTool?: (tool: ChatMessageTool) => void;
}

/** Token usage report: amount used vs. total quota. */
export interface LLMUsage {
  used: number;
  total: number;
}

/** A model entry as listed in the UI; `sorted` is an ordering weight. */
export interface LLMModel {
  name: string;
  displayName?: string;
  available: boolean;
  provider: LLMModelProvider;
  sorted: number;
}

/** Identity of a model's provider; `sorted` is an ordering weight. */
export interface LLMModelProvider {
  id: string;
  providerName: string;
  providerType: string;
  sorted: number;
}

/**
 * Contract every platform client implements: chat completion, speech
 * synthesis, usage reporting, and model listing.
 */
export abstract class LLMApi {
  abstract chat(options: ChatOptions): Promise<void>;
  abstract speech(options: SpeechOptions): Promise<ArrayBuffer>;
  abstract usage(): Promise<LLMUsage>;
  abstract models(): Promise<LLMModel[]>;
}

// ---------------------------------------------------------------------------
// Legacy provider-description types. Not referenced anywhere in this chunk;
// NOTE(review): these look like leftovers from an earlier API design —
// confirm they are unused elsewhere before removing.
// ---------------------------------------------------------------------------
type ProviderName = "openai" | "azure" | "claude" | "palm";

interface Model {
  name: string;
  provider: ProviderName;
  ctxlen: number;
}

interface ChatProvider {
  name: ProviderName;
  apiConfig: {
    baseUrl: string;
    apiKey: string;
    summaryModel: Model;
  };
  models: Model[];

  chat: () => void;
  usage: () => void;
}

/**
 * Facade over the per-provider LLM clients. Picks the concrete
 * implementation for the given ModelProvider and exposes it as `llm`.
 */
export class ClientApi {
  public llm: LLMApi;

  constructor(provider: ModelProvider = ModelProvider.GPT) {
    // Lazy factory table: only the selected provider gets instantiated.
    const factories: Partial<Record<ModelProvider, () => LLMApi>> = {
      [ModelProvider.GeminiPro]: () => new GeminiProApi(),
      [ModelProvider.Claude]: () => new ClaudeApi(),
      [ModelProvider.Ernie]: () => new ErnieApi(),
      [ModelProvider.Doubao]: () => new DoubaoApi(),
      [ModelProvider.Qwen]: () => new QwenApi(),
      [ModelProvider.Hunyuan]: () => new HunyuanApi(),
      [ModelProvider.Moonshot]: () => new MoonshotApi(),
      [ModelProvider.Iflytek]: () => new SparkApi(),
      [ModelProvider.DeepSeek]: () => new DeepSeekApi(),
      [ModelProvider.XAI]: () => new XAIApi(),
      [ModelProvider.ChatGLM]: () => new ChatGLMApi(),
      [ModelProvider.SiliconFlow]: () => new SiliconflowApi(),
      [ModelProvider["302.AI"]]: () => new Ai302Api(),
    };
    // Anything unmapped (GPT and friends) falls back to the OpenAI client.
    const create = factories[provider] ?? (() => new ChatGPTApi());
    this.llm = create();
  }

  config() {}

  prompts() {}

  masks() {}

  /**
   * Uploads a conversation to ShareGPT and returns the public share URL,
   * or undefined when the upload does not yield an id.
   */
  async share(messages: ChatMessage[], avatarUrl: string | null = null) {
    const msgs = messages.map((m) => ({
      from: m.role === "user" ? "human" : "gpt",
      value: m.content,
    }));
    msgs.push({
      from: "human",
      value:
        "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
    });
    // To developers of derivative projects: for the benefit of open-source
    // LLMs, do not modify the message above — it is used for later data
    // cleaning. Please do not modify this message.

    console.log("[Share]", messages, msgs);
    const clientConfig = getClientConfig();
    // The desktop app can reach ShareGPT directly; the browser goes through
    // the local proxy route to avoid CORS.
    const proxyUrl = "/sharegpt";
    const rawUrl = "https://sharegpt.com/api/conversations";
    const shareUrl = clientConfig?.isApp ? rawUrl : proxyUrl;

    const res = await fetch(shareUrl, {
      body: JSON.stringify({
        avatarUrl,
        items: msgs,
      }),
      headers: {
        "Content-Type": "application/json",
      },
      method: "POST",
    });

    const resJson = await res.json();
    console.log("[Share]", resJson);
    if (resJson.id) {
      return `https://shareg.pt/${resJson.id}`;
    }
  }
}

/**
 * Formats an API key as an Authorization header value.
 * Returns "" for an empty key; otherwise the trimmed key, prefixed with
 * "Bearer " unless noBearer is set (Azure/Anthropic/Google-style headers
 * carry the bare key).
 */
export function getBearerToken(
  apiKey: string,
  noBearer: boolean = false,
): string {
  if (!validString(apiKey)) {
    return "";
  }
  const prefix = noBearer ? "" : "Bearer ";
  return prefix + apiKey.trim();
}

/** True when x is a non-empty string. */
export function validString(x: string): boolean {
  return !!x && x.length > 0;
}

/**
 * Builds the HTTP headers for an LLM request from the current session's
 * provider and the credentials in the access store.
 *
 * - Azure uses "api-key", Anthropic "x-api-key", Google "x-goog-api-key";
 *   all other providers use "Authorization: Bearer <key>".
 * - If no provider key is configured but server-side access control is
 *   enabled, the user's access code (prefixed with ACCESS_CODE_PREFIX) is
 *   sent as the bearer token instead.
 * - Baidu requests issued from the desktop app carry no auth header at all.
 *
 * @param ignoreHeaders when true, omit the default JSON Content-Type and
 *   Accept headers (for non-JSON payloads).
 */
export function getHeaders(ignoreHeaders: boolean = false) {
  const accessStore = useAccessStore.getState();
  const chatStore = useChatStore.getState();
  let headers: Record<string, string> = {};
  if (!ignoreHeaders) {
    headers = {
      "Content-Type": "application/json",
      Accept: "application/json",
    };
  }

  const clientConfig = getClientConfig();

  // Derives one flag per provider (at most one is true — they all compare
  // the same providerName) plus the matching API key.
  function getConfig() {
    const modelConfig = chatStore.currentSession().mask.modelConfig;
    const isGoogle = modelConfig.providerName === ServiceProvider.Google;
    const isAzure = modelConfig.providerName === ServiceProvider.Azure;
    const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
    // was `==`: switched to strict equality for consistency with every
    // sibling check (identical behavior for these string comparisons)
    const isBaidu = modelConfig.providerName === ServiceProvider.Baidu;
    const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance;
    const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
    const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
    const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
    const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek;
    const isXAI = modelConfig.providerName === ServiceProvider.XAI;
    const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
    const isSiliconFlow =
      modelConfig.providerName === ServiceProvider.SiliconFlow;
    const isAI302 = modelConfig.providerName === ServiceProvider["302.AI"];
    const isEnabledAccessControl = accessStore.enabledAccessControl();
    // Select the provider-specific key; iFlytek needs both key and secret
    // joined by ":". Unmatched providers fall back to the OpenAI key.
    const apiKey = isGoogle
      ? accessStore.googleApiKey
      : isAzure
      ? accessStore.azureApiKey
      : isAnthropic
      ? accessStore.anthropicApiKey
      : isByteDance
      ? accessStore.bytedanceApiKey
      : isAlibaba
      ? accessStore.alibabaApiKey
      : isMoonshot
      ? accessStore.moonshotApiKey
      : isXAI
      ? accessStore.xaiApiKey
      : isDeepSeek
      ? accessStore.deepseekApiKey
      : isChatGLM
      ? accessStore.chatglmApiKey
      : isSiliconFlow
      ? accessStore.siliconflowApiKey
      : isIflytek
      ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret
        ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret
        : ""
      : isAI302
      ? accessStore.ai302ApiKey
      : accessStore.openaiApiKey;
    return {
      isGoogle,
      isAzure,
      isAnthropic,
      isBaidu,
      isByteDance,
      isAlibaba,
      isMoonshot,
      isIflytek,
      isDeepSeek,
      isXAI,
      isChatGLM,
      isSiliconFlow,
      isAI302,
      apiKey,
      isEnabledAccessControl,
    };
  }

  // Header name differs only for Azure, Anthropic, and Google.
  function getAuthHeader(): string {
    return isAzure
      ? "api-key"
      : isAnthropic
      ? "x-api-key"
      : isGoogle
      ? "x-goog-api-key"
      : "Authorization";
  }

  const {
    isGoogle,
    isAzure,
    isAnthropic,
    isBaidu,
    isByteDance,
    isAlibaba,
    isMoonshot,
    isIflytek,
    isDeepSeek,
    isXAI,
    isChatGLM,
    isSiliconFlow,
    isAI302,
    apiKey,
    isEnabledAccessControl,
  } = getConfig();
  // when using baidu api in app, not set auth header
  if (isBaidu && clientConfig?.isApp) return headers;

  const authHeader = getAuthHeader();

  // Azure/Anthropic/Google headers carry the bare key (no "Bearer " prefix).
  const bearerToken = getBearerToken(
    apiKey,
    isAzure || isAnthropic || isGoogle,
  );

  if (bearerToken) {
    headers[authHeader] = bearerToken;
  } else if (isEnabledAccessControl && validString(accessStore.accessCode)) {
    headers["Authorization"] = getBearerToken(
      ACCESS_CODE_PREFIX + accessStore.accessCode,
    );
  }

  return headers;
}

/**
 * Maps a ServiceProvider to its ModelProvider and returns a ClientApi
 * wired to the matching platform client. Unmapped providers (e.g. OpenAI,
 * Azure) fall back to the default GPT client.
 */
export function getClientApi(provider: ServiceProvider): ClientApi {
  const mapping: Partial<Record<ServiceProvider, ModelProvider>> = {
    [ServiceProvider.Google]: ModelProvider.GeminiPro,
    [ServiceProvider.Anthropic]: ModelProvider.Claude,
    [ServiceProvider.Baidu]: ModelProvider.Ernie,
    [ServiceProvider.ByteDance]: ModelProvider.Doubao,
    [ServiceProvider.Alibaba]: ModelProvider.Qwen,
    [ServiceProvider.Tencent]: ModelProvider.Hunyuan,
    [ServiceProvider.Moonshot]: ModelProvider.Moonshot,
    [ServiceProvider.Iflytek]: ModelProvider.Iflytek,
    [ServiceProvider.DeepSeek]: ModelProvider.DeepSeek,
    [ServiceProvider.XAI]: ModelProvider.XAI,
    [ServiceProvider.ChatGLM]: ModelProvider.ChatGLM,
    [ServiceProvider.SiliconFlow]: ModelProvider.SiliconFlow,
    [ServiceProvider["302.AI"]]: ModelProvider["302.AI"],
  };
  return new ClientApi(mapping[provider] ?? ModelProvider.GPT);
}


================================================
FILE: app/client/controller.ts
================================================
// Tracks the AbortController of every in-flight streaming message,
// keyed by "sessionId,messageId".
export const ChatControllerPool = {
  controllers: {} as Record<string, AbortController>,

  // Registers a controller for a message and returns its pool key.
  addController(
    sessionId: string,
    messageId: string,
    controller: AbortController,
  ) {
    const poolKey = this.key(sessionId, messageId);
    this.controllers[poolKey] = controller;
    return poolKey;
  },

  // Aborts one message's request, if it is still registered.
  stop(sessionId: string, messageId: string) {
    this.controllers[this.key(sessionId, messageId)]?.abort();
  },

  // Aborts every registered request.
  stopAll() {
    for (const controller of Object.values(this.controllers)) {
      controller.abort();
    }
  },

  // True while at least one request is registered.
  hasPending() {
    return Object.keys(this.controllers).length > 0;
  },

  // Drops a controller from the pool without aborting it.
  remove(sessionId: string, messageId: string) {
    delete this.controllers[this.key(sessionId, messageId)];
  },

  // Builds the pool key for a session/message pair.
  key(sessionId: string, messageIndex: string) {
    return `${sessionId},${messageIndex}`;
  },
};


================================================
FILE: app/client/platforms/ai302.ts
================================================
"use client";

import {
  ApiPath,
  AI302_BASE_URL,
  DEFAULT_MODELS,
  AI302,
} from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  isVisionModel,
  getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";

import { fetch } from "@/app/utils/stream";
/** Response shape of 302.AI's OpenAI-compatible model-list endpoint. */
export interface Ai302ListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

/**
 * Client for the 302.AI aggregation service, which exposes an
 * OpenAI-compatible chat-completion API. Mirrors the structure of the
 * other platform clients in this directory.
 */
export class Ai302Api implements LLMApi {
  // When true, models() short-circuits to DEFAULT_MODELS instead of
  // calling the remote list-models endpoint.
  private disableListModels = false;

  /**
   * Resolves a relative API path to a full URL. Precedence: user-configured
   * custom endpoint -> AI302_BASE_URL (desktop app) -> the ApiPath["302.AI"]
   * browser proxy route. A bare host is prefixed with https://.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.ai302Url;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath["302.AI"];
      baseUrl = isApp ? AI302_BASE_URL : apiPath;
    }

    // Drop a single trailing slash so the join below yields one separator.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    // Not an absolute URL and not the internal proxy path: assume https host.
    if (
      !baseUrl.startsWith("http") &&
      !baseUrl.startsWith(ApiPath["302.AI"])
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Extracts the assistant text from an OpenAI-shaped completion response.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  // Speech synthesis is not supported by this provider client.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Sends a chat-completion request. When options.config.stream is set,
   * streams via streamWithThink, surfacing `reasoning_content` deltas as
   * "thinking" output and accumulating incremental tool calls; otherwise
   * performs a single fetch and reports the result via options.onFinish.
   */
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        // Strip thinking sections from prior assistant turns before resending.
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        // Vision models keep (preprocessed) image parts; everything else
        // collapses to plain text.
        const content = visionModel
          ? await preProcessImageContent(v.content)
          : getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    // Session mask config overrides app defaults; the explicit request
    // config wins over both.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
    };

    // NOTE(review): the log label says "openai" but this is the 302.AI
    // client — likely a copy-paste leftover from openai.ts.
    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(AI302.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // console.log(chatPayload);

      // Use extended timeout for thinking models as they typically require more processing time
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );
      // NOTE(review): on the streaming path below this timer is never
      // cleared, so it acts as an overall deadline for the stream via the
      // shared controller — confirm that is intended.

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call carries its id, type, and name.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Later chunks only append to the arguments string.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // Append the tool-call message and its results to the history
            // before the follow-up request.
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  // Usage reporting is not implemented for this provider.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  /** Fetches the remote model list; returns [] when the response has no data. */
  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(AI302.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as Ai302ListModelResponse;
    const chatModels = resJson.data;
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    let seq = 1000; // keep ordering consistent with constant.ts
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      sorted: seq++,
      provider: {
        id: "ai302",
        providerName: "302.AI",
        providerType: "ai302",
        sorted: 15,
      },
    }));
  }
}


================================================
FILE: app/client/platforms/alibaba.ts
================================================
"use client";
import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import {
  preProcessImageContentForAlibabaDashScope,
  streamWithThink,
} from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
  MultimodalContent,
  MultimodalContentForAlibaba,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  getTimeoutMSByModel,
  isVisionModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

/**
 * OpenAI-compatible model-list response shape. Not referenced by QwenApi
 * in this file (its models() returns an empty list); kept for API parity
 * with the other platform clients.
 */
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

/** DashScope request `input`: the conversation messages. */
interface RequestInput {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
}
/** DashScope generation parameters. */
interface RequestParam {
  result_format: string;
  // When true, each SSE event carries only the newly generated delta.
  incremental_output?: boolean;
  temperature: number;
  repetition_penalty?: number;
  top_p: number;
  max_tokens?: number;
}
/** Full DashScope chat request body: { model, input, parameters }. */
interface RequestPayload {
  model: string;
  input: RequestInput;
  parameters: RequestParam;
}

/**
 * Client for Alibaba DashScope (Qwen models). Uses DashScope's native
 * request shape ({ model, input, parameters }) instead of the OpenAI
 * format, and toggles server-sent events via the X-DashScope-SSE header.
 */
export class QwenApi implements LLMApi {
  /**
   * Resolves a relative API path to a full URL. Precedence: user-configured
   * custom endpoint -> ALIBABA_BASE_URL (desktop app) -> the ApiPath.Alibaba
   * browser proxy route. A bare host is prefixed with https://.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.alibabaUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba;
    }

    // Drop a single trailing slash so the join below yields one separator.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Alibaba)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Extracts the assistant text from a DashScope "message" format response.
  extractMessage(res: any) {
    return res?.output?.choices?.at(0)?.message?.content ?? "";
  }

  // Speech synthesis is not supported by this provider client.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Sends a chat request to DashScope. When streaming, uses streamWithThink
   * to surface `reasoning_content` deltas as "thinking" output and to
   * accumulate incremental tool calls; otherwise performs a single fetch
   * and reports the result via options.onFinish.
   */
  async chat(options: ChatOptions) {
    // Session mask config overrides app defaults; the request's model wins.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const visionModel = isVisionModel(options.config.model);

    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      // Vision models keep (DashScope-preprocessed) image parts; assistant
      // turns are stripped of thinking sections; all else becomes plain text.
      const content = (
        visionModel
          ? await preProcessImageContentForAlibabaDashScope(v.content)
          : v.role === "assistant"
          ? getMessageTextContentWithoutThinking(v)
          : getMessageTextContent(v)
      ) as any;

      messages.push({ role: v.role, content });
    }

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      model: modelConfig.model,
      input: {
        messages,
      },
      parameters: {
        result_format: "message",
        incremental_output: shouldStream,
        temperature: modelConfig.temperature,
        // max_tokens: modelConfig.max_tokens,
        top_p: modelConfig.top_p === 1 ? 0.99 : modelConfig.top_p, // Qwen requires top_p < 1, so clamp 1 down to 0.99
      },
    };

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      // DashScope switches between SSE and plain JSON via this header.
      const headers = {
        ...getHeaders(),
        "X-DashScope-SSE": shouldStream ? "enable" : "disable",
      };

      const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: headers,
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );
      // NOTE(review): on the streaming path below this timer is never
      // cleared, so it acts as an overall deadline for the stream via the
      // shared controller — confirm that is intended.

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          headers,
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.output.choices as Array<{
              message: {
                content: string | null | MultimodalContentForAlibaba[];
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

            // Events without choices contribute nothing.
            if (!choices?.length) return { isThinking: false, content: "" };

            const tool_calls = choices[0]?.message?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call carries its id, type, and name.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Later chunks only append to the arguments string.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }

            const reasoning = choices[0]?.message?.reasoning_content;
            const content = choices[0]?.message?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                // Multimodal content arrives as parts; join their text fields.
                content: Array.isArray(content)
                  ? content.map((item) => item.text).join(",")
                  : content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // Append the tool-call message and its results to the history
            // before the follow-up request.
            requestPayload?.input?.messages?.splice(
              requestPayload?.input?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  // Usage reporting is not implemented for this provider.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Model listing is not implemented for DashScope.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
// Re-export the DashScope constants for consumers of this module.
export { Alibaba };


================================================
FILE: app/client/platforms/anthropic.ts
================================================
import { Anthropic, ApiPath } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
  ChatMessageTool,
} from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { ANTHROPIC_BASE_URL } from "@/app/constant";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { preProcessImageContent, stream } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

/** One Claude content block: raw text, or an image with an inline source. */
export type MultiBlockContent = {
  type: "image" | "text";
  source?: {
    type: string;
    media_type: string;
    data: string;
  };
  text?: string;
};

/** A message in Claude's messages API; roles come from ClaudeMapper. */
export type AnthropicMessage = {
  role: (typeof ClaudeMapper)[keyof typeof ClaudeMapper];
  content: string | MultiBlockContent[];
};

/** Request body for Claude's messages API. */
export interface AnthropicChatRequest {
  model: string; // The model that will complete your prompt.
  messages: AnthropicMessage[]; // The prompt that you want Claude to complete.
  max_tokens: number; // The maximum number of tokens to generate before stopping.
  stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
  temperature?: number; // Amount of randomness injected into the response.
  top_p?: number; // Use nucleus sampling.
  top_k?: number; // Only sample from the top K options for each subsequent token.
  metadata?: object; // An object describing metadata about the request.
  stream?: boolean; // Whether to incrementally stream the response using server-sent events.
}

/** Request body for Claude's legacy text-completions API. */
export interface ChatRequest {
  model: string; // The model that will complete your prompt.
  prompt: string; // The prompt that you want Claude to complete.
  max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping.
  stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
  temperature?: number; // Amount of randomness injected into the response.
  top_p?: number; // Use nucleus sampling.
  top_k?: number; // Only sample from the top K options for each subsequent token.
  metadata?: object; // An object describing metadata about the request.
  stream?: boolean; // Whether to incrementally stream the response using server-sent events.
}

/** Response of the legacy text-completions API. */
export interface ChatResponse {
  completion: string;
  stop_reason: "stop_sequence" | "max_tokens";
  model: string;
}

/** Streaming variant of ChatResponse, with per-event metadata. */
export type ChatStreamResponse = ChatResponse & {
  stop?: string;
  log_id: string;
};

// Maps internal chat roles to Claude roles. System messages are sent with
// the "user" role in this mapping.
const ClaudeMapper = {
  assistant: "assistant",
  user: "user",
  system: "user",
} as const;

// Claude model names. Fixed: misplaced quotes previously made this a
// single-element array ["claude-2, claude-instant-1"] instead of two names.
// NOTE(review): this constant is shadowed by a local `keys` inside chat()
// and appears otherwise unused in this module — confirm before removing.
const keys = ["claude-2", "claude-instant-1"];

/**
 * Anthropic (Claude) chat client implementing the LLMApi contract.
 *
 * Converts the app's message history into Claude's alternating
 * user/assistant turn format, expands cached data-URL images into
 * Claude image blocks for vision models, supports plugin tool use,
 * and handles both streaming (SSE) and one-shot requests.
 */
export class ClaudeApi implements LLMApi {
  // Text-to-speech is not supported for this provider.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  // Extract the assistant text from a non-streaming Messages API
  // response: the `text` field of the first content block.
  extractMessage(res: any) {
    console.log("[Response] claude response: ", res);

    return res?.content?.[0]?.text;
  }
  async chat(options: ChatOptions): Promise<void> {
    const visionModel = isVisionModel(options.config.model);

    const accessStore = useAccessStore.getState();

    const shouldStream = !!options.config.stream;

    // Effective model config: app defaults, overlaid by the current
    // session's mask, overlaid by the model chosen for this request.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    // try get base64image from local cache image_url
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = await preProcessImageContent(v.content);
      messages.push({ role: v.role, content });
    }

    // Roles that count as a "user" turn for the alternation check below.
    const keys = ["system", "user"];

    // roles must alternate between "user" and "assistant" in claude, so add a fake assistant message between two user messages
    for (let i = 0; i < messages.length - 1; i++) {
      const message = messages[i];
      const nextMessage = messages[i + 1];

      if (keys.includes(message.role) && keys.includes(nextMessage.role)) {
        // Replace the entry with a [message, placeholder-assistant] pair;
        // the .flat() below restores a flat list.
        messages[i] = [
          message,
          {
            role: "assistant",
            content: ";",
          },
        ] as any;
      }
    }

    // Build the Anthropic-format message list: drop empty messages, map
    // roles via ClaudeMapper, and (vision models only) expand multimodal
    // content into Claude text/image blocks.
    const prompt = messages
      .flat()
      .filter((v) => {
        if (!v.content) return false;
        if (typeof v.content === "string" && !v.content.trim()) return false;
        return true;
      })
      .map((v) => {
        const { role, content } = v;
        const insideRole = ClaudeMapper[role] ?? "user";

        // Non-vision models (or plain string content) send text only.
        if (!visionModel || typeof content === "string") {
          return {
            role: insideRole,
            content: getMessageTextContent(v),
          };
        }
        return {
          role: insideRole,
          content: content
            .filter((v) => v.image_url || v.text)
            .map(({ type, text, image_url }) => {
              if (type === "text") {
                return {
                  type,
                  text: text!,
                };
              }
              // Split a data URL "data:<media_type>;<encoding>,<payload>"
              // into Claude's image source fields.
              // NOTE(review): assumes a well-formed data URL — malformed
              // input yields empty/garbled slices. Presumably
              // preProcessImageContent guarantees the format; verify.
              const { url = "" } = image_url || {};
              const colonIndex = url.indexOf(":");
              const semicolonIndex = url.indexOf(";");
              const comma = url.indexOf(",");

              const mimeType = url.slice(colonIndex + 1, semicolonIndex);
              const encodeType = url.slice(semicolonIndex + 1, comma);
              const data = url.slice(comma + 1);

              return {
                type: "image" as const,
                source: {
                  type: encodeType,
                  media_type: mimeType,
                  data,
                },
              };
            }),
        };
      });

    // Claude requires the first turn to be from the user; prepend a
    // minimal placeholder when the history starts with an assistant turn.
    if (prompt[0]?.role === "assistant") {
      prompt.unshift({
        role: "user",
        content: ";",
      });
    }

    const requestBody: AnthropicChatRequest = {
      messages: prompt,
      stream: shouldStream,

      model: modelConfig.model,
      max_tokens: modelConfig.max_tokens,
      temperature: modelConfig.temperature,
      top_p: modelConfig.top_p,
      // top_k: modelConfig.top_k,
      // NOTE(review): top_k is hard-coded to 5 and the per-model config
      // value is deliberately ignored (see commented line above) — confirm
      // this is intentional.
      top_k: 5,
    };

    const path = this.path(Anthropic.ChatPath);

    const controller = new AbortController();
    options.onController?.(controller);

    if (shouldStream) {
      // Index into runTools of the tool call currently being streamed.
      let index = -1;
      const [tools, funcs] = usePluginStore
        .getState()
        .getAsTools(
          useChatStore.getState().currentSession().mask?.plugin || [],
        );
      return stream(
        path,
        requestBody,
        {
          ...getHeaders(),
          "anthropic-version": accessStore.anthropicApiVersion,
        },
        // Convert OpenAI-style tool definitions to Anthropic's schema.
        // @ts-ignore
        tools.map((tool) => ({
          name: tool?.function?.name,
          description: tool?.function?.description,
          input_schema: tool?.function?.parameters,
        })),
        funcs,
        controller,
        // parseSSE
        (text: string, runTools: ChatMessageTool[]) => {
          // console.log("parseSSE", text, runTools);
          let chunkJson:
            | undefined
            | {
                type: "content_block_delta" | "content_block_stop" | "message_delta" | "message_stop";
                content_block?: {
                  type: "tool_use";
                  id: string;
                  name: string;
                };
                delta?: {
                  type: "text_delta" | "input_json_delta";
                  text?: string;
                  partial_json?: string;
                  stop_reason?: string;
                };
                index: number;
              };
          // NOTE(review): JSON.parse throws on malformed SSE payloads and
          // is not guarded here — verify that stream() handles a throwing
          // parseSSE callback.
          chunkJson = JSON.parse(text);

          // Handle refusal stop reason in message_delta
          if (chunkJson?.delta?.stop_reason === "refusal") {
            // Return a message to display to the user
            const refusalMessage = "\n\n[Assistant refused to respond. Please modify your request and try again.]";
            options.onError?.(new Error("Content policy violation: " + refusalMessage));
            return refusalMessage;
          }

          // A new tool_use block starts: register it; its JSON arguments
          // arrive incrementally via input_json_delta chunks below.
          if (chunkJson?.content_block?.type == "tool_use") {
            index += 1;
            const id = chunkJson?.content_block.id;
            const name = chunkJson?.content_block.name;
            runTools.push({
              id,
              type: "function",
              function: {
                name,
                arguments: "",
              },
            });
          }
          if (
            chunkJson?.delta?.type == "input_json_delta" &&
            chunkJson?.delta?.partial_json
          ) {
            // @ts-ignore
            runTools[index]["function"]["arguments"] +=
              chunkJson?.delta?.partial_json;
          }
          // Plain text deltas become the streamed message content.
          return chunkJson?.delta?.text;
        },
        // processToolMessage, include tool_calls message and tool call results
        (
          requestPayload: RequestPayload,
          toolCallMessage: any,
          toolCallResult: any[],
        ) => {
          // reset index value
          index = -1;
          // Append the assistant's tool_use turn plus one user turn per
          // tool result, so the follow-up request replays the whole tool
          // exchange in Anthropic's format.
          // @ts-ignore
          requestPayload?.messages?.splice(
            // @ts-ignore
            requestPayload?.messages?.length,
            0,
            {
              role: "assistant",
              content: toolCallMessage.tool_calls.map(
                (tool: ChatMessageTool) => ({
                  type: "tool_use",
                  id: tool.id,
                  name: tool?.function?.name,
                  input: tool?.function?.arguments
                    ? JSON.parse(tool?.function?.arguments)
                    : {},
                }),
              ),
            },
            // @ts-ignore
            ...toolCallResult.map((result) => ({
              role: "user",
              content: [
                {
                  type: "tool_result",
                  tool_use_id: result.tool_call_id,
                  content: result.content,
                },
              ],
            })),
          );
        },
        options,
      );
    } else {
      // Non-streaming: single POST, parse JSON, extract assistant text.
      const payload = {
        method: "POST",
        body: JSON.stringify(requestBody),
        signal: controller.signal,
        headers: {
          ...getHeaders(), // get common headers
          "anthropic-version": accessStore.anthropicApiVersion,
          // do not send `anthropicApiKey` in browser!!!
          // Authorization: getAuthKey(accessStore.anthropicApiKey),
        },
      };

      try {
        // User abort is surfaced as a finished empty response (status 400).
        controller.signal.onabort = () =>
          options.onFinish("", new Response(null, { status: 400 }));

        const res = await fetch(path, payload);
        const resJson = await res.json();

        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      } catch (e) {
        console.error("failed to chat", e);
        options.onError?.(e as Error);
      }
    }
  }
  // Usage accounting is not available for this provider; report zeros.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
  // Model discovery is not implemented; the commented-out entries below
  // are kept only as a reference list of known Claude model names.
  async models() {
    // const provider = {
    //   id: "anthropic",
    //   providerName: "Anthropic",
    //   providerType: "anthropic",
    // };

    return [
      // {
      //   name: "claude-instant-1.2",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-2.0",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-2.1",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-opus-20240229",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-sonnet-20240229",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-haiku-20240307",
      //   available: true,
      //   provider,
      // },
    ];
  }
  // Build the full request URL for an Anthropic endpoint path: custom
  // endpoint if configured, else the direct base URL in the desktop app
  // or the local proxy route in the browser.
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl: string = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.anthropicUrl;
    }

    // if endpoint is empty, use default endpoint
    if (baseUrl.trim().length === 0) {
      const isApp = !!getClientConfig()?.isApp;

      baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
    }

    // Bare hostnames (no scheme, not the "/api" proxy route) get https.
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {
      baseUrl = "https://" + baseUrl;
    }

    baseUrl = trimEnd(baseUrl, "/");

    // try rebuild url, when using cloudflare ai gateway in client
    return cloudflareAIGatewayUrl(`${baseUrl}/${path}`);
  }
}

/**
 * Strip every trailing occurrence of `end` from `s`.
 * An empty `end` is a no-op (it would otherwise loop forever).
 */
function trimEnd(s: string, end = " ") {
  if (!end.length) {
    return s;
  }

  let result = s;
  while (result.endsWith(end)) {
    result = result.slice(0, result.length - end.length);
  }

  return result;
}


================================================
FILE: app/client/platforms/baidu.ts
================================================
"use client";
import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getAccessToken } from "@/app/utils/baidu";

import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  MultimodalContent,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
import { fetch } from "@/app/utils/stream";

/** Shape of an OpenAI-compatible model-list response ("GET /models"). */
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

/** Chat-completion request body sent to Baidu ERNIE endpoints by ErnieApi.chat. */
interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}

export class ErnieApi implements LLMApi {
  /**
   * Build the full request URL for a Baidu endpoint path: the custom
   * endpoint when configured, otherwise the direct Baidu host in the
   * desktop app or the local proxy route in the browser.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    // Custom endpoint wins when the user enabled custom config.
    let baseUrl = accessStore.useCustomConfig ? accessStore.baiduUrl : "";

    if (baseUrl.length === 0) {
      // do not use proxy for baidubce api
      baseUrl = getClientConfig()?.isApp ? BAIDU_BASE_URL : ApiPath.Baidu;
    }

    // Drop a single trailing slash so the join below yields exactly one "/".
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, -1);
    }

    // Bare hostnames (no scheme, not the proxy route) default to https.
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Baidu)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Text-to-speech is not supported for the Baidu provider.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function",
      role: v.role === "system" ? "user" : v.role,
      content: getMessageTextContent(v),
    }));

    // "error_code": 336006, "error_msg": "the length of messages must be an odd number",
    if (messages.length % 2 === 0) {
      if (messages.at(0)?.role === "user") {
        messages.splice(1, 0, {
          role: "assistant",
          content: " ",
        });
      } else {
        messages.unshift({
          role: "user",
          content: " ",
        });
      }
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      messages,
      stream: shouldStream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    console.log("[Request] Baidu payload: ", requestPayload);

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      let chatPath = this.path(Baidu.ChatPath(modelConfig.model));

      // getAccessToken can not run in browser, because cors error
      if (!!getClientConfig()?.isApp) {
    
Download .txt
gitextract_6fj54yj_/

├── .babelrc
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github/
│   ├── ISSUE_TEMPLATE/
│   │   ├── 1_bug_report.yml
│   │   ├── 1_bug_report_cn.yml
│   │   ├── 2_feature_request.yml
│   │   └── 2_feature_request_cn.yml
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── dependabot.yml
│   └── workflows/
│       ├── app.yml
│       ├── deploy_preview.yml
│       ├── docker.yml
│       ├── issue-translator.yml
│       ├── remove_deploy_preview.yml
│       ├── sync.yml
│       └── test.yml
├── .gitignore
├── .gitpod.yml
├── .husky/
│   └── pre-commit
├── .lintstagedrc.json
├── .prettierrc.js
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── LICENSE
├── README.md
├── README_CN.md
├── README_JA.md
├── README_KO.md
├── app/
│   ├── api/
│   │   ├── 302ai.ts
│   │   ├── [provider]/
│   │   │   └── [...path]/
│   │   │       └── route.ts
│   │   ├── alibaba.ts
│   │   ├── anthropic.ts
│   │   ├── artifacts/
│   │   │   └── route.ts
│   │   ├── auth.ts
│   │   ├── azure.ts
│   │   ├── baidu.ts
│   │   ├── bytedance.ts
│   │   ├── common.ts
│   │   ├── config/
│   │   │   └── route.ts
│   │   ├── deepseek.ts
│   │   ├── glm.ts
│   │   ├── google.ts
│   │   ├── iflytek.ts
│   │   ├── moonshot.ts
│   │   ├── openai.ts
│   │   ├── proxy.ts
│   │   ├── siliconflow.ts
│   │   ├── stability.ts
│   │   ├── tencent/
│   │   │   └── route.ts
│   │   ├── upstash/
│   │   │   └── [action]/
│   │   │       └── [...key]/
│   │   │           └── route.ts
│   │   ├── webdav/
│   │   │   └── [...path]/
│   │   │       └── route.ts
│   │   └── xai.ts
│   ├── client/
│   │   ├── api.ts
│   │   ├── controller.ts
│   │   └── platforms/
│   │       ├── ai302.ts
│   │       ├── alibaba.ts
│   │       ├── anthropic.ts
│   │       ├── baidu.ts
│   │       ├── bytedance.ts
│   │       ├── deepseek.ts
│   │       ├── glm.ts
│   │       ├── google.ts
│   │       ├── iflytek.ts
│   │       ├── moonshot.ts
│   │       ├── openai.ts
│   │       ├── siliconflow.ts
│   │       ├── tencent.ts
│   │       └── xai.ts
│   ├── command.ts
│   ├── components/
│   │   ├── artifacts.module.scss
│   │   ├── artifacts.tsx
│   │   ├── auth.module.scss
│   │   ├── auth.tsx
│   │   ├── button.module.scss
│   │   ├── button.tsx
│   │   ├── chat-list.tsx
│   │   ├── chat.module.scss
│   │   ├── chat.tsx
│   │   ├── emoji.tsx
│   │   ├── error.tsx
│   │   ├── exporter.module.scss
│   │   ├── exporter.tsx
│   │   ├── home.module.scss
│   │   ├── home.tsx
│   │   ├── input-range.module.scss
│   │   ├── input-range.tsx
│   │   ├── markdown.tsx
│   │   ├── mask.module.scss
│   │   ├── mask.tsx
│   │   ├── mcp-market.module.scss
│   │   ├── mcp-market.tsx
│   │   ├── message-selector.module.scss
│   │   ├── message-selector.tsx
│   │   ├── model-config.module.scss
│   │   ├── model-config.tsx
│   │   ├── new-chat.module.scss
│   │   ├── new-chat.tsx
│   │   ├── plugin.module.scss
│   │   ├── plugin.tsx
│   │   ├── realtime-chat/
│   │   │   ├── index.ts
│   │   │   ├── realtime-chat.module.scss
│   │   │   ├── realtime-chat.tsx
│   │   │   └── realtime-config.tsx
│   │   ├── sd/
│   │   │   ├── index.tsx
│   │   │   ├── sd-panel.module.scss
│   │   │   ├── sd-panel.tsx
│   │   │   ├── sd-sidebar.tsx
│   │   │   ├── sd.module.scss
│   │   │   └── sd.tsx
│   │   ├── search-chat.tsx
│   │   ├── settings.module.scss
│   │   ├── settings.tsx
│   │   ├── sidebar.tsx
│   │   ├── tts-config.tsx
│   │   ├── tts.module.scss
│   │   ├── ui-lib.module.scss
│   │   ├── ui-lib.tsx
│   │   └── voice-print/
│   │       ├── index.ts
│   │       ├── voice-print.module.scss
│   │       └── voice-print.tsx
│   ├── config/
│   │   ├── build.ts
│   │   ├── client.ts
│   │   └── server.ts
│   ├── constant.ts
│   ├── global.d.ts
│   ├── layout.tsx
│   ├── lib/
│   │   └── audio.ts
│   ├── locales/
│   │   ├── ar.ts
│   │   ├── bn.ts
│   │   ├── cn.ts
│   │   ├── cs.ts
│   │   ├── da.ts
│   │   ├── de.ts
│   │   ├── en.ts
│   │   ├── es.ts
│   │   ├── fr.ts
│   │   ├── id.ts
│   │   ├── index.ts
│   │   ├── it.ts
│   │   ├── jp.ts
│   │   ├── ko.ts
│   │   ├── no.ts
│   │   ├── pt.ts
│   │   ├── ru.ts
│   │   ├── sk.ts
│   │   ├── tr.ts
│   │   ├── tw.ts
│   │   └── vi.ts
│   ├── masks/
│   │   ├── build.ts
│   │   ├── cn.ts
│   │   ├── en.ts
│   │   ├── index.ts
│   │   ├── tw.ts
│   │   └── typing.ts
│   ├── mcp/
│   │   ├── actions.ts
│   │   ├── client.ts
│   │   ├── logger.ts
│   │   ├── mcp_config.default.json
│   │   ├── types.ts
│   │   └── utils.ts
│   ├── page.tsx
│   ├── polyfill.ts
│   ├── store/
│   │   ├── access.ts
│   │   ├── chat.ts
│   │   ├── config.ts
│   │   ├── index.ts
│   │   ├── mask.ts
│   │   ├── plugin.ts
│   │   ├── prompt.ts
│   │   ├── sd.ts
│   │   ├── sync.ts
│   │   └── update.ts
│   ├── styles/
│   │   ├── animation.scss
│   │   ├── globals.scss
│   │   ├── highlight.scss
│   │   ├── markdown.scss
│   │   └── window.scss
│   ├── typing.ts
│   ├── utils/
│   │   ├── audio.ts
│   │   ├── auth-settings-events.ts
│   │   ├── baidu.ts
│   │   ├── chat.ts
│   │   ├── clone.ts
│   │   ├── cloud/
│   │   │   ├── index.ts
│   │   │   ├── upstash.ts
│   │   │   └── webdav.ts
│   │   ├── cloudflare.ts
│   │   ├── format.ts
│   │   ├── hmac.ts
│   │   ├── hooks.ts
│   │   ├── indexedDB-storage.ts
│   │   ├── merge.ts
│   │   ├── model.ts
│   │   ├── ms_edge_tts.ts
│   │   ├── object.ts
│   │   ├── store.ts
│   │   ├── stream.ts
│   │   ├── sync.ts
│   │   ├── tencent.ts
│   │   └── token.ts
│   └── utils.ts
├── docker-compose.yml
├── docs/
│   ├── bt-cn.md
│   ├── cloudflare-pages-cn.md
│   ├── cloudflare-pages-en.md
│   ├── cloudflare-pages-es.md
│   ├── cloudflare-pages-ja.md
│   ├── cloudflare-pages-ko.md
│   ├── faq-cn.md
│   ├── faq-en.md
│   ├── faq-es.md
│   ├── faq-ja.md
│   ├── faq-ko.md
│   ├── synchronise-chat-logs-cn.md
│   ├── synchronise-chat-logs-en.md
│   ├── synchronise-chat-logs-es.md
│   ├── synchronise-chat-logs-ja.md
│   ├── synchronise-chat-logs-ko.md
│   ├── translation.md
│   ├── user-manual-cn.md
│   ├── vercel-cn.md
│   ├── vercel-es.md
│   ├── vercel-ja.md
│   └── vercel-ko.md
├── jest.config.ts
├── jest.setup.ts
├── next.config.mjs
├── package.json
├── public/
│   ├── audio-processor.js
│   ├── plugins.json
│   ├── prompts.json
│   ├── robots.txt
│   ├── serviceWorker.js
│   ├── serviceWorkerRegister.js
│   └── site.webmanifest
├── scripts/
│   ├── .gitignore
│   ├── delete-deployment-preview.sh
│   ├── fetch-prompts.mjs
│   ├── init-proxy.sh
│   ├── proxychains.template.conf
│   └── setup.sh
├── src-tauri/
│   ├── .gitignore
│   ├── Cargo.toml
│   ├── build.rs
│   ├── icons/
│   │   └── icon.icns
│   ├── src/
│   │   ├── main.rs
│   │   └── stream.rs
│   └── tauri.conf.json
├── test/
│   ├── model-available.test.ts
│   ├── model-provider.test.ts
│   ├── sum-module.test.ts
│   └── vision-model-checker.test.ts
├── tsconfig.json
└── vercel.json
Download .txt
SYMBOL INDEX (870 symbols across 151 files)

FILE: app/api/302ai.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/api/[provider]/[...path]/route.ts
  function handle (line 20) | async function handle(
  constant GET (line 63) | const GET = handle;
  constant POST (line 64) | const POST = handle;

FILE: app/api/alibaba.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/api/anthropic.ts
  constant ALLOWD_PATH (line 15) | const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
  function handle (line 17) | async function handle(
  function request (line 60) | async function request(req: NextRequest) {

FILE: app/api/artifacts/route.ts
  function handle (line 5) | async function handle(req: NextRequest, res: NextResponse) {
  constant POST (line 70) | const POST = handle;
  constant GET (line 71) | const GET = handle;

FILE: app/api/auth.ts
  function getIP (line 6) | function getIP(req: NextRequest) {
  function parseApiKey (line 17) | function parseApiKey(bearToken: string) {
  function auth (line 27) | function auth(req: NextRequest, modelProvider: ModelProvider) {

FILE: app/api/azure.ts
  function handle (line 7) | async function handle(

FILE: app/api/baidu.ts
  function handle (line 16) | async function handle(
  function request (line 54) | async function request(req: NextRequest) {

FILE: app/api/bytedance.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/api/common.ts
  function requestOpenai (line 9) | async function requestOpenai(req: NextRequest) {

FILE: app/api/config/route.ts
  constant DANGER_CONFIG (line 9) | const DANGER_CONFIG = {
  type DangerConfig (line 21) | type DangerConfig = typeof DANGER_CONFIG;
  function handle (line 24) | async function handle() {
  constant GET (line 28) | const GET = handle;
  constant POST (line 29) | const POST = handle;

FILE: app/api/deepseek.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/api/glm.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/api/google.ts
  function handle (line 9) | async function handle(
  constant GET (line 52) | const GET = handle;
  constant POST (line 53) | const POST = handle;
  function request (line 71) | async function request(req: NextRequest, apiKey: string) {

FILE: app/api/iflytek.ts
  function handle (line 16) | async function handle(
  function request (line 42) | async function request(req: NextRequest) {

FILE: app/api/moonshot.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/api/openai.ts
  constant ALLOWED_PATH (line 9) | const ALLOWED_PATH = new Set(Object.values(OpenaiPath));
  function getModels (line 11) | function getModels(remoteModelRes: OpenAIListModelResponse) {
  function handle (line 29) | async function handle(

FILE: app/api/proxy.ts
  function handle (line 4) | async function handle(

FILE: app/api/siliconflow.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/api/stability.ts
  function handle (line 6) | async function handle(

FILE: app/api/tencent/route.ts
  function handle (line 10) | async function handle(
  constant GET (line 36) | const GET = handle;
  constant POST (line 37) | const POST = handle;
  function request (line 60) | async function request(req: NextRequest) {

FILE: app/api/upstash/[action]/[...key]/route.ts
  function handle (line 3) | async function handle(
  constant POST (line 69) | const POST = handle;
  constant GET (line 70) | const GET = handle;
  constant OPTIONS (line 71) | const OPTIONS = handle;

FILE: app/api/webdav/[...path]/route.ts
  function handle (line 20) | async function handle(
  constant PUT (line 163) | const PUT = handle;
  constant GET (line 164) | const GET = handle;
  constant OPTIONS (line 165) | const OPTIONS = handle;

FILE: app/api/xai.ts
  function handle (line 15) | async function handle(
  function request (line 41) | async function request(req: NextRequest) {

FILE: app/client/api.ts
  constant ROLES (line 29) | const ROLES = ["system", "user", "assistant"] as const;
  type MessageRole (line 30) | type MessageRole = (typeof ROLES)[number];
  type ChatModel (line 34) | type ChatModel = ModelType;
  type MultimodalContent (line 36) | interface MultimodalContent {
  type MultimodalContentForAlibaba (line 44) | interface MultimodalContentForAlibaba {
  type RequestMessage (line 49) | interface RequestMessage {
  type LLMConfig (line 54) | interface LLMConfig {
  type SpeechOptions (line 67) | interface SpeechOptions {
  type ChatOptions (line 76) | interface ChatOptions {
  type LLMUsage (line 88) | interface LLMUsage {
  type LLMModel (line 93) | interface LLMModel {
  type LLMModelProvider (line 101) | interface LLMModelProvider {
  type ProviderName (line 115) | type ProviderName = "openai" | "azure" | "claude" | "palm";
  type Model (line 117) | interface Model {
  type ChatProvider (line 123) | interface ChatProvider {
  class ClientApi (line 136) | class ClientApi {
    method constructor (line 139) | constructor(provider: ModelProvider = ModelProvider.GPT) {
    method config (line 185) | config() {}
    method prompts (line 187) | prompts() {}
    method masks (line 189) | masks() {}
    method share (line 191) | async share(messages: ChatMessage[], avatarUrl: string | null = null) {
  function getBearerToken (line 231) | function getBearerToken(
  function validString (line 240) | function validString(x: string): boolean {
  function getHeaders (line 244) | function getHeaders(ignoreHeaders: boolean = false) {
  function getClientApi (line 368) | function getClientApi(provider: ServiceProvider): ClientApi {

FILE: app/client/controller.ts
  method addController (line 5) | addController(
  method stop (line 15) | stop(sessionId: string, messageId: string) {
  method stopAll (line 21) | stopAll() {
  method hasPending (line 25) | hasPending() {
  method remove (line 29) | remove(sessionId: string, messageId: string) {
  method key (line 34) | key(sessionId: string, messageIndex: string) {

FILE: app/client/platforms/ai302.ts
  type Ai302ListModelResponse (line 34) | interface Ai302ListModelResponse {
  class Ai302Api (line 43) | class Ai302Api implements LLMApi {
    method path (line 46) | path(path: string): string {
    method extractMessage (line 76) | extractMessage(res: any) {
    method speech (line 80) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 84) | async chat(options: ChatOptions) {
    method usage (line 247) | async usage() {
    method models (line 254) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/alibaba.ts
  type OpenAIListModelResponse (line 32) | interface OpenAIListModelResponse {
  type RequestInput (line 41) | interface RequestInput {
  type RequestParam (line 47) | interface RequestParam {
  type RequestPayload (line 55) | interface RequestPayload {
  class QwenApi (line 61) | class QwenApi implements LLMApi {
    method path (line 62) | path(path: string): string {
    method extractMessage (line 88) | extractMessage(res: any) {
    method speech (line 92) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 96) | async chat(options: ChatOptions) {
    method usage (line 266) | async usage() {
    method models (line 273) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/anthropic.ts
  type MultiBlockContent (line 18) | type MultiBlockContent = {
  type AnthropicMessage (line 28) | type AnthropicMessage = {
  type AnthropicChatRequest (line 33) | interface AnthropicChatRequest {
  type ChatRequest (line 45) | interface ChatRequest {
  type ChatResponse (line 57) | interface ChatResponse {
  type ChatStreamResponse (line 63) | type ChatStreamResponse = ChatResponse & {
  class ClaudeApi (line 76) | class ClaudeApi implements LLMApi {
    method speech (line 77) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method extractMessage (line 81) | extractMessage(res: any) {
    method chat (line 86) | async chat(options: ChatOptions): Promise<void> {
    method usage (line 343) | async usage() {
    method models (line 349) | async models() {
    method path (line 389) | path(path: string): string {
  function trimEnd (line 416) | function trimEnd(s: string, end = " ") {

FILE: app/client/platforms/baidu.ts
  type OpenAIListModelResponse (line 24) | interface OpenAIListModelResponse {
  type RequestPayload (line 33) | interface RequestPayload {
  class ErnieApi (line 47) | class ErnieApi implements LLMApi {
    method path (line 48) | path(path: string): string {
    method speech (line 75) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 79) | async chat(options: ChatOptions) {
    method usage (line 273) | async usage() {
    method models (line 280) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/bytedance.ts
  type OpenAIListModelResponse (line 29) | interface OpenAIListModelResponse {
  type RequestPayloadForByteDance (line 38) | interface RequestPayloadForByteDance {
  class DoubaoApi (line 52) | class DoubaoApi implements LLMApi {
    method path (line 53) | path(path: string): string {
    method extractMessage (line 79) | extractMessage(res: any) {
    method speech (line 83) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 87) | async chat(options: ChatOptions) {
    method usage (line 239) | async usage() {
    method models (line 246) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/deepseek.ts
  class DeepSeekApi (line 28) | class DeepSeekApi implements LLMApi {
    method path (line 31) | path(path: string): string {
    method extractMessage (line 58) | extractMessage(res: any) {
    method speech (line 62) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 66) | async chat(options: ChatOptions) {
    method usage (line 243) | async usage() {
    method models (line 250) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/glm.ts
  type BasePayload (line 28) | interface BasePayload {
  type ChatPayload (line 32) | interface ChatPayload extends BasePayload {
  type ImageGenerationPayload (line 41) | interface ImageGenerationPayload extends BasePayload {
  type VideoGenerationPayload (line 47) | interface VideoGenerationPayload extends BasePayload {
  type ModelType (line 54) | type ModelType = "chat" | "image" | "video";
  class ChatGLMApi (line 56) | class ChatGLMApi implements LLMApi {
    method getModelType (line 59) | private getModelType(model: string): ModelType {
    method getModelPath (line 65) | private getModelPath(type: ModelType): string {
    method createPayload (line 76) | private createPayload(
    method parseResponse (line 108) | private parseResponse(modelType: ModelType, json: any): string {
    method path (line 123) | path(path: string): string {
    method extractMessage (line 148) | extractMessage(res: any) {
    method speech (line 152) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 156) | async chat(options: ChatOptions) {
    method usage (line 282) | async usage() {
    method models (line 289) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/google.ts
  class GeminiProApi (line 32) | class GeminiProApi implements LLMApi {
    method path (line 33) | path(path: string, shouldStream = false): string {
    method extractMessage (line 61) | extractMessage(res: any) {
    method speech (line 87) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 91) | async chat(options: ChatOptions): Promise<void> {
    method usage (line 311) | usage(): Promise<LLMUsage> {
    method models (line 314) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/iflytek.ts
  class SparkApi (line 29) | class SparkApi implements LLMApi {
    method path (line 32) | path(path: string): string {
    method extractMessage (line 59) | extractMessage(res: any) {
    method speech (line 63) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 67) | async chat(options: ChatOptions) {
    method usage (line 243) | async usage() {
    method models (line 250) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/moonshot.ts
  class MoonshotApi (line 29) | class MoonshotApi implements LLMApi {
    method path (line 32) | path(path: string): string {
    method extractMessage (line 59) | extractMessage(res: any) {
    method speech (line 63) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 67) | async chat(options: ChatOptions) {
    method usage (line 190) | async usage() {
    method models (line 197) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/openai.ts
  type OpenAIListModelResponse (line 48) | interface OpenAIListModelResponse {
  type RequestPayload (line 57) | interface RequestPayload {
  type DalleRequestPayload (line 72) | interface DalleRequestPayload {
  class ChatGPTApi (line 82) | class ChatGPTApi implements LLMApi {
    method path (line 85) | path(path: string): string {
    method extractMessage (line 124) | async extractMessage(res: any) {
    method speech (line 148) | async speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 186) | async chat(options: ChatOptions) {
    method usage (line 431) | async usage() {
    method models (line 497) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/siliconflow.ts
  type SiliconFlowListModelResponse (line 34) | interface SiliconFlowListModelResponse {
  class SiliconflowApi (line 43) | class SiliconflowApi implements LLMApi {
    method path (line 46) | path(path: string): string {
    method extractMessage (line 76) | extractMessage(res: any) {
    method speech (line 80) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 84) | async chat(options: ChatOptions) {
    method usage (line 247) | async usage() {
    method models (line 254) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/tencent.ts
  type OpenAIListModelResponse (line 31) | interface OpenAIListModelResponse {
  type RequestPayload (line 40) | interface RequestPayload {
  function capitalizeKeys (line 51) | function capitalizeKeys(obj: any): any {
  class HunyuanApi (line 66) | class HunyuanApi implements LLMApi {
    method path (line 67) | path(): string {
    method extractMessage (line 92) | extractMessage(res: any) {
    method speech (line 96) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 100) | async chat(options: ChatOptions) {
    method usage (line 268) | async usage() {
    method models (line 275) | async models(): Promise<LLMModel[]> {

FILE: app/client/platforms/xai.ts
  class XAIApi (line 25) | class XAIApi implements LLMApi {
    method path (line 28) | path(path: string): string {
    method extractMessage (line 55) | extractMessage(res: any) {
    method speech (line 59) | speech(options: SpeechOptions): Promise<ArrayBuffer> {
    method chat (line 63) | async chat(options: ChatOptions) {
    method usage (line 184) | async usage() {
    method models (line 191) | async models(): Promise<LLMModel[]> {

FILE: app/command.ts
  type Command (line 5) | type Command = (param: string) => void;
  type Commands (line 6) | interface Commands {
  function useCommand (line 14) | function useCommand(commands: Commands = {}) {
  type ChatCommands (line 35) | interface ChatCommands {
  function useChatCommand (line 48) | function useChatCommand(commands: ChatCommands = {}) {

FILE: app/components/artifacts.tsx
  type HTMLPreviewProps (line 25) | type HTMLPreviewProps = {
  type HTMLPreviewHandler (line 32) | type HTMLPreviewHandler = {
  function ArtifactsShareButton (line 109) | function ArtifactsShareButton({
  function Artifacts (line 205) | function Artifacts() {

FILE: app/components/auth.tsx
  function AuthPage (line 25) | function AuthPage() {
  function TopBanner (line 129) | function TopBanner() {

FILE: app/components/button.tsx
  type ButtonType (line 7) | type ButtonType = "primary" | "danger" | null;
  function IconButton (line 9) | function IconButton(props: {

FILE: app/components/chat-list.tsx
  function ChatItem (line 23) | function ChatItem(props: {
  function ChatList (line 105) | function ChatList(props: { narrow?: boolean }) {

FILE: app/components/chat.tsx
  function SessionConfigModel (line 165) | function SessionConfigModel(props: { onClose: () => void }) {
  function PromptToast (line 233) | function PromptToast(props: {
  function useSubmitHandler (line 263) | function useSubmitHandler() {
  type RenderPrompt (line 310) | type RenderPrompt = Pick<Prompt, "title" | "content">;
  function PromptHints (line 312) | function PromptHints(props: {
  function ClearContextDivider (line 382) | function ClearContextDivider() {
  function ChatAction (line 404) | function ChatAction(props: {
  function useScrollToBottom (line 453) | function useScrollToBottom(
  function ChatActions (line 494) | function ChatActions(props: {
  function EditMessageModal (line 850) | function EditMessageModal(props: { onClose: () => void }) {
  function DeleteImageButton (line 914) | function DeleteImageButton(props: { deleteImage: () => void }) {
  function ShortcutKeyModal (line 922) | function ShortcutKeyModal(props: { onClose: () => void }) {
  function _Chat (line 989) | function _Chat() {
  function Chat (line 2167) | function Chat() {

FILE: app/components/emoji.tsx
  function getEmojiUrl (line 25) | function getEmojiUrl(unified: string, style: EmojiStyle) {
  function AvatarPicker (line 32) | function AvatarPicker(props: {
  function Avatar (line 48) | function Avatar(props: { model?: ModelType; avatar?: string }) {
  function EmojiAvatar (line 109) | function EmojiAvatar(props: { avatar: string; size?: number }) {

FILE: app/components/error.tsx
  type IErrorBoundaryState (line 13) | interface IErrorBoundaryState {
  class ErrorBoundary (line 19) | class ErrorBoundary extends React.Component<any, IErrorBoundaryState> {
    method constructor (line 20) | constructor(props: any) {
    method componentDidCatch (line 25) | componentDidCatch(error: Error, info: React.ErrorInfo) {
    method clearAndSaveData (line 30) | clearAndSaveData() {
    method render (line 38) | render() {

FILE: app/components/exporter.tsx
  function ExportMessageModal (line 48) | function ExportMessageModal(props: { onClose: () => void }) {
  function useSteps (line 75) | function useSteps(
  function Steps (line 97) | function Steps<
  function MessageExporter (line 140) | function MessageExporter() {
  function RenderExport (line 258) | function RenderExport(props: {
  function PreviewActions (line 304) | function PreviewActions(props: {
  function ImagePreviewer (line 409) | function ImagePreviewer(props: {
  function MarkdownPreviewer (line 618) | function MarkdownPreviewer(props: {
  function JsonPreviewer (line 655) | function JsonPreviewer(props: {

FILE: app/components/home.tsx
  function Loading (line 34) | function Loading(props: { noLogo?: boolean }) {
  function useSwitchTheme (line 85) | function useSwitchTheme() {
  function useHtmlLang (line 116) | function useHtmlLang() {
  function WindowContent (line 152) | function WindowContent(props: { children: React.ReactNode }) {
  function Screen (line 160) | function Screen() {
  function useLoadData (line 223) | function useLoadData() {
  function Home (line 237) | function Home() {

FILE: app/components/input-range.tsx
  type InputRangeProps (line 5) | interface InputRangeProps {
  function InputRange (line 16) | function InputRange({

FILE: app/components/markdown.tsx
  function Mermaid (line 28) | function Mermaid(props: { code: string }) {
  function PreCode (line 74) | function PreCode(props: { children: any }) {
  function CustomCode (line 176) | function CustomCode(props: { children: any; className?: string }) {
  function escapeBrackets (line 231) | function escapeBrackets(text: string) {
  function tryWrapHtmlCode (line 249) | function tryWrapHtmlCode(text: string) {
  function _MarkDownContent (line 270) | function _MarkDownContent(props: { content: string }) {
  function Markdown (line 321) | function Markdown(

FILE: app/components/mask.tsx
  function reorder (line 61) | function reorder<T>(list: T[], startIndex: number, endIndex: number): T[] {
  function MaskAvatar (line 68) | function MaskAvatar(props: { avatar: string; model?: ModelType }) {
  function MaskConfig (line 76) | function MaskConfig(props: {
  function ContextPromptItem (line 260) | function ContextPromptItem(props: {
  function ContextPrompts (line 324) | function ContextPrompts(props: {
  function MaskPage (line 442) | function MaskPage() {

FILE: app/components/mcp-market.tsx
  type ConfigProperty (line 36) | interface ConfigProperty {
  function McpMarketPage (line 43) | function McpMarketPage() {

FILE: app/components/message-selector.tsx
  function useShiftRange (line 13) | function useShiftRange() {
  function useMessageSelector (line 55) | function useMessageSelector() {
  function MessageSelector (line 69) | function MessageSelector(props: {

FILE: app/components/model-config.tsx
  function ModelConfigList (line 12) | function ModelConfigList(props: {

FILE: app/components/new-chat.tsx
  function MaskItem (line 21) | function MaskItem(props: { mask: Mask; onClick?: () => void }) {
  function useMaskGroup (line 35) | function useMaskGroup(masks: Mask[]) {
  function NewChat (line 77) | function NewChat() {

FILE: app/components/plugin.tsx
  function PluginPage (line 33) | function PluginPage() {

FILE: app/components/realtime-chat/realtime-chat.tsx
  type RealtimeChatProps (line 25) | interface RealtimeChatProps {
  function RealtimeChat (line 31) | function RealtimeChat({

FILE: app/components/realtime-chat/realtime-config.tsx
  function RealtimeConfigList (line 16) | function RealtimeConfigList(props: {

FILE: app/components/sd/sd-panel.tsx
  function ControlParamItem (line 132) | function ControlParamItem(props: {
  function ControlParam (line 159) | function ControlParam(props: {
  function SdPanel (line 279) | function SdPanel() {

FILE: app/components/sd/sd-sidebar.tsx
  function SideBar (line 33) | function SideBar(props: { className?: string }) {

FILE: app/components/sd/sd.tsx
  function getSdTaskStatus (line 41) | function getSdTaskStatus(item: any) {
  function Sd (line 90) | function Sd() {

FILE: app/components/search-chat.tsx
  type Item (line 13) | type Item = {
  function SearchChatPage (line 18) | function SearchChatPage() {

FILE: app/components/settings.tsx
  function EditPromptModal (line 93) | function EditPromptModal(props: { id: string; onClose: () => void }) {
  function UserPromptModal (line 142) | function UserPromptModal(props: { onClose?: () => void }) {
  function DangerItems (line 247) | function DangerItems() {
  function CheckButton (line 287) | function CheckButton() {
  function SyncConfigModal (line 328) | function SyncConfigModal(props: { onClose?: () => void }) {
  function SyncItems (line 489) | function SyncItems() {
  function Settings (line 584) | function Settings() {

FILE: app/components/sidebar.tsx
  constant DISCOVERY (line 36) | const DISCOVERY = [
  function useHotKey (line 46) | function useHotKey() {
  function useDragSideBar (line 65) | function useDragSideBar() {
  function SideBarContainer (line 139) | function SideBarContainer(props: {
  function SideBarHeader (line 172) | function SideBarHeader(props: {
  function SideBarBody (line 201) | function SideBarBody(props: {
  function SideBarTail (line 213) | function SideBarTail(props: {
  function SideBar (line 227) | function SideBar(props: { className?: string }) {

FILE: app/components/tts-config.tsx
  function TTSConfigList (line 13) | function TTSConfigList(props: {

FILE: app/components/ui-lib.tsx
  function Popover (line 29) | function Popover(props: {
  function Card (line 48) | function Card(props: { children: JSX.Element[]; className?: string }) {
  function ListItem (line 54) | function ListItem(props: {
  function List (line 90) | function List(props: { children: React.ReactNode; id?: string }) {
  function Loading (line 98) | function Loading() {
  type ModalProps (line 114) | interface ModalProps {
  function Modal (line 122) | function Modal(props: ModalProps) {
  function showModal (line 181) | function showModal(props: ModalProps) {
  type ToastProps (line 202) | type ToastProps = {
  function Toast (line 211) | function Toast(props: ToastProps) {
  function showToast (line 232) | function showToast(
  type InputProps (line 258) | type InputProps = React.HTMLProps<HTMLTextAreaElement> & {
  function Input (line 263) | function Input(props: InputProps) {
  function PasswordInput (line 272) | function PasswordInput(
  function Select (line 297) | function Select(
  function showConfirm (line 324) | function showConfirm(content: any) {
  function PromptInput (line 375) | function PromptInput(props: {
  function showPrompt (line 397) | function showPrompt(content: any, value = "", rows = 3) {
  function showImageModal (line 452) | function showImageModal(
  function Selector (line 477) | function Selector<T>(props: {
  function FullScreen (line 555) | function FullScreen(props: any) {

FILE: app/components/voice-print/voice-print.tsx
  type VoicePrintProps (line 4) | interface VoicePrintProps {
  function VoicePrint (line 9) | function VoicePrint({ frequencies, isActive }: VoicePrintProps) {

FILE: app/config/build.ts
  type BuildConfig (line 46) | type BuildConfig = ReturnType<typeof getBuildConfig>;

FILE: app/config/client.ts
  function getClientConfig (line 3) | function getClientConfig() {
  function queryMeta (line 15) | function queryMeta(key: string, defaultValue?: string): string {

FILE: app/config/server.ts
  type ProcessEnv (line 7) | interface ProcessEnv {
  constant ACCESS_CODES (line 103) | const ACCESS_CODES = (function getAccessCodes(): Set<string> {
  function getApiKey (line 116) | function getApiKey(keys?: string) {

FILE: app/constant.ts
  constant OWNER (line 1) | const OWNER = "ChatGPTNextWeb";
  constant REPO (line 2) | const REPO = "ChatGPT-Next-Web";
  constant REPO_URL (line 3) | const REPO_URL = `https://github.com/${OWNER}/${REPO}`;
  constant PLUGINS_REPO_URL (line 4) | const PLUGINS_REPO_URL = `https://github.com/${OWNER}/NextChat-Awesome-P...
  constant ISSUE_URL (line 5) | const ISSUE_URL = `https://github.com/${OWNER}/${REPO}/issues`;
  constant UPDATE_URL (line 6) | const UPDATE_URL = `${REPO_URL}#keep-updated`;
  constant RELEASE_URL (line 7) | const RELEASE_URL = `${REPO_URL}/releases`;
  constant FETCH_COMMIT_URL (line 8) | const FETCH_COMMIT_URL = `https://api.github.com/repos/${OWNER}/${REPO}/...
  constant FETCH_TAG_URL (line 9) | const FETCH_TAG_URL = `https://api.github.com/repos/${OWNER}/${REPO}/tag...
  constant RUNTIME_CONFIG_DOM (line 10) | const RUNTIME_CONFIG_DOM = "danger-runtime-config";
  constant STABILITY_BASE_URL (line 12) | const STABILITY_BASE_URL = "https://api.stability.ai";
  constant OPENAI_BASE_URL (line 14) | const OPENAI_BASE_URL = "https://api.openai.com";
  constant ANTHROPIC_BASE_URL (line 15) | const ANTHROPIC_BASE_URL = "https://api.anthropic.com";
  constant GEMINI_BASE_URL (line 17) | const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";
  constant BAIDU_BASE_URL (line 19) | const BAIDU_BASE_URL = "https://aip.baidubce.com";
  constant BAIDU_OATUH_URL (line 20) | const BAIDU_OATUH_URL = `${BAIDU_BASE_URL}/oauth/2.0/token`;
  constant BYTEDANCE_BASE_URL (line 22) | const BYTEDANCE_BASE_URL = "https://ark.cn-beijing.volces.com";
  constant ALIBABA_BASE_URL (line 24) | const ALIBABA_BASE_URL = "https://dashscope.aliyuncs.com/api/";
  constant TENCENT_BASE_URL (line 26) | const TENCENT_BASE_URL = "https://hunyuan.tencentcloudapi.com";
  constant MOONSHOT_BASE_URL (line 28) | const MOONSHOT_BASE_URL = "https://api.moonshot.ai";
  constant IFLYTEK_BASE_URL (line 29) | const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com";
  constant DEEPSEEK_BASE_URL (line 31) | const DEEPSEEK_BASE_URL = "https://api.deepseek.com";
  constant XAI_BASE_URL (line 33) | const XAI_BASE_URL = "https://api.x.ai";
  constant CHATGLM_BASE_URL (line 35) | const CHATGLM_BASE_URL = "https://open.bigmodel.cn";
  constant SILICONFLOW_BASE_URL (line 37) | const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn";
  constant AI302_BASE_URL (line 39) | const AI302_BASE_URL = "https://api.302.ai";
  constant CACHE_URL_PREFIX (line 41) | const CACHE_URL_PREFIX = "/api/cache";
  constant UPLOAD_URL (line 42) | const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`;
  type Path (line 44) | enum Path {
  type ApiPath (line 59) | enum ApiPath {
  type SlotID (line 80) | enum SlotID {
  type FileName (line 85) | enum FileName {
  type StoreKey (line 90) | enum StoreKey {
  constant DEFAULT_SIDEBAR_WIDTH (line 103) | const DEFAULT_SIDEBAR_WIDTH = 300;
  constant MAX_SIDEBAR_WIDTH (line 104) | const MAX_SIDEBAR_WIDTH = 500;
  constant MIN_SIDEBAR_WIDTH (line 105) | const MIN_SIDEBAR_WIDTH = 230;
  constant NARROW_SIDEBAR_WIDTH (line 106) | const NARROW_SIDEBAR_WIDTH = 100;
  constant ACCESS_CODE_PREFIX (line 108) | const ACCESS_CODE_PREFIX = "nk-";
  constant LAST_INPUT_KEY (line 110) | const LAST_INPUT_KEY = "last-input";
  constant STORAGE_KEY (line 113) | const STORAGE_KEY = "chatgpt-next-web";
  constant REQUEST_TIMEOUT_MS (line 115) | const REQUEST_TIMEOUT_MS = 60000;
  constant REQUEST_TIMEOUT_MS_FOR_THINKING (line 116) | const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;
  constant EXPORT_MESSAGE_CLASS_NAME (line 118) | const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
  type ServiceProvider (line 120) | enum ServiceProvider {
  type GoogleSafetySettingsThreshold (line 141) | enum GoogleSafetySettingsThreshold {
  type ModelProvider (line 148) | enum ModelProvider {
  constant XAI (line 256) | const XAI = {
  constant AI302 (line 274) | const AI302 = {
  constant DEFAULT_INPUT_TEMPLATE (line 281) | const DEFAULT_INPUT_TEMPLATE = `{{input}}`;
  constant DEFAULT_SYSTEM_TEMPLATE (line 290) | const DEFAULT_SYSTEM_TEMPLATE = `
  constant MCP_TOOLS_TEMPLATE (line 299) | const MCP_TOOLS_TEMPLATE = `
  constant MCP_SYSTEM_TEMPLATE (line 306) | const MCP_SYSTEM_TEMPLATE = `
  constant SUMMARIZE_MODEL (line 423) | const SUMMARIZE_MODEL = "gpt-4o-mini";
  constant GEMINI_SUMMARIZE_MODEL (line 424) | const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
  constant DEEPSEEK_SUMMARIZE_MODEL (line 425) | const DEEPSEEK_SUMMARIZE_MODEL = "deepseek-chat";
  constant DEFAULT_TTS_ENGINE (line 464) | const DEFAULT_TTS_ENGINE = "OpenAI-TTS";
  constant DEFAULT_TTS_ENGINES (line 465) | const DEFAULT_TTS_ENGINES = ["OpenAI-TTS", "Edge-TTS"];
  constant DEFAULT_TTS_MODEL (line 466) | const DEFAULT_TTS_MODEL = "tts-1";
  constant DEFAULT_TTS_VOICE (line 467) | const DEFAULT_TTS_VOICE = "alloy";
  constant DEFAULT_TTS_MODELS (line 468) | const DEFAULT_TTS_MODELS = ["tts-1", "tts-1-hd"];
  constant DEFAULT_TTS_VOICES (line 469) | const DEFAULT_TTS_VOICES = [
  constant VISION_MODEL_REGEXES (line 478) | const VISION_MODEL_REGEXES = [
  constant EXCLUDE_VISION_MODEL_REGEXES (line 499) | const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
  constant DEFAULT_MODELS (line 746) | const DEFAULT_MODELS = [
  constant CHAT_PAGE_SIZE (line 914) | const CHAT_PAGE_SIZE = 15;
  constant MAX_RENDER_MSG_COUNT (line 915) | const MAX_RENDER_MSG_COUNT = 45;
  constant DEFAULT_GA_ID (line 930) | const DEFAULT_GA_ID = "G-89WN60ZK2E";
  constant SAAS_CHAT_URL (line 932) | const SAAS_CHAT_URL = "https://nextchat.club";
  constant SAAS_CHAT_UTM_URL (line 933) | const SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github";

FILE: app/global.d.ts
  type Window (line 13) | interface Window {

FILE: app/layout.tsx
  function RootLayout (line 30) | function RootLayout({

FILE: app/lib/audio.ts
  class AudioHandler (line 1) | class AudioHandler {
    method constructor (line 17) | constructor() {
    method getByteFrequencyData (line 26) | getByteFrequencyData() {
    method initialize (line 31) | async initialize() {
    method startRecording (line 35) | async startRecording(onChunk: (chunk: Uint8Array) => void) {
    method stopRecording (line 86) | stopRecording() {
    method startStreamingPlayback (line 97) | startStreamingPlayback() {
    method stopStreamingPlayback (line 102) | stopStreamingPlayback() {
    method playChunk (line 109) | playChunk(chunk: Uint8Array) {
    method _saveData (line 151) | _saveData(data: Int16Array, bytesPerSample = 16): Blob {
    method savePlayFile (line 174) | savePlayFile() {
    method saveRecordFile (line 178) | saveRecordFile(
    method close (line 193) | async close() {

FILE: app/locales/ar.ts
  method SubTitle (line 285) | SubTitle(used: any, total: any) {

FILE: app/locales/bn.ts
  method SubTitle (line 288) | SubTitle(used: any, total: any) {

FILE: app/locales/cn.ts
  method SubTitle (line 305) | SubTitle(used: any, total: any) {
  type DeepPartial (line 865) | type DeepPartial<T> = T extends object
  type LocaleType (line 871) | type LocaleType = typeof cn;
  type PartialLocaleType (line 872) | type PartialLocaleType = DeepPartial<typeof cn>;

FILE: app/locales/cs.ts
  method SubTitle (line 288) | SubTitle(used: any, total: any) {

FILE: app/locales/da.ts
  method SubTitle (line 299) | SubTitle(used: any, total: any) {

FILE: app/locales/de.ts
  method SubTitle (line 295) | SubTitle(used: any, total: any) {

FILE: app/locales/en.ts
  method SubTitle (line 309) | SubTitle(used: any, total: any) {

FILE: app/locales/es.ts
  method SubTitle (line 298) | SubTitle(used: any, total: any) {

FILE: app/locales/fr.ts
  method SubTitle (line 298) | SubTitle(used: any, total: any) {

FILE: app/locales/id.ts
  method SubTitle (line 289) | SubTitle(used: any, total: any) {

FILE: app/locales/index.ts
  constant ALL_LANGS (line 29) | const ALL_LANGS = {
  type Lang (line 52) | type Lang = keyof typeof ALL_LANGS;
  constant ALL_LANG_OPTIONS (line 56) | const ALL_LANG_OPTIONS: Record<Lang, string> = {
  constant LANG_KEY (line 79) | const LANG_KEY = "lang";
  constant DEFAULT_LANG (line 80) | const DEFAULT_LANG = "en";
  function getItem (line 90) | function getItem(key: string) {
  function setItem (line 94) | function setItem(key: string, value: string) {
  function getLanguage (line 98) | function getLanguage() {
  function getLang (line 116) | function getLang(): Lang {
  function changeLang (line 126) | function changeLang(lang: Lang) {
  function getISOLang (line 131) | function getISOLang() {
  constant DEFAULT_STT_LANG (line 141) | const DEFAULT_STT_LANG = "zh-CN";
  constant STT_LANG_MAP (line 142) | const STT_LANG_MAP: Record<Lang, string> = {
  function getSTTLang (line 165) | function getSTTLang(): string {

FILE: app/locales/it.ts
  method SubTitle (line 299) | SubTitle(used: any, total: any) {

FILE: app/locales/jp.ts
  method SubTitle (line 286) | SubTitle(used: any, total: any) {

FILE: app/locales/ko.ts
  method SubTitle (line 306) | SubTitle(used: any, total: any) {

FILE: app/locales/no.ts
  method SubTitle (line 293) | SubTitle(used: any, total: any) {

FILE: app/locales/pt.ts
  method SubTitle (line 286) | SubTitle(used: any, total: any) {

FILE: app/locales/ru.ts
  method SubTitle (line 290) | SubTitle(used: any, total: any) {

FILE: app/locales/sk.ts
  method SubTitle (line 286) | SubTitle(used: any, total: any) {

FILE: app/locales/tr.ts
  method SubTitle (line 289) | SubTitle(used: any, total: any) {

FILE: app/locales/tw.ts
  method SubTitle (line 292) | SubTitle(used: any, total: any) {
  type DeepPartial (line 549) | type DeepPartial<T> = T extends object
  type LocaleType (line 555) | type LocaleType = typeof tw;
  type PartialLocaleType (line 556) | type PartialLocaleType = DeepPartial<typeof tw>;

FILE: app/locales/vi.ts
  method SubTitle (line 287) | SubTitle(used: any, total: any) {

FILE: app/masks/build.ts
  constant BUILTIN_MASKS (line 9) | const BUILTIN_MASKS: Record<string, BuiltinMask[]> = {

FILE: app/masks/cn.ts
  constant CN_MASKS (line 3) | const CN_MASKS: BuiltinMask[] = [

FILE: app/masks/en.ts
  constant EN_MASKS (line 3) | const EN_MASKS: BuiltinMask[] = [

FILE: app/masks/index.ts
  constant BUILTIN_MASK_ID (line 6) | const BUILTIN_MASK_ID = 100000;
  constant BUILTIN_MASK_STORE (line 8) | const BUILTIN_MASK_STORE = {
  method get (line 11) | get(id?: string) {
  method add (line 15) | add(m: BuiltinMask) {
  constant BUILTIN_MASKS (line 22) | const BUILTIN_MASKS: BuiltinMask[] = [];

FILE: app/masks/tw.ts
  constant TW_MASKS (line 3) | const TW_MASKS: BuiltinMask[] = [

FILE: app/masks/typing.ts
  type BuiltinMask (line 4) | type BuiltinMask = Omit<Mask, "id" | "modelConfig"> & {

FILE: app/mcp/actions.ts
  constant CONFIG_PATH (line 22) | const CONFIG_PATH = path.join(process.cwd(), "app/mcp/mcp_config.json");
  function getClientsStatus (line 27) | async function getClientsStatus(): Promise<
  function getClientTools (line 78) | async function getClientTools(clientId: string) {
  function getAvailableClientsCount (line 83) | async function getAvailableClientsCount() {
  function getAllTools (line 90) | async function getAllTools() {
  function initializeSingleClient (line 102) | async function initializeSingleClient(
  function initializeMcpSystem (line 142) | async function initializeMcpSystem() {
  function addMcpServer (line 164) | async function addMcpServer(clientId: string, config: ServerConfig) {
  function pauseMcpServer (line 196) | async function pauseMcpServer(clientId: string) {
  function resumeMcpServer (line 232) | async function resumeMcpServer(clientId: string): Promise<void> {
  function removeMcpServer (line 286) | async function removeMcpServer(clientId: string) {
  function restartAllClients (line 311) | async function restartAllClients() {
  function executeMcpAction (line 337) | async function executeMcpAction(
  function getMcpConfigFromFile (line 355) | async function getMcpConfigFromFile(): Promise<McpConfigData> {
  function updateMcpConfig (line 366) | async function updateMcpConfig(config: McpConfigData): Promise<void> {
  function isMcpEnabled (line 377) | async function isMcpEnabled() {

FILE: app/mcp/client.ts
  function createClient (line 9) | async function createClient(
  function removeClient (line 41) | async function removeClient(client: Client) {
  function listTools (line 46) | async function listTools(client: Client): Promise<ListToolsResponse> {
  function executeRequest (line 50) | async function executeRequest(

FILE: app/mcp/logger.ts
  class MCPClientLogger (line 12) | class MCPClientLogger {
    method constructor (line 16) | constructor(
    method info (line 24) | info(message: any) {
    method success (line 28) | success(message: any) {
    method error (line 32) | error(message: any) {
    method warn (line 36) | warn(message: any) {
    method debug (line 40) | debug(message: any) {
    method formatMessage (line 49) | private formatMessage(message: any): string {
    method print (line 58) | private print(color: string, message: any) {

FILE: app/mcp/types.ts
  type McpRequestMessage (line 6) | interface McpRequestMessage {
  type McpResponseMessage (line 22) | interface McpResponseMessage {
  type McpNotifications (line 50) | interface McpNotifications {
  type ListToolsResponse (line 67) | interface ListToolsResponse {
  type McpClientData (line 76) | type McpClientData =
  type McpInitializingClient (line 81) | interface McpInitializingClient {
  type McpActiveClient (line 87) | interface McpActiveClient {
  type McpErrorClient (line 93) | interface McpErrorClient {
  type ServerStatus (line 100) | type ServerStatus =
  type ServerStatusResponse (line 107) | interface ServerStatusResponse {
  type ServerConfig (line 113) | interface ServerConfig {
  type McpConfigData (line 120) | interface McpConfigData {
  constant DEFAULT_MCP_CONFIG (line 125) | const DEFAULT_MCP_CONFIG: McpConfigData = {
  type ArgsMapping (line 129) | interface ArgsMapping {
  type PresetServer (line 140) | interface PresetServer {

FILE: app/mcp/utils.ts
  function isMcpJson (line 1) | function isMcpJson(content: string) {
  function extractMcpJson (line 5) | function extractMcpJson(content: string) {

FILE: app/page.tsx
  function App (line 7) | async function App() {

FILE: app/polyfill.ts
  type Array (line 2) | interface Array<T> {

FILE: app/store/access.ts
  constant DEFAULT_OPENAI_URL (line 33) | const DEFAULT_OPENAI_URL = isApp ? OPENAI_BASE_URL : ApiPath.OpenAI;
  constant DEFAULT_GOOGLE_URL (line 35) | const DEFAULT_GOOGLE_URL = isApp ? GEMINI_BASE_URL : ApiPath.Google;
  constant DEFAULT_ANTHROPIC_URL (line 37) | const DEFAULT_ANTHROPIC_URL = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthr...
  constant DEFAULT_BAIDU_URL (line 39) | const DEFAULT_BAIDU_URL = isApp ? BAIDU_BASE_URL : ApiPath.Baidu;
  constant DEFAULT_BYTEDANCE_URL (line 41) | const DEFAULT_BYTEDANCE_URL = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteD...
  constant DEFAULT_ALIBABA_URL (line 43) | const DEFAULT_ALIBABA_URL = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba;
  constant DEFAULT_TENCENT_URL (line 45) | const DEFAULT_TENCENT_URL = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
  constant DEFAULT_MOONSHOT_URL (line 47) | const DEFAULT_MOONSHOT_URL = isApp ? MOONSHOT_BASE_URL : ApiPath.Moonshot;
  constant DEFAULT_STABILITY_URL (line 49) | const DEFAULT_STABILITY_URL = isApp ? STABILITY_BASE_URL : ApiPath.Stabi...
  constant DEFAULT_IFLYTEK_URL (line 51) | const DEFAULT_IFLYTEK_URL = isApp ? IFLYTEK_BASE_URL : ApiPath.Iflytek;
  constant DEFAULT_DEEPSEEK_URL (line 53) | const DEFAULT_DEEPSEEK_URL = isApp ? DEEPSEEK_BASE_URL : ApiPath.DeepSeek;
  constant DEFAULT_XAI_URL (line 55) | const DEFAULT_XAI_URL = isApp ? XAI_BASE_URL : ApiPath.XAI;
  constant DEFAULT_CHATGLM_URL (line 57) | const DEFAULT_CHATGLM_URL = isApp ? CHATGLM_BASE_URL : ApiPath.ChatGLM;
  constant DEFAULT_SILICONFLOW_URL (line 59) | const DEFAULT_SILICONFLOW_URL = isApp
  constant DEFAULT_AI302_URL (line 63) | const DEFAULT_AI302_URL = isApp ? AI302_BASE_URL : ApiPath["302.AI"];
  constant DEFAULT_ACCESS_STATE (line 65) | const DEFAULT_ACCESS_STATE = {
  method enabledAccessControl (line 160) | enabledAccessControl() {
  method getVisionModels (line 165) | getVisionModels() {
  method edgeVoiceName (line 169) | edgeVoiceName() {
  method isValidOpenAI (line 175) | isValidOpenAI() {
  method isValidAzure (line 179) | isValidAzure() {
  method isValidGoogle (line 183) | isValidGoogle() {
  method isValidAnthropic (line 187) | isValidAnthropic() {
  method isValidBaidu (line 191) | isValidBaidu() {
  method isValidByteDance (line 195) | isValidByteDance() {
  method isValidAlibaba (line 199) | isValidAlibaba() {
  method isValidTencent (line 203) | isValidTencent() {
  method isValidMoonshot (line 207) | isValidMoonshot() {
  method isValidIflytek (line 210) | isValidIflytek() {
  method isValidDeepSeek (line 213) | isValidDeepSeek() {
  method isValidXAI (line 217) | isValidXAI() {
  method isValidChatGLM (line 221) | isValidChatGLM() {
  method isValidSiliconFlow (line 225) | isValidSiliconFlow() {
  method isAuthorized (line 229) | isAuthorized() {
  method fetch (line 252) | fetch() {
  method migrate (line 288) | migrate(persistedState, version) {

FILE: app/store/chat.ts
  type ChatMessageTool (line 44) | type ChatMessageTool = {
  type ChatMessage (line 57) | type ChatMessage = RequestMessage & {
  function createMessage (line 68) | function createMessage(override: Partial<ChatMessage>): ChatMessage {
  type ChatStat (line 78) | interface ChatStat {
  type ChatSession (line 84) | interface ChatSession {
  constant DEFAULT_TOPIC (line 98) | const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
  constant BOT_HELLO (line 99) | const BOT_HELLO: ChatMessage = createMessage({
  function createEmptySession (line 104) | function createEmptySession(): ChatSession {
  function getSummarizeModel (line 122) | function getSummarizeModel(
  function countMessages (line 154) | function countMessages(msgs: ChatMessage[]) {
  function fillTemplateWith (line 161) | function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  function getMcpSystemPrompt (line 205) | async function getMcpSystemPrompt(): Promise<string> {
  constant DEFAULT_CHAT_STATE (line 226) | const DEFAULT_CHAT_STATE = {
  function get (line 235) | function get() {
  method forkSession (line 243) | forkSession() {
  method clearSessions (line 269) | clearSessions() {
  method selectSession (line 276) | selectSession(index: number) {
  method moveSession (line 282) | moveSession(from: number, to: number) {
  method newSession (line 307) | newSession(mask?: Mask) {
  method nextSession (line 330) | nextSession(delta: number) {
  method deleteSession (line 337) | deleteSession(index: number) {
  method currentSession (line 380) | currentSession() {
  method onNewMessage (line 394) | onNewMessage(message: ChatMessage, targetSession: ChatSession) {
  method onUserInput (line 407) | async onUserInput(
  method getMemoryPrompt (line 530) | getMemoryPrompt() {
  method getMessagesWithMemory (line 542) | async getMessagesWithMemory() {
  method updateMessage (line 642) | updateMessage(
  method resetSession (line 654) | resetSession(session: ChatSession) {
  method summarizeSession (line 661) | summarizeSession(
  method updateStat (line 799) | updateStat(message: ChatMessage, session: ChatSession) {
  method updateTargetSession (line 805) | updateTargetSession(
  method clearAllData (line 815) | async clearAllData() {
  method setLastInput (line 820) | setLastInput(lastInput: string) {
  method checkMcpJson (line 827) | checkMcpJson(message: ChatMessage) {
  method migrate (line 864) | migrate(persistedState, version) {

FILE: app/store/config.ts
  type ModelType (line 20) | type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
  type TTSModelType (line 21) | type TTSModelType = (typeof DEFAULT_TTS_MODELS)[number];
  type TTSVoiceType (line 22) | type TTSVoiceType = (typeof DEFAULT_TTS_VOICES)[number];
  type TTSEngineType (line 23) | type TTSEngineType = (typeof DEFAULT_TTS_ENGINES)[number];
  type SubmitKey (line 25) | enum SubmitKey {
  type Theme (line 33) | enum Theme {
  constant DEFAULT_CONFIG (line 41) | const DEFAULT_CONFIG = {
  type ChatConfig (line 109) | type ChatConfig = typeof DEFAULT_CONFIG;
  type ModelConfig (line 111) | type ModelConfig = ChatConfig["modelConfig"];
  type TTSConfig (line 112) | type TTSConfig = ChatConfig["ttsConfig"];
  type RealtimeConfig (line 113) | type RealtimeConfig = ChatConfig["realtimeConfig"];
  function limitNumber (line 115) | function limitNumber(
  method engine (line 129) | engine(x: string) {
  method model (line 132) | model(x: string) {
  method voice (line 135) | voice(x: string) {
  method speed (line 138) | speed(x: number) {
  method model (line 144) | model(x: string) {
  method max_tokens (line 147) | max_tokens(x: number) {
  method presence_penalty (line 150) | presence_penalty(x: number) {
  method frequency_penalty (line 153) | frequency_penalty(x: number) {
  method temperature (line 156) | temperature(x: number) {
  method top_p (line 159) | top_p(x: number) {
  method reset (line 167) | reset() {
  method mergeModels (line 171) | mergeModels(newModels: LLMModel[]) {
  method allModels (line 194) | allModels() {}
  method merge (line 200) | merge(persistedState, currentState) {
  method migrate (line 214) | migrate(persistedState, version) {

FILE: app/store/mask.ts
  type Mask (line 9) | type Mask = {
  constant DEFAULT_MASK_STATE (line 25) | const DEFAULT_MASK_STATE = {
  type MaskState (line 30) | type MaskState = typeof DEFAULT_MASK_STATE & {
  constant DEFAULT_MASK_AVATAR (line 34) | const DEFAULT_MASK_AVATAR = "gpt-bot";
  method create (line 53) | create(mask?: Partial<Mask>) {
  method updateMask (line 68) | updateMask(id: string, updater: (mask: Mask) => void) {
  method delete (line 78) | delete(id: string) {
  method get (line 85) | get(id?: string) {
  method getAll (line 88) | getAll() {
  method search (line 106) | search(text: string) {
  method setLanguage (line 109) | setLanguage(language: Lang | undefined) {
  method migrate (line 119) | migrate(state, version) {

FILE: app/store/plugin.ts
  type Plugin (line 12) | type Plugin = {
  type FunctionToolItem (line 25) | type FunctionToolItem = {
  type FunctionToolServiceItem (line 34) | type FunctionToolServiceItem = {
  method add (line 43) | add(plugin: Plugin, replace = false) {
  method get (line 153) | get(id: string) {
  constant DEFAULT_PLUGIN_STATE (line 168) | const DEFAULT_PLUGIN_STATE = {
  method create (line 176) | create(plugin?: Partial<Plugin>) {
  method updatePlugin (line 191) | updatePlugin(id: string, updater: (plugin: Plugin) => void) {
  method delete (line 202) | delete(id: string) {
  method getAsTools (line 209) | getAsTools(ids: string[]) {
  method get (line 221) | get(id?: string) {
  method getAll (line 224) | getAll() {
  method onRehydrateStorage (line 233) | onRehydrateStorage(state) {

FILE: app/store/prompt.ts
  type Prompt (line 7) | interface Prompt {
  method init (line 25) | init(builtinPrompts: Prompt[], userPrompts: Prompt[]) {
  method remove (line 36) | remove(id: string) {
  method add (line 40) | add(prompt: Prompt) {
  method search (line 44) | search(text: string) {
  method add (line 58) | add(prompt: Prompt) {
  method get (line 72) | get(id: string) {
  method remove (line 82) | remove(id: string) {
  method getUserPrompts (line 102) | getUserPrompts() {
  method updatePrompt (line 110) | updatePrompt(id: string, updater: (prompt: Prompt) => void) {
  method search (line 125) | search(text: string) {
  method migrate (line 137) | migrate(state, version) {
  method onRehydrateStorage (line 149) | onRehydrateStorage(state) {

FILE: app/store/sd.ts
  constant DEFAULT_SD_STATE (line 21) | const DEFAULT_SD_STATE = {
  function get (line 45) | function get() {
  method getNextId (line 53) | getNextId() {
  method sendTask (line 58) | sendTask(data: any, okCall?: Function) {
  method stabilityRequestCall (line 65) | stabilityRequestCall(data: any) {
  method updateDraw (line 137) | updateDraw(_draw: any) {
  method setCurrentModel (line 147) | setCurrentModel(model: any) {
  method setCurrentParams (line 150) | setCurrentParams(data: any) {

FILE: app/store/sync.ts
  type WebDavConfig (line 16) | interface WebDavConfig {
  type SyncStore (line 23) | type SyncStore = GetStoreState<typeof useSyncStore>;
  constant DEFAULT_SYNC_STATE (line 25) | const DEFAULT_SYNC_STATE = {
  method cloudSync (line 49) | cloudSync() {
  method markSyncTime (line 54) | markSyncTime() {
  method export (line 58) | export() {
  method import (line 70) | async import() {
  method getClient (line 85) | getClient() {
  method sync (line 91) | async sync() {
  method check (line 122) | async check() {
  method migrate (line 131) | migrate(persistedState, version) {

FILE: app/store/update.ts
  constant ONE_MINUTE (line 14) | const ONE_MINUTE = 60 * 1000;
  function formatVersionDate (line 17) | function formatVersionDate(t: string) {
  type VersionType (line 30) | type VersionType = "date" | "tag";
  function getVersion (line 32) | async function getVersion(type: VersionType) {
  method formatVersion (line 64) | formatVersion(version: string) {
  method getLatestVersion (line 71) | async getLatestVersion(force = false) {
  method updateUsage (line 136) | async updateUsage(force = false) {

FILE: app/typing.ts
  type Updater (line 1) | type Updater<T> = (updater: (value: T) => void) => void;
  constant ROLES (line 3) | const ROLES = ["system", "user", "assistant"] as const;
  type MessageRole (line 4) | type MessageRole = (typeof ROLES)[number];
  type RequestMessage (line 6) | interface RequestMessage {
  type DalleSize (line 11) | type DalleSize = "1024x1024" | "1792x1024" | "1024x1792";
  type DalleQuality (line 12) | type DalleQuality = "standard" | "hd";
  type DalleStyle (line 13) | type DalleStyle = "vivid" | "natural";
  type ModelSize (line 15) | type ModelSize =

FILE: app/utils.ts
  function trimTopic (line 16) | function trimTopic(topic: string) {
  function copyToClipboard (line 28) | async function copyToClipboard(text: string) {
  function downloadAs (line 53) | async function downloadAs(text: string, filename: string) {
  function readFromFile (line 96) | function readFromFile() {
  function isIOS (line 116) | function isIOS() {
  function useWindowSize (line 121) | function useWindowSize() {
  constant MOBILE_MAX_WIDTH (line 145) | const MOBILE_MAX_WIDTH = 600;
  function useMobileScreen (line 146) | function useMobileScreen() {
  function isFirefox (line 152) | function isFirefox() {
  function selectOrCopy (line 158) | function selectOrCopy(el: HTMLElement, content: string) {
  function getDomContentWidth (line 170) | function getDomContentWidth(dom: HTMLElement) {
  function getOrCreateMeasureDom (line 178) | function getOrCreateMeasureDom(id: string, init?: (dom: HTMLElement) => ...
  function autoGrowTextArea (line 197) | function autoGrowTextArea(dom: HTMLTextAreaElement) {
  function getCSSVar (line 220) | function getCSSVar(varName: string) {
  function isMacOS (line 227) | function isMacOS(): boolean {
  function getMessageTextContent (line 236) | function getMessageTextContent(message: RequestMessage) {
  function getMessageTextContentWithoutThinking (line 248) | function getMessageTextContentWithoutThinking(message: RequestMessage) {
  function getMessageImages (line 270) | function getMessageImages(message: RequestMessage): string[] {
  function isVisionModel (line 283) | function isVisionModel(model: string) {
  function isDalle3 (line 295) | function isDalle3(model: string) {
  function getTimeoutMSByModel (line 299) | function getTimeoutMSByModel(model: string) {
  function getModelSizes (line 313) | function getModelSizes(model: string): ModelSize[] {
  function supportsCustomSize (line 331) | function supportsCustomSize(model: string): boolean {
  function showPlugins (line 335) | function showPlugins(provider: ServiceProvider, model: string) {
  function fetch (line 353) | function fetch(
  function adapter (line 363) | function adapter(config: Record<string, unknown>) {
  function safeLocalStorage (line 377) | function safeLocalStorage(): {
  function getOperationId (line 437) | function getOperationId(operation: {
  function clientUpdate (line 449) | function clientUpdate() {
  function semverCompare (line 473) | function semverCompare(a: string, b: string) {

FILE: app/utils/audio.ts
  type TTSPlayer (line 1) | type TTSPlayer = {
  function createTTSPlayer (line 7) | function createTTSPlayer(): TTSPlayer {

FILE: app/utils/auth-settings-events.ts
  function trackConversationGuideToCPaymentClick (line 3) | function trackConversationGuideToCPaymentClick() {
  function trackAuthorizationPageButtonToCPaymentClick (line 7) | function trackAuthorizationPageButtonToCPaymentClick() {
  function trackAuthorizationPageBannerToCPaymentClick (line 11) | function trackAuthorizationPageBannerToCPaymentClick() {
  function trackSettingsPageGuideToCPaymentClick (line 17) | function trackSettingsPageGuideToCPaymentClick() {

FILE: app/utils/baidu.ts
  function getAccessToken (line 6) | async function getAccessToken(

FILE: app/utils/chat.ts
  function compressImage (line 15) | function compressImage(file: Blob, maxSize: number): Promise<string> {
  function preProcessImageContentBase (line 73) | async function preProcessImageContentBase(
  function preProcessImageContent (line 96) | async function preProcessImageContent(
  function preProcessImageContentForAlibabaDashScope (line 105) | async function preProcessImageContentForAlibabaDashScope(
  function cacheImageToBase64Image (line 114) | function cacheImageToBase64Image(imageUrl: string) {
  function base64Image2Blob (line 134) | function base64Image2Blob(base64Data: string, contentType: string) {
  function uploadImage (line 144) | function uploadImage(file: Blob): Promise<string> {
  function removeImage (line 167) | function removeImage(imageUrl: string) {
  function stream (line 175) | function stream(
  function streamWithThink (line 392) | function streamWithThink(

FILE: app/utils/clone.ts
  function deepClone (line 1) | function deepClone<T>(obj: T) {
  function ensure (line 5) | function ensure<T extends object>(

FILE: app/utils/cloud/index.ts
  type ProviderType (line 4) | enum ProviderType {
  type SyncClientConfig (line 14) | type SyncClientConfig = {
  type SyncClient (line 22) | type SyncClient = {
  function createSyncClient (line 28) | function createSyncClient<T extends ProviderType>(

FILE: app/utils/cloud/upstash.ts
  type UpstashConfig (line 5) | type UpstashConfig = SyncStore["upstash"];
  type UpStashClient (line 6) | type UpStashClient = ReturnType<typeof createUpstashClient>;
  function createUpstashClient (line 8) | function createUpstashClient(store: SyncStore) {

FILE: app/utils/cloud/webdav.ts
  type WebDAVConfig (line 4) | type WebDAVConfig = SyncStore["webdav"];
  type WebDavClient (line 5) | type WebDavClient = ReturnType<typeof createWebDavClient>;
  function createWebDavClient (line 7) | function createWebDavClient(store: SyncStore) {

FILE: app/utils/cloudflare.ts
  function cloudflareAIGatewayUrl (line 1) | function cloudflareAIGatewayUrl(fetchUrl: string) {

FILE: app/utils/format.ts
  function prettyObject (line 1) | function prettyObject(msg: any) {

FILE: app/utils/hmac.ts
  constant DEFAULT_STATE (line 21) | const DEFAULT_STATE = new uint32Array(8);
  constant ROUND_CONSTANTS (line 22) | const ROUND_CONSTANTS: number[] = [];
  function getFractionalBits (line 33) | function getFractionalBits(n: number) {
  function convertEndian (line 74) | function convertEndian(word: number) {
  function rightRotate (line 91) | function rightRotate(word: number, bits: number) {
  function sha256 (line 95) | function sha256(data: Uint8Array) {
  function hmac (line 188) | function hmac(key: Uint8Array, data: ArrayLike<number>) {
  function sign (line 222) | function sign(
  function hex (line 233) | function hex(bin: Uint8Array) {
  function hash (line 240) | function hash(str: string) {
  function hashWithSecret (line 244) | function hashWithSecret(str: string, secret: string) {

FILE: app/utils/hooks.ts
  function useAllModels (line 5) | function useAllModels() {

FILE: app/utils/indexedDB-storage.ts
  class IndexedDBStorage (line 7) | class IndexedDBStorage implements StateStorage {
    method getItem (line 8) | public async getItem(name: string): Promise<string | null> {
    method setItem (line 17) | public async setItem(name: string, value: string): Promise<void> {
    method removeItem (line 30) | public async removeItem(name: string): Promise<void> {
    method clear (line 38) | public async clear(): Promise<void> {

FILE: app/utils/merge.ts
  function merge (line 1) | function merge(target: any, source: any) {

FILE: app/utils/model.ts
  function getModelProvider (line 46) | function getModelProvider(modelWithProvider: string): [string, string?] {
  function collectModelTable (line 51) | function collectModelTable(
  function collectModelTableWithDefaultModel (line 138) | function collectModelTableWithDefaultModel(
  function collectModels (line 167) | function collectModels(
  function collectModelsWithDefaultModel (line 179) | function collectModelsWithDefaultModel(
  function isModelAvailableInServer (line 196) | function isModelAvailableInServer(
  function isGPT4Model (line 212) | function isGPT4Model(modelName: string): boolean {
  function isModelNotavailableInServer (line 230) | function isModelNotavailableInServer(

FILE: app/utils/ms_edge_tts.ts
  type VOLUME (line 11) | enum VOLUME {
  type RATE (line 24) | enum RATE {
  type PITCH (line 36) | enum PITCH {
  type OUTPUT_FORMAT (line 48) | enum OUTPUT_FORMAT {
  type Voice (line 88) | type Voice = {
  class ProsodyOptions (line 98) | class ProsodyOptions {
  class MsEdgeTTS (line 119) | class MsEdgeTTS {
    method _log (line 134) | private _log(...o: any[]) {
    method constructor (line 146) | public constructor(enableLogger: boolean = false) {
    method _send (line 150) | private async _send(message: any) {
    method _initClient (line 161) | private _initClient() {
    method _pushAudioData (line 226) | private _pushAudioData(audioBuffer: Buffer, requestId: string) {
    method _SSMLTemplate (line 235) | private _SSMLTemplate(input: string, options: ProsodyOptions = {}): st...
    method getVoices (line 259) | getVoices(): Promise<Voice[]> {
    method setMetadata (line 282) | async setMetadata(
    method _metadataCheck (line 313) | private _metadataCheck() {
    method close (line 323) | close() {
    method toStream (line 334) | toStream(input: string, options?: ProsodyOptions): Readable {
    method toArrayBuffer (line 339) | toArrayBuffer(input: string, options?: ProsodyOptions): Promise<ArrayB...
    method rawToStream (line 363) | rawToStream(requestSSML: string): Readable {
    method _rawSSMLRequest (line 368) | private _rawSSMLRequest(requestSSML: string): {

FILE: app/utils/object.ts
  function omit (line 1) | function omit<T extends object, U extends (keyof T)[]>(
  function pick (line 10) | function pick<T extends object, U extends (keyof T)[]>(

FILE: app/utils/store.ts
  type SecondParam (line 7) | type SecondParam<T> = T extends (
  type MakeUpdater (line 15) | type MakeUpdater<T> = {
  type SetStoreState (line 24) | type SetStoreState<T> = (
  function createPersistStore (line 29) | function createPersistStore<T extends object, M>(

FILE: app/utils/stream.ts
  type ResponseEvent (line 6) | type ResponseEvent = {
  type StreamResponse (line 15) | type StreamResponse = {
  function fetch (line 22) | function fetch(url: string, options?: RequestInit): Promise<Response> {

FILE: app/utils/sync.ts
  type NonFunctionKeys (line 12) | type NonFunctionKeys<T> = {
  type NonFunctionFields (line 15) | type NonFunctionFields<T> = Pick<T, NonFunctionKeys<T>>;
  function getNonFunctionFileds (line 17) | function getNonFunctionFileds<T extends object>(obj: T) {
  type GetStoreState (line 29) | type GetStoreState<T> = T extends { getState: () => infer U }
  type AppState (line 49) | type AppState = {
  type Merger (line 55) | type Merger<T extends keyof AppState, U = AppState[T]> = (
  type StateMerger (line 60) | type StateMerger = {
  function getLocalAppState (line 121) | function getLocalAppState() {
  function setLocalAppState (line 131) | function setLocalAppState(appState: AppState) {
  function mergeAppState (line 137) | function mergeAppState(localState: AppState, remoteState: AppState) {
  function mergeWithUpdate (line 151) | function mergeWithUpdate<T extends { lastUpdateTime?: number }>(

FILE: app/utils/tencent.ts
  function sha256 (line 4) | function sha256(message: any, secret: any, encoding?: string) {
  function getDate (line 9) | function getDate(timestamp: number) {
  function getHeader (line 17) | async function getHeader(

FILE: app/utils/token.ts
  function estimateTokenLength (line 1) | function estimateTokenLength(input: string): number {

FILE: next.config.mjs
  method webpack (line 11) | webpack(config) {

FILE: public/audio-processor.js
  class AudioRecorderProcessor (line 2) | class AudioRecorderProcessor extends AudioWorkletProcessor {
    method constructor (line 3) | constructor() {
    method sendBuffer (line 22) | sendBuffer() {
    method process (line 33) | process(inputs) {

FILE: public/serviceWorker.js
  constant CHATGPT_NEXT_WEB_CACHE (line 1) | const CHATGPT_NEXT_WEB_CACHE = "chatgpt-next-web-cache";
  constant CHATGPT_NEXT_WEB_FILE_CACHE (line 2) | const CHATGPT_NEXT_WEB_FILE_CACHE = "chatgpt-next-web-file";
  function jsonify (line 18) | function jsonify(data) {
  function upload (line 22) | async function upload(request, url) {
  function remove (line 43) | async function remove(request, url) {

FILE: scripts/fetch-prompts.mjs
  constant RAW_FILE_URL (line 4) | const RAW_FILE_URL = "https://raw.githubusercontent.com/";
  constant MIRRORF_FILE_URL (line 5) | const MIRRORF_FILE_URL = "http://raw.fgit.ml/";
  constant RAW_CN_URL (line 7) | const RAW_CN_URL = "PlexPt/awesome-chatgpt-prompts-zh/main/prompts-zh.js...
  constant CN_URL (line 8) | const CN_URL = MIRRORF_FILE_URL + RAW_CN_URL;
  constant RAW_TW_URL (line 9) | const RAW_TW_URL = "PlexPt/awesome-chatgpt-prompts-zh/main/prompts-zh-TW...
  constant TW_URL (line 10) | const TW_URL = MIRRORF_FILE_URL + RAW_TW_URL;
  constant RAW_EN_URL (line 11) | const RAW_EN_URL = "f/awesome-chatgpt-prompts/main/prompts.csv";
  constant EN_URL (line 12) | const EN_URL = MIRRORF_FILE_URL + RAW_EN_URL;
  constant FILE (line 13) | const FILE = "./public/prompts.json";
  function fetchCN (line 25) | async function fetchCN() {
  function fetchTW (line 44) | async function fetchTW() {
  function fetchEN (line 63) | async function fetchEN() {
  function main (line 84) | async function main() {

FILE: src-tauri/build.rs
  function main (line 1) | fn main() {

FILE: src-tauri/src/main.rs
  function main (line 6) | fn main() {

FILE: src-tauri/src/stream.rs
  type StreamResponse (line 15) | pub struct StreamResponse {
  type EndPayload (line 23) | pub struct EndPayload {
  type ChunkPayload (line 29) | pub struct ChunkPayload {
  function stream_fetch (line 35) | pub async fn stream_fetch(

FILE: test/sum-module.test.ts
  function sum (line 1) | function sum(a: number, b: number) {
Condensed preview — 255 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,717K chars).
[
  {
    "path": ".babelrc",
    "chars": 186,
    "preview": "{\n  \"presets\": [\n    [\n      \"next/babel\",\n      {\n        \"preset-env\": {\n          \"targets\": {\n            \"browsers\""
  },
  {
    "path": ".dockerignore",
    "chars": 1141,
    "preview": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\n\n# Runtime data\npids\n*.pid\n*.seed\n*.pid.lock\n\n# Directo"
  },
  {
    "path": ".eslintignore",
    "chars": 79,
    "preview": "public/serviceWorker.js\napp/mcp/mcp_config.json\napp/mcp/mcp_config.default.json"
  },
  {
    "path": ".eslintrc.json",
    "chars": 150,
    "preview": "{\n  \"extends\": \"next/core-web-vitals\",\n  \"plugins\": [\"prettier\", \"unused-imports\"],\n  \"rules\": {\n    \"unused-imports/no-"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/1_bug_report.yml",
    "chars": 1975,
    "preview": "name: '🐛 Bug Report'\ndescription: 'Report an bug'\ntitle: '[Bug] '\nlabels: ['bug']\nbody:\n  - type: dropdown\n    attribute"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/1_bug_report_cn.yml",
    "chars": 1604,
    "preview": "name: '🐛 反馈缺陷'\ndescription: '反馈一个问题/缺陷'\ntitle: '[Bug] '\nlabels: ['bug']\nbody:\n  - type: dropdown\n    attributes:\n      l"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/2_feature_request.yml",
    "chars": 684,
    "preview": "name: '🌠 Feature Request'\ndescription: 'Suggest an idea'\ntitle: '[Feature Request] '\nlabels: ['enhancement']\nbody:\n  - t"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/2_feature_request_cn.yml",
    "chars": 474,
    "preview": "name: '🌠 功能需求'\ndescription: '提出需求或建议'\ntitle: '[Feature Request] '\nlabels: ['enhancement']\nbody:\n  - type: textarea\n    a"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "chars": 1148,
    "preview": "#### 💻 变更类型 | Change Type\n\n<!-- For change type, change [ ] to [x]. -->\n\n- [ ] feat    <!-- 引入新功能 | Introduce new featur"
  },
  {
    "path": ".github/dependabot.yml",
    "chars": 502,
    "preview": "# To get started with Dependabot version updates, you'll need to specify which\n# package ecosystems to update and where "
  },
  {
    "path": ".github/workflows/app.yml",
    "chars": 3550,
    "preview": "name: Release App\n\non:\n  workflow_dispatch:\n  release:\n    types: [published]\n\njobs:\n  create-release:\n    permissions:\n"
  },
  {
    "path": ".github/workflows/deploy_preview.yml",
    "chars": 2731,
    "preview": "name: VercelPreviewDeployment\n\non:\n  pull_request_target:\n    types:\n      - review_requested\n\nenv:\n  VERCEL_TEAM: ${{ s"
  },
  {
    "path": ".github/workflows/docker.yml",
    "chars": 1289,
    "preview": "name: Publish Docker image\n\non:\n  workflow_dispatch:\n  release:\n    types: [published]\n\njobs:\n  push_to_registry:\n    na"
  },
  {
    "path": ".github/workflows/issue-translator.yml",
    "chars": 358,
    "preview": "name: Issue Translator\non: \n  issue_comment: \n    types: [created]\n  issues: \n    types: [opened]\n\njobs:\n  build:\n    ru"
  },
  {
    "path": ".github/workflows/remove_deploy_preview.yml",
    "chars": 982,
    "preview": "name: Removedeploypreview\n\npermissions:\n  contents: read\n  statuses: write\n  pull-requests: write\n\nenv:\n  VERCEL_TOKEN: "
  },
  {
    "path": ".github/workflows/sync.yml",
    "chars": 1563,
    "preview": "name: Upstream Sync\r\n\r\npermissions:\r\n  contents: write\r\n\r\non:\r\n  schedule:\r\n    - cron: \"0 0 * * *\" # every day\r\n  workf"
  },
  {
    "path": ".github/workflows/test.yml",
    "chars": 757,
    "preview": "name: Run Tests\n\non:\n  push:\n    branches:\n      - main\n    tags:\n      - \"!*\"\n  pull_request:\n    types:\n      - review"
  },
  {
    "path": ".gitignore",
    "chars": 504,
    "preview": "# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.\n\n# dependencies\n/node_modules\n/.pn"
  },
  {
    "path": ".gitpod.yml",
    "chars": 439,
    "preview": "# This configuration file was automatically generated by Gitpod.\n# Please adjust to your needs (see https://www.gitpod.i"
  },
  {
    "path": ".husky/pre-commit",
    "chars": 68,
    "preview": "#!/usr/bin/env sh\n. \"$(dirname -- \"$0\")/_/husky.sh\"\n\nnpx lint-staged"
  },
  {
    "path": ".lintstagedrc.json",
    "chars": 102,
    "preview": "{\n  \"./app/**/*.{js,ts,jsx,tsx,json,html,css,md}\": [\n    \"eslint --fix\",\n    \"prettier --write\"\n  ]\n}\n"
  },
  {
    "path": ".prettierrc.js",
    "chars": 182,
    "preview": "module.exports = {\n  printWidth: 80,\n  tabWidth: 2,\n  useTabs: false,\n  semi: true,\n  singleQuote: false,\n  trailingComm"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "chars": 5225,
    "preview": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nWe as members, contributors, and leaders pledge to make participa"
  },
  {
    "path": "Dockerfile",
    "chars": 1673,
    "preview": "FROM node:18-alpine AS base\n\nFROM base AS deps\n\nRUN apk add --no-cache libc6-compat\n\nWORKDIR /app\n\nCOPY package.json yar"
  },
  {
    "path": "LICENSE",
    "chars": 1070,
    "preview": "MIT License\n\nCopyright (c) 2023-2025 NextChat\n\nPermission is hereby granted, free of charge, to any person obtaining a c"
  },
  {
    "path": "README.md",
    "chars": 17276,
    "preview": "<div align=\"center\">\n\n<a href='https://nextchat.club'>\n  <img src=\"https://github.com/user-attachments/assets/83bdcc07-a"
  },
  {
    "path": "README_CN.md",
    "chars": 8648,
    "preview": "<div align=\"center\">\n\n<a href='#企业版'>\n  <img src=\"./docs/images/ent.svg\" alt=\"icon\"/>\n</a>\n\n<h1 align=\"center\">NextChat<"
  },
  {
    "path": "README_JA.md",
    "chars": 8862,
    "preview": "<div align=\"center\">\n<img src=\"./docs/images/ent.svg\" alt=\"プレビュー\"/>\n\n<h1 align=\"center\">NextChat</h1>\n\nワンクリックで無料であなた専用の "
  },
  {
    "path": "README_KO.md",
    "chars": 13283,
    "preview": "<div align=\"center\">\n\n<a href='https://nextchat.club'>\n  <img src=\"https://github.com/user-attachments/assets/83bdcc07-a"
  },
  {
    "path": "app/api/302ai.ts",
    "chars": 3195,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  AI302_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  Serv"
  },
  {
    "path": "app/api/[provider]/[...path]/route.ts",
    "chars": 2735,
    "preview": "import { ApiPath } from \"@/app/constant\";\nimport { NextRequest } from \"next/server\";\nimport { handle as openaiHandler } "
  },
  {
    "path": "app/api/alibaba.ts",
    "chars": 3269,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  ALIBABA_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  Se"
  },
  {
    "path": "app/api/anthropic.ts",
    "chars": 4380,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  ANTHROPIC_BASE_URL,\n  Anthropic,\n  ApiPath,\n  Serv"
  },
  {
    "path": "app/api/artifacts/route.ts",
    "chars": 2083,
    "preview": "import md5 from \"spark-md5\";\nimport { NextRequest, NextResponse } from \"next/server\";\nimport { getServerSideConfig } fro"
  },
  {
    "path": "app/api/auth.ts",
    "chars": 3941,
    "preview": "import { NextRequest } from \"next/server\";\nimport { getServerSideConfig } from \"../config/server\";\nimport md5 from \"spar"
  },
  {
    "path": "app/api/azure.ts",
    "chars": 823,
    "preview": "import { ModelProvider } from \"@/app/constant\";\nimport { prettyObject } from \"@/app/utils/format\";\nimport { NextRequest,"
  },
  {
    "path": "app/api/baidu.ts",
    "chars": 3554,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  BAIDU_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  Serv"
  },
  {
    "path": "app/api/bytedance.ts",
    "chars": 3165,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  BYTEDANCE_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  "
  },
  {
    "path": "app/api/common.ts",
    "chars": 5974,
    "preview": "import { NextRequest, NextResponse } from \"next/server\";\nimport { getServerSideConfig } from \"../config/server\";\nimport "
  },
  {
    "path": "app/api/config/route.ts",
    "chars": 825,
    "preview": "import { NextResponse } from \"next/server\";\n\nimport { getServerSideConfig } from \"../../config/server\";\n\nconst serverCon"
  },
  {
    "path": "app/api/deepseek.ts",
    "chars": 3207,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  DEEPSEEK_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  S"
  },
  {
    "path": "app/api/glm.ts",
    "chars": 3227,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  CHATGLM_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  Se"
  },
  {
    "path": "app/api/google.ts",
    "chars": 3316,
    "preview": "import { NextRequest, NextResponse } from \"next/server\";\nimport { auth } from \"./auth\";\nimport { getServerSideConfig } f"
  },
  {
    "path": "app/api/iflytek.ts",
    "chars": 3203,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  IFLYTEK_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  Se"
  },
  {
    "path": "app/api/moonshot.ts",
    "chars": 3207,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  MOONSHOT_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  S"
  },
  {
    "path": "app/api/openai.ts",
    "chars": 2096,
    "preview": "import { type OpenAIListModelResponse } from \"@/app/client/platforms/openai\";\nimport { getServerSideConfig } from \"@/app"
  },
  {
    "path": "app/api/proxy.ts",
    "chars": 2766,
    "preview": "import { NextRequest, NextResponse } from \"next/server\";\nimport { getServerSideConfig } from \"@/app/config/server\";\n\nexp"
  },
  {
    "path": "app/api/siliconflow.ts",
    "chars": 3234,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  SILICONFLOW_BASE_URL,\n  ApiPath,\n  ModelProvider,\n"
  },
  {
    "path": "app/api/stability.ts",
    "chars": 2641,
    "preview": "import { NextRequest, NextResponse } from \"next/server\";\nimport { getServerSideConfig } from \"@/app/config/server\";\nimpo"
  },
  {
    "path": "app/api/tencent/route.ts",
    "chars": 2531,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport { TENCENT_BASE_URL, ModelProvider } from \"@/app/consta"
  },
  {
    "path": "app/api/upstash/[action]/[...key]/route.ts",
    "chars": 1865,
    "preview": "import { NextRequest, NextResponse } from \"next/server\";\n\nasync function handle(\n  req: NextRequest,\n  { params }: { par"
  },
  {
    "path": "app/api/webdav/[...path]/route.ts",
    "chars": 3897,
    "preview": "import { NextRequest, NextResponse } from \"next/server\";\nimport { STORAGE_KEY, internalAllowedWebDavEndpoints } from \".."
  },
  {
    "path": "app/api/xai.ts",
    "chars": 3162,
    "preview": "import { getServerSideConfig } from \"@/app/config/server\";\nimport {\n  XAI_BASE_URL,\n  ApiPath,\n  ModelProvider,\n  Servic"
  },
  {
    "path": "app/client/api.ts",
    "chars": 10876,
    "preview": "import { getClientConfig } from \"../config/client\";\nimport {\n  ACCESS_CODE_PREFIX,\n  ModelProvider,\n  ServiceProvider,\n}"
  },
  {
    "path": "app/client/controller.ts",
    "chars": 914,
    "preview": "// To store message streaming controller\nexport const ChatControllerPool = {\n  controllers: {} as Record<string, AbortCo"
  },
  {
    "path": "app/client/platforms/ai302.ts",
    "chars": 7931,
    "preview": "\"use client\";\n\nimport {\n  ApiPath,\n  AI302_BASE_URL,\n  DEFAULT_MODELS,\n  AI302,\n} from \"@/app/constant\";\nimport {\n  useA"
  },
  {
    "path": "app/client/platforms/alibaba.ts",
    "chars": 7592,
    "preview": "\"use client\";\nimport { ApiPath, Alibaba, ALIBABA_BASE_URL } from \"@/app/constant\";\nimport {\n  useAccessStore,\n  useAppCo"
  },
  {
    "path": "app/client/platforms/anthropic.ts",
    "chars": 12707,
    "preview": "import { Anthropic, ApiPath } from \"@/app/constant\";\nimport { ChatOptions, getHeaders, LLMApi, SpeechOptions } from \"../"
  },
  {
    "path": "app/client/platforms/baidu.ts",
    "chars": 8105,
    "preview": "\"use client\";\nimport { ApiPath, Baidu, BAIDU_BASE_URL } from \"@/app/constant\";\nimport { useAccessStore, useAppConfig, us"
  },
  {
    "path": "app/client/platforms/bytedance.ts",
    "chars": 6893,
    "preview": "\"use client\";\nimport { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from \"@/app/constant\";\nimport {\n  useAccessStore,\n  useA"
  },
  {
    "path": "app/client/platforms/deepseek.ts",
    "chars": 7582,
    "preview": "\"use client\";\n// azure and openai, using same models. so using same LLMApi.\nimport { ApiPath, DEEPSEEK_BASE_URL, DeepSee"
  },
  {
    "path": "app/client/platforms/glm.ts",
    "chars": 8134,
    "preview": "\"use client\";\nimport { ApiPath, CHATGLM_BASE_URL, ChatGLM } from \"@/app/constant\";\nimport {\n  useAccessStore,\n  useAppCo"
  },
  {
    "path": "app/client/platforms/google.ts",
    "chars": 9657,
    "preview": "import { ApiPath, Google } from \"@/app/constant\";\nimport {\n  ChatOptions,\n  getHeaders,\n  LLMApi,\n  LLMModel,\n  LLMUsage"
  },
  {
    "path": "app/client/platforms/iflytek.ts",
    "chars": 7247,
    "preview": "\"use client\";\nimport {\n  ApiPath,\n  IFLYTEK_BASE_URL,\n  Iflytek,\n  REQUEST_TIMEOUT_MS,\n} from \"@/app/constant\";\nimport {"
  },
  {
    "path": "app/client/platforms/moonshot.ts",
    "chars": 5700,
    "preview": "\"use client\";\n// azure and openai, using same models. so using same LLMApi.\nimport {\n  ApiPath,\n  MOONSHOT_BASE_URL,\n  M"
  },
  {
    "path": "app/client/platforms/openai.ts",
    "chars": 15908,
    "preview": "\"use client\";\n// azure and openai, using same models. so using same LLMApi.\nimport {\n  ApiPath,\n  OPENAI_BASE_URL,\n  DEF"
  },
  {
    "path": "app/client/platforms/siliconflow.ts",
    "chars": 8067,
    "preview": "\"use client\";\n// azure and openai, using same models. so using same LLMApi.\nimport {\n  ApiPath,\n  SILICONFLOW_BASE_URL,\n"
  },
  {
    "path": "app/client/platforms/tencent.ts",
    "chars": 7648,
    "preview": "\"use client\";\nimport { ApiPath, TENCENT_BASE_URL } from \"@/app/constant\";\nimport { useAccessStore, useAppConfig, useChat"
  },
  {
    "path": "app/client/platforms/xai.ts",
    "chars": 5539,
    "preview": "\"use client\";\n// azure and openai, using same models. so using same LLMApi.\nimport { ApiPath, XAI_BASE_URL, XAI } from \""
  },
  {
    "path": "app/command.ts",
    "chars": 1977,
    "preview": "import { useEffect } from \"react\";\nimport { useSearchParams } from \"react-router-dom\";\nimport Locale from \"./locales\";\n\n"
  },
  {
    "path": "app/components/artifacts.module.scss",
    "chars": 546,
    "preview": ".artifacts {\n  display: flex;\n  width: 100%;\n  height: 100%;\n  flex-direction: column;\n  &-header {\n    display: flex;\n "
  },
  {
    "path": "app/components/artifacts.tsx",
    "chars": 7588,
    "preview": "import {\n  useEffect,\n  useState,\n  useRef,\n  useMemo,\n  forwardRef,\n  useImperativeHandle,\n} from \"react\";\nimport { use"
  },
  {
    "path": "app/components/auth.module.scss",
    "chars": 1792,
    "preview": ".auth-page {\n  display: flex;\n  justify-content: flex-start;\n  align-items: center;\n  height: 100%;\n  width: 100%;\n  fle"
  },
  {
    "path": "app/components/auth.tsx",
    "chars": 5681,
    "preview": "import styles from \"./auth.module.scss\";\nimport { IconButton } from \"./button\";\nimport { useState, useEffect } from \"rea"
  },
  {
    "path": "app/components/button.module.scss",
    "chars": 1331,
    "preview": ".icon-button {\n  background-color: var(--white);\n  border-radius: 10px;\n  display: flex;\n  align-items: center;\n  justif"
  },
  {
    "path": "app/components/button.tsx",
    "chars": 1492,
    "preview": "import * as React from \"react\";\n\nimport styles from \"./button.module.scss\";\nimport { CSSProperties } from \"react\";\nimpor"
  },
  {
    "path": "app/components/chat-list.tsx",
    "chars": 4974,
    "preview": "import DeleteIcon from \"../icons/delete.svg\";\n\nimport styles from \"./home.module.scss\";\nimport {\n  DragDropContext,\n  Dr"
  },
  {
    "path": "app/components/chat.module.scss",
    "chars": 13373,
    "preview": "@import \"../styles/animation.scss\";\n\n.attach-images {\n  position: absolute;\n  left: 30px;\n  bottom: 32px;\n  display: fle"
  },
  {
    "path": "app/components/chat.tsx",
    "chars": 71923,
    "preview": "import { useDebouncedCallback } from \"use-debounce\";\nimport React, {\n  Fragment,\n  RefObject,\n  useCallback,\n  useEffect"
  },
  {
    "path": "app/components/emoji.tsx",
    "chars": 3804,
    "preview": "import EmojiPicker, {\n  Emoji,\n  EmojiStyle,\n  Theme as EmojiTheme,\n} from \"emoji-picker-react\";\n\nimport { ModelType } f"
  },
  {
    "path": "app/components/error.tsx",
    "chars": 2051,
    "preview": "\"use client\";\n\nimport React from \"react\";\nimport { IconButton } from \"./button\";\nimport GithubIcon from \"../icons/github"
  },
  {
    "path": "app/components/exporter.module.scss",
    "chars": 5295,
    "preview": ".message-exporter {\n  &-body {\n    margin-top: 20px;\n  }\n}\n\n.export-content {\n  white-space: break-spaces;\n  padding: 10"
  },
  {
    "path": "app/components/exporter.tsx",
    "chars": 18725,
    "preview": "/* eslint-disable @next/next/no-img-element */\nimport { ChatMessage, useAppConfig, useChatStore } from \"../store\";\nimpor"
  },
  {
    "path": "app/components/home.module.scss",
    "chars": 5933,
    "preview": "@mixin container {\n  background-color: var(--white);\n  border: var(--border-in-light);\n  border-radius: 20px;\n  box-shad"
  },
  {
    "path": "app/components/home.tsx",
    "chars": 7184,
    "preview": "\"use client\";\n\nrequire(\"../polyfill\");\n\nimport { useEffect, useState } from \"react\";\nimport styles from \"./home.module.s"
  },
  {
    "path": "app/components/input-range.module.scss",
    "chars": 255,
    "preview": ".input-range {\n  border: var(--border-in-light);\n  border-radius: 10px;\n  padding: 5px 10px 5px 10px;\n  font-size: 12px;"
  },
  {
    "path": "app/components/input-range.tsx",
    "chars": 762,
    "preview": "import * as React from \"react\";\nimport styles from \"./input-range.module.scss\";\nimport clsx from \"clsx\";\n\ninterface Inpu"
  },
  {
    "path": "app/components/markdown.tsx",
    "chars": 9739,
    "preview": "import ReactMarkdown from \"react-markdown\";\nimport \"katex/dist/katex.min.css\";\nimport RemarkMath from \"remark-math\";\nimp"
  },
  {
    "path": "app/components/mask.module.scss",
    "chars": 2128,
    "preview": "@import \"../styles/animation.scss\";\n.mask-page {\n  height: 100%;\n  display: flex;\n  flex-direction: column;\n\n  .mask-pag"
  },
  {
    "path": "app/components/mask.tsx",
    "chars": 21123,
    "preview": "import { IconButton } from \"./button\";\nimport { ErrorBoundary } from \"./error\";\n\nimport styles from \"./mask.module.scss\""
  },
  {
    "path": "app/components/mcp-market.module.scss",
    "chars": 13573,
    "preview": "@import \"../styles/animation.scss\";\n\n.mcp-market-page {\n  height: 100%;\n  display: flex;\n  flex-direction: column;\n\n  .l"
  },
  {
    "path": "app/components/mcp-market.tsx",
    "chars": 24224,
    "preview": "import { IconButton } from \"./button\";\nimport { ErrorBoundary } from \"./error\";\nimport styles from \"./mcp-market.module."
  },
  {
    "path": "app/components/message-selector.module.scss",
    "chars": 1300,
    "preview": ".message-selector {\n  .message-filter {\n    display: flex;\n\n    .search-bar {\n      max-width: unset;\n      flex-grow: 1"
  },
  {
    "path": "app/components/message-selector.tsx",
    "chars": 7075,
    "preview": "import { useEffect, useMemo, useState } from \"react\";\nimport { ChatMessage, useAppConfig, useChatStore } from \"../store\""
  },
  {
    "path": "app/components/model-config.module.scss",
    "chars": 102,
    "preview": ".select-compress-model {\n  width: 60%;\n  select {\n    max-width: 100%;\n    white-space: normal;\n  }\n}\n"
  },
  {
    "path": "app/components/model-config.tsx",
    "chars": 9018,
    "preview": "import { ServiceProvider } from \"@/app/constant\";\nimport { ModalConfigValidator, ModelConfig } from \"../store\";\n\nimport "
  },
  {
    "path": "app/components/new-chat.module.scss",
    "chars": 2429,
    "preview": "@import \"../styles/animation.scss\";\n\n.new-chat {\n  height: 100%;\n  width: 100%;\n  display: flex;\n  align-items: center;\n"
  },
  {
    "path": "app/components/new-chat.tsx",
    "chars": 5354,
    "preview": "import { useEffect, useRef, useState } from \"react\";\nimport { Path, SlotID } from \"../constant\";\nimport { IconButton } f"
  },
  {
    "path": "app/components/plugin.module.scss",
    "chars": 588,
    "preview": ".plugin-title {\n  font-weight: bolder;\n  font-size: 16px;\n  margin: 10px 0;\n}\n.plugin-content {\n  font-size: 14px;\n  fon"
  },
  {
    "path": "app/components/plugin.tsx",
    "chars": 12810,
    "preview": "import { useDebouncedCallback } from \"use-debounce\";\nimport OpenAPIClientAxios from \"openapi-client-axios\";\nimport yaml "
  },
  {
    "path": "app/components/realtime-chat/index.ts",
    "chars": 33,
    "preview": "export * from \"./realtime-chat\";\n"
  },
  {
    "path": "app/components/realtime-chat/realtime-chat.module.scss",
    "chars": 1251,
    "preview": ".realtime-chat {\n  width: 100%;\n  justify-content: center;\n  align-items: center;\n  position: relative;\n  display: flex;"
  },
  {
    "path": "app/components/realtime-chat/realtime-chat.tsx",
    "chars": 10946,
    "preview": "import VoiceIcon from \"@/app/icons/voice.svg\";\nimport VoiceOffIcon from \"@/app/icons/voice-off.svg\";\nimport PowerIcon fr"
  },
  {
    "path": "app/components/realtime-chat/realtime-config.tsx",
    "chars": 5560,
    "preview": "import { RealtimeConfig } from \"@/app/store\";\n\nimport Locale from \"@/app/locales\";\nimport { ListItem, Select, PasswordIn"
  },
  {
    "path": "app/components/sd/index.tsx",
    "chars": 50,
    "preview": "export * from \"./sd\";\nexport * from \"./sd-panel\";\n"
  },
  {
    "path": "app/components/sd/sd-panel.module.scss",
    "chars": 826,
    "preview": ".ctrl-param-item {\n  display: flex;\n  justify-content: space-between;\n  min-height: 40px;\n  padding: 10px 0;\n  animation"
  },
  {
    "path": "app/components/sd/sd-panel.tsx",
    "chars": 9445,
    "preview": "import styles from \"./sd-panel.module.scss\";\nimport React from \"react\";\nimport { Select } from \"@/app/components/ui-lib\""
  },
  {
    "path": "app/components/sd/sd-sidebar.tsx",
    "chars": 3967,
    "preview": "import { IconButton } from \"@/app/components/button\";\nimport GithubIcon from \"@/app/icons/github.svg\";\nimport SDIcon fro"
  },
  {
    "path": "app/components/sd/sd.module.scss",
    "chars": 960,
    "preview": ".sd-img-list{\n  display: flex;\n  flex-wrap: wrap;\n  justify-content: space-between;\n  .sd-img-item{\n    width: 48%;\n    "
  },
  {
    "path": "app/components/sd/sd.tsx",
    "chars": 13877,
    "preview": "import chatStyles from \"@/app/components/chat.module.scss\";\nimport styles from \"@/app/components/sd/sd.module.scss\";\nimp"
  },
  {
    "path": "app/components/search-chat.tsx",
    "chars": 5206,
    "preview": "import { useState, useEffect, useRef, useCallback } from \"react\";\nimport { ErrorBoundary } from \"./error\";\nimport styles"
  },
  {
    "path": "app/components/settings.module.scss",
    "chars": 1317,
    "preview": ".settings {\n  padding: 20px;\n  overflow: auto;\n}\n\n.avatar {\n  cursor: pointer;\n  position: relative;\n  z-index: 1;\n}\n\n.e"
  },
  {
    "path": "app/components/settings.tsx",
    "chars": 61997,
    "preview": "import { useState, useEffect, useMemo } from \"react\";\n\nimport styles from \"./settings.module.scss\";\n\nimport ResetIcon fr"
  },
  {
    "path": "app/components/sidebar.tsx",
    "chars": 11014,
    "preview": "import React, { Fragment, useEffect, useMemo, useRef, useState } from \"react\";\n\nimport styles from \"./home.module.scss\";"
  },
  {
    "path": "app/components/tts-config.tsx",
    "chars": 3870,
    "preview": "import { TTSConfig, TTSConfigValidator } from \"../store\";\n\nimport Locale from \"../locales\";\nimport { ListItem, Select } "
  },
  {
    "path": "app/components/tts.module.scss",
    "chars": 2421,
    "preview": "@import \"../styles/animation.scss\";\n.plugin-page {\n  height: 100%;\n  display: flex;\n  flex-direction: column;\n\n  .plugin"
  },
  {
    "path": "app/components/ui-lib.module.scss",
    "chars": 5892,
    "preview": "@import \"../styles/animation.scss\";\n\n.card {\n  background-color: var(--white);\n  border-radius: 10px;\n  box-shadow: var("
  },
  {
    "path": "app/components/ui-lib.tsx",
    "chars": 14272,
    "preview": "/* eslint-disable @next/next/no-img-element */\nimport styles from \"./ui-lib.module.scss\";\nimport LoadingIcon from \"../ic"
  },
  {
    "path": "app/components/voice-print/index.ts",
    "chars": 31,
    "preview": "export * from \"./voice-print\";\n"
  },
  {
    "path": "app/components/voice-print/voice-print.module.scss",
    "chars": 156,
    "preview": ".voice-print {\n  width: 100%;\n  height: 60px;\n  margin: 20px 0;\n\n  canvas {\n    width: 100%;\n    height: 100%;\n    filte"
  },
  {
    "path": "app/components/voice-print/voice-print.tsx",
    "chars": 4611,
    "preview": "import { useEffect, useRef, useCallback } from \"react\";\nimport styles from \"./voice-print.module.scss\";\n\ninterface Voice"
  },
  {
    "path": "app/config/build.ts",
    "chars": 1278,
    "preview": "import tauriConfig from \"../../src-tauri/tauri.conf.json\";\nimport { DEFAULT_INPUT_TEMPLATE } from \"../constant\";\n\nexport"
  },
  {
    "path": "app/config/client.ts",
    "chars": 615,
    "preview": "import { BuildConfig, getBuildConfig } from \"./build\";\n\nexport function getClientConfig() {\n  if (typeof document !== \"u"
  },
  {
    "path": "app/config/server.ts",
    "chars": 8197,
    "preview": "import md5 from \"spark-md5\";\nimport { DEFAULT_MODELS, DEFAULT_GA_ID } from \"../constant\";\nimport { isGPT4Model } from \"."
  },
  {
    "path": "app/constant.ts",
    "chars": 23061,
    "preview": "export const OWNER = \"ChatGPTNextWeb\";\nexport const REPO = \"ChatGPT-Next-Web\";\nexport const REPO_URL = `https://github.c"
  },
  {
    "path": "app/global.d.ts",
    "chars": 1179,
    "preview": "declare module \"*.jpg\";\ndeclare module \"*.png\";\ndeclare module \"*.woff2\";\ndeclare module \"*.woff\";\ndeclare module \"*.ttf"
  },
  {
    "path": "app/layout.tsx",
    "chars": 1929,
    "preview": "/* eslint-disable @next/next/no-page-custom-font */\nimport \"./styles/globals.scss\";\nimport \"./styles/markdown.scss\";\nimp"
  },
  {
    "path": "app/lib/audio.ts",
    "chars": 6833,
    "preview": "export class AudioHandler {\n  private context: AudioContext;\n  private mergeNode: ChannelMergerNode;\n  private analyserD"
  },
  {
    "path": "app/locales/ar.ts",
    "chars": 17213,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/bn.ts",
    "chars": 18946,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/cn.ts",
    "chars": 19233,
    "preview": "import { getClientConfig } from \"../config/client\";\nimport { SubmitKey } from \"../store/config\";\nimport { SAAS_CHAT_UTM_"
  },
  {
    "path": "app/locales/cs.ts",
    "chars": 18620,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/da.ts",
    "chars": 23181,
    "preview": "import { getClientConfig } from \"../config/client\";\nimport { SubmitKey } from \"../store/config\";\nimport { SAAS_CHAT_UTM_"
  },
  {
    "path": "app/locales/de.ts",
    "chars": 20592,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/en.ts",
    "chars": 24459,
    "preview": "import { getClientConfig } from \"../config/client\";\nimport { SubmitKey } from \"../store/config\";\nimport { LocaleType } f"
  },
  {
    "path": "app/locales/es.ts",
    "chars": 20235,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/fr.ts",
    "chars": 20603,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/id.ts",
    "chars": 18893,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/index.ts",
    "chars": 3246,
    "preview": "import cn from \"./cn\";\nimport en from \"./en\";\nimport pt from \"./pt\";\nimport tw from \"./tw\";\nimport da from \"./da\";\nimpor"
  },
  {
    "path": "app/locales/it.ts",
    "chars": 20292,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/jp.ts",
    "chars": 14450,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/ko.ts",
    "chars": 20710,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/no.ts",
    "chars": 18986,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/pt.ts",
    "chars": 16542,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport { PartialLocaleType } from \"../locales/index\";\nimport { getClientCon"
  },
  {
    "path": "app/locales/ru.ts",
    "chars": 19232,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport { PartialLocaleType } from \"../locales/index\";\nimport { getClientCon"
  },
  {
    "path": "app/locales/sk.ts",
    "chars": 16669,
    "preview": "import { getClientConfig } from \"../config/client\";\nimport { SubmitKey } from \"../store/config\";\nimport type { PartialLo"
  },
  {
    "path": "app/locales/tr.ts",
    "chars": 19051,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/locales/tw.ts",
    "chars": 13188,
    "preview": "import { getClientConfig } from \"../config/client\";\nimport { SubmitKey } from \"../store/config\";\nimport { SAAS_CHAT_UTM_"
  },
  {
    "path": "app/locales/vi.ts",
    "chars": 18738,
    "preview": "import { SubmitKey } from \"../store/config\";\nimport type { PartialLocaleType } from \"./index\";\nimport { getClientConfig "
  },
  {
    "path": "app/masks/build.ts",
    "chars": 551,
    "preview": "import fs from \"fs\";\nimport path from \"path\";\nimport { CN_MASKS } from \"./cn\";\nimport { TW_MASKS } from \"./tw\";\nimport {"
  },
  {
    "path": "app/masks/cn.ts",
    "chars": 13390,
    "preview": "import { BuiltinMask } from \"./typing\";\n\nexport const CN_MASKS: BuiltinMask[] = [\n  {\n    avatar: \"1f5bc-fe0f\",\n    name"
  },
  {
    "path": "app/masks/en.ts",
    "chars": 12831,
    "preview": "import { BuiltinMask } from \"./typing\";\n\nexport const EN_MASKS: BuiltinMask[] = [\n  {\n    avatar: \"1f47e\",\n    name: \"Gi"
  },
  {
    "path": "app/masks/index.ts",
    "chars": 1039,
    "preview": "import { Mask } from \"../store/mask\";\n\nimport { type BuiltinMask } from \"./typing\";\nexport { type BuiltinMask } from \"./"
  },
  {
    "path": "app/masks/tw.ts",
    "chars": 13386,
    "preview": "import { BuiltinMask } from \"./typing\";\n\nexport const TW_MASKS: BuiltinMask[] = [\n  {\n    avatar: \"1f5bc-fe0f\",\n    name"
  },
  {
    "path": "app/masks/typing.ts",
    "chars": 207,
    "preview": "import { ModelConfig } from \"../store\";\nimport { type Mask } from \"../store/mask\";\n\nexport type BuiltinMask = Omit<Mask,"
  },
  {
    "path": "app/mcp/actions.ts",
    "chars": 9878,
    "preview": "\"use server\";\nimport {\n  createClient,\n  executeRequest,\n  listTools,\n  removeClient,\n} from \"./client\";\nimport { MCPCli"
  },
  {
    "path": "app/mcp/client.ts",
    "chars": 1366,
    "preview": "import { Client } from \"@modelcontextprotocol/sdk/client/index.js\";\nimport { StdioClientTransport } from \"@modelcontextp"
  },
  {
    "path": "app/mcp/logger.ts",
    "chars": 1414,
    "preview": "// ANSI color codes for terminal output\nconst colors = {\n  reset: \"\\x1b[0m\",\n  bright: \"\\x1b[1m\",\n  dim: \"\\x1b[2m\",\n  gr"
  },
  {
    "path": "app/mcp/mcp_config.default.json",
    "chars": 23,
    "preview": "{\n  \"mcpServers\": {}\n}\n"
  },
  {
    "path": "app/mcp/types.ts",
    "chars": 3390,
    "preview": "// ref: https://spec.modelcontextprotocol.io/specification/basic/messages/\n\nimport { z } from \"zod\";\nimport { Client } f"
  },
  {
    "path": "app/mcp/utils.ts",
    "chars": 349,
    "preview": "export function isMcpJson(content: string) {\n  return content.match(/```json:mcp:([^{\\s]+)([\\s\\S]*?)```/);\n}\n\nexport fun"
  },
  {
    "path": "app/page.tsx",
    "chars": 373,
    "preview": "import { Analytics } from \"@vercel/analytics/react\";\nimport { Home } from \"./components/home\";\nimport { getServerSideCon"
  },
  {
    "path": "app/polyfill.ts",
    "chars": 623,
    "preview": "declare global {\n  interface Array<T> {\n    at(index: number): T | undefined;\n  }\n}\n\nif (!Array.prototype.at) {\n  Array."
  },
  {
    "path": "app/store/access.ts",
    "chars": 7291,
    "preview": "import {\n  GoogleSafetySettingsThreshold,\n  ServiceProvider,\n  StoreKey,\n  ApiPath,\n  OPENAI_BASE_URL,\n  ANTHROPIC_BASE_"
  },
  {
    "path": "app/store/chat.ts",
    "chars": 28130,
    "preview": "import {\n  getMessageTextContent,\n  isDalle3,\n  safeLocalStorage,\n  trimTopic,\n} from \"../utils\";\n\nimport { indexedDBSto"
  },
  {
    "path": "app/store/config.ts",
    "chars": 6563,
    "preview": "import { LLMModel } from \"../client/api\";\nimport { DalleQuality, DalleStyle, ModelSize } from \"../typing\";\nimport { getC"
  },
  {
    "path": "app/store/index.ts",
    "chars": 128,
    "preview": "export * from \"./chat\";\nexport * from \"./update\";\nexport * from \"./access\";\nexport * from \"./config\";\nexport * from \"./p"
  },
  {
    "path": "app/store/mask.ts",
    "chars": 3420,
    "preview": "import { BUILTIN_MASKS } from \"../masks\";\nimport { getLang, Lang } from \"../locales\";\nimport { DEFAULT_TOPIC, ChatMessag"
  },
  {
    "path": "app/store/plugin.ts",
    "chars": 7837,
    "preview": "import OpenAPIClientAxios from \"openapi-client-axios\";\nimport { StoreKey } from \"../constant\";\nimport { nanoid } from \"n"
  },
  {
    "path": "app/store/prompt.ts",
    "chars": 4720,
    "preview": "import Fuse from \"fuse.js\";\nimport { nanoid } from \"nanoid\";\nimport { StoreKey } from \"../constant\";\nimport { getLang } "
  },
  {
    "path": "app/store/sd.ts",
    "chars": 4677,
    "preview": "import {\n  Stability,\n  StoreKey,\n  ACCESS_CODE_PREFIX,\n  ApiPath,\n} from \"@/app/constant\";\nimport { getBearerToken } fr"
  },
  {
    "path": "app/store/sync.ts",
    "chars": 3834,
    "preview": "import { getClientConfig } from \"../config/client\";\nimport { ApiPath, STORAGE_KEY, StoreKey } from \"../constant\";\nimport"
  },
  {
    "path": "app/store/update.ts",
    "chars": 4801,
    "preview": "import {\n  FETCH_COMMIT_URL,\n  FETCH_TAG_URL,\n  ModelProvider,\n  StoreKey,\n} from \"../constant\";\nimport { getClientConfi"
  },
  {
    "path": "app/styles/animation.scss",
    "chars": 303,
    "preview": "@keyframes slide-in {\n  from {\n    opacity: 0;\n    transform: translateY(20px);\n  }\n\n  to {\n    opacity: 1;\n    transfor"
  },
  {
    "path": "app/styles/globals.scss",
    "chars": 6993,
    "preview": "@import \"./animation.scss\";\n@import \"./window.scss\";\n\n@mixin light {\n  --theme: light;\n\n  /* color */\n  --white: white;\n"
  },
  {
    "path": "app/styles/highlight.scss",
    "chars": 1720,
    "preview": ".markdown-body {\n  pre {\n    padding: 0;\n  }\n\n  pre,\n  code {\n    font-family: Consolas, Monaco, \"Andale Mono\", \"Ubuntu "
  },
  {
    "path": "app/styles/markdown.scss",
    "chars": 25937,
    "preview": "@mixin light {\n  color-scheme: light;\n  --color-prettylights-syntax-comment: #6e7781;\n  --color-prettylights-syntax-cons"
  },
  {
    "path": "app/styles/window.scss",
    "chars": 628,
    "preview": ".window-header {\n  padding: 14px 20px;\n  border-bottom: rgba(0, 0, 0, 0.1) 1px solid;\n  position: relative;\n\n  display: "
  },
  {
    "path": "app/typing.ts",
    "chars": 577,
    "preview": "export type Updater<T> = (updater: (value: T) => void) => void;\n\nexport const ROLES = [\"system\", \"user\", \"assistant\"] as"
  },
  {
    "path": "app/utils/audio.ts",
    "chars": 1314,
    "preview": "type TTSPlayer = {\n  init: () => void;\n  play: (audioBuffer: ArrayBuffer, onended: () => void | null) => Promise<void>;\n"
  },
  {
    "path": "app/utils/auth-settings-events.ts",
    "chars": 629,
    "preview": "import { sendGAEvent } from \"@next/third-parties/google\";\n\nexport function trackConversationGuideToCPaymentClick() {\n  s"
  },
  {
    "path": "app/utils/baidu.ts",
    "chars": 521,
    "preview": "import { BAIDU_OATUH_URL } from \"../constant\";\n/**\n * 使用 AK,SK 生成鉴权签名(Access Token)\n * @return 鉴权签名信息\n */\nexport async f"
  },
  {
    "path": "app/utils/chat.ts",
    "chars": 19225,
    "preview": "import {\n  CACHE_URL_PREFIX,\n  UPLOAD_URL,\n  REQUEST_TIMEOUT_MS,\n} from \"@/app/constant\";\nimport { MultimodalContent, Re"
  },
  {
    "path": "app/utils/clone.ts",
    "chars": 271,
    "preview": "export function deepClone<T>(obj: T) {\n  return JSON.parse(JSON.stringify(obj));\n}\n\nexport function ensure<T extends obj"
  },
  {
    "path": "app/utils/cloud/index.ts",
    "chars": 787,
    "preview": "import { createWebDavClient } from \"./webdav\";\nimport { createUpstashClient } from \"./upstash\";\n\nexport enum ProviderTyp"
  },
  {
    "path": "app/utils/cloud/upstash.ts",
    "chars": 3210,
    "preview": "import { STORAGE_KEY } from \"@/app/constant\";\nimport { SyncStore } from \"@/app/store/sync\";\nimport { chunks } from \"../f"
  },
  {
    "path": "app/utils/cloud/webdav.ts",
    "chars": 2574,
    "preview": "import { STORAGE_KEY } from \"@/app/constant\";\nimport { SyncStore } from \"@/app/store/sync\";\n\nexport type WebDAVConfig = "
  },
  {
    "path": "app/utils/cloudflare.ts",
    "chars": 1240,
    "preview": "export function cloudflareAIGatewayUrl(fetchUrl: string) {\n  // rebuild fetchUrl, if using cloudflare ai gateway\n  // do"
  },
  {
    "path": "app/utils/format.ts",
    "chars": 843,
    "preview": "export function prettyObject(msg: any) {\n  const obj = msg;\n  if (typeof msg !== \"string\") {\n    msg = JSON.stringify(ms"
  },
  {
    "path": "app/utils/hmac.ts",
    "chars": 7661,
    "preview": "// From https://gist.github.com/guillermodlpa/f6d955f838e9b10d1ef95b8e259b2c58\n// From https://gist.github.com/stevendes"
  },
  {
    "path": "app/utils/hooks.ts",
    "chars": 616,
    "preview": "import { useMemo } from \"react\";\nimport { useAccessStore, useAppConfig } from \"../store\";\nimport { collectModelsWithDefa"
  },
  {
    "path": "app/utils/indexedDB-storage.ts",
    "chars": 1161,
    "preview": "import { StateStorage } from \"zustand/middleware\";\nimport { get, set, del, clear } from \"idb-keyval\";\nimport { safeLocal"
  },
  {
    "path": "app/utils/merge.ts",
    "chars": 419,
    "preview": "export function merge(target: any, source: any) {\n  Object.keys(source).forEach(function (key) {\n    if (\n      source.h"
  },
  {
    "path": "app/utils/model.ts",
    "chars": 7806,
    "preview": "import { DEFAULT_MODELS, ServiceProvider } from \"../constant\";\nimport { LLMModel } from \"../client/api\";\n\nconst CustomSe"
  },
  {
    "path": "app/utils/ms_edge_tts.ts",
    "chars": 15323,
    "preview": "// import axios from \"axios\";\nimport { Buffer } from \"buffer\";\nimport { randomBytes } from \"crypto\";\nimport { Readable }"
  },
  {
    "path": "app/utils/object.ts",
    "chars": 397,
    "preview": "export function omit<T extends object, U extends (keyof T)[]>(\n  obj: T,\n  ...keys: U\n): Omit<T, U[number]> {\n  const re"
  },
  {
    "path": "app/utils/store.ts",
    "chars": 2050,
    "preview": "import { create } from \"zustand\";\nimport { combine, persist, createJSONStorage } from \"zustand/middleware\";\nimport { Upd"
  },
  {
    "path": "app/utils/stream.ts",
    "chars": 3078,
    "preview": "// using tauri command to send request\n// see src-tauri/src/stream.rs, and src-tauri/src/main.rs\n// 1. invoke('stream_fe"
  },
  {
    "path": "app/utils/sync.ts",
    "chars": 4781,
    "preview": "import {\n  ChatSession,\n  useAccessStore,\n  useAppConfig,\n  useChatStore,\n} from \"../store\";\nimport { useMaskStore } fro"
  },
  {
    "path": "app/utils/tencent.ts",
    "chars": 2711,
    "preview": "import { sign, hash as getHash, hex } from \"./hmac\";\n\n// 使用 SHA-256 和 secret 进行 HMAC 加密\nfunction sha256(message: any, se"
  }
]

// ... and 55 more files (download for full content)

About this extraction

This page contains the full source code of the ChatGPTNextWeb/NextChat GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 255 files (1.5 MB, approximately 451.5k tokens) together with a symbol index covering 870 extracted functions, classes, methods, constants, and types. You can use this output with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input; copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!