[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2.1\nsetup: true\norbs:\n  path-filtering: circleci/path-filtering@1.3.0\n\nworkflows:\n  version: 2.1\n  generate-config:\n    jobs:\n      - path-filtering/filter:\n          filters:\n            tags:\n              only:\n                - /.*/\n          base-revision: main\n          config-path: .circleci/continue_config.yml\n          mapping: |\n            .circleci/.* run-all-workflows true\n            gpt4all-backend/.* run-all-workflows true\n            gpt4all-bindings/python/.* run-python-workflow true\n            gpt4all-bindings/typescript/.* run-ts-workflow true\n            gpt4all-chat/.* run-chat-workflow true\n"
  },
  {
    "path": ".circleci/continue_config.yml",
    "content": "version: 2.1\norbs:\n  win: circleci/windows@5.0\n  python: circleci/python@1.2\n  node: circleci/node@5.1\n\nparameters:\n  run-all-workflows:\n    type: boolean\n    default: false\n  run-python-workflow:\n    type: boolean\n    default: false\n  run-chat-workflow:\n    type: boolean\n    default: false\n  run-ts-workflow:\n    type: boolean\n    default: false\n\njob-macos-executor: &job-macos-executor\n  macos:\n    xcode: 16.2.0\n  resource_class: macos.m1.medium.gen1\n  environment:\n    HOMEBREW_NO_AUTO_UPDATE: 1\n\njob-macos-install-deps: &job-macos-install-deps\n  name: Install basic macOS build dependencies\n  command: brew install ccache llvm wget\n\njob-linux-install-chat-deps: &job-linux-install-chat-deps\n  name: Install Linux build dependencies for gpt4all-chat\n  command: |\n    # Prevent apt-get from interactively prompting for service restart\n    echo \"\\$nrconf{restart} = 'l'\" | sudo tee /etc/needrestart/conf.d/90-autorestart.conf >/dev/null\n    wget -qO- 'https://apt.llvm.org/llvm-snapshot.gpg.key' | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc >/dev/null\n    sudo add-apt-repository -yn 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-19 main'\n    wget -qO- \"https://packages.lunarg.com/lunarg-signing-key-pub.asc\" \\\n      | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc >/dev/null\n    wget -qO- \"https://packages.lunarg.com/vulkan/1.3.290/lunarg-vulkan-1.3.290-jammy.list\" \\\n      | sudo tee /etc/apt/sources.list.d/lunarg-vulkan-1.3.290-jammy.list >/dev/null\n    wget \"https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb\"\n    sudo dpkg -i cuda-keyring_1.1-1_all.deb\n    packages=(\n      bison build-essential ccache clang-19 clang-tools-19 cuda-compiler-11-8 flex gperf libcublas-dev-11-8\n      libfontconfig1 libfreetype6 libgl1-mesa-dev libmysqlclient21 libnvidia-compute-550-server libodbc2 libpq5\n      libstdc++-12-dev libwayland-dev libx11-6 libx11-xcb1 
libxcb-cursor0 libxcb-glx0 libxcb-icccm4 libxcb-image0\n      libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-shape0 libxcb-shm0 libxcb-sync1 libxcb-util1\n      libxcb-xfixes0 libxcb-xinerama0 libxcb-xkb1 libxcb1 libxext6 libxfixes3 libxi6 libxkbcommon-dev libxkbcommon-x11-0\n      libxrender1 patchelf python3 vulkan-sdk\n    )\n    sudo apt-get update\n    sudo apt-get install -y \"${packages[@]}\"\n    wget \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-linux-x64-4.8.1.run\"\n    chmod +x qt-online-installer-linux-x64-4.8.1.run\n    ./qt-online-installer-linux-x64-4.8.1.run --no-force-installations --no-default-installations \\\n      --no-size-checking --default-answer --accept-licenses --confirm-command --accept-obligations \\\n      --email \"$QT_EMAIL\" --password \"$QT_PASSWORD\" install \\\n      qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.linux_gcc_64 qt.qt6.682.addons.qt5compat \\\n      qt.qt6.682.debug_info extensions.qtpdf.682 qt.qt6.682.addons.qthttpserver\n\njob-linux-install-backend-deps: &job-linux-install-backend-deps\n  name: Install Linux build dependencies for gpt4all-backend\n  command: |\n    wget -qO- 'https://apt.llvm.org/llvm-snapshot.gpg.key' | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc >/dev/null\n    sudo add-apt-repository -yn 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-19 main'\n    wget -qO- \"https://packages.lunarg.com/lunarg-signing-key-pub.asc\" \\\n      | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc >/dev/null\n    wget -qO- \"https://packages.lunarg.com/vulkan/1.3.290/lunarg-vulkan-1.3.290-jammy.list\" \\\n      | sudo tee /etc/apt/sources.list.d/lunarg-vulkan-1.3.290-jammy.list >/dev/null\n    wget \"https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb\"\n    sudo dpkg -i cuda-keyring_1.1-1_all.deb\n    packages=(\n      build-essential ccache clang-19 clang-tools-19 cuda-compiler-11-8 
libcublas-dev-11-8\n      libnvidia-compute-550-server libstdc++-12-dev vulkan-sdk\n    )\n    sudo apt-get update\n    sudo apt-get install -y \"${packages[@]}\"\n    pyenv global 3.13.2\n    pip install setuptools wheel cmake ninja\n\njobs:\n  # work around CircleCI-Public/path-filtering-orb#20\n  noop:\n    docker:\n      - image: cimg/base:stable\n    steps:\n      - run: \"true\"\n  validate-commit-on-main:\n    docker:\n      - image: cimg/base:stable\n    steps:\n      - checkout\n      - run:\n          name: Verify that commit is on the main branch\n          command: git merge-base --is-ancestor HEAD main\n  build-offline-chat-installer-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-macos-\n      - run:\n          <<: *job-macos-install-deps\n      - run:\n          name: Install Rosetta\n          command: softwareupdate --install-rosetta --agree-to-license  # needed for QtIFW\n      - run:\n          name: Installing Qt\n          command: |\n            wget \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-macOS-x64-4.8.1.dmg\"\n            hdiutil attach qt-online-installer-macOS-x64-4.8.1.dmg\n            /Volumes/qt-online-installer-macOS-x64-4.8.1/qt-online-installer-macOS-x64-4.8.1.app/Contents/MacOS/qt-online-installer-macOS-x64-4.8.1 \\\n              --no-force-installations --no-default-installations --no-size-checking --default-answer \\\n              --accept-licenses --confirm-command --accept-obligations --email \"$QT_EMAIL\" --password \"$QT_PASSWORD\" \\\n              install \\\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.clang_64 qt.qt6.682.addons.qt5compat \\\n              extensions.qtpdf.682 qt.qt6.682.addons.qthttpserver\n      
      hdiutil detach /Volumes/qt-online-installer-macOS-x64-4.8.1\n      - run:\n          name: Setup Keychain\n          command: |\n            echo $MAC_SIGNING_CERT | base64 --decode > cert.p12\n            security create-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security default-keychain -s sign.keychain\n            security unlock-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security import cert.p12 -k sign.keychain -P \"$MAC_SIGNING_CERT_PWD\" -T /usr/bin/codesign\n            security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k \"$MAC_KEYCHAIN_KEY\" sign.keychain\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            export PATH=$PATH:$HOME/Qt/Tools/QtInstallerFramework/4.8/bin\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake \\\n              -S ../gpt4all-chat -B . -G Ninja \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_PREFIX_PATH:PATH=~/Qt/6.8.2/macos/lib/cmake \\\n              -DCMAKE_MAKE_PROGRAM:FILEPATH=~/Qt/Tools/Ninja/ninja \\\n              -DCMAKE_C_COMPILER=/opt/homebrew/opt/llvm/bin/clang \\\n              -DCMAKE_CXX_COMPILER=/opt/homebrew/opt/llvm/bin/clang++ \\\n              -DCMAKE_RANLIB=/usr/bin/ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DBUILD_UNIVERSAL=ON \\\n              -DCMAKE_OSX_DEPLOYMENT_TARGET=12.6 \\\n              -DGGML_METAL_MACOSX_VERSION_MIN=12.6 \\\n              -DMACDEPLOYQT=~/Qt/6.8.2/macos/bin/macdeployqt \\\n              -DGPT4ALL_OFFLINE_INSTALLER=ON \\\n              -DGPT4ALL_SIGN_INSTALL=ON \\\n              -DGPT4ALL_GEN_CPACK_CONFIG=ON\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . 
--target package\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake . -DGPT4ALL_GEN_CPACK_CONFIG=OFF\n            # The 'install' step here *should* be completely unnecessary. There is absolutely no reason we should have\n            # to copy all of the build artifacts to an output directory that we do not use (because we package GPT4All\n            # as an installer instead).\n            # However, because of the way signing is implemented in the cmake script, the *source* files are signed at\n            # install time instead of the *installed* files. This side effect is the *only* way libraries that are not\n            # processed by macdeployqt, such as libllmodel.so, get signed (at least, with -DBUILD_UNIVERSAL=ON).\n            # Also, we have to run this as a *separate* step. Telling cmake to run both targets in one command causes it\n            # to execute them in parallel, since it is not aware of the dependency of the package target on the install\n            # target.\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . --target install\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . 
--target package\n            ccache -s\n            mkdir upload\n            cp gpt4all-installer-* upload\n      # persist the unsigned installer\n      - store_artifacts:\n          path: build/upload\n      - save_cache:\n          key: ccache-gpt4all-macos-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      # add workspace so signing jobs can connect & obtain dmg\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # across builds\n          paths:\n            - upload\n\n  sign-offline-chat-installer-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      # attach to a workspace containing unsigned dmg\n      - attach_workspace:\n          at: build\n      - run:\n          name: \"Setup Keychain\"\n          command: |\n            echo $MAC_SIGNING_CERT | base64 --decode > cert.p12\n            security create-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security default-keychain -s sign.keychain\n            security unlock-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security import cert.p12 -k sign.keychain -P \"$MAC_SIGNING_CERT_PWD\" -T /usr/bin/codesign\n            security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            rm cert.p12\n      - run:\n          name: \"Sign App Bundle\"\n          command: |\n            python3 -m pip install click\n            python3 gpt4all-chat/cmake/sign_dmg.py --input-dmg build/upload/gpt4all-installer-darwin.dmg --output-dmg build/upload/gpt4all-installer-darwin-signed.dmg --signing-identity \"$MAC_SIGNING_CERT_NAME\"\n      - run:\n          name: \"Sign DMG\"\n          command: |\n            codesign --options runtime --timestamp -s \"$MAC_SIGNING_CERT_NAME\" build/upload/gpt4all-installer-darwin-signed.dmg\n      # add workspace so signing jobs can connect & obtain dmg\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # across builds\n          paths:\n            - upload\n\n  notarize-offline-chat-installer-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - attach_workspace:\n          at: build\n      - run:\n          name: \"Notarize\"\n          command: |\n            xcrun notarytool submit build/upload/gpt4all-installer-darwin-signed.dmg --apple-id \"$MAC_NOTARIZATION_ID\" --team-id \"$MAC_NOTARIZATION_TID\" --password \"$MAC_NOTARIZATION_KEY\" --wait  | tee notarize_log.txt\n      - run:\n          name: \"Report Notarization Failure\"\n          command: |\n            NID=`python3 .circleci/grab_notary_id.py notarize_log.txt` && export NID\n            xcrun notarytool log $NID --keychain-profile \"notary-profile\"\n            exit 1\n          when: on_fail\n      - run:\n          name: \"Staple\"\n          command: |\n            xcrun stapler staple build/upload/gpt4all-installer-darwin-signed.dmg\n      - store_artifacts:\n          path: build/upload\n      - run:\n          name: Install Rosetta\n          command: softwareupdate --install-rosetta --agree-to-license  # needed for QtIFW\n      - run:\n          name: Test installation and verify that it is signed\n          command: |\n            set -e\n            hdiutil attach build/upload/gpt4all-installer-darwin-signed.dmg\n            codesign --verify --deep --verbose /Volumes/gpt4all-installer-darwin/gpt4all-installer-darwin.app\n            /Volumes/gpt4all-installer-darwin/gpt4all-installer-darwin.app/Contents/MacOS/gpt4all-installer-darwin \\\n              --no-size-checking --default-answer --accept-licenses --confirm-command \\\n              install gpt4all\n            codesign --verify --deep --verbose /Applications/gpt4all/bin/gpt4all.app\n            codesign --verify --deep --verbose /Applications/gpt4all/maintenancetool.app\n            hdiutil detach /Volumes/gpt4all-installer-darwin\n\n  build-online-chat-installer-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-macos-\n      - run:\n          <<: *job-macos-install-deps\n      - run:\n          name: Install Rosetta\n          command: softwareupdate --install-rosetta --agree-to-license  # needed for QtIFW\n      - run:\n          name: Installing Qt\n          command: |\n            wget \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-macOS-x64-4.8.1.dmg\"\n            hdiutil attach qt-online-installer-macOS-x64-4.8.1.dmg\n            /Volumes/qt-online-installer-macOS-x64-4.8.1/qt-online-installer-macOS-x64-4.8.1.app/Contents/MacOS/qt-online-installer-macOS-x64-4.8.1 \\\n              --no-force-installations --no-default-installations --no-size-checking --default-answer \\\n              --accept-licenses --confirm-command --accept-obligations --email \"$QT_EMAIL\" --password \"$QT_PASSWORD\" \\\n              install \\\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.clang_64 qt.qt6.682.addons.qt5compat \\\n              extensions.qtpdf.682 qt.qt6.682.addons.qthttpserver\n            hdiutil detach /Volumes/qt-online-installer-macOS-x64-4.8.1\n      - run:\n          name: Setup Keychain\n          command: |\n            echo $MAC_SIGNING_CERT | base64 --decode > cert.p12\n            security create-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security default-keychain -s sign.keychain\n            security unlock-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security import cert.p12 -k sign.keychain -P \"$MAC_SIGNING_CERT_PWD\" -T /usr/bin/codesign\n            security set-key-partition-list -S 
apple-tool:,apple:,codesign: -s -k \"$MAC_KEYCHAIN_KEY\" sign.keychain\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            export PATH=$PATH:$HOME/Qt/Tools/QtInstallerFramework/4.8/bin\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake \\\n              -S ../gpt4all-chat -B . -G Ninja \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_PREFIX_PATH:PATH=~/Qt/6.8.2/macos/lib/cmake \\\n              -DCMAKE_MAKE_PROGRAM:FILEPATH=~/Qt/Tools/Ninja/ninja \\\n              -DCMAKE_C_COMPILER=/opt/homebrew/opt/llvm/bin/clang \\\n              -DCMAKE_CXX_COMPILER=/opt/homebrew/opt/llvm/bin/clang++ \\\n              -DCMAKE_RANLIB=/usr/bin/ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DBUILD_UNIVERSAL=ON \\\n              -DCMAKE_OSX_DEPLOYMENT_TARGET=12.6 \\\n              -DGGML_METAL_MACOSX_VERSION_MIN=12.6 \\\n              -DMACDEPLOYQT=~/Qt/6.8.2/macos/bin/macdeployqt \\\n              -DGPT4ALL_OFFLINE_INSTALLER=OFF \\\n              -DGPT4ALL_SIGN_INSTALL=ON \\\n              -DGPT4ALL_GEN_CPACK_CONFIG=ON\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . --target package\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake . -DGPT4ALL_GEN_CPACK_CONFIG=OFF\n            # See comment above related to the 'install' target.\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . --target install\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . 
--target package\n            ccache -s\n            mkdir upload\n            cp gpt4all-installer-* upload\n            tar -cvzf upload/repository.tar.gz -C _CPack_Packages/Darwin/IFW/gpt4all-installer-darwin repository\n      # persist the unsigned installer\n      - store_artifacts:\n          path: build/upload\n      - save_cache:\n          key: ccache-gpt4all-macos-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      # add workspace so signing jobs can connect & obtain dmg\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # across builds\n          paths:\n            - upload\n\n  sign-online-chat-installer-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      # attach to a workspace containing unsigned dmg\n      - attach_workspace:\n          at: build\n      - run:\n          name: \"Setup Keychain\"\n          command: |\n            echo $MAC_SIGNING_CERT | base64 --decode > cert.p12\n            security create-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security default-keychain -s sign.keychain\n            security unlock-keychain -p \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            security import cert.p12 -k sign.keychain -P \"$MAC_SIGNING_CERT_PWD\" -T /usr/bin/codesign\n            security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k \"$MAC_KEYCHAIN_KEY\" sign.keychain\n            rm cert.p12\n      - run:\n          name: \"Sign App Bundle\"\n          command: |\n            python3 -m pip install click\n            python3 gpt4all-chat/cmake/sign_dmg.py --input-dmg build/upload/gpt4all-installer-darwin.dmg --output-dmg build/upload/gpt4all-installer-darwin-signed.dmg --signing-identity \"$MAC_SIGNING_CERT_NAME\"\n      - run:\n          name: \"Sign DMG\"\n          command: |\n            codesign --options runtime --timestamp -s \"$MAC_SIGNING_CERT_NAME\" 
build/upload/gpt4all-installer-darwin-signed.dmg\n      # add workspace so signing jobs can connect & obtain dmg\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # across builds\n          paths:\n            - upload\n\n  notarize-online-chat-installer-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - attach_workspace:\n          at: build\n      - run:\n          name: \"Notarize\"\n          command: |\n            xcrun notarytool submit build/upload/gpt4all-installer-darwin-signed.dmg --apple-id \"$MAC_NOTARIZATION_ID\" --team-id \"$MAC_NOTARIZATION_TID\" --password \"$MAC_NOTARIZATION_KEY\" --wait  | tee notarize_log.txt\n      - run:\n          name: \"Report Notarization Failure\"\n          command: |\n            NID=`python3 .circleci/grab_notary_id.py notarize_log.txt` && export NID\n            xcrun notarytool log $NID --keychain-profile \"notary-profile\"\n            exit 1\n          when: on_fail\n      - run:\n          name: \"Staple\"\n          command: |\n            xcrun stapler staple build/upload/gpt4all-installer-darwin-signed.dmg\n      - store_artifacts:\n          path: build/upload\n      - run:\n          name: Install Rosetta\n          command: softwareupdate --install-rosetta --agree-to-license  # needed for QtIFW\n      - run:\n          name: Test installation and verify that it is signed\n          command: |\n            set -e\n            hdiutil attach build/upload/gpt4all-installer-darwin-signed.dmg\n            codesign --verify --deep --verbose /Volumes/gpt4all-installer-darwin/gpt4all-installer-darwin.app\n            tar -xf build/upload/repository.tar.gz\n            /Volumes/gpt4all-installer-darwin/gpt4all-installer-darwin.app/Contents/MacOS/gpt4all-installer-darwin \\\n              --no-size-checking --default-answer --accept-licenses --confirm-command --set-temp-repository repository \\\n              install gpt4all\n            codesign --verify --deep --verbose /Applications/gpt4all/bin/gpt4all.app\n            codesign --verify --deep --verbose /Applications/gpt4all/maintenancetool.app\n            hdiutil detach /Volumes/gpt4all-installer-darwin\n\n  build-offline-chat-installer-linux:\n    machine:\n      image: ubuntu-2204:current\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-linux-amd64-\n      - run:\n          <<: *job-linux-install-chat-deps\n      - run:\n          name: Build linuxdeployqt\n          command: |\n            git clone https://github.com/nomic-ai/linuxdeployqt\n            cd linuxdeployqt && qmake && sudo make install\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            set -eo pipefail\n            export CMAKE_PREFIX_PATH=~/Qt/6.8.2/gcc_64/lib/cmake\n            export PATH=$PATH:$HOME/Qt/Tools/QtInstallerFramework/4.8/bin\n            export PATH=$PATH:/usr/local/cuda/bin\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            mkdir upload\n            ~/Qt/Tools/CMake/bin/cmake \\\n              -S ../gpt4all-chat -B . \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_C_COMPILER=clang-19 \\\n              -DCMAKE_CXX_COMPILER=clang++-19 \\\n              -DCMAKE_CXX_COMPILER_AR=ar \\\n              -DCMAKE_CXX_COMPILER_RANLIB=ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \\\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON \\\n              -DGPT4ALL_OFFLINE_INSTALLER=ON\n            ~/Qt/Tools/CMake/bin/cmake --build . 
-j$(nproc) --target all\n            ~/Qt/Tools/CMake/bin/cmake --build . -j$(nproc) --target install\n            ~/Qt/Tools/CMake/bin/cmake --build . -j$(nproc) --target package\n            ccache -s\n            cp gpt4all-installer-* upload\n      - store_artifacts:\n          path: build/upload\n      - save_cache:\n          key: ccache-gpt4all-linux-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      - run:\n          name: Test installation\n          command: |\n            mkdir ~/Desktop\n            build/upload/gpt4all-installer-linux.run --no-size-checking --default-answer --accept-licenses \\\n              --confirm-command \\\n              install gpt4all\n\n  build-online-chat-installer-linux:\n    machine:\n      image: ubuntu-2204:current\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-linux-amd64-\n      - run:\n          <<: *job-linux-install-chat-deps\n      - run:\n          name: Build linuxdeployqt\n          command: |\n            git clone https://github.com/nomic-ai/linuxdeployqt\n            cd linuxdeployqt && qmake && sudo make install\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            set -eo pipefail\n            export CMAKE_PREFIX_PATH=~/Qt/6.8.2/gcc_64/lib/cmake\n            export PATH=$PATH:$HOME/Qt/Tools/QtInstallerFramework/4.8/bin\n            export PATH=$PATH:/usr/local/cuda/bin\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            mkdir upload\n            ~/Qt/Tools/CMake/bin/cmake \\\n              -S ../gpt4all-chat -B . 
\\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_C_COMPILER=clang-19 \\\n              -DCMAKE_CXX_COMPILER=clang++-19 \\\n              -DCMAKE_CXX_COMPILER_AR=ar \\\n              -DCMAKE_CXX_COMPILER_RANLIB=ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \\\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON \\\n              -DGPT4ALL_OFFLINE_INSTALLER=OFF\n            ~/Qt/Tools/CMake/bin/cmake --build . -j$(nproc) --target all\n            ~/Qt/Tools/CMake/bin/cmake --build . -j$(nproc) --target install\n            ~/Qt/Tools/CMake/bin/cmake --build . -j$(nproc) --target package\n            ccache -s\n            cp gpt4all-installer-* upload\n            tar -cvzf upload/repository.tar.gz -C _CPack_Packages/Linux/IFW/gpt4all-installer-linux repository\n      - store_artifacts:\n          path: build/upload\n      - save_cache:\n          key: ccache-gpt4all-linux-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      - run:\n          name: Test installation\n          command: |\n            mkdir ~/Desktop\n            build/upload/gpt4all-installer-linux.run --no-size-checking --default-answer --accept-licenses \\\n              --confirm-command \\\n              --set-temp-repository build/_CPack_Packages/Linux/IFW/gpt4all-installer-linux/repository \\\n              install gpt4all\n\n  build-offline-chat-installer-windows:\n    machine:\n      # we use 2024.04.01 because nvcc complains about the MSVC ver if we use anything newer\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - 
restore_cache:\n          keys:\n            - ccache-gpt4all-win-amd64-\n      - run:\n          name: Install dependencies\n          command: choco install -y ccache wget\n      - run:\n          name: Installing Qt\n          command: |\n            wget.exe \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-windows-x64-4.8.1.exe\"\n            & .\\qt-online-installer-windows-x64-4.8.1.exe --no-force-installations --no-default-installations `\n              --no-size-checking --default-answer --accept-licenses --confirm-command --accept-obligations `\n              --email \"${Env:QT_EMAIL}\" --password \"${Env:QT_PASSWORD}\" install `\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.win64_msvc2022_64 qt.qt6.682.addons.qt5compat `\n              qt.qt6.682.debug_info extensions.qtpdf.682 qt.qt6.682.addons.qthttpserver\n      - run:\n          name: Install VulkanSDK\n          command: |\n            wget.exe \"https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe\"\n            .\\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install\n      - run:\n          name: Install CUDA Toolkit\n          command: |\n            wget.exe \"https://developer.download.nvidia.com/compute/cuda/11.8.0/network_installers/cuda_11.8.0_windows_network.exe\"\n            .\\cuda_11.8.0_windows_network.exe -s cudart_11.8 nvcc_11.8 cublas_11.8 cublas_dev_11.8\n      - run:\n          name: \"Install Dotnet 8\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            $dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath .\\dotnet-sdk-8.0.302-win-x64.zip\n            
$Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            $vsInstallPath = & \"C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe\" -property installationpath\n            Import-Module \"${vsInstallPath}\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll\"\n            Enter-VsDevShell -VsInstallPath \"$vsInstallPath\" -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'\n\n            $Env:PATH = \"${Env:PATH};C:\\VulkanSDK\\1.3.261.1\\bin\"\n            $Env:PATH = \"${Env:PATH};C:\\Qt\\Tools\\QtInstallerFramework\\4.8\\bin\"\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            ccache -o \"cache_dir=${pwd}\\..\\.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            & \"C:\\Qt\\Tools\\CMake_64\\bin\\cmake.exe\" `\n              -S ..\\gpt4all-chat -B . 
-G Ninja `\n              -DCMAKE_BUILD_TYPE=Release `\n              \"-DCMAKE_PREFIX_PATH:PATH=C:\\Qt\\6.8.2\\msvc2022_64\" `\n              \"-DCMAKE_MAKE_PROGRAM:FILEPATH=C:\\Qt\\Tools\\Ninja\\ninja.exe\" `\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache `\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON `\n              -DGPT4ALL_OFFLINE_INSTALLER=ON\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\"\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" install\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" package\n            ccache -s\n            mkdir upload\n            copy gpt4all-installer-win64.exe upload\n      - store_artifacts:\n          path: build/upload\n            # add workspace so signing jobs can connect & obtain dmg\n      - save_cache:\n          key: ccache-gpt4all-win-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ..\\.ccache\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # across builds\n          paths:\n            - upload\n\n  sign-offline-chat-installer-windows:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - attach_workspace:\n          at: build\n      - run:\n          name: Install dependencies\n          command: choco install -y wget\n      - run:\n          name: \"Install Dotnet 8 && Azure Sign Tool\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            $dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath 
.\\dotnet-sdk-8.0.302-win-x64.zip\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: \"Sign Windows Installer With AST\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            AzureSignTool.exe sign -du \"https://gpt4all.io/index.html\" -kvu https://gpt4all.vault.azure.net -kvi \"$Env:AZSignGUID\" -kvs \"$Env:AZSignPWD\" -kvc \"$Env:AZSignCertName\" -kvt \"$Env:AZSignTID\" -tr http://timestamp.digicert.com -v \"$($(Get-Location).Path)\\build\\upload\\gpt4all-installer-win64.exe\"\n      - store_artifacts:\n          path: build/upload\n      - run:\n          name: Test installation\n          command: |\n            build\\upload\\gpt4all-installer-win64.exe --no-size-checking --default-answer --accept-licenses `\n              --confirm-command `\n              install gpt4all\n\n  build-online-chat-installer-windows:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-win-amd64-\n      - run:\n          name: Install dependencies\n          command: choco install -y ccache wget\n      - run:\n          name: Installing Qt\n          command: |\n            wget.exe \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-windows-x64-4.8.1.exe\"\n            & .\\qt-online-installer-windows-x64-4.8.1.exe --no-force-installations 
--no-default-installations `\n              --no-size-checking --default-answer --accept-licenses --confirm-command --accept-obligations `\n              --email \"${Env:QT_EMAIL}\" --password \"${Env:QT_PASSWORD}\" install `\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.win64_msvc2022_64 qt.qt6.682.addons.qt5compat `\n              qt.qt6.682.debug_info extensions.qtpdf.682 qt.qt6.682.addons.qthttpserver\n      - run:\n          name: Install VulkanSDK\n          command: |\n            wget.exe \"https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe\"\n            .\\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install\n      - run:\n          name: Install CUDA Toolkit\n          command: |\n            wget.exe \"https://developer.download.nvidia.com/compute/cuda/11.8.0/network_installers/cuda_11.8.0_windows_network.exe\"\n            .\\cuda_11.8.0_windows_network.exe -s cudart_11.8 nvcc_11.8 cublas_11.8 cublas_dev_11.8\n      - run:\n          name: \"Install Dotnet 8\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            $dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath .\\dotnet-sdk-8.0.302-win-x64.zip\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n      - run:\n          name: \"Setup Azure SignTool\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: Build\n          
no_output_timeout: 30m\n          command: |\n            $vsInstallPath = & \"C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe\" -property installationpath\n            Import-Module \"${vsInstallPath}\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll\"\n            Enter-VsDevShell -VsInstallPath \"$vsInstallPath\" -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'\n\n            $Env:PATH = \"${Env:PATH};C:\\VulkanSDK\\1.3.261.1\\bin\"\n            $Env:PATH = \"${Env:PATH};C:\\Qt\\Tools\\QtInstallerFramework\\4.8\\bin\"\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            ccache -o \"cache_dir=${pwd}\\..\\.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            & \"C:\\Qt\\Tools\\CMake_64\\bin\\cmake.exe\" `\n              -S ..\\gpt4all-chat -B . -G Ninja `\n              -DCMAKE_BUILD_TYPE=Release `\n              \"-DCMAKE_PREFIX_PATH:PATH=C:\\Qt\\6.8.2\\msvc2022_64\" `\n              \"-DCMAKE_MAKE_PROGRAM:FILEPATH=C:\\Qt\\Tools\\Ninja\\ninja.exe\" `\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache `\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON `\n              -DGPT4ALL_OFFLINE_INSTALLER=OFF\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\"\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" install\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" package\n            ccache -s\n            mkdir upload\n            copy gpt4all-installer-win64.exe upload\n            Set-Location -Path \"_CPack_Packages/win64/IFW/gpt4all-installer-win64\"\n            Compress-Archive -Path 'repository' -DestinationPath '..\\..\\..\\..\\upload\\repository.zip'\n      - store_artifacts:\n          path: build/upload\n      - save_cache:\n          key: 
ccache-gpt4all-win-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ..\\.ccache\n      # add workspace so signing jobs can connect & obtain dmg\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # accross builds\n          paths:\n            - upload\n\n  sign-online-chat-installer-windows:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - attach_workspace:\n          at: build\n      - run:\n          name: Install dependencies\n          command: choco install -y wget\n      - run:\n          name: \"Install Dotnet 8\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            $dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath .\\dotnet-sdk-8.0.302-win-x64.zip\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n      - run:\n          name: \"Setup Azure SignTool\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: \"Sign Windows Installer With AST\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            AzureSignTool.exe sign -du \"https://gpt4all.io/index.html\" -kvu https://gpt4all.vault.azure.net -kvi 
\"$Env:AZSignGUID\" -kvs \"$Env:AZSignPWD\" -kvc \"$Env:AZSignCertName\" -kvt \"$Env:AZSignTID\" -tr http://timestamp.digicert.com -v \"$($(Get-Location).Path)/build/upload/gpt4all-installer-win64.exe\"\n      - store_artifacts:\n          path: build/upload\n      - run:\n          name: Test installation\n          command: |\n            Expand-Archive -LiteralPath build\\upload\\repository.zip -DestinationPath .\n            build\\upload\\gpt4all-installer-win64.exe --no-size-checking --default-answer --accept-licenses `\n              --confirm-command --set-temp-repository repository `\n              install gpt4all\n\n  build-offline-chat-installer-windows-arm:\n    machine:\n      # we use 2024.04.01 because nvcc complains about the MSVC ver if we use anything newer\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-win-aarch64-\n      - run:\n          name: Install dependencies\n          command: choco install -y ccache wget\n      - run:\n          name: Installing Qt\n          command: |\n            wget.exe \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-windows-x64-4.8.1.exe\"\n            & .\\qt-online-installer-windows-x64-4.8.1.exe --no-force-installations --no-default-installations `\n              --no-size-checking --default-answer --accept-licenses --confirm-command --accept-obligations `\n              --email \"${Env:QT_EMAIL}\" --password \"${Env:QT_PASSWORD}\" install `\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.win64_msvc2022_64 `\n              qt.qt6.682.win64_msvc2022_arm64_cross_compiled qt.qt6.682.addons.qt5compat 
qt.qt6.682.debug_info `\n              qt.qt6.682.addons.qthttpserver\n      - run:\n          name: \"Install Dotnet 8\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            $dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath .\\dotnet-sdk-8.0.302-win-x64.zip\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            $vsInstallPath = & \"C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe\" -property installationpath\n            Import-Module \"${vsInstallPath}\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll\"\n            Enter-VsDevShell -VsInstallPath \"$vsInstallPath\" -SkipAutomaticLocation -Arch arm64 -HostArch amd64 -DevCmdArguments '-no_logo'\n\n            $Env:PATH = \"${Env:PATH};C:\\Qt\\Tools\\QtInstallerFramework\\4.8\\bin\"\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            ccache -o \"cache_dir=${pwd}\\..\\.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            & \"C:\\Qt\\Tools\\CMake_64\\bin\\cmake.exe\" `\n              -S ..\\gpt4all-chat -B . 
-G Ninja `\n              -DCMAKE_BUILD_TYPE=Release `\n              \"-DCMAKE_PREFIX_PATH:PATH=C:\\Qt\\6.8.2\\msvc2022_arm64\" `\n              \"-DCMAKE_MAKE_PROGRAM:FILEPATH=C:\\Qt\\Tools\\Ninja\\ninja.exe\" `\n              \"-DCMAKE_TOOLCHAIN_FILE=C:\\Qt\\6.8.2\\msvc2022_arm64\\lib\\cmake\\Qt6\\qt.toolchain.cmake\" `\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `\n              -DLLMODEL_CUDA=OFF `\n              -DLLMODEL_KOMPUTE=OFF `\n              \"-DWINDEPLOYQT=C:\\Qt\\6.8.2\\msvc2022_64\\bin\\windeployqt.exe;--qtpaths;C:\\Qt\\6.8.2\\msvc2022_arm64\\bin\\qtpaths.bat\" `\n              -DGPT4ALL_TEST=OFF `\n              -DGPT4ALL_OFFLINE_INSTALLER=ON\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\"\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" install\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" package\n            ccache -s\n            mkdir upload\n            copy gpt4all-installer-win64-arm.exe upload\n      - store_artifacts:\n          path: build/upload\n            # add workspace so signing jobs can connect & obtain dmg\n      - save_cache:\n          key: ccache-gpt4all-win-aarch64-{{ epoch }}\n          when: always\n          paths:\n            - ..\\.ccache\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # accross builds\n          paths:\n            - upload\n\n  sign-offline-chat-installer-windows-arm:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - attach_workspace:\n          at: build\n      - run:\n          name: Install dependencies\n          command: choco install -y wget\n      - run:\n          name: \"Install Dotnet 8 && Azure Sign Tool\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            
$dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath .\\dotnet-sdk-8.0.302-win-x64.zip\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: \"Sign Windows Installer With AST\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            AzureSignTool.exe sign -du \"https://gpt4all.io/index.html\" -kvu https://gpt4all.vault.azure.net -kvi \"$Env:AZSignGUID\" -kvs \"$Env:AZSignPWD\" -kvc \"$Env:AZSignCertName\" -kvt \"$Env:AZSignTID\" -tr http://timestamp.digicert.com -v \"$($(Get-Location).Path)\\build\\upload\\gpt4all-installer-win64-arm.exe\"\n      - store_artifacts:\n          path: build/upload\n      - run:\n          name: Test installation\n          command: |\n            build\\upload\\gpt4all-installer-win64-arm.exe --no-size-checking --default-answer --accept-licenses `\n              --confirm-command `\n              install gpt4all\n\n  build-online-chat-installer-windows-arm:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-win-aarch64-\n      - run:\n          name: Install dependencies\n          command: choco install -y ccache wget\n      - run:\n    
      name: Installing Qt\n          command: |\n            wget.exe \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-windows-x64-4.8.1.exe\"\n            & .\\qt-online-installer-windows-x64-4.8.1.exe --no-force-installations --no-default-installations `\n              --no-size-checking --default-answer --accept-licenses --confirm-command --accept-obligations `\n              --email \"${Env:QT_EMAIL}\" --password \"${Env:QT_PASSWORD}\" install `\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.win64_msvc2022_64 `\n              qt.qt6.682.win64_msvc2022_arm64_cross_compiled qt.qt6.682.addons.qt5compat qt.qt6.682.debug_info `\n              qt.qt6.682.addons.qthttpserver\n      - run:\n          name: \"Install Dotnet 8\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            $dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath .\\dotnet-sdk-8.0.302-win-x64.zip\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n      - run:\n          name: \"Setup Azure SignTool\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            $vsInstallPath = & \"C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe\" -property installationpath\n            Import-Module \"${vsInstallPath}\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll\"\n            
Enter-VsDevShell -VsInstallPath \"$vsInstallPath\" -SkipAutomaticLocation -Arch arm64 -HostArch amd64 -DevCmdArguments '-no_logo'\n\n            $Env:PATH = \"${Env:PATH};C:\\Qt\\Tools\\QtInstallerFramework\\4.8\\bin\"\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            ccache -o \"cache_dir=${pwd}\\..\\.ccache\" -o max_size=500M -p -z\n            mkdir build\n            cd build\n            & \"C:\\Qt\\Tools\\CMake_64\\bin\\cmake.exe\" `\n              -S ..\\gpt4all-chat -B . -G Ninja `\n              -DCMAKE_BUILD_TYPE=Release `\n              \"-DCMAKE_PREFIX_PATH:PATH=C:\\Qt\\6.8.2\\msvc2022_arm64\" `\n              \"-DCMAKE_MAKE_PROGRAM:FILEPATH=C:\\Qt\\Tools\\Ninja\\ninja.exe\" `\n              \"-DCMAKE_TOOLCHAIN_FILE=C:\\Qt\\6.8.2\\msvc2022_arm64\\lib\\cmake\\Qt6\\qt.toolchain.cmake\" `\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `\n              -DLLMODEL_CUDA=OFF `\n              -DLLMODEL_KOMPUTE=OFF `\n              \"-DWINDEPLOYQT=C:\\Qt\\6.8.2\\msvc2022_64\\bin\\windeployqt.exe;--qtpaths;C:\\Qt\\6.8.2\\msvc2022_arm64\\bin\\qtpaths.bat\" `\n              -DGPT4ALL_TEST=OFF `\n              -DGPT4ALL_OFFLINE_INSTALLER=OFF\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\"\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" install\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" package\n            ccache -s\n            mkdir upload\n            copy gpt4all-installer-win64-arm.exe upload\n            Set-Location -Path \"_CPack_Packages/win64/IFW/gpt4all-installer-win64-arm\"\n            Compress-Archive -Path 'repository' -DestinationPath '..\\..\\..\\..\\upload\\repository.zip'\n      - store_artifacts:\n          path: build/upload\n      - save_cache:\n          key: ccache-gpt4all-win-aarch64-{{ epoch }}\n          when: always\n          paths:\n            - 
..\\.ccache\n      # add workspace so signing jobs can connect & obtain dmg\n      - persist_to_workspace:\n          root: build\n          # specify path to only include components we want to persist\n          # accross builds\n          paths:\n            - upload\n\n  sign-online-chat-installer-windows-arm:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - attach_workspace:\n          at: build\n      - run:\n          name: Install dependencies\n          command: choco install -y wget\n      - run:\n          name: \"Install Dotnet 8\"\n          command: |\n            mkdir dotnet\n            cd dotnet\n            $dotnet_url=\"https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip\"\n            wget.exe \"$dotnet_url\"\n            Expand-Archive -LiteralPath .\\dotnet-sdk-8.0.302-win-x64.zip\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n      - run:\n          name: \"Setup Azure SignTool\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            $Env:DOTNET_SKIP_FIRST_TIME_EXPERIENCE=$true\n            dotnet tool install --global AzureSignTool\n      - run:\n          name: \"Sign Windows Installer With AST\"\n          command: |\n            $Env:DOTNET_ROOT=\"$($(Get-Location).Path)\\dotnet\\dotnet-sdk-8.0.302-win-x64\"\n            $Env:PATH=\"$Env:DOTNET_ROOT;$Env:PATH\"\n            AzureSignTool.exe sign -du \"https://gpt4all.io/index.html\" -kvu https://gpt4all.vault.azure.net -kvi \"$Env:AZSignGUID\" -kvs \"$Env:AZSignPWD\" -kvc \"$Env:AZSignCertName\" -kvt \"$Env:AZSignTID\" -tr 
http://timestamp.digicert.com -v \"$($(Get-Location).Path)/build/upload/gpt4all-installer-win64-arm.exe\"\n      - store_artifacts:\n          path: build/upload\n      - run:\n          name: Test installation\n          command: |\n            Expand-Archive -LiteralPath build\\upload\\repository.zip -DestinationPath .\n            build\\upload\\gpt4all-installer-win64-arm.exe --no-size-checking --default-answer --accept-licenses `\n              --confirm-command --set-temp-repository repository `\n              install gpt4all\n\n  build-gpt4all-chat-linux:\n    machine:\n      image: ubuntu-2204:current\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-linux-amd64-\n      - run:\n          <<: *job-linux-install-chat-deps\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            export CMAKE_PREFIX_PATH=~/Qt/6.8.2/gcc_64/lib/cmake\n            export PATH=$PATH:/usr/local/cuda/bin\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            ~/Qt/Tools/CMake/bin/cmake \\\n              -S gpt4all-chat -B build \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_C_COMPILER=clang-19 \\\n              -DCMAKE_CXX_COMPILER=clang++-19 \\\n              -DCMAKE_CXX_COMPILER_AR=ar \\\n              -DCMAKE_CXX_COMPILER_RANLIB=ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \\\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON\n            ~/Qt/Tools/CMake/bin/cmake --build build -j$(nproc) --target all\n            ccache -s\n      - save_cache:\n          key: ccache-gpt4all-linux-amd64-{{ epoch }}\n          when: always\n          
paths:\n            - ../.ccache\n\n  build-gpt4all-chat-windows:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-win-amd64-\n      - run:\n          name: Install dependencies\n          command: choco install -y ccache wget\n      - run:\n          name: Installing Qt\n          command: |\n            wget.exe \"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-windows-x64-4.8.1.exe\"\n            & .\\qt-online-installer-windows-x64-4.8.1.exe --no-force-installations --no-default-installations `\n              --no-size-checking --default-answer --accept-licenses --confirm-command --accept-obligations `\n              --email \"${Env:QT_EMAIL}\" --password \"${Env:QT_PASSWORD}\" install `\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.win64_msvc2022_64 qt.qt6.682.addons.qt5compat `\n              qt.qt6.682.debug_info extensions.qtpdf.682 qt.qt6.682.addons.qthttpserver\n      - run:\n          name: Install VulkanSDK\n          command: |\n            wget.exe \"https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe\"\n            .\\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install\n      - run:\n          name: Install CUDA Toolkit\n          command: |\n            wget.exe \"https://developer.download.nvidia.com/compute/cuda/11.8.0/network_installers/cuda_11.8.0_windows_network.exe\"\n            .\\cuda_11.8.0_windows_network.exe -s cudart_11.8 nvcc_11.8 cublas_11.8 cublas_dev_11.8\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: 
|\n            $vsInstallPath = & \"C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe\" -property installationpath\n            Import-Module \"${vsInstallPath}\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll\"\n            Enter-VsDevShell -VsInstallPath \"$vsInstallPath\" -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'\n\n            $Env:PATH = \"${Env:PATH};C:\\VulkanSDK\\1.3.261.1\\bin\"\n            $Env:VULKAN_SDK = \"C:\\VulkanSDK\\1.3.261.1\"\n            ccache -o \"cache_dir=${pwd}\\..\\.ccache\" -o max_size=500M -p -z\n            & \"C:\\Qt\\Tools\\CMake_64\\bin\\cmake.exe\" `\n              -S gpt4all-chat -B build -G Ninja `\n              -DCMAKE_BUILD_TYPE=Release `\n              \"-DCMAKE_PREFIX_PATH:PATH=C:\\Qt\\6.8.2\\msvc2022_64\" `\n              \"-DCMAKE_MAKE_PROGRAM:FILEPATH=C:\\Qt\\Tools\\Ninja\\ninja.exe\" `\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache `\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON\n            & \"C:\\Qt\\Tools\\Ninja\\ninja.exe\" -C build\n            ccache -s\n      - save_cache:\n          key: ccache-gpt4all-win-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ..\\.ccache\n\n  build-gpt4all-chat-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-macos-\n      - run:\n          <<: *job-macos-install-deps\n      - run:\n          name: Install Rosetta\n          command: softwareupdate --install-rosetta --agree-to-license  # needed for QtIFW\n      - run:\n          name: Installing Qt\n          command: |\n            wget 
\"https://qt.mirror.constant.com/archive/online_installers/4.8/qt-online-installer-macOS-x64-4.8.1.dmg\"\n            hdiutil attach qt-online-installer-macOS-x64-4.8.1.dmg\n            /Volumes/qt-online-installer-macOS-x64-4.8.1/qt-online-installer-macOS-x64-4.8.1.app/Contents/MacOS/qt-online-installer-macOS-x64-4.8.1 \\\n              --no-force-installations --no-default-installations --no-size-checking --default-answer \\\n              --accept-licenses --confirm-command --accept-obligations --email \"$QT_EMAIL\" --password \"$QT_PASSWORD\" \\\n              install \\\n              qt.tools.cmake qt.tools.ifw.48 qt.tools.ninja qt.qt6.682.clang_64 qt.qt6.682.addons.qt5compat \\\n              extensions.qtpdf.682 qt.qt6.682.addons.qthttpserver\n            hdiutil detach /Volumes/qt-online-installer-macOS-x64-4.8.1\n      - run:\n          name: Build\n          no_output_timeout: 30m\n          command: |\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake \\\n              -S gpt4all-chat -B build -G Ninja \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_PREFIX_PATH:PATH=~/Qt/6.8.2/macos/lib/cmake \\\n              -DCMAKE_MAKE_PROGRAM:FILEPATH=~/Qt/Tools/Ninja/ninja \\\n              -DCMAKE_C_COMPILER=/opt/homebrew/opt/llvm/bin/clang \\\n              -DCMAKE_CXX_COMPILER=/opt/homebrew/opt/llvm/bin/clang++ \\\n              -DCMAKE_RANLIB=/usr/bin/ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DBUILD_UNIVERSAL=ON \\\n              -DCMAKE_OSX_DEPLOYMENT_TARGET=12.6 \\\n              -DGGML_METAL_MACOSX_VERSION_MIN=12.6\n            ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build build --target all\n            ccache -s\n      - save_cache:\n          key: ccache-gpt4all-macos-{{ epoch }}\n          when: always\n          paths:\n            - 
../.ccache\n\n  build-ts-docs:\n    docker:\n      - image: cimg/base:stable\n    steps:\n      - checkout\n      - node/install:\n          node-version: \"18.16\"\n      - run: node --version\n      - run: corepack enable\n      - node/install-packages:\n          pkg-manager: npm\n          app-dir: gpt4all-bindings/typescript\n          override-ci-command: npm install --ignore-scripts\n      - run:\n          name: build docs ts yo\n          command: |\n            cd gpt4all-bindings/typescript\n            npm run docs:build\n\n  deploy-docs:\n    docker:\n      - image: circleci/python:3.8\n    steps:\n      - checkout\n      - run:\n          name: Install dependencies\n          command: |\n            sudo apt-get update\n            sudo apt-get -y install python3 python3-pip\n            sudo pip3 install awscli --upgrade\n            sudo pip3 install mkdocs mkdocs-material mkautodoc 'mkdocstrings[python]' markdown-captions pillow cairosvg\n      - run:\n          name: Make Documentation\n          command: |\n            cd gpt4all-bindings/python\n            mkdocs build\n      - run:\n          name: Deploy Documentation\n          command: |\n            cd gpt4all-bindings/python\n            aws s3 sync --delete site/ s3://docs.gpt4all.io/\n      - run:\n          name: Invalidate docs.gpt4all.io cloudfront\n          command: aws cloudfront create-invalidation --distribution-id E1STQOW63QL2OH --paths \"/*\"\n\n  build-py-linux:\n    machine:\n      image: ubuntu-2204:current\n    steps:\n      - checkout\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-linux-amd64-\n      - run:\n          <<: *job-linux-install-backend-deps\n      - run:\n          name: Build C library\n          no_output_timeout: 30m\n          command: |\n            export PATH=$PATH:/usr/local/cuda/bin\n            git submodule update --init --recursive\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            cd 
gpt4all-backend\n            cmake -B build -G Ninja \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_C_COMPILER=clang-19 \\\n              -DCMAKE_CXX_COMPILER=clang++-19 \\\n              -DCMAKE_CXX_COMPILER_AR=ar \\\n              -DCMAKE_CXX_COMPILER_RANLIB=ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \\\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON \\\n              -DCMAKE_CUDA_ARCHITECTURES='50-virtual;52-virtual;61-virtual;70-virtual;75-virtual'\n            cmake --build build -j$(nproc)\n            ccache -s\n      - run:\n          name: Build wheel\n          command: |\n            cd gpt4all-bindings/python/\n            python setup.py bdist_wheel --plat-name=manylinux1_x86_64\n      - store_artifacts:\n          path: gpt4all-bindings/python/dist\n      - save_cache:\n          key: ccache-gpt4all-linux-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      - persist_to_workspace:\n          root: gpt4all-bindings/python/dist\n          paths:\n            - \"*.whl\"\n\n  build-py-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-macos-\n      - run:\n          <<: *job-macos-install-deps\n      - run:\n          name: Install dependencies\n          command: |\n            pip install setuptools wheel cmake\n      - run:\n          name: Build C library\n          no_output_timeout: 30m\n          command: |\n            git submodule update --init  # don't use --recursive because macOS doesn't use Kompute\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            cd gpt4all-backend\n            cmake -B build \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_C_COMPILER=/opt/homebrew/opt/llvm/bin/clang \\\n     
         -DCMAKE_CXX_COMPILER=/opt/homebrew/opt/llvm/bin/clang++ \\\n              -DCMAKE_RANLIB=/usr/bin/ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DBUILD_UNIVERSAL=ON \\\n              -DCMAKE_OSX_DEPLOYMENT_TARGET=12.6 \\\n              -DGGML_METAL_MACOSX_VERSION_MIN=12.6\n            cmake --build build --parallel\n            ccache -s\n      - run:\n          name: Build wheel\n          command: |\n            cd gpt4all-bindings/python\n            python setup.py bdist_wheel --plat-name=macosx_10_15_universal2\n      - store_artifacts:\n          path: gpt4all-bindings/python/dist\n      - save_cache:\n          key: ccache-gpt4all-macos-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      - persist_to_workspace:\n          root: gpt4all-bindings/python/dist\n          paths:\n            - \"*.whl\"\n\n  build-py-windows:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-win-amd64-\n      - run:\n          name: Install dependencies\n          command:\n            choco install -y ccache cmake ninja wget --installargs 'ADD_CMAKE_TO_PATH=System'\n      - run:\n          name: Install VulkanSDK\n          command: |\n            wget.exe \"https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe\"\n            .\\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install\n      - run:\n          name: Install CUDA Toolkit\n          command: |\n            wget.exe 
\"https://developer.download.nvidia.com/compute/cuda/11.8.0/network_installers/cuda_11.8.0_windows_network.exe\"\n            .\\cuda_11.8.0_windows_network.exe -s cudart_11.8 nvcc_11.8 cublas_11.8 cublas_dev_11.8\n      - run:\n          name: Install Python dependencies\n          command: pip install setuptools wheel cmake\n      - run:\n          name: Build C library\n          no_output_timeout: 30m\n          command: |\n            $vsInstallPath = & \"C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe\" -property installationpath\n            Import-Module \"${vsInstallPath}\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll\"\n            Enter-VsDevShell -VsInstallPath \"$vsInstallPath\" -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'\n\n            $Env:PATH += \";C:\\VulkanSDK\\1.3.261.1\\bin\"\n            $Env:VULKAN_SDK = \"C:\\VulkanSDK\\1.3.261.1\"\n            ccache -o \"cache_dir=${pwd}\\..\\.ccache\" -o max_size=500M -p -z\n            cd gpt4all-backend\n            cmake -B build -G Ninja `\n              -DCMAKE_BUILD_TYPE=Release `\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache `\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON `\n              -DCMAKE_CUDA_ARCHITECTURES='50-virtual;52-virtual;61-virtual;70-virtual;75-virtual'\n            cmake --build build --parallel\n            ccache -s\n      - run:\n          name: Build wheel\n          command: |\n            cd gpt4all-bindings/python\n            python setup.py bdist_wheel --plat-name=win_amd64\n      - store_artifacts:\n          path: gpt4all-bindings/python/dist\n      - save_cache:\n          key: ccache-gpt4all-win-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ..\\.ccache\n      - persist_to_workspace:\n          root: gpt4all-bindings/python/dist\n          paths:\n            - 
\"*.whl\"\n\n  deploy-wheels:\n    docker:\n      - image: circleci/python:3.8\n    steps:\n      - setup_remote_docker\n      - attach_workspace:\n          at: /tmp/workspace\n      - run:\n          name: Install dependencies\n          command: |\n            sudo apt-get update\n            sudo apt-get install -y build-essential cmake\n            pip install setuptools wheel twine\n      - run:\n          name: Upload Python package\n          command: |\n            twine upload /tmp/workspace/*.whl --username __token__ --password $PYPI_CRED\n      - store_artifacts:\n          path: /tmp/workspace\n\n  build-bindings-backend-linux:\n    machine:\n      image: ubuntu-2204:current\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-linux-amd64-\n      - run:\n          <<: *job-linux-install-backend-deps\n      - run:\n          name: Build Libraries\n          no_output_timeout: 30m\n          command: |\n            export PATH=$PATH:/usr/local/cuda/bin\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            cd gpt4all-backend\n            mkdir -p runtimes/build\n            cd runtimes/build\n            cmake ../.. -G Ninja \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_C_COMPILER=clang-19 \\\n              -DCMAKE_CXX_COMPILER=clang++-19 \\\n              -DCMAKE_CXX_COMPILER_AR=ar \\\n              -DCMAKE_CXX_COMPILER_RANLIB=ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \\\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON\n            cmake --build . 
-j$(nproc)\n            ccache -s\n            mkdir ../linux-x64\n            cp -L *.so ../linux-x64 # otherwise persist_to_workspace seems to mess symlinks\n      - save_cache:\n          key: ccache-gpt4all-linux-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      - persist_to_workspace:\n          root: gpt4all-backend\n          paths:\n            - runtimes/linux-x64/*.so\n\n  build-bindings-backend-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-macos-\n      - run:\n          <<: *job-macos-install-deps\n      - run:\n          name: Build Libraries\n          no_output_timeout: 30m\n          command: |\n            ccache -o \"cache_dir=${PWD}/../.ccache\" -o max_size=500M -p -z\n            cd gpt4all-backend\n            mkdir -p runtimes/build\n            cd runtimes/build\n            cmake ../.. \\\n              -DCMAKE_BUILD_TYPE=Release \\\n              -DCMAKE_C_COMPILER=/opt/homebrew/opt/llvm/bin/clang \\\n              -DCMAKE_CXX_COMPILER=/opt/homebrew/opt/llvm/bin/clang++ \\\n              -DCMAKE_RANLIB=/usr/bin/ranlib \\\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache \\\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \\\n              -DBUILD_UNIVERSAL=ON \\\n              -DCMAKE_OSX_DEPLOYMENT_TARGET=12.6 \\\n              -DGGML_METAL_MACOSX_VERSION_MIN=12.6\n            cmake --build . 
--parallel\n            ccache -s\n            mkdir ../osx-x64\n            cp -L *.dylib ../osx-x64\n            cp ../../llama.cpp-mainline/*.metal ../osx-x64\n            ls ../osx-x64\n      - save_cache:\n          key: ccache-gpt4all-macos-{{ epoch }}\n          when: always\n          paths:\n            - ../.ccache\n      - persist_to_workspace:\n          root: gpt4all-backend\n          paths:\n            - runtimes/osx-x64/*.dylib\n            - runtimes/osx-x64/*.metal\n\n  build-bindings-backend-windows:\n    machine:\n      image: windows-server-2022-gui:2024.04.1\n      resource_class: windows.large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - run:\n          name: Update Submodules\n          command: |\n            git submodule sync\n            git submodule update --init --recursive\n      - restore_cache:\n          keys:\n            - ccache-gpt4all-win-amd64-\n      - run:\n          name: Install dependencies\n          command: |\n            choco install -y ccache cmake ninja wget --installargs 'ADD_CMAKE_TO_PATH=System'\n      - run:\n          name: Install VulkanSDK\n          command: |\n            wget.exe \"https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe\"\n            .\\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install\n      - run:\n          name: Install CUDA Toolkit\n          command: |\n            wget.exe \"https://developer.download.nvidia.com/compute/cuda/11.8.0/network_installers/cuda_11.8.0_windows_network.exe\"\n            .\\cuda_11.8.0_windows_network.exe -s cudart_11.8 nvcc_11.8 cublas_11.8 cublas_dev_11.8\n      - run:\n          name: Build Libraries\n          no_output_timeout: 30m\n          command: |\n            $vsInstallPath = & \"C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe\" -property installationpath\n            Import-Module 
\"${vsInstallPath}\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll\"\n            Enter-VsDevShell -VsInstallPath \"$vsInstallPath\" -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'\n\n            $Env:Path += \";C:\\VulkanSDK\\1.3.261.1\\bin\"\n            $Env:VULKAN_SDK = \"C:\\VulkanSDK\\1.3.261.1\"\n            ccache -o \"cache_dir=${pwd}\\..\\.ccache\" -o max_size=500M -p -z\n            cd gpt4all-backend\n            mkdir runtimes/win-x64_msvc\n            cd runtimes/win-x64_msvc\n            cmake -S ../.. -B . -G Ninja `\n              -DCMAKE_BUILD_TYPE=Release `\n              -DCMAKE_C_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `\n              -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache `\n              -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON\n            cmake --build . --parallel\n            ccache -s\n            cp bin/Release/*.dll .\n      - save_cache:\n          key: ccache-gpt4all-win-amd64-{{ epoch }}\n          when: always\n          paths:\n            - ..\\.ccache\n      - persist_to_workspace:\n          root: gpt4all-backend\n          paths:\n            - runtimes/win-x64_msvc/*.dll\n\n  build-nodejs-linux:\n    docker:\n      - image: cimg/base:stable\n    steps:\n      - checkout\n      - attach_workspace:\n          at: /tmp/gpt4all-backend\n      - node/install:\n          install-yarn: true\n          node-version: \"18.16\"\n      - run: node --version\n      - run: corepack enable\n      - node/install-packages:\n          app-dir: gpt4all-bindings/typescript\n          pkg-manager: yarn\n          override-ci-command: yarn install\n      - run:\n          command: |\n            cd gpt4all-bindings/typescript\n            yarn prebuildify -t 18.16.0 --napi\n      - run:\n          command: |\n            mkdir -p gpt4all-backend/prebuilds/linux-x64\n            mkdir -p gpt4all-backend/runtimes/linux-x64\n            cp /tmp/gpt4all-backend/runtimes/linux-x64/*-*.so 
gpt4all-backend/runtimes/linux-x64\n            cp gpt4all-bindings/typescript/prebuilds/linux-x64/*.node gpt4all-backend/prebuilds/linux-x64\n      - persist_to_workspace:\n          root: gpt4all-backend\n          paths:\n            - prebuilds/linux-x64/*.node\n            - runtimes/linux-x64/*-*.so\n\n  build-nodejs-macos:\n    <<: *job-macos-executor\n    steps:\n      - checkout\n      - attach_workspace:\n          at: /tmp/gpt4all-backend\n      - node/install:\n          install-yarn: true\n          node-version: \"18.16\"\n      - run: node --version\n      - run: corepack enable\n      - node/install-packages:\n          app-dir: gpt4all-bindings/typescript\n          pkg-manager: yarn\n          override-ci-command: yarn install\n      - run:\n          command: |\n            cd gpt4all-bindings/typescript\n            yarn prebuildify -t 18.16.0 --napi\n      - run:\n          name: \"Persisting all necessary things to workspace\"\n          command: |\n            mkdir -p gpt4all-backend/prebuilds/darwin-x64\n            mkdir -p gpt4all-backend/runtimes/darwin\n            cp /tmp/gpt4all-backend/runtimes/osx-x64/*-*.* gpt4all-backend/runtimes/darwin\n            cp gpt4all-bindings/typescript/prebuilds/darwin-x64/*.node gpt4all-backend/prebuilds/darwin-x64\n      - persist_to_workspace:\n          root: gpt4all-backend\n          paths:\n            - prebuilds/darwin-x64/*.node\n            - runtimes/darwin/*-*.*\n\n  build-nodejs-windows:\n    executor:\n      name: win/default\n      size: large\n      shell: powershell.exe -ExecutionPolicy Bypass\n    steps:\n      - checkout\n      - attach_workspace:\n          at: /tmp/gpt4all-backend\n      - run: choco install wget -y\n      - run:\n          command: |\n            wget.exe \"https://nodejs.org/dist/v18.16.0/node-v18.16.0-x86.msi\" -P C:\\Users\\circleci\\Downloads\\\n            MsiExec.exe /i C:\\Users\\circleci\\Downloads\\node-v18.16.0-x86.msi /qn\n      - run:\n          
command: |\n            Start-Process powershell -verb runAs -Args \"-start GeneralProfile\"\n            nvm install 18.16.0\n            nvm use 18.16.0\n      - run: node --version\n      - run: corepack enable\n      - run:\n          command: |\n            npm install -g yarn\n            cd gpt4all-bindings/typescript\n            yarn install\n      - run:\n          command: |\n            cd gpt4all-bindings/typescript\n            yarn prebuildify -t 18.16.0 --napi\n      - run:\n          command: |\n            mkdir -p gpt4all-backend/prebuilds/win32-x64\n            mkdir -p gpt4all-backend/runtimes/win32-x64\n            cp /tmp/gpt4all-backend/runtimes/win-x64_msvc/*-*.dll gpt4all-backend/runtimes/win32-x64\n            cp gpt4all-bindings/typescript/prebuilds/win32-x64/*.node gpt4all-backend/prebuilds/win32-x64\n\n      - persist_to_workspace:\n          root: gpt4all-backend\n          paths:\n            - prebuilds/win32-x64/*.node\n            - runtimes/win32-x64/*-*.dll\n\n  deploy-npm-pkg:\n    docker:\n      - image: cimg/base:stable\n    steps:\n      - attach_workspace:\n          at: /tmp/gpt4all-backend\n      - checkout\n      - node/install:\n          install-yarn: true\n          node-version: \"18.16\"\n      - run: node --version\n      - run: corepack enable\n      - run:\n          command: |\n            cd gpt4all-bindings/typescript\n            # excluding llmodel. 
nodejs bindings don't need llmodel.dll\n            mkdir -p runtimes/win32-x64/native\n            mkdir -p prebuilds/win32-x64/\n            cp /tmp/gpt4all-backend/runtimes/win-x64_msvc/*-*.dll runtimes/win32-x64/native/\n            cp /tmp/gpt4all-backend/prebuilds/win32-x64/*.node prebuilds/win32-x64/\n\n            mkdir -p runtimes/linux-x64/native\n            mkdir -p prebuilds/linux-x64/\n            cp /tmp/gpt4all-backend/runtimes/linux-x64/*-*.so runtimes/linux-x64/native/\n            cp /tmp/gpt4all-backend/prebuilds/linux-x64/*.node prebuilds/linux-x64/\n\n            # darwin has universal runtime libraries\n            mkdir -p runtimes/darwin/native\n            mkdir -p prebuilds/darwin-x64/\n\n            cp /tmp/gpt4all-backend/runtimes/darwin/*-*.* runtimes/darwin/native/\n\n            cp /tmp/gpt4all-backend/prebuilds/darwin-x64/*.node prebuilds/darwin-x64/\n\n            # Fallback build if user is not on above prebuilds\n            mv -f binding.ci.gyp binding.gyp\n\n            mkdir gpt4all-backend\n            cd ../../gpt4all-backend\n            mv llmodel.h llmodel.cpp llmodel_c.cpp llmodel_c.h sysinfo.h dlhandle.h ../gpt4all-bindings/typescript/gpt4all-backend/\n\n      # Test install\n      - node/install-packages:\n          app-dir: gpt4all-bindings/typescript\n          pkg-manager: yarn\n          override-ci-command: yarn install\n      - run:\n          command: |\n            cd gpt4all-bindings/typescript\n            yarn run test\n      - run:\n          command: |\n            cd gpt4all-bindings/typescript\n            npm set //registry.npmjs.org/:_authToken=$NPM_TOKEN\n            npm publish\n\n# only run a job on the main branch\njob_only_main: &job_only_main\n  filters:\n    branches:\n      only: main\n\n# allow a job to run on tags as well as commits\njob_allow_tags: &job_allow_tags\n  filters:\n    tags:\n      only:\n        - /.*/\n\n# standard chat workflow filter\nworkflow-when-chat-requested: 
&workflow-when-chat-requested\n  when:\n    and:\n      - or: [ << pipeline.parameters.run-all-workflows >>, << pipeline.parameters.run-chat-workflow >> ]\n      - not:\n          equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]\n\nworkflows:\n  version: 2\n  noop:\n    when:\n      not:\n        or:\n          - << pipeline.parameters.run-all-workflows >>\n          - << pipeline.parameters.run-python-workflow >>\n          - << pipeline.parameters.run-ts-workflow >>\n          - << pipeline.parameters.run-chat-workflow >>\n          - equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]\n    jobs:\n      - noop\n  schedule:\n    # only run when scheduled by CircleCI\n    when:\n      equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]\n    jobs:\n      - build-offline-chat-installer-macos:\n          context: gpt4all\n      - build-offline-chat-installer-windows:\n          context: gpt4all\n      - build-offline-chat-installer-windows-arm:\n          context: gpt4all\n      - build-offline-chat-installer-linux:\n          context: gpt4all\n      - sign-offline-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - build-offline-chat-installer-macos\n      - notarize-offline-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - sign-offline-chat-installer-macos\n      - sign-offline-chat-installer-windows:\n          context: gpt4all\n          requires:\n            - build-offline-chat-installer-windows\n      - sign-offline-chat-installer-windows-arm:\n          context: gpt4all\n          requires:\n            - build-offline-chat-installer-windows-arm\n  build-chat-installers-release:\n    # only run on main branch tags that start with 'v' and a digit\n    when:\n      and:\n        - matches: { pattern: '^v\\d.*', value: << pipeline.git.tag >> }\n        - not:\n            equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]\n    jobs:\n      - 
validate-commit-on-main:\n          <<: *job_allow_tags\n      - build-offline-chat-installer-macos:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - build-offline-chat-installer-windows:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - build-offline-chat-installer-windows-arm:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - build-offline-chat-installer-linux:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - sign-offline-chat-installer-macos:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - build-offline-chat-installer-macos\n      - notarize-offline-chat-installer-macos:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - sign-offline-chat-installer-macos\n      - sign-offline-chat-installer-windows:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - build-offline-chat-installer-windows\n      - sign-offline-chat-installer-windows-arm:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - build-offline-chat-installer-windows-arm\n      - build-online-chat-installer-macos:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - build-online-chat-installer-windows:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - build-online-chat-installer-windows-arm:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - build-online-chat-installer-linux:\n          <<: 
*job_allow_tags\n          context: gpt4all\n          requires:\n            - validate-commit-on-main\n      - sign-online-chat-installer-macos:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - build-online-chat-installer-macos\n      - notarize-online-chat-installer-macos:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - sign-online-chat-installer-macos\n      - sign-online-chat-installer-windows:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - build-online-chat-installer-windows\n      - sign-online-chat-installer-windows-arm:\n          <<: *job_allow_tags\n          context: gpt4all\n          requires:\n            - build-online-chat-installer-windows-arm\n  build-chat-offline-installers:\n    <<: *workflow-when-chat-requested\n    jobs:\n      - build-hold:\n          type: approval\n      - sign-hold:\n          type: approval\n      - build-offline-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - build-hold\n      - sign-offline-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - sign-hold\n            - build-offline-chat-installer-macos\n      - notarize-offline-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - sign-offline-chat-installer-macos\n      - build-offline-chat-installer-windows:\n          context: gpt4all\n          requires:\n            - build-hold\n      - sign-offline-chat-installer-windows:\n          context: gpt4all\n          requires:\n            - sign-hold\n            - build-offline-chat-installer-windows\n      - build-offline-chat-installer-windows-arm:\n          context: gpt4all\n          requires:\n            - build-hold\n      - sign-offline-chat-installer-windows-arm:\n          context: gpt4all\n          requires:\n            - sign-hold\n            - 
build-offline-chat-installer-windows-arm\n      - build-offline-chat-installer-linux:\n          context: gpt4all\n          requires:\n            - build-hold\n  build-chat-online-installers:\n    <<: *workflow-when-chat-requested\n    jobs:\n      - build-hold:\n          type: approval\n      - sign-hold:\n          type: approval\n      - build-online-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - build-hold\n      - sign-online-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - sign-hold\n            - build-online-chat-installer-macos\n      - notarize-online-chat-installer-macos:\n          context: gpt4all\n          requires:\n            - sign-online-chat-installer-macos\n      - build-online-chat-installer-windows:\n          context: gpt4all\n          requires:\n            - build-hold\n      - sign-online-chat-installer-windows:\n          context: gpt4all\n          requires:\n            - sign-hold\n            - build-online-chat-installer-windows\n      - build-online-chat-installer-windows-arm:\n          context: gpt4all\n          requires:\n            - build-hold\n      - sign-online-chat-installer-windows-arm:\n          context: gpt4all\n          requires:\n            - sign-hold\n            - build-online-chat-installer-windows-arm\n      - build-online-chat-installer-linux:\n          context: gpt4all\n          requires:\n            - build-hold\n  build-and-test-gpt4all-chat:\n    <<: *workflow-when-chat-requested\n    jobs:\n      - hold:\n          type: approval\n      - build-gpt4all-chat-linux:\n          context: gpt4all\n          requires:\n            - hold\n      - build-gpt4all-chat-windows:\n          context: gpt4all\n          requires:\n            - hold\n      - build-gpt4all-chat-macos:\n          context: gpt4all\n          requires:\n            - hold\n  deploy-docs:\n    when:\n      and:\n        - equal: [ << pipeline.git.branch 
>>, main ]\n        - or:\n            - << pipeline.parameters.run-all-workflows >>\n            - << pipeline.parameters.run-python-workflow >>\n        - not:\n            equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]\n    jobs:\n      - deploy-docs:\n          context: gpt4all\n  build-python:\n    when:\n      and:\n        - or: [ << pipeline.parameters.run-all-workflows >>, << pipeline.parameters.run-python-workflow >> ]\n        - not:\n            equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]\n    jobs:\n      - pypi-hold:\n          <<: *job_only_main\n          type: approval\n      - hold:\n          type: approval\n      - build-py-linux:\n          requires:\n            - hold\n      - build-py-macos:\n          requires:\n            - hold\n      - build-py-windows:\n          requires:\n            - hold\n      - deploy-wheels:\n          <<: *job_only_main\n          context: gpt4all\n          requires:\n            - pypi-hold\n            - build-py-windows\n            - build-py-linux\n            - build-py-macos\n  build-bindings:\n    when:\n      and:\n        - or: [ << pipeline.parameters.run-all-workflows >>, << pipeline.parameters.run-ts-workflow >> ]\n        - not:\n            equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]\n    jobs:\n      - backend-hold:\n          type: approval\n      - nodejs-hold:\n          type: approval\n      - npm-hold:\n          <<: *job_only_main\n          type: approval\n      - docs-hold:\n          type: approval\n      - build-bindings-backend-linux:\n          requires:\n            - backend-hold\n      - build-bindings-backend-macos:\n          requires:\n            - backend-hold\n      - build-bindings-backend-windows:\n          requires:\n            - backend-hold\n      - build-nodejs-linux:\n          requires:\n            - nodejs-hold\n            - build-bindings-backend-linux\n      - build-nodejs-windows:\n          requires:\n        
    - nodejs-hold\n            - build-bindings-backend-windows\n      - build-nodejs-macos:\n          requires:\n            - nodejs-hold\n            - build-bindings-backend-macos\n      - build-ts-docs:\n          requires:\n            - docs-hold\n      - deploy-npm-pkg:\n          <<: *job_only_main\n          requires:\n            - npm-hold\n            - build-nodejs-linux\n            - build-nodejs-windows\n            - build-nodejs-macos\n"
  },
  {
    "path": ".circleci/grab_notary_id.py",
    "content": "import re\nimport sys\n\nID_REG = r\"id: (.*)\"\n\ndef main() -> None:\n    notary_log = sys.argv[1]\n    with open(notary_log, \"r\") as f:\n        notary_output = f.read()\n        id_m = re.search(ID_REG, notary_output)\n        if id_m:\n            print(id_m.group(1))\n        else:\n            raise RuntimeError(\"Unable to parse ID from notarization logs\")\n\nif __name__ == \"__main__\":\n    main()"
  },
  {
    "path": ".codespellrc",
    "content": "[codespell]\nignore-words-list = blong, afterall, assistent, crasher, requestor\nskip = ./.git,./gpt4all-chat/translations,*.pdf,*.svg,*.lock\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bindings-bug.md",
    "content": "---\nname: \"\\U0001F6E0 Bindings Bug Report\"\nabout: A bug report for the GPT4All Bindings\nlabels: [\"bindings\", \"bug-unconfirmed\"]\n---\n\n<!-- Before creating a new issue, please make sure to take a few moments to check the issue tracker for existing issues about the bug. -->\n\n### Bug Report\n\n<!-- A clear and concise description of what the bug is. -->\n\n### Example Code\n\n<!-- Please provide a minimal code example that can be used to experience this issue. Delete this section if it does not apply. -->\n\n### Steps to Reproduce\n\n<!-- List the steps that should be taken to experience this issue. -->\n\n1.\n2.\n3.\n\n### Expected Behavior\n\n<!-- In a few words, what did you expect to happen? -->\n\n### Your Environment\n\n- Bindings version (e.g. \"Version\" from `pip show gpt4all`):\n- Operating System:\n- Chat model used (if applicable):\n\n<!-- You can freely edit this text, please remove all the lines you believe are unnecessary. -->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/chat-bug.md",
    "content": "---\nname: \"\\U0001F4AC GPT4All Bug Report\"\nabout: A bug report for GPT4All Chat\nlabels: [\"chat\", \"bug-unconfirmed\"]\n---\n\n<!-- Before creating a new issue, please make sure to take a few moments to check the issue tracker for existing issues about the bug. -->\n\n### Bug Report\n\n<!-- A clear and concise description of what the bug is. -->\n\n### Steps to Reproduce\n\n<!-- List the steps that should be taken to experience this issue. Provide any relevant information about your configuration, and describe anything that was unexpected. -->\n\n1.\n2.\n3.\n\n### Expected Behavior\n\n<!-- In a few words, what did you expect to happen? -->\n\n### Your Environment\n\n- GPT4All version:\n- Operating System:\n- Chat model used (if applicable):\n\n<!-- You can freely edit this text, please remove all the lines you believe are unnecessary. -->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "version: 2.1\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/documentation.md",
    "content": "---\nname: \"\\U0001F4C4 Documentation\"\nabout: An issue related to the GPT4All documentation\nlabels: [\"documentation\"]\n---\n\n### Documentation\n\n<!-- Please describe the issue with the documentation as clearly as possible. -->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature-request.md",
    "content": "---\nname: \"\\U0001F680 Feature Request\"\nabout: Submit a proposal/request for a new GPT4All feature\ntitle: \"[Feature] Feature request title...\"\nlabels: [\"enhancement\"]\n---\n\n### Feature Request\n\n<!-- A clear and concise description of the feature proposal. -->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/other-bug.md",
    "content": "---\nname: \"\\U0001F41B Other Bug Report\"\nabout: A bug in another component of GPT4All\nlabels: [\"bug-unconfirmed\"]\n---\n\n<!-- Before creating a new issue, please make sure to take a few moments to check the issue tracker for existing issues about the bug. -->\n\n### Bug Report\n\n<!-- A clear and concise description of what the bug is. -->\n\n### Steps to Reproduce\n\n<!-- List the steps that should be taken to experience this issue. Provide any relevant information about your configuration, and describe anything that was unexpected. If this bug involves original code, please provide a minimal version that can reproduce the issue. -->\n\n1.\n2.\n3.\n\n### Expected Behavior\n\n<!-- In a few words, what did you expect to happen? -->\n\n### Your Environment\n\n- GPT4All version (if applicable):\n- Operating System:\n- Chat model used (if applicable):\n\n<!-- You can freely edit this text, please remove all the lines you believe are unnecessary. -->\n\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "## Describe your changes\n\n## Issue ticket number and link\n\n## Checklist before requesting a review\n- [ ] I have performed a self-review of my code.\n- [ ] If it is a core feature, I have added thorough tests.\n- [ ] I have added thorough documentation for my code.\n- [ ] I have tagged PR with relevant project labels. I acknowledge that a PR without labels may be dismissed.\n- [ ] If this PR addresses a bug, I have provided both a screenshot/video of the original bug and the working solution.\n\n## Demo\n<!-- Screenshots or video of new or updated code changes !-->\n\n### Steps to Reproduce\n<!-- Steps to reproduce demo !-->\n\n## Notes\n<!-- Any other relevant information to include about PR !-->\n"
  },
  {
    "path": ".github/workflows/close_issues.yml",
    "content": "# This workflow will close issues that do not have labels or additional comments.\n# Trigger manually.\n\nname: \"Close Issues\"\non:\n  workflow_dispatch:\n\njobs:\n  close_issues:\n    runs-on: ubuntu-latest\n    steps:\n    - name: Close issues without label or comment\n      uses: actions/github-script@v3\n      with:\n        github-token: ${{secrets.GITHUB_TOKEN}}\n        script: |\n          const repo = context.repo;\n          let page = 1;\n          let issues = [];\n          while (true) {\n            const result = await github.issues.listForRepo({...repo, per_page: 100, page: page});\n            if (result.data.length === 0) break;\n            issues = issues.concat(result.data);\n            page += 1;\n          }\n          for (let { number } of issues) {\n            const issueData = await github.issues.get({...repo, issue_number: number});\n            const comments = await github.issues.listComments({...repo, issue_number: number});\n            if (issueData.data.labels.length === 0 && comments.data.length < 1) {\n              await github.issues.update({...repo, issue_number: number, state: 'closed'});\n              await github.issues.createComment({...repo, issue_number: number, body: 'Issue closed as it does not have any labels or comments.'});\n            }\n          }\n"
  },
  {
    "path": ".github/workflows/codespell.yml",
    "content": "---\nname: Codespell\n\non:\n  push:\n    branches: [main]\n  pull_request:\n    branches: [main]\n\njobs:\n  codespell:\n    name: Check for spelling errors\n    runs-on: ubuntu-latest\n\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n      - name: Codespell\n        uses: codespell-project/actions-codespell@v2\n"
  },
  {
    "path": ".gitignore",
    "content": "*.arrow\nsquad_*\n*sbert_embedded*\n*.pkl\nckpts*\n.deepspeed_env\n*.jsonl\n*tar.gz\nckpts**\nwandb\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# poetry\n#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\n#   commonly ignored for 
libraries.\n#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\n#poetry.lock\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/#use-with-ide\n.pdm.toml\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n# PyCharm\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n#.idea/\n\n\n# vs code\n.vscode\n*.bin\n\n.DS_Store\n\n# gpt4all-chat\nCMakeLists.txt.user\ngpt4all-chat/models/*\nbuild_*\nbuild-*\ncmake-build-*\n/gpt4all-chat/tests/python/config.py\n\n# IntelliJ\n.idea/\n\n# LLM models\n*.gguf\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"llama.cpp-mainline\"]\n\tpath = gpt4all-backend/deps/llama.cpp-mainline\n\turl = https://github.com/nomic-ai/llama.cpp.git\n\tbranch = master\n[submodule \"gpt4all-chat/usearch\"]\n\tpath = gpt4all-chat/deps/usearch\n\turl = https://github.com/nomic-ai/usearch.git\n[submodule \"gpt4all-chat/deps/SingleApplication\"]\n\tpath = gpt4all-chat/deps/SingleApplication\n\turl = https://github.com/nomic-ai/SingleApplication.git\n[submodule \"gpt4all-chat/deps/fmt\"]\n\tpath = gpt4all-chat/deps/fmt\n\turl = https://github.com/fmtlib/fmt.git\n[submodule \"gpt4all-chat/deps/DuckX\"]\n\tpath = gpt4all-chat/deps/DuckX\n\turl = https://github.com/nomic-ai/DuckX.git\n[submodule \"gpt4all-chat/deps/QXlsx\"]\n\tpath = gpt4all-chat/deps/QXlsx\n\turl = https://github.com/nomic-ai/QXlsx.git\n[submodule \"gpt4all-chat/deps/minja\"]\n\tpath = gpt4all-chat/deps/minja\n\turl = https://github.com/nomic-ai/minja.git\n[submodule \"gpt4all-chat/deps/json\"]\n\tpath = gpt4all-chat/deps/json\n\turl = https://github.com/nlohmann/json.git\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing\n\nWhen contributing to this repository, please first discuss the change you wish to make via issue,\nemail, or any other method with the owners of this repository before making a change. \n\nPlease note we have a code of conduct, please follow it in all your interactions with the project.\n\n## Pull Request Process\n\n1. Ensure any install or build dependencies are removed before the end of the layer when doing a build.\n2. Make sure Pull Request is tagged with appropriate project identifiers and has a clear description of contribution.\n3. Any new or updated code must have documentation and preferably tests included with Pull Request.\n4. Significant feature or code changes should provide a short video or screenshot demo.\n4. Fill out relevant parts of Pull Request template. \n4. Pull requests must have sign-off from one other developer. Reach out to a repository owner once your\n   code is ready to be merged into `main`.\n\n## Code of Conduct\n\n### Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as\ncontributors and maintainers pledge to making participation in our project and\nour community a harassment-free experience for everyone, regardless of age, body\nsize, disability, ethnicity, gender identity and expression, level of experience,\nnationality, personal appearance, race, religion, or sexual identity and\norientation.\n\n### Our Standards\n\nExamples of behavior that contributes to creating a positive environment\ninclude:\n\n* Using welcoming and inclusive language\n* Being respectful of differing viewpoints and experiences\n* Gracefully accepting constructive criticism\n* Focusing on what is best for the community\n* Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n* The use of sexualized language or imagery and unwelcome sexual attention or\nadvances\n* Trolling, insulting/derogatory comments, and personal or political 
attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or electronic\n  address, without explicit permission\n* Other conduct which could reasonably be considered inappropriate in a\n  professional setting\n\n### Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable\nbehavior and are expected to take appropriate and fair corrective action in\nresponse to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or\nreject comments, commits, code, wiki edits, issues, and other contributions\nthat are not aligned to this Code of Conduct, or to ban temporarily or\npermanently any contributor for other behaviors that they deem inappropriate,\nthreatening, offensive, or harmful.\n\n### Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces\nwhen an individual is representing the project or its community. Examples of\nrepresenting a project or community include using an official project e-mail\naddress, posting via an official social media account, or acting as an appointed\nrepresentative at an online or offline event. Representation of a project may be\nfurther defined and clarified by project maintainers.\n\n### Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported by contacting the project team at support@nomic.ai. All\ncomplaints will be reviewed and investigated and will result in a response that\nis deemed necessary and appropriate to the circumstances. 
The project team is\nobligated to maintain confidentiality with regard to the reporter of an incident.\nFurther details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good\nfaith may face temporary or permanent repercussions as determined by other\nmembers of the project's leadership.\n\n### Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,\navailable at [http://contributor-covenant.org/version/1/4][version]\n\n[homepage]: http://contributor-covenant.org\n[version]: http://contributor-covenant.org/version/1/4/"
  },
  {
    "path": "LICENSE.txt",
    "content": "Copyright (c) 2023 Nomic, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "MAINTAINERS.md",
    "content": "# MAINTAINERS\n\n## Rules\n\n* All content inside GPT4All shall have a documented maintainer\n* If a maintainer decides to retire or resign a call for volunteers will go\n  out\n* If no further maintainer can be found in a reasonable time frame, then the\n  content will be marked deprecated and removed in time\n\n## Job\n\nMaintainers will be...\n\n1. Responsible for overseeing content under their stewardship\n2. Responsible for triaging new issues, reviewing PRs, assigning priority\n   to tasks\n3. Responsible for keeping content in sufficient quality in a timely fashion\n\n## List\n\nAdam Treat ([@manyoso](https://github.com/manyoso))<br/>\nE-mail: adam@nomic.ai<br/>\nDiscord: `@gonzochess75`\n- Overall project maintainer\n- Chat UI\n\nJared Van Bortel ([@cebtenzzre](https://github.com/cebtenzzre))<br/>\nE-mail: jared@nomic.ai<br/>\nDiscord: `@cebtenzzre`\n- gpt4all-backend\n- Python binding\n- Python CLI app\n\nJacob Nguyen ([@jacoobes](https://github.com/jacoobes))<br/>\nDiscord: `@jacoobes`<br/>\nE-mail: `jacoobes@sern.dev`\n- TypeScript binding\n\nDominik ([@cosmic-snow](https://github.com/cosmic-snow))<br/>\nE-mail: cosmic-snow@mailfence.com<br/>\nDiscord: `@cosmic__snow`\n- Community documentation (GitHub Wiki)\n\nMax Cembalest ([@mcembalest](https://github.com/mcembalest))<br/>\nE-mail: max@nomic.ai<br/>\nDiscord: `@maxcembalest.`\n- Official documentation (gpt4all-bindings/python/docs -> https://docs.gpt4all.io/)\n\nThiago Ramos ([@thiagojramos](https://github.com/thiagojramos))<br/>\nE-mail: thiagojramos@outlook.com<br/>\n- pt\\_BR translation\n\n不知火 Shiranui ([@supersonictw](https://github.com/supersonictw))<br/>\nE-mail: supersonic@livemail.tw<br/>\nDiscord: `@supersonictw`\n- zh\\_TW translation\n\nJeremy Tayco ([@jstayco](https://github.com/jstayco))<br/>\nE-mail: jstayco@protonmail.ch<br/>\nDiscord: `@vertana`\n- es\\_MX translation\n\nRiccardo Giovanetti ([@Harvester62](https://github.com/Harvester62))<br/>\nE-mail: 
riccardo.giovanetti@gmail.com<br/>\nDiscord: `@harvester62`\n- it\\_IT translation\n\nTim ([@Tim453](https://github.com/Tim453))<br/>\nE-mail: tim453@mailbox.org<br/>\nDiscord: `@Tim453`\n- Flatpak\n\nJack ([@wuodoo](https://github.com/wuodoo))<br/>\nE-mail: 2296103047@qq.com<br/>\nDiscord: `@mikage`\n- zh\\_CN translation\n"
  },
  {
    "path": "README.md",
    "content": "<h1 align=\"center\">GPT4All</h1>\n\n<p align=\"center\">\n  Now with support for DeepSeek R1 Distillations\n</p>\n\n<p align=\"center\">\n  <a href=\"https://www.nomic.ai/gpt4all\">Website</a> &bull; <a href=\"https://docs.gpt4all.io\">Documentation</a> &bull; <a href=\"https://discord.gg/mGZE39AS3e\">Discord</a> &bull; <a href=\"https://www.youtube.com/watch?v=gQcZDXRVJok\">YouTube Tutorial</a>\n</p>\n\n<p align=\"center\">\n  GPT4All runs large language models (LLMs) privately on everyday desktops & laptops.\n</p>\n<p align=\"center\">\n  No API calls or GPUs required - you can just download the application and <a href=\"https://docs.gpt4all.io/gpt4all_desktop/quickstart.html#quickstart\">get started</a>.\n</p>\n\n<p align=\"center\">\n  Read about what's new in <a href=\"https://www.nomic.ai/blog/tag/gpt4all\">our blog</a>.\n</p>\n<p align=\"center\">\n  <a href=\"https://nomic.ai/gpt4all/#newsletter-form\">Subscribe to the newsletter</a>\n</p>\n\nhttps://github.com/nomic-ai/gpt4all/assets/70534565/513a0f15-4964-4109-89e4-4f9a9011f311\n\n<p align=\"center\">\nGPT4All is made possible by our compute partner <a href=\"https://www.paperspace.com/\">Paperspace</a>.\n</p>\n\n## Download Links\n\n<p>\n  &mdash; <a href=\"https://gpt4all.io/installers/gpt4all-installer-win64.exe\">\n    <img src=\"gpt4all-bindings/python/docs/assets/windows.png\" style=\"height: 1em; width: auto\" /> Windows Installer\n  </a> &mdash;\n</p>\n<p>\n  &mdash; <a href=\"https://gpt4all.io/installers/gpt4all-installer-win64-arm.exe\">\n    <img src=\"gpt4all-bindings/python/docs/assets/windows.png\" style=\"height: 1em; width: auto\" /> Windows ARM Installer\n  </a> &mdash;\n</p>\n<p>\n  &mdash; <a href=\"https://gpt4all.io/installers/gpt4all-installer-darwin.dmg\">\n    <img src=\"gpt4all-bindings/python/docs/assets/mac.png\" style=\"height: 1em; width: auto\" /> macOS Installer\n  </a> &mdash;\n</p>\n<p>\n  &mdash; <a 
href=\"https://gpt4all.io/installers/gpt4all-installer-linux.run\">\n    <img src=\"gpt4all-bindings/python/docs/assets/ubuntu.svg\" style=\"height: 1em; width: auto\" /> Ubuntu Installer\n  </a> &mdash;\n</p>\n<p>\n  The Windows and Linux builds require Intel Core i3 2nd Gen / AMD Bulldozer, or better.\n</p>\n<p>\n  The Windows ARM build supports Qualcomm Snapdragon and Microsoft SQ1/SQ2 processors.\n</p>\n<p>\n  The Linux build is x86-64 only (no ARM).\n</p>\n<p>\n  The macOS build requires Monterey 12.6 or newer. Best results with Apple Silicon M-series processors.\n</p>\n\nSee the full [System Requirements](gpt4all-chat/system_requirements.md) for more details.\n\n<br/>\n<br/>\n<p>\n  <a href='https://flathub.org/apps/io.gpt4all.gpt4all'>\n    <img style=\"height: 2em; width: auto\" alt='Get it on Flathub' src='https://flathub.org/api/badge'><br/>\n    Flathub (community maintained)\n  </a>\n</p>\n\n## Install GPT4All Python\n\n`gpt4all` gives you access to LLMs with our Python client around [`llama.cpp`](https://github.com/ggerganov/llama.cpp) implementations. 
\n\nNomic contributes to open source software like [`llama.cpp`](https://github.com/ggerganov/llama.cpp) to make LLMs accessible and efficient **for all**.\n\n```bash\npip install gpt4all\n```\n\n```python\nfrom gpt4all import GPT4All\nmodel = GPT4All(\"Meta-Llama-3-8B-Instruct.Q4_0.gguf\") # downloads / loads a 4.66GB LLM\nwith model.chat_session():\n    print(model.generate(\"How can I run LLMs efficiently on my laptop?\", max_tokens=1024))\n```\n\n\n## Integrations\n\n:parrot::link: [Langchain](https://python.langchain.com/v0.2/docs/integrations/providers/gpt4all/)\n:card_file_box: [Weaviate Vector Database](https://github.com/weaviate/weaviate) - [module docs](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-gpt4all)\n:telescope: [OpenLIT (OTel-native Monitoring)](https://github.com/openlit/openlit) - [Docs](https://docs.openlit.io/latest/integrations/gpt4all)\n\n## Release History\n- **July 2nd, 2024**: V3.0.0 Release\n    - Fresh redesign of the chat application UI\n    - Improved user workflow for LocalDocs\n    - Expanded access to more model architectures\n- **October 19th, 2023**: GGUF Support Launches with Support for:\n    - Mistral 7b base model, an updated model gallery on our website, several new local code models including Rift Coder v1.5\n    - [Nomic Vulkan](https://blog.nomic.ai/posts/gpt4all-gpu-inference-with-vulkan) support for Q4\\_0 and Q4\\_1 quantizations in GGUF.\n    - Offline build support for running old versions of the GPT4All Local LLM Chat Client.\n- **September 18th, 2023**: [Nomic Vulkan](https://blog.nomic.ai/posts/gpt4all-gpu-inference-with-vulkan) launches supporting local LLM inference on NVIDIA and AMD GPUs.\n- **July 2023**: Stable support for LocalDocs, a feature that allows you to privately and locally chat with your data.\n- **June 28th, 2023**: [Docker-based API server] launches allowing inference of local LLMs from an OpenAI-compatible HTTP endpoint.\n\n[Docker-based API server]: 
https://github.com/nomic-ai/gpt4all/tree/cef74c2be20f5b697055d5b8b506861c7b997fab/gpt4all-api\n\n## Contributing\nGPT4All welcomes contributions, involvement, and discussion from the open source community!\nPlease see CONTRIBUTING.md and follow the issues, bug reports, and PR markdown templates.\n\nCheck project discord, with project owners, or through existing issues/PRs to avoid duplicate work.\nPlease make sure to tag all of the above with relevant project identifiers or your contribution could potentially get lost.\nExample tags: `backend`, `bindings`, `python-bindings`, `documentation`, etc.\n\n## Citation\n\nIf you utilize this repository, models or data in a downstream project, please consider citing it with:\n```\n@misc{gpt4all,\n  author = {Yuvanesh Anand and Zach Nussbaum and Brandon Duderstadt and Benjamin Schmidt and Andriy Mulyar},\n  title = {GPT4All: Training an Assistant-style Chatbot with Large Scale Data Distillation from GPT-3.5-Turbo},\n  year = {2023},\n  publisher = {GitHub},\n  journal = {GitHub repository},\n  howpublished = {\\url{https://github.com/nomic-ai/gpt4all}},\n}\n```\n"
  },
  {
    "path": "common/common.cmake",
    "content": "function(gpt4all_add_warning_options target)\n    if (MSVC)\n        return()\n    endif()\n    target_compile_options(\"${target}\" PRIVATE\n        # base options\n        -Wall\n        -Wextra\n        # extra options\n        -Wcast-align\n        -Wextra-semi\n        -Wformat=2\n        -Wmissing-include-dirs\n        -Wsuggest-override\n        -Wvla\n        # errors\n        -Werror=format-security\n        -Werror=init-self\n        -Werror=pointer-arith\n        -Werror=undef\n        # disabled warnings\n        -Wno-sign-compare\n        -Wno-unused-parameter\n    )\n    if (CMAKE_CXX_COMPILER_ID STREQUAL \"GNU\")\n        target_compile_options(\"${target}\" PRIVATE\n            -Wduplicated-branches\n            -Wduplicated-cond\n            -Wlogical-op\n            -Wno-reorder\n            -Wno-null-dereference\n        )\n    elseif (CMAKE_CXX_COMPILER_ID MATCHES \"^(Apple)?Clang$\")\n        target_compile_options(\"${target}\" PRIVATE\n            -Wunreachable-code-break\n            -Wunreachable-code-return\n            -Werror=pointer-integer-compare\n            -Wno-reorder-ctor\n        )\n    endif()\nendfunction()\n"
  },
  {
    "path": "gpt4all-backend/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.23)  # for FILE_SET\n\ninclude(../common/common.cmake)\n\nset(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)\nset(CMAKE_EXPORT_COMPILE_COMMANDS ON)\n\nif (APPLE)\n    option(BUILD_UNIVERSAL \"Build a Universal binary on macOS\" ON)\nelse()\n    option(LLMODEL_KOMPUTE \"llmodel: use Kompute\"              ON)\n    option(LLMODEL_VULKAN  \"llmodel: use Vulkan\"               OFF)\n    option(LLMODEL_CUDA    \"llmodel: use CUDA\"                 ON)\n    option(LLMODEL_ROCM    \"llmodel: use ROCm\"                 OFF)\nendif()\n\nif (APPLE)\n  if (BUILD_UNIVERSAL)\n    # Build a Universal binary on macOS\n    # This requires that the found Qt library is compiled as Universal binaries.\n    set(CMAKE_OSX_ARCHITECTURES \"arm64;x86_64\" CACHE STRING \"\" FORCE)\n  else()\n    # Build for the host architecture on macOS\n    if (NOT CMAKE_OSX_ARCHITECTURES)\n      set(CMAKE_OSX_ARCHITECTURES \"${CMAKE_HOST_SYSTEM_PROCESSOR}\" CACHE STRING \"\" FORCE)\n    endif()\n  endif()\nendif()\n\n# Include the binary directory for the generated header file\ninclude_directories(\"${CMAKE_CURRENT_BINARY_DIR}\")\n\nset(LLMODEL_VERSION_MAJOR 0)\nset(LLMODEL_VERSION_MINOR 5)\nset(LLMODEL_VERSION_PATCH 0)\nset(LLMODEL_VERSION \"${LLMODEL_VERSION_MAJOR}.${LLMODEL_VERSION_MINOR}.${LLMODEL_VERSION_PATCH}\")\nproject(llmodel VERSION ${LLMODEL_VERSION} LANGUAGES CXX C)\n\nset(CMAKE_CXX_STANDARD 23)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\nset(BUILD_SHARED_LIBS ON)\n\n# Check for IPO support\ninclude(CheckIPOSupported)\ncheck_ipo_supported(RESULT IPO_SUPPORTED OUTPUT IPO_ERROR)\nif (NOT IPO_SUPPORTED)\n    message(WARNING \"Interprocedural optimization is not supported by your toolchain! 
This will lead to bigger file sizes and worse performance: ${IPO_ERROR}\")\nelse()\n    message(STATUS \"Interprocedural optimization support detected\")\nendif()\n\nset(DIRECTORY deps/llama.cpp-mainline)\ninclude(llama.cpp.cmake)\n\nset(BUILD_VARIANTS)\nif (APPLE)\n    list(APPEND BUILD_VARIANTS metal)\nendif()\nif (LLMODEL_KOMPUTE)\n    list(APPEND BUILD_VARIANTS kompute kompute-avxonly)\nelse()\n    list(PREPEND BUILD_VARIANTS cpu cpu-avxonly)\nendif()\nif (LLMODEL_VULKAN)\n    list(APPEND BUILD_VARIANTS vulkan vulkan-avxonly)\nendif()\nif (LLMODEL_CUDA)\n    cmake_minimum_required(VERSION 3.18)  # for CMAKE_CUDA_ARCHITECTURES\n\n    # Defaults must be set before enable_language(CUDA).\n    # Keep this in sync with the arch list in ggml/src/CMakeLists.txt (plus 5.0 for non-F16 branch).\n    if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)\n        # 52 == lowest CUDA 12 standard\n        # 60 == f16 CUDA intrinsics\n        # 61 == integer CUDA intrinsics\n        # 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster\n        if (GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16)\n            set(CMAKE_CUDA_ARCHITECTURES \"60;61;70;75\") # needed for f16 CUDA intrinsics\n        else()\n            set(CMAKE_CUDA_ARCHITECTURES \"50;52;61;70;75\") # lowest CUDA 12 standard + lowest for integer intrinsics\n            #set(CMAKE_CUDA_ARCHITECTURES \"OFF\") # use this to compile much faster, but only F16 models work\n        endif()\n    endif()\n    message(STATUS \"Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}\")\n\n    include(CheckLanguage)\n    check_language(CUDA)\n    if (NOT CMAKE_CUDA_COMPILER)\n        message(WARNING \"CUDA Toolkit not found. 
To build without CUDA, use -DLLMODEL_CUDA=OFF.\")\n    endif()\n    enable_language(CUDA)\n    list(APPEND BUILD_VARIANTS cuda cuda-avxonly)\nendif()\nif (LLMODEL_ROCM)\n    enable_language(HIP)\n    list(APPEND BUILD_VARIANTS rocm rocm-avxonly)\nendif()\n\n# Go through each build variant\nforeach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)\n    # Determine flags\n    if (BUILD_VARIANT MATCHES avxonly)\n        set(GPT4ALL_ALLOW_NON_AVX OFF)\n    else()\n        set(GPT4ALL_ALLOW_NON_AVX ON)\n    endif()\n    set(GGML_AVX2 ${GPT4ALL_ALLOW_NON_AVX})\n    set(GGML_F16C ${GPT4ALL_ALLOW_NON_AVX})\n    set(GGML_FMA  ${GPT4ALL_ALLOW_NON_AVX})\n\n    set(GGML_METAL   OFF)\n    set(GGML_KOMPUTE OFF)\n    set(GGML_VULKAN  OFF)\n    set(GGML_CUDA    OFF)\n    set(GGML_ROCM    OFF)\n    if (BUILD_VARIANT MATCHES metal)\n        set(GGML_METAL   ON)\n    elseif (BUILD_VARIANT MATCHES kompute)\n        set(GGML_KOMPUTE ON)\n    elseif (BUILD_VARIANT MATCHES vulkan)\n        set(GGML_VULKAN  ON)\n    elseif (BUILD_VARIANT MATCHES cuda)\n        set(GGML_CUDA    ON)\n    elseif (BUILD_VARIANT MATCHES rocm)\n        set(GGML_HIPBLAS ON)\n    endif()\n\n    # Include GGML\n    include_ggml(-mainline-${BUILD_VARIANT})\n\n    if (BUILD_VARIANT MATCHES metal)\n        set(GGML_METALLIB \"${GGML_METALLIB}\" PARENT_SCOPE)\n    endif()\n\n    # Function for preparing individual implementations\n    function(prepare_target TARGET_NAME BASE_LIB)\n        set(TARGET_NAME ${TARGET_NAME}-${BUILD_VARIANT})\n        message(STATUS \"Configuring model implementation target ${TARGET_NAME}\")\n        # Link to ggml/llama\n        target_link_libraries(${TARGET_NAME}\n            PRIVATE ${BASE_LIB}-${BUILD_VARIANT})\n        # Let it know about its build variant\n        target_compile_definitions(${TARGET_NAME}\n            PRIVATE GGML_BUILD_VARIANT=\"${BUILD_VARIANT}\")\n        # Enable IPO if possible\n# FIXME: Doesn't work with msvc reliably. 
See https://github.com/nomic-ai/gpt4all/issues/841\n#        set_property(TARGET ${TARGET_NAME}\n#                     PROPERTY INTERPROCEDURAL_OPTIMIZATION ${IPO_SUPPORTED})\n    endfunction()\n\n    # Add each individual implementations\n    add_library(llamamodel-mainline-${BUILD_VARIANT} SHARED\n        src/llamamodel.cpp src/llmodel_shared.cpp)\n    gpt4all_add_warning_options(llamamodel-mainline-${BUILD_VARIANT})\n    target_compile_definitions(llamamodel-mainline-${BUILD_VARIANT} PRIVATE\n        LLAMA_VERSIONS=>=3 LLAMA_DATE=999999)\n    target_include_directories(llamamodel-mainline-${BUILD_VARIANT} PRIVATE\n        src include/gpt4all-backend\n    )\n    prepare_target(llamamodel-mainline llama-mainline)\n\n    if (NOT PROJECT_IS_TOP_LEVEL AND BUILD_VARIANT STREQUAL cuda)\n        set(CUDAToolkit_BIN_DIR ${CUDAToolkit_BIN_DIR} PARENT_SCOPE)\n    endif()\nendforeach()\n\nadd_library(llmodel\n    src/dlhandle.cpp\n    src/llmodel.cpp\n    src/llmodel_c.cpp\n    src/llmodel_shared.cpp\n)\ngpt4all_add_warning_options(llmodel)\ntarget_sources(llmodel PUBLIC\n    FILE_SET public_headers TYPE HEADERS BASE_DIRS include\n    FILES include/gpt4all-backend/llmodel.h\n          include/gpt4all-backend/llmodel_c.h\n          include/gpt4all-backend/sysinfo.h\n)\ntarget_compile_definitions(llmodel PRIVATE LIB_FILE_EXT=\"${CMAKE_SHARED_LIBRARY_SUFFIX}\")\ntarget_include_directories(llmodel PRIVATE src include/gpt4all-backend)\n\nset_target_properties(llmodel PROPERTIES\n                              VERSION ${PROJECT_VERSION}\n                              SOVERSION ${PROJECT_VERSION_MAJOR})\n\nset(COMPONENT_NAME_MAIN ${PROJECT_NAME})\nset(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install)\n"
  },
  {
    "path": "gpt4all-backend/README.md",
    "content": "# GPT4ALL Backend\nThis directory contains the C/C++ model backend used by GPT4All for inference on the CPU. This backend acts as a universal library/wrapper for all models that the GPT4All ecosystem supports. Language bindings are built on top of this universal library. The native GPT4all Chat application directly uses this library for all inference.\n\n# What models are supported by the GPT4All ecosystem?\n\nCurrently, there are three different model architectures that are supported:\n\n1. GPTJ - Based off of the GPT-J architecture with examples found [here](https://huggingface.co/EleutherAI/gpt-j-6b)\n2. LLAMA - Based off of the LLAMA architecture with examples found [here](https://huggingface.co/models?sort=downloads&search=llama)\n3. MPT - Based off of Mosaic ML's MPT architecture with examples found [here](https://huggingface.co/mosaicml/mpt-7b)\n\n# Why so many different architectures? What differentiates them?\n\nOne of the major differences is license. Currently, the LLAMA based models are subject to a non-commercial license, whereas the GPTJ and MPT base models allow commercial usage. In the early advent of the recent explosion of activity in open source local models, the llama models have generally been seen as performing better, but that is changing quickly. Every week - even every day! - new models are released with some of the GPTJ and MPT models competitive in performance/quality with LLAMA. What's more, there are some very nice architectural innovations with the MPT models that could lead to new performance/quality gains.\n\n# How does GPT4All make these models available for CPU inference?\n\nBy leveraging the ggml library written by Georgi Gerganov and a growing community of developers. There are currently multiple different versions of this library. 
The original github repo can be found [here](https://github.com/ggerganov/ggml), but the developer of the library has also created a LLAMA based version [here](https://github.com/ggerganov/llama.cpp). Currently, this backend is using the latter as a submodule.\n\n# Does that mean GPT4All is compatible with all llama.cpp models and vice versa?\n\nUnfortunately, no for three reasons:\n\n1. The upstream [llama.cpp](https://github.com/ggerganov/llama.cpp) project has introduced [a compatibility breaking](https://github.com/ggerganov/llama.cpp/commit/b9fd7eee57df101d4a3e3eabc9fd6c2cb13c9ca1) re-quantization method recently. This is a breaking change that renders all previous models (including the ones that GPT4All uses) inoperative with newer versions of llama.cpp since that change.\n2. The GPT4All backend has the llama.cpp submodule specifically pinned to a version prior to this breaking change.\n3. The GPT4All backend currently supports MPT based models as an added feature. Neither llama.cpp nor the original ggml repo support this architecture as of this writing, however efforts are underway to make MPT available in the ggml repo which you can follow [here.](https://github.com/ggerganov/ggml/pull/145)\n\n# What is being done to make them more compatible?\n\nA few things. Number one, we are maintaining compatibility with our current model zoo by way of the submodule pinning. However, we are also exploring how we can update to newer versions of llama.cpp without breaking our current models. This might involve an additional magic header check or it could possibly involve keeping the currently pinned submodule and also adding a new submodule with later changes and differentiating them with namespaces or some other manner. Investigations continue.\n\n# What about GPU inference?\n\nIn newer versions of llama.cpp, there has been some added support for NVIDIA GPU's for inference. 
We're investigating how to incorporate this into our downloadable installers.\n\n# Ok, so bottom line... how do I make my model on Hugging Face compatible with the GPT4All ecosystem right now?\n\n1. Check to make sure the Hugging Face model is available in one of our three supported architectures\n2. If it is, then you can use the conversion script inside of our pinned llama.cpp submodule for GPTJ and LLAMA based models\n3. Or if your model is an MPT model you can use the conversion script located directly in this backend directory under the scripts subdirectory\n\n# Check back for updates as we'll try to keep this updated as things change!\n"
  },
  {
    "path": "gpt4all-backend/include/gpt4all-backend/llmodel.h",
    "content": "#ifndef LLMODEL_H\n#define LLMODEL_H\n\n#include <algorithm>\n#include <cassert>\n#include <cstddef>\n#include <cstdint>\n#include <expected>\n#include <functional>\n#include <optional>\n#include <span>\n#include <stdexcept>\n#include <string>\n#include <string_view>\n#include <unordered_map>\n#include <utility>\n#include <vector>\n\nclass Dlhandle;\n\nusing namespace std::string_literals;\n\n#define LLMODEL_MAX_PROMPT_BATCH 128\n\nclass LLModel {\npublic:\n    using Token = int32_t;\n    using PromptCallback      = std::function<bool(std::span<const Token> batch, bool cached)>;\n    using ResponseCallback    = std::function<bool(Token token, std::string_view piece)>;\n    using EmbedCancelCallback = bool(unsigned *batchSizes, unsigned nBatch, const char *backend);\n    using ProgressCallback    = std::function<bool(float progress)>;\n\n    class BadArchError: public std::runtime_error {\n    public:\n        BadArchError(std::string arch)\n            : runtime_error(\"Unsupported model architecture: \" + arch)\n            , m_arch(std::move(arch))\n            {}\n\n        const std::string &arch() const noexcept { return m_arch; }\n\n    private:\n        std::string m_arch;\n    };\n\n    class MissingImplementationError: public std::runtime_error {\n    public:\n        using std::runtime_error::runtime_error;\n    };\n\n    class UnsupportedModelError: public std::runtime_error {\n    public:\n        using std::runtime_error::runtime_error;\n    };\n\n    struct GPUDevice {\n        const char *backend;\n        int index;\n        int type;\n        size_t heapSize;\n        std::string name;\n        std::string vendor;\n\n        GPUDevice(const char *backend, int index, int type, size_t heapSize, std::string name, std::string vendor):\n            backend(backend), index(index), type(type), heapSize(heapSize), name(std::move(name)),\n            vendor(std::move(vendor)) {}\n\n        std::string selectionName() const\n        {\n       
     assert(backend == \"cuda\"s || backend == \"kompute\"s);\n            return backendName() + \": \" + name;\n        }\n\n        std::string backendName() const { return backendIdToName(backend); }\n\n        static std::string backendIdToName(const std::string &backend) { return s_backendNames.at(backend); }\n\n        static std::string updateSelectionName(const std::string &name) {\n            if (name == \"Auto\" || name == \"CPU\" || name == \"Metal\")\n                return name;\n            auto it = std::find_if(s_backendNames.begin(), s_backendNames.end(), [&name](const auto &entry) {\n                return name.starts_with(entry.second + \": \");\n            });\n            if (it != s_backendNames.end())\n                return name;\n            return \"Vulkan: \" + name; // previously, there were only Vulkan devices\n        }\n\n    private:\n        static inline const std::unordered_map<std::string, std::string> s_backendNames {\n            {\"cpu\", \"CPU\"}, {\"metal\", \"Metal\"}, {\"cuda\", \"CUDA\"}, {\"kompute\", \"Vulkan\"},\n        };\n    };\n\n    class Implementation {\n    public:\n        Implementation(const Implementation &) = delete;\n        Implementation(Implementation &&);\n        ~Implementation();\n\n        std::string_view modelType() const { return m_modelType; }\n        std::string_view buildVariant() const { return m_buildVariant; }\n\n        static LLModel *construct(const std::string &modelPath, const std::string &backend = \"auto\", int n_ctx = 2048);\n        static std::vector<GPUDevice> availableGPUDevices(size_t memoryRequired = 0);\n        static int32_t maxContextLength(const std::string &modelPath);\n        static int32_t layerCount(const std::string &modelPath);\n        static bool isEmbeddingModel(const std::string &modelPath);\n        static auto chatTemplate(const char *modelPath) -> std::expected<std::string, std::string>;\n        static void setImplementationsSearchPath(const 
std::string &path);\n        static const std::string &implementationsSearchPath();\n        static bool hasSupportedCPU();\n        // 0 for no, 1 for yes, -1 for non-x86_64\n        static int cpuSupportsAVX2();\n\n    private:\n        Implementation(Dlhandle &&);\n\n        static const std::vector<Implementation> &implementationList();\n        static const Implementation *implementation(const char *fname, const std::string &buildVariant);\n        static LLModel *constructGlobalLlama(const std::optional<std::string> &backend = std::nullopt);\n\n        char *(*m_getFileArch)(const char *fname);\n        bool (*m_isArchSupported)(const char *arch);\n        LLModel *(*m_construct)();\n\n        std::string_view m_modelType;\n        std::string_view m_buildVariant;\n        Dlhandle *m_dlhandle;\n    };\n\n    struct PromptContext {\n        int32_t n_predict = 200;\n        int32_t top_k = 40;\n        float   top_p = 0.9f;\n        float   min_p = 0.0f;\n        float   temp = 0.9f;\n        int32_t n_batch = 9;\n        float   repeat_penalty = 1.10f;\n        int32_t repeat_last_n = 64;     // last n tokens to penalize\n        float   contextErase = 0.5f;    // percent of context to erase if we exceed the context window\n    };\n\n    explicit LLModel() {}\n    virtual ~LLModel() {}\n\n    virtual bool supportsEmbedding() const = 0;\n    virtual bool supportsCompletion() const = 0;\n    virtual bool loadModel(const std::string &modelPath, int n_ctx, int ngl) = 0;\n    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; }\n    virtual bool isEmbeddingModel(const std::string &modelPath) const { (void)modelPath; return false; }\n    virtual bool isModelLoaded() const = 0;\n    virtual size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) = 0;\n    virtual size_t stateSize() const = 0;\n    virtual size_t saveState(std::span<uint8_t> stateOut, std::vector<Token> &inputTokensOut) const = 0;\n    
virtual size_t restoreState(std::span<const uint8_t> state, std::span<const Token> inputTokens) = 0;\n\n    // This method requires the model to return true from supportsCompletion otherwise it will throw\n    // an error\n    virtual void prompt(std::string_view        prompt,\n                        const PromptCallback   &promptCallback,\n                        const ResponseCallback &responseCallback,\n                        const PromptContext    &ctx);\n\n    virtual int32_t countPromptTokens(std::string_view prompt) const;\n\n    virtual size_t embeddingSize() const {\n        throw std::logic_error(std::string(implementation().modelType()) + \" does not support embeddings\");\n    }\n    // user-specified prefix\n    virtual void embed(const std::vector<std::string> &texts, float *embeddings, std::optional<std::string> prefix,\n                       int dimensionality = -1, size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false,\n                       EmbedCancelCallback *cancelCb = nullptr);\n    // automatic prefix\n    virtual void embed(const std::vector<std::string> &texts, float *embeddings, bool isRetrieval,\n                       int dimensionality = -1, size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false);\n\n    virtual void setThreadCount(int32_t n_threads) { (void)n_threads; }\n    virtual int32_t threadCount() const { return 1; }\n\n    const Implementation &implementation() const {\n        return *m_implementation;\n    }\n\n    virtual std::vector<GPUDevice> availableGPUDevices(size_t memoryRequired) const {\n        (void)memoryRequired;\n        return {};\n    }\n\n    virtual bool initializeGPUDevice(size_t memoryRequired, const std::string &name) const {\n        (void)memoryRequired;\n        (void)name;\n        return false;\n    }\n\n    virtual bool initializeGPUDevice(int device, std::string *unavail_reason = nullptr) const {\n        (void)device;\n        if (unavail_reason) {\n            
*unavail_reason = \"model has no GPU support\";\n        }\n        return false;\n    }\n\n    virtual bool usingGPUDevice() const { return false; }\n    virtual const char *backendName() const { return \"cpu\"; }\n    virtual const char *gpuDeviceName() const { return nullptr; }\n\n    void setProgressCallback(ProgressCallback callback) { m_progressCallback = callback; }\n\n    virtual int32_t contextLength() const = 0;\n    virtual auto specialTokens() -> std::unordered_map<std::string, std::string> const = 0;\n\nprotected:\n    // These are pure virtual because subclasses need to implement as the default implementation of\n    // 'prompt' above calls these functions\n    virtual std::vector<Token> tokenize(std::string_view str) const = 0;\n    virtual bool isSpecialToken(Token id) const = 0;\n    virtual std::string tokenToString(Token id) const = 0;\n    virtual void initSampler(const PromptContext &ctx) = 0;\n    virtual Token sampleToken() const = 0;\n    virtual bool evalTokens(int32_t nPast, std::span<const Token> tokens) const = 0;\n    virtual void shiftContext(const PromptContext &promptCtx, int32_t *nPast) = 0;\n    virtual int32_t inputLength() const = 0;\n    virtual int32_t computeModelInputPosition(std::span<const Token> input) const = 0;\n    virtual void setModelInputPosition(int32_t pos) = 0;\n    virtual void appendInputToken(Token tok) = 0;\n    virtual std::span<const Token> inputTokens() const = 0;\n    virtual const std::vector<Token> &endTokens() const = 0;\n    virtual bool shouldAddBOS() const = 0;\n\n    virtual int32_t maxContextLength(std::string const &modelPath) const\n    {\n        (void)modelPath;\n        return -1;\n    }\n\n    virtual int32_t layerCount(std::string const &modelPath) const\n    {\n        (void)modelPath;\n        return -1;\n    }\n\n    virtual auto chatTemplate(const char *modelPath) const -> std::expected<std::string, std::string>\n    {\n        (void)modelPath;\n        return std::unexpected(\"not 
implemented\");\n    }\n\n    const Implementation *m_implementation = nullptr;\n\n    ProgressCallback m_progressCallback;\n    static bool staticProgressCallback(float progress, void* ctx)\n    {\n        LLModel* model = static_cast<LLModel*>(ctx);\n        if (model && model->m_progressCallback)\n            return model->m_progressCallback(progress);\n        return true;\n    }\n\n    // prefill context with prompt\n    auto decodePrompt(const PromptCallback &promptCallback,\n                      const PromptContext  &promptCtx,\n                      std::vector<Token>    embd_inp)\n        -> std::optional<int32_t>;\n    // generate a response\n    void generateResponse(const ResponseCallback &responseCallback,\n                          const PromptContext    &promptCtx,\n                          int32_t                 nPast);\n\n    friend class LLMImplementation;\n};\n\n#endif // LLMODEL_H\n"
  },
  {
    "path": "gpt4all-backend/include/gpt4all-backend/llmodel_c.h",
    "content": "#ifndef LLMODEL_C_H\n#define LLMODEL_C_H\n\n#include <stdbool.h>\n#include <stddef.h>\n#include <stdint.h>\n\n#ifdef __GNUC__\n#define DEPRECATED __attribute__ ((deprecated))\n#elif defined(_MSC_VER)\n#define DEPRECATED __declspec(deprecated)\n#else\n#pragma message(\"WARNING: You need to implement DEPRECATED for this compiler\")\n#define DEPRECATED\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/**\n * Opaque pointer to the underlying model.\n */\ntypedef void *llmodel_model;\n\n/**\n * A token.\n */\ntypedef int32_t token_t;\n\n/**\n * llmodel_prompt_context structure for holding the prompt context.\n * NOTE: The implementation takes care of all the memory handling of the raw logits pointer and the\n * raw tokens pointer. Attempting to resize them or modify them in any way can lead to undefined\n * behavior.\n */\nstruct llmodel_prompt_context {\n    int32_t n_predict;      // number of tokens to predict\n    int32_t top_k;          // top k logits to sample from\n    float   top_p;          // nucleus sampling probability threshold\n    float   min_p;          // Min P sampling\n    float   temp;           // temperature to adjust model's output distribution\n    int32_t n_batch;        // number of predictions to generate in parallel\n    float   repeat_penalty; // penalty factor for repeated tokens\n    int32_t repeat_last_n;  // last n tokens to penalize\n    float   context_erase;  // percent of context to erase if we exceed the context window\n};\n\nstruct llmodel_gpu_device {\n    const char * backend;\n    int index;\n    int type; // same as VkPhysicalDeviceType\n    size_t heapSize;\n    const char * name;\n    const char * vendor;\n};\n\n#ifndef __cplusplus\ntypedef struct llmodel_prompt_context llmodel_prompt_context;\ntypedef struct llmodel_gpu_device llmodel_gpu_device;\n#endif\n\n/**\n * Callback type for prompt processing.\n * @param token_ids An array of token ids of the prompt.\n * @param n_token_ids The number of 
tokens in the array.\n * @param cached Whether the tokens were already in cache.\n * @return a bool indicating whether the model should keep processing.\n */\ntypedef bool (*llmodel_prompt_callback)(const token_t *token_ids, size_t n_token_ids, bool cached);\n\n/**\n * Callback type for response.\n * @param token_id The token id of the response.\n * @param response The response string. NOTE: a token_id of -1 indicates the string is an error string.\n * @return a bool indicating whether the model should keep generating.\n */\ntypedef bool (*llmodel_response_callback)(token_t token_id, const char *response);\n\n/**\n * Embedding cancellation callback for use with llmodel_embed.\n * @param batch_sizes The number of tokens in each batch that will be embedded.\n * @param n_batch The number of batches that will be embedded.\n * @param backend The backend that will be used for embedding. One of \"cpu\", \"kompute\", \"cuda\", or \"metal\".\n * @return True to cancel llmodel_embed, false to continue.\n */\ntypedef bool (*llmodel_emb_cancel_callback)(unsigned *batch_sizes, unsigned n_batch, const char *backend);\n\ntypedef void (*llmodel_special_token_callback)(const char *name, const char *token);\n\n/**\n * Create a llmodel instance.\n * Recognises correct model type from file at model_path\n * @param model_path A string representing the path to the model file.\n * @return A pointer to the llmodel_model instance; NULL on error.\n */\nDEPRECATED llmodel_model llmodel_model_create(const char *model_path);\n\n/**\n * Create a llmodel instance.\n * Recognises correct model type from file at model_path\n * @param model_path A string representing the path to the model file; will only be used to detect model type.\n * @param backend A string representing the implementation to use. 
One of 'auto', 'cpu', 'metal', 'kompute', or 'cuda'.\n * @param error A pointer to a string; will only be set on error.\n * @return A pointer to the llmodel_model instance; NULL on error.\n */\nllmodel_model llmodel_model_create2(const char *model_path, const char *backend, const char **error);\n\n/**\n * Destroy a llmodel instance.\n * Recognises correct model type using type info\n * @param model a pointer to a llmodel_model instance.\n */\nvoid llmodel_model_destroy(llmodel_model model);\n\n/**\n * Estimate RAM requirement for a model file\n * @param model A pointer to the llmodel_model instance.\n * @param model_path A string representing the path to the model file.\n * @param n_ctx Maximum size of context window\n * @param ngl Number of GPU layers to use (Vulkan)\n * @return size greater than 0 if the model was parsed successfully, 0 if file could not be parsed.\n */\nsize_t llmodel_required_mem(llmodel_model model, const char *model_path, int n_ctx, int ngl);\n\n/**\n * Load a model from a file.\n * @param model A pointer to the llmodel_model instance.\n * @param model_path A string representing the path to the model file.\n * @param n_ctx Maximum size of context window\n * @param ngl Number of GPU layers to use (Vulkan)\n * @return true if the model was loaded successfully, false otherwise.\n */\nbool llmodel_loadModel(llmodel_model model, const char *model_path, int n_ctx, int ngl);\n\n/**\n * Check if a model is loaded.\n * @param model A pointer to the llmodel_model instance.\n * @return true if the model is loaded, false otherwise.\n */\nbool llmodel_isModelLoaded(llmodel_model model);\n\n/**\n * Get the size of the internal state of the model.\n * NOTE: This state data is specific to the type of model you have created.\n * @param model A pointer to the llmodel_model instance.\n * @return the size in bytes of the internal state of the model\n */\nuint64_t llmodel_state_get_size(llmodel_model model);\n\n/**\n * Saves the internal state of the model.\n * 
NOTE: This state data is specific to the type of model you have created.\n * @param model A pointer to the llmodel_model instance.\n * @param state Where to store the state. This must be a buffer of at least llmodel_state_get_size() bytes.\n * @param state_size The size of the destination for the state.\n * @param input_tokens_out Where to store the address of the token cache state. This is dynamically allocated and must\n * be freed with llmodel_state_free_input_tokens.\n * @param n_input_tokens Where to store the size of the token cache state.\n * @return The number of bytes copied. On error, zero is returned, the token cache is set to NULL, and the token cache\n * size is set to zero.\n */\nuint64_t llmodel_state_get_data(llmodel_model model, uint8_t *state_out, uint64_t state_size,\n                                token_t **input_tokens_out, uint64_t *n_input_tokens);\n\n/**\n * Frees the temporary token cache buffer created by a call to llmodel_state_get_data().\n * @param input_tokens The token cache buffer.\n */\nvoid llmodel_state_free_input_tokens(token_t *input_tokens);\n\n/**\n * Restores the internal state of the model using data from the specified address.\n * NOTE: This state data is specific to the type of model you have created.\n * @param model A pointer to the llmodel_model instance.\n * @param state A pointer to the state data.\n * @param state_size The size of the state data.\n * @param input_tokens The token cache associated with the saved state.\n * @param n_input_tokens The number of tokens in input_tokens.\n * @return The number of bytes read, or zero on error.\n */\nuint64_t llmodel_state_set_data(llmodel_model model, const uint8_t *state, uint64_t state_size,\n                                const token_t *input_tokens, uint64_t n_input_tokens);\n\n/**\n * Generate a response using the model.\n * @param model A pointer to the llmodel_model instance.\n * @param prompt A string representing the input prompt.\n * @param prompt_callback A 
callback function for handling the processing of prompt.\n * @param response_callback A callback function for handling the generated response.\n * @param ctx A pointer to the llmodel_prompt_context structure.\n * @param error A pointer to a string; will only be set on error.\n */\nbool llmodel_prompt(llmodel_model               model,\n                    const char                 *prompt,\n                    llmodel_prompt_callback     prompt_callback,\n                    llmodel_response_callback   response_callback,\n                    llmodel_prompt_context     *ctx,\n                    const char                **error);\n\n/**\n * Generate an embedding using the model.\n * NOTE: If given NULL pointers for the model or text, or an empty text, a NULL pointer will be\n * returned. Bindings should signal an error when NULL is the return value.\n * @param model A pointer to the llmodel_model instance.\n * @param texts A pointer to a NULL-terminated array of strings representing the texts to generate an\n * embedding for.\n * @param embedding_size A pointer to a size_t type that will be set by the call indicating the length\n * of the returned floating point array.\n * @param prefix The model-specific prefix representing the embedding task, without the trailing colon. NULL for no\n * prefix.\n * @param dimensionality The embedding dimension, for use with Matryoshka-capable models. Set to -1 to for full-size.\n * @param token_count Return location for the number of prompt tokens processed, or NULL.\n * @param do_mean True to average multiple embeddings if the text is longer than the model can accept, False to\n * truncate.\n * @param atlas Try to be fully compatible with the Atlas API. Currently, this means texts longer than 8192 tokens with\n * long_text_mode=\"mean\" will raise an error. Disabled by default.\n * @param cancel_cb Cancellation callback, or NULL. 
See the documentation of llmodel_emb_cancel_callback.\n * @param error Return location for a malloc()ed string that will be set on error, or NULL.\n * @return A pointer to an array of floating point values passed to the calling method which then will\n * be responsible for lifetime of this memory. NULL if an error occurred.\n */\nfloat *llmodel_embed(llmodel_model model, const char **texts, size_t *embedding_size, const char *prefix,\n                     int dimensionality, size_t *token_count, bool do_mean, bool atlas,\n                     llmodel_emb_cancel_callback cancel_cb, const char **error);\n\n/**\n * Frees the memory allocated by the llmodel_embedding function.\n * @param ptr A pointer to the embedding as returned from llmodel_embedding.\n */\nvoid llmodel_free_embedding(float *ptr);\n\n/**\n * Set the number of threads to be used by the model.\n * @param model A pointer to the llmodel_model instance.\n * @param n_threads The number of threads to be used.\n */\nvoid llmodel_setThreadCount(llmodel_model model, int32_t n_threads);\n\n/**\n * Get the number of threads currently being used by the model.\n * @param model A pointer to the llmodel_model instance.\n * @return The number of threads currently being used.\n */\nint32_t llmodel_threadCount(llmodel_model model);\n\n/**\n * Set llmodel implementation search path.\n * Default is \".\"\n * @param path The path to the llmodel implementation shared objects. 
This can be a single path or\n * a list of paths separated by ';' delimiter.\n */\nvoid llmodel_set_implementation_search_path(const char *path);\n\n/**\n * Get llmodel implementation search path.\n * @return The current search path; lifetime ends on next set llmodel_set_implementation_search_path() call.\n */\nconst char *llmodel_get_implementation_search_path();\n\n/**\n * Get a list of available GPU devices given the memory required.\n * @param memoryRequired The minimum amount of VRAM, in bytes\n * @return A pointer to an array of llmodel_gpu_device's whose number is given by num_devices.\n */\nstruct llmodel_gpu_device* llmodel_available_gpu_devices(size_t memoryRequired, int* num_devices);\n\n/**\n * Initializes a GPU device based on a specified string criterion.\n *\n * This function initializes a GPU device based on a string identifier provided. The function\n * allows initialization based on general device type (\"gpu\"), vendor name (\"amd\", \"nvidia\", \"intel\"),\n * or any specific device name.\n *\n * @param memoryRequired The amount of memory (in bytes) required by the application or task\n *                       that will utilize the GPU device.\n * @param device A string specifying the desired criterion for GPU device selection. It can be:\n *               - \"gpu\": To initialize the best available GPU.\n *               - \"amd\", \"nvidia\", or \"intel\": To initialize the best available GPU from that vendor.\n *               - A specific GPU device name: To initialize a GPU with that exact name.\n *\n * @return True if the GPU device is successfully initialized based on the provided string\n *         criterion. 
Returns false if the desired GPU device could not be initialized.\n */\nbool llmodel_gpu_init_gpu_device_by_string(llmodel_model model, size_t memoryRequired, const char *device);\n\n/**\n * Initializes a GPU device by specifying a valid gpu device pointer.\n * @param device A gpu device pointer.\n * @return True if the GPU device is successfully initialized, false otherwise.\n */\nbool llmodel_gpu_init_gpu_device_by_struct(llmodel_model model, const llmodel_gpu_device *device);\n\n/**\n * Initializes a GPU device by its index.\n * @param device An integer representing the index of the GPU device to be initialized.\n * @return True if the GPU device is successfully initialized, false otherwise.\n */\nbool llmodel_gpu_init_gpu_device_by_int(llmodel_model model, int device);\n\n/**\n * @return The name of the llama.cpp backend currently in use. One of \"cpu\", \"kompute\", or \"metal\".\n */\nconst char *llmodel_model_backend_name(llmodel_model model);\n\n/**\n * @return The name of the GPU device currently in use, or NULL for backends other than Kompute.\n */\nconst char *llmodel_model_gpu_device_name(llmodel_model model);\n\nint32_t llmodel_count_prompt_tokens(llmodel_model model, const char *prompt, const char **error);\n\nvoid llmodel_model_foreach_special_token(llmodel_model model, llmodel_special_token_callback callback);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif // LLMODEL_C_H\n"
  },
  {
    "path": "gpt4all-backend/include/gpt4all-backend/sysinfo.h",
    "content": "#ifndef SYSINFO_H\n#define SYSINFO_H\n\n#include <fstream>\n#include <iomanip>\n#include <sstream>\n#include <string>\n\n#if defined(__linux__)\n#   include <unistd.h>\n#elif defined(__APPLE__)\n#   include <sys/types.h>\n#   include <sys/sysctl.h>\n#elif defined(_WIN32)\n#   define WIN32_LEAN_AND_MEAN\n#   ifndef NOMINMAX\n#       define NOMINMAX\n#   endif\n#   include <windows.h>\n#endif\n\nstatic long long getSystemTotalRAMInBytes()\n{\n    long long totalRAM = 0;\n\n#if defined(__linux__)\n    std::ifstream file(\"/proc/meminfo\");\n    std::string line;\n    while (std::getline(file, line)) {\n        if (line.find(\"MemTotal\") != std::string::npos) {\n            std::string memTotalStr = line.substr(line.find(\":\") + 1);\n            memTotalStr.erase(0, memTotalStr.find_first_not_of(\" \"));\n            memTotalStr = memTotalStr.substr(0, memTotalStr.find(\" \"));\n            totalRAM = std::stoll(memTotalStr) * 1024;  // Convert from KB to bytes\n            break;\n        }\n    }\n    file.close();\n#elif defined(__APPLE__)\n    int mib[2] = {CTL_HW, HW_MEMSIZE};\n    size_t length = sizeof(totalRAM);\n    sysctl(mib, 2, &totalRAM, &length, NULL, 0);\n#elif defined(_WIN32)\n    MEMORYSTATUSEX memoryStatus;\n    memoryStatus.dwLength = sizeof(memoryStatus);\n    GlobalMemoryStatusEx(&memoryStatus);\n    totalRAM = memoryStatus.ullTotalPhys;\n#endif\n\n    return totalRAM;\n}\n\nstatic double getSystemTotalRAMInGB()\n{\n    return static_cast<double>(getSystemTotalRAMInBytes()) / (1024 * 1024 * 1024);\n}\n\nstatic std::string getSystemTotalRAMInGBString()\n{\n    std::stringstream ss;\n    ss << std::fixed << std::setprecision(2) << getSystemTotalRAMInGB() << \" GB\";\n    return ss.str();\n}\n\n#endif // SYSINFO_H\n"
  },
  {
    "path": "gpt4all-backend/llama.cpp.cmake",
    "content": "cmake_minimum_required(VERSION 3.14)  # for add_link_options and implicit target directories.\n\nset(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)\n\n#\n# Option list\n#\n# some of the options here are commented out so they can be set \"dynamically\" before calling include_ggml()\n\nset(GGML_LLAMAFILE_DEFAULT ON)\n\n# general\noption(LLAMA_STATIC                     \"llama: static link libraries\"                          OFF)\noption(LLAMA_NATIVE                     \"llama: enable -march=native flag\"                      OFF)\n\n# debug\noption(LLAMA_ALL_WARNINGS               \"llama: enable all compiler warnings\"                   ON)\noption(LLAMA_ALL_WARNINGS_3RD_PARTY     \"llama: enable all compiler warnings in 3rd party libs\" OFF)\noption(LLAMA_GPROF                      \"llama: enable gprof\"                                   OFF)\n\n# build\noption(LLAMA_FATAL_WARNINGS             \"llama: enable -Werror flag\"                            OFF)\n\n# instruction set specific\n#option(GGML_AVX                     \"ggml: enable AVX\"                                     ON)\n#option(GGML_AVX2                    \"ggml: enable AVX2\"                                    ON)\n#option(GGML_AVX512                  \"ggml: enable AVX512\"                                  OFF)\n#option(GGML_AVX512_VBMI             \"ggml: enable AVX512-VBMI\"                             OFF)\n#option(GGML_AVX512_VNNI             \"ggml: enable AVX512-VNNI\"                             OFF)\n#option(GGML_FMA                     \"ggml: enable FMA\"                                     ON)\n# in MSVC F16C is implied with AVX2/AVX512\n#if (NOT MSVC)\n#    option(GGML_F16C                \"ggml: enable F16C\"                                    ON)\n#endif()\n\nif (WIN32)\n    set(LLAMA_WIN_VER \"0x602\" CACHE STRING \"llama: Windows Version\")\nendif()\n\n# 3rd party libs\noption(GGML_ACCELERATE                      \"ggml: enable Accelerate framework\"      
         ON)\noption(GGML_BLAS                            \"ggml: use BLAS\"                                  OFF)\noption(GGML_LLAMAFILE                       \"ggml: use llamafile SGEMM\"                       ${GGML_LLAMAFILE_DEFAULT})\nset(GGML_BLAS_VENDOR \"Generic\" CACHE STRING \"ggml: BLAS library vendor\")\n\n#option(GGML_CUDA                            \"ggml: use CUDA\"                                  OFF)\noption(GGML_CUDA_FORCE_DMMV                 \"ggml: use dmmv instead of mmvq CUDA kernels\"     OFF)\noption(GGML_CUDA_FORCE_MMQ                  \"ggml: use mmq kernels instead of cuBLAS\"         OFF)\noption(GGML_CUDA_FORCE_CUBLAS               \"ggml: always use cuBLAS instead of mmq kernels\"  OFF)\nset   (GGML_CUDA_DMMV_X   \"32\" CACHE STRING \"ggml: x stride for dmmv CUDA kernels\")\nset   (GGML_CUDA_MMV_Y     \"1\" CACHE STRING \"ggml: y block size for mmv CUDA kernels\")\noption(GGML_CUDA_F16                        \"ggml: use 16 bit floats for some calculations\"   OFF)\nset   (GGML_CUDA_KQUANTS_ITER \"2\" CACHE STRING\n                                            \"ggml: iters./thread per block for Q2_K/Q6_K\")\nset   (GGML_CUDA_PEER_MAX_BATCH_SIZE \"128\" CACHE STRING\n                                            \"ggml: max. 
batch size for using peer access\")\noption(GGML_CUDA_NO_PEER_COPY               \"ggml: do not use peer to peer copies\"            OFF)\noption(GGML_CUDA_NO_VMM                     \"ggml: do not try to use CUDA VMM\"                OFF)\noption(GGML_CUDA_FA_ALL_QUANTS              \"ggml: compile all quants for FlashAttention\"     OFF)\noption(GGML_CUDA_USE_GRAPHS                 \"ggml: use CUDA graphs (llama.cpp only)\"          OFF)\n\n#option(GGML_HIPBLAS                         \"ggml: use hipBLAS\"                               OFF)\noption(GGML_HIP_UMA                         \"ggml: use HIP unified memory architecture\"       OFF)\n#option(GGML_VULKAN                          \"ggml: use Vulkan\"                                OFF)\noption(GGML_VULKAN_CHECK_RESULTS            \"ggml: run Vulkan op checks\"                      OFF)\noption(GGML_VULKAN_DEBUG                    \"ggml: enable Vulkan debug output\"                OFF)\noption(GGML_VULKAN_VALIDATE                 \"ggml: enable Vulkan validation\"                  OFF)\noption(GGML_VULKAN_RUN_TESTS                \"ggml: run Vulkan tests\"                          OFF)\n#option(GGML_METAL                           \"ggml: use Metal\"                                 ${GGML_METAL_DEFAULT})\noption(GGML_METAL_NDEBUG                    \"ggml: disable Metal debugging\"                   OFF)\noption(GGML_METAL_SHADER_DEBUG              \"ggml: compile Metal with -fno-fast-math\"         OFF)\nset(GGML_METAL_MACOSX_VERSION_MIN \"\" CACHE STRING\n                                            \"ggml: metal minimum macOS version\")\nset(GGML_METAL_STD \"\" CACHE STRING          \"ggml: metal standard version (-std flag)\")\n#option(GGML_KOMPUTE                        \"ggml: use Kompute\"                               OFF)\noption(GGML_QKK_64                          \"ggml: use super-block size of 64 for k-quants\"   OFF)\nset(GGML_SCHED_MAX_COPIES  \"4\" CACHE STRING \"ggml: max input copies for 
pipeline parallelism\")\n\n# add perf arguments\noption(LLAMA_PERF                           \"llama: enable perf\"                               OFF)\n\n#\n# Compile flags\n#\n\nset(THREADS_PREFER_PTHREAD_FLAG ON)\nfind_package(Threads REQUIRED)\n\nlist(APPEND GGML_COMPILE_DEFS GGML_SCHED_MAX_COPIES=${GGML_SCHED_MAX_COPIES})\n\n# enable libstdc++ assertions for debug builds\nif (CMAKE_SYSTEM_NAME MATCHES \"Linux\")\n    list(APPEND GGML_COMPILE_DEFS $<$<CONFIG:Debug>:_GLIBCXX_ASSERTIONS>)\nendif()\n\nif (APPLE AND GGML_ACCELERATE)\n    find_library(ACCELERATE_FRAMEWORK Accelerate)\n    if (ACCELERATE_FRAMEWORK)\n        message(STATUS \"Accelerate framework found\")\n\n        list(APPEND GGML_COMPILE_DEFS GGML_USE_ACCELERATE)\n        list(APPEND GGML_COMPILE_DEFS ACCELERATE_NEW_LAPACK)\n        list(APPEND GGML_COMPILE_DEFS ACCELERATE_LAPACK_ILP64)\n        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK})\n    else()\n        message(WARNING \"Accelerate framework not found\")\n    endif()\nendif()\n\nif (GGML_BLAS)\n    if (LLAMA_STATIC)\n        set(BLA_STATIC ON)\n    endif()\n    if (${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.22)\n        set(BLA_SIZEOF_INTEGER 8)\n    endif()\n\n    set(BLA_VENDOR ${GGML_BLAS_VENDOR})\n    find_package(BLAS)\n\n    if (BLAS_FOUND)\n        message(STATUS \"BLAS found, Libraries: ${BLAS_LIBRARIES}\")\n\n        if (\"${BLAS_INCLUDE_DIRS}\" STREQUAL \"\")\n            # BLAS_INCLUDE_DIRS is missing in FindBLAS.cmake.\n            # see https://gitlab.kitware.com/cmake/cmake/-/issues/20268\n            find_package(PkgConfig REQUIRED)\n            if (${GGML_BLAS_VENDOR} MATCHES \"Generic\")\n                pkg_check_modules(DepBLAS REQUIRED blas)\n            elseif (${GGML_BLAS_VENDOR} MATCHES \"OpenBLAS\")\n                # As of openblas v0.3.22, the 64-bit is named openblas64.pc\n                pkg_check_modules(DepBLAS openblas64)\n                if (NOT DepBLAS_FOUND)\n                    
pkg_check_modules(DepBLAS REQUIRED openblas)\n                endif()\n            elseif (${GGML_BLAS_VENDOR} MATCHES \"FLAME\")\n                pkg_check_modules(DepBLAS REQUIRED blis)\n            elseif (${GGML_BLAS_VENDOR} MATCHES \"ATLAS\")\n                pkg_check_modules(DepBLAS REQUIRED blas-atlas)\n            elseif (${GGML_BLAS_VENDOR} MATCHES \"FlexiBLAS\")\n                pkg_check_modules(DepBLAS REQUIRED flexiblas_api)\n            elseif (${GGML_BLAS_VENDOR} MATCHES \"Intel\")\n                # all Intel* libraries share the same include path\n                pkg_check_modules(DepBLAS REQUIRED mkl-sdl)\n            elseif (${GGML_BLAS_VENDOR} MATCHES \"NVHPC\")\n                # this doesn't provide pkg-config\n                # suggest to assign BLAS_INCLUDE_DIRS on your own\n                if (\"${NVHPC_VERSION}\" STREQUAL \"\")\n                    message(WARNING \"Better to set NVHPC_VERSION\")\n                else()\n                    set(DepBLAS_FOUND ON)\n                    set(DepBLAS_INCLUDE_DIRS \"/opt/nvidia/hpc_sdk/${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}/${NVHPC_VERSION}/math_libs/include\")\n                endif()\n            endif()\n            if (DepBLAS_FOUND)\n                set(BLAS_INCLUDE_DIRS ${DepBLAS_INCLUDE_DIRS})\n            else()\n                message(WARNING \"BLAS_INCLUDE_DIRS neither been provided nor been automatically\"\n                \" detected by pkgconfig, trying to find cblas.h from possible paths...\")\n                find_path(BLAS_INCLUDE_DIRS\n                    NAMES cblas.h\n                    HINTS\n                        /usr/include\n                        /usr/local/include\n                        /usr/include/openblas\n                        /opt/homebrew/opt/openblas/include\n                        /usr/local/opt/openblas/include\n                        /usr/include/x86_64-linux-gnu/openblas/include\n                )\n            endif()\n        endif()\n\n    
    message(STATUS \"BLAS found, Includes: ${BLAS_INCLUDE_DIRS}\")\n\n        list(APPEND GGML_COMPILE_OPTS ${BLAS_LINKER_FLAGS})\n\n        list(APPEND GGML_COMPILE_DEFS GGML_USE_OPENBLAS)\n\n        if (${BLAS_INCLUDE_DIRS} MATCHES \"mkl\" AND (${GGML_BLAS_VENDOR} MATCHES \"Generic\" OR ${GGML_BLAS_VENDOR} MATCHES \"Intel\"))\n            list(APPEND GGML_COMPILE_DEFS GGML_BLAS_USE_MKL)\n        endif()\n\n        set(LLAMA_EXTRA_LIBS     ${LLAMA_EXTRA_LIBS}     ${BLAS_LIBRARIES})\n        set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${BLAS_INCLUDE_DIRS})\n    else()\n        message(WARNING \"BLAS not found, please refer to \"\n        \"https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors\"\n        \" to set correct GGML_BLAS_VENDOR\")\n    endif()\nendif()\n\nif (GGML_LLAMAFILE)\n    list(APPEND GGML_COMPILE_DEFS GGML_USE_LLAMAFILE)\n\n    set(GGML_HEADERS_LLAMAFILE ${DIRECTORY}/ggml/src/llamafile/sgemm.h)\n    set(GGML_SOURCES_LLAMAFILE ${DIRECTORY}/ggml/src/llamafile/sgemm.cpp)\nendif()\n\nif (GGML_QKK_64)\n    list(APPEND GGML_COMPILE_DEFS GGML_QKK_64)\nendif()\n\nif (LLAMA_PERF)\n    list(APPEND GGML_COMPILE_DEFS GGML_PERF)\nendif()\n\nfunction(get_flags CCID CCVER)\n    set(C_FLAGS \"\")\n    set(CXX_FLAGS \"\")\n\n    if (CCID MATCHES \"Clang\")\n        set(C_FLAGS   -Wunreachable-code-break -Wunreachable-code-return)\n        set(CXX_FLAGS -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi)\n\n        if (\n            (CCID STREQUAL \"Clang\"      AND CCVER VERSION_GREATER_EQUAL 3.8.0) OR\n            (CCID STREQUAL \"AppleClang\" AND CCVER VERSION_GREATER_EQUAL 7.3.0)\n        )\n            list(APPEND C_FLAGS -Wdouble-promotion)\n        endif()\n    elseif (CCID STREQUAL \"GNU\")\n        set(C_FLAGS   -Wdouble-promotion)\n        set(CXX_FLAGS -Wno-array-bounds)\n\n        if (CCVER VERSION_GREATER_EQUAL 7.1.0)\n            list(APPEND CXX_FLAGS -Wno-format-truncation)\n        
endif()\n        if (CCVER VERSION_GREATER_EQUAL 8.1.0)\n            list(APPEND CXX_FLAGS -Wextra-semi)\n        endif()\n    endif()\n\n    set(GF_C_FLAGS   ${C_FLAGS}   PARENT_SCOPE)\n    set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE)\nendfunction()\n\nif (LLAMA_FATAL_WARNINGS)\n    if (CMAKE_CXX_COMPILER_ID MATCHES \"GNU\" OR CMAKE_CXX_COMPILER_ID MATCHES \"Clang\")\n        list(APPEND C_FLAGS   -Werror)\n        list(APPEND CXX_FLAGS -Werror)\n    elseif (CMAKE_CXX_COMPILER_ID STREQUAL \"MSVC\")\n        list(APPEND GGML_COMPILE_OPTS /WX)\n    endif()\nendif()\n\nif (LLAMA_ALL_WARNINGS)\n    if (NOT MSVC)\n        list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function)\n        list(APPEND C_FLAGS       -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes\n                                  -Werror=implicit-int -Werror=implicit-function-declaration)\n        list(APPEND CXX_FLAGS     -Wmissing-declarations -Wmissing-noreturn)\n\n        list(APPEND C_FLAGS   ${WARNING_FLAGS})\n        list(APPEND CXX_FLAGS ${WARNING_FLAGS})\n\n        get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION})\n\n        list(APPEND GGML_COMPILE_OPTS \"$<$<COMPILE_LANGUAGE:C>:${C_FLAGS};${GF_C_FLAGS}>\"\n                                      \"$<$<COMPILE_LANGUAGE:CXX>:${CXX_FLAGS};${GF_CXX_FLAGS}>\")\n    else()\n        # todo : msvc\n        set(C_FLAGS   \"\")\n        set(CXX_FLAGS \"\")\n    endif()\nendif()\n\nif (WIN32)\n    list(APPEND GGML_COMPILE_DEFS _CRT_SECURE_NO_WARNINGS)\n\n    if (BUILD_SHARED_LIBS)\n        set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)\n    endif()\nendif()\n\n# this version of Apple ld64 is buggy\nexecute_process(\n    COMMAND ${CMAKE_C_COMPILER} ${CMAKE_EXE_LINKER_FLAGS} -Wl,-v\n    ERROR_VARIABLE output\n    OUTPUT_QUIET\n)\n\nif (output MATCHES \"dyld-1015\\.7\")\n    list(APPEND GGML_COMPILE_DEFS HAVE_BUGGY_APPLE_LINKER)\nendif()\n\n# Architecture specific\n# TODO: probably these flags need to be 
tweaked on some architectures\n#       feel free to update the Makefile for your architecture and send a pull request or issue\nmessage(STATUS \"CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}\")\nif (MSVC)\n    string(TOLOWER \"${CMAKE_GENERATOR_PLATFORM}\" CMAKE_GENERATOR_PLATFORM_LWR)\n    message(STATUS \"CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}\")\nelse ()\n    set(CMAKE_GENERATOR_PLATFORM_LWR \"\")\nendif ()\n\nif (NOT MSVC)\n    if (LLAMA_STATIC)\n        list(APPEND GGML_LINK_OPTS -static)\n        if (MINGW)\n            list(APPEND GGML_LINK_OPTS -static-libgcc -static-libstdc++)\n        endif()\n    endif()\n    if (LLAMA_GPROF)\n        list(APPEND GGML_COMPILE_OPTS -pg)\n    endif()\nendif()\n\nif (MINGW)\n    # Target Windows 8 for PrefetchVirtualMemory\n    list(APPEND GGML_COMPILE_DEFS _WIN32_WINNT=${LLAMA_WIN_VER})\nendif()\n\n#\n# POSIX conformance\n#\n\n# clock_gettime came in POSIX.1b (1993)\n# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional\n# posix_memalign came in POSIX.1-2001 / SUSv3\n# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)\nlist(APPEND GGML_COMPILE_DEFS _XOPEN_SOURCE=600)\n\n# Somehow in OpenBSD whenever POSIX conformance is specified\n# some string functions rely on locale_t availability,\n# which was introduced in POSIX.1-2008, forcing us to go higher\nif (CMAKE_SYSTEM_NAME MATCHES \"OpenBSD\")\n    list(REMOVE_ITEM GGML_COMPILE_DEFS _XOPEN_SOURCE=600)\n    list(APPEND GGML_COMPILE_DEFS _XOPEN_SOURCE=700)\nendif()\n\n# Data types, macros and functions related to controlling CPU affinity and\n# some memory allocation are available on Linux through GNU extensions in libc\nif (CMAKE_SYSTEM_NAME MATCHES \"Linux\")\n    list(APPEND GGML_COMPILE_DEFS _GNU_SOURCE)\nendif()\n\n# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1,\n# and on macOS its availability depends on enabling Darwin extensions\n# similarly on DragonFly, enabling BSD extensions is necessary\nif (\n    
CMAKE_SYSTEM_NAME MATCHES \"Darwin\" OR\n    CMAKE_SYSTEM_NAME MATCHES \"iOS\" OR\n    CMAKE_SYSTEM_NAME MATCHES \"tvOS\" OR\n    CMAKE_SYSTEM_NAME MATCHES \"DragonFly\"\n)\n    list(APPEND GGML_COMPILE_DEFS _DARWIN_C_SOURCE)\nendif()\n\n# alloca is a non-standard interface that is not visible on BSDs when\n# POSIX conformance is specified, but not all of them provide a clean way\n# to enable it in such cases\nif (CMAKE_SYSTEM_NAME MATCHES \"FreeBSD\")\n    list(APPEND GGML_COMPILE_DEFS __BSD_VISIBLE)\nendif()\nif (CMAKE_SYSTEM_NAME MATCHES \"NetBSD\")\n    list(APPEND GGML_COMPILE_DEFS _NETBSD_SOURCE)\nendif()\nif (CMAKE_SYSTEM_NAME MATCHES \"OpenBSD\")\n    list(APPEND GGML_COMPILE_DEFS _BSD_SOURCE)\nendif()\n\nfunction(include_ggml SUFFIX)\n    message(STATUS \"Configuring ggml implementation target llama${SUFFIX} in ${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY}\")\n\n    #\n    # libraries\n    #\n\n    if (GGML_CUDA)\n        cmake_minimum_required(VERSION 3.18)  # for CMAKE_CUDA_ARCHITECTURES\n\n        get_property(LANGS GLOBAL PROPERTY ENABLED_LANGUAGES)\n        if (NOT CUDA IN_LIST LANGS)\n            message(FATAL_ERROR \"The CUDA language must be enabled.\")\n        endif()\n\n        find_package(CUDAToolkit REQUIRED)\n        set(CUDAToolkit_BIN_DIR ${CUDAToolkit_BIN_DIR} PARENT_SCOPE)\n\n        # architectures are set in gpt4all-backend/CMakeLists.txt\n\n        set(GGML_HEADERS_CUDA ${DIRECTORY}/ggml/include/ggml-cuda.h)\n        file(GLOB   GGML_HEADERS_CUDA \"${DIRECTORY}/ggml/src/ggml-cuda/*.cuh\")\n        list(APPEND GGML_HEADERS_CUDA \"${DIRECTORY}/ggml/include/ggml-cuda.h\")\n\n        file(GLOB   GGML_SOURCES_CUDA \"${DIRECTORY}/ggml/src/ggml-cuda/*.cu\")\n        list(APPEND GGML_SOURCES_CUDA \"${DIRECTORY}/ggml/src/ggml-cuda.cu\")\n        file(GLOB   SRCS \"${DIRECTORY}/ggml/src/ggml-cuda/template-instances/fattn-wmma*.cu\")\n        list(APPEND GGML_SOURCES_CUDA ${SRCS})\n        file(GLOB   SRCS 
\"${DIRECTORY}/ggml/src/ggml-cuda/template-instances/mmq*.cu\")\n        list(APPEND GGML_SOURCES_CUDA ${SRCS})\n\n        if (GGML_CUDA_FA_ALL_QUANTS)\n            file(GLOB   SRCS \"${DIRECTORY}/ggml/src/ggml-cuda/template-instances/fattn-vec*.cu\")\n            list(APPEND GGML_SOURCES_CUDA ${SRCS})\n            add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS)\n        else()\n            file(GLOB   SRCS \"${DIRECTORY}/ggml/src/ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu\")\n            list(APPEND GGML_SOURCES_CUDA ${SRCS})\n            file(GLOB   SRCS \"${DIRECTORY}/ggml/src/ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu\")\n            list(APPEND GGML_SOURCES_CUDA ${SRCS})\n            file(GLOB   SRCS \"${DIRECTORY}/ggml/src/ggml-cuda/template-instances/fattn-vec*f16-f16.cu\")\n            list(APPEND GGML_SOURCES_CUDA ${SRCS})\n        endif()\n\n        list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_CUDA)\n\n        list(APPEND GGML_COMPILE_DEFS GGML_CUDA_DMMV_X=${GGML_CUDA_DMMV_X})\n        list(APPEND GGML_COMPILE_DEFS GGML_CUDA_MMV_Y=${GGML_CUDA_MMV_Y})\n        list(APPEND GGML_COMPILE_DEFS K_QUANTS_PER_ITERATION=${GGML_CUDA_KQUANTS_ITER})\n        list(APPEND GGML_COMPILE_DEFS GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE})\n\n        if (GGML_CUDA_USE_GRAPHS)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_USE_GRAPHS)\n        endif()\n\n        if (GGML_CUDA_FORCE_DMMV)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_DMMV)\n        endif()\n\n        if (GGML_CUDA_FORCE_MMQ)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_MMQ)\n        endif()\n\n        if (GGML_CUDA_FORCE_CUBLAS)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_CUBLAS)\n        endif()\n\n        if (GGML_CUDA_NO_VMM)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_NO_VMM)\n        endif()\n\n        if (GGML_CUDA_F16)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_F16)\n        endif()\n\n        if 
(GGML_CUDA_NO_PEER_COPY)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_NO_PEER_COPY)\n        endif()\n\n        if (LLAMA_STATIC)\n            if (WIN32)\n                # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library\n                set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt)\n            else ()\n                set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)\n            endif()\n        else()\n            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)\n        endif()\n\n        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver)\n    endif()\n\n    if (GGML_VULKAN)\n        find_package(Vulkan REQUIRED)\n\n        set(GGML_HEADERS_VULKAN ${DIRECTORY}/ggml/include/ggml-vulkan.h)\n        set(GGML_SOURCES_VULKAN ${DIRECTORY}/ggml/src/ggml-vulkan.cpp)\n\n        list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_VULKAN)\n\n        if (GGML_VULKAN_CHECK_RESULTS)\n            list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_CHECK_RESULTS)\n        endif()\n\n        if (GGML_VULKAN_DEBUG)\n            list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_DEBUG)\n        endif()\n\n        if (GGML_VULKAN_VALIDATE)\n            list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_VALIDATE)\n        endif()\n\n        if (GGML_VULKAN_RUN_TESTS)\n            list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_RUN_TESTS)\n        endif()\n\n        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} Vulkan::Vulkan)\n    endif()\n\n    if (GGML_HIPBLAS)\n        if ($ENV{ROCM_PATH})\n            set(ROCM_PATH $ENV{ROCM_PATH})\n        else()\n            set(ROCM_PATH /opt/rocm)\n        endif()\n        list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH})\n\n        string(REGEX MATCH \"hipcc(\\.bat)?$\" CXX_IS_HIPCC \"${CMAKE_CXX_COMPILER}\")\n\n        if (CXX_IS_HIPCC AND UNIX)\n            message(WARNING \"Setting hipcc as the C++ compiler is 
legacy behavior.\"\n                \" Prefer setting the HIP compiler directly. See README for details.\")\n        else()\n            # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES.\n            if (AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES)\n                set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_TARGETS})\n            endif()\n            cmake_minimum_required(VERSION 3.21)\n            get_property(LANGS GLOBAL PROPERTY ENABLED_LANGUAGES)\n            if (NOT HIP IN_LIST LANGS)\n                message(FATAL_ERROR \"The HIP language must be enabled.\")\n            endif()\n        endif()\n        find_package(hip     REQUIRED)\n        find_package(hipblas REQUIRED)\n        find_package(rocblas REQUIRED)\n\n        message(STATUS \"HIP and hipBLAS found\")\n\n        set(GGML_HEADERS_ROCM ${DIRECTORY}/ggml/include/ggml-cuda.h)\n\n        file(GLOB GGML_SOURCES_ROCM \"${DIRECTORY}/ggml/src/ggml-rocm/*.cu\")\n        list(APPEND GGML_SOURCES_ROCM \"${DIRECTORY}/ggml/src/ggml-rocm.cu\")\n\n        list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_HIPBLAS GGML_USE_CUDA)\n\n        if (GGML_HIP_UMA)\n            list(APPEND GGML_COMPILE_DEFS GGML_HIP_UMA)\n        endif()\n\n        if (GGML_CUDA_FORCE_DMMV)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_DMMV)\n        endif()\n\n        if (GGML_CUDA_FORCE_MMQ)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_MMQ)\n        endif()\n\n        if (GGML_CUDA_NO_PEER_COPY)\n            list(APPEND GGML_COMPILE_DEFS GGML_CUDA_NO_PEER_COPY)\n        endif()\n\n        list(APPEND GGML_COMPILE_DEFS GGML_CUDA_DMMV_X=${GGML_CUDA_DMMV_X})\n        list(APPEND GGML_COMPILE_DEFS GGML_CUDA_MMV_Y=${GGML_CUDA_MMV_Y})\n        list(APPEND GGML_COMPILE_DEFS K_QUANTS_PER_ITERATION=${GGML_CUDA_KQUANTS_ITER})\n\n        if (CXX_IS_HIPCC)\n            set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)\n            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device)\n        
else()\n            set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP)\n        endif()\n\n        if (LLAMA_STATIC)\n            message(FATAL_ERROR \"Static linking not supported for HIP/ROCm\")\n        endif()\n\n        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} PUBLIC hip::host roc::rocblas roc::hipblas)\n    endif()\n\n    set(LLAMA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY})\n\n    if (GGML_KOMPUTE AND NOT GGML_KOMPUTE_ONCE)\n        set(GGML_KOMPUTE_ONCE ON PARENT_SCOPE)\n        if (NOT EXISTS \"${LLAMA_DIR}/ggml/src/kompute/CMakeLists.txt\")\n            message(FATAL_ERROR \"Kompute not found\")\n        endif()\n        message(STATUS \"Kompute found\")\n\n        find_package(Vulkan COMPONENTS glslc)\n        if (NOT Vulkan_FOUND)\n            message(FATAL_ERROR \"Vulkan not found. To build without Vulkan, use -DLLMODEL_KOMPUTE=OFF.\")\n        endif()\n        find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc)\n        if (NOT glslc_executable)\n            message(FATAL_ERROR \"glslc not found. 
To build without Vulkan, use -DLLMODEL_KOMPUTE=OFF.\")\n        endif()\n\n        function(compile_shader)\n            set(options)\n            set(oneValueArgs)\n            set(multiValueArgs SOURCES)\n            cmake_parse_arguments(compile_shader \"${options}\" \"${oneValueArgs}\" \"${multiValueArgs}\" ${ARGN})\n            foreach(source ${compile_shader_SOURCES})\n                get_filename_component(OP_FILE ${source} NAME)\n                set(spv_file ${CMAKE_CURRENT_BINARY_DIR}/${OP_FILE}.spv)\n                add_custom_command(\n                    OUTPUT ${spv_file}\n                    DEPENDS ${LLAMA_DIR}/ggml/src/kompute-shaders/${source}\n                        ${LLAMA_DIR}/ggml/src/kompute-shaders/common.comp\n                        ${LLAMA_DIR}/ggml/src/kompute-shaders/op_getrows.comp\n                        ${LLAMA_DIR}/ggml/src/kompute-shaders/op_mul_mv_q_n_pre.comp\n                        ${LLAMA_DIR}/ggml/src/kompute-shaders/op_mul_mv_q_n.comp\n                    COMMAND ${glslc_executable} --target-env=vulkan1.2 -o ${spv_file} ${LLAMA_DIR}/ggml/src/kompute-shaders/${source}\n                    COMMENT \"Compiling ${source} to ${source}.spv\"\n                    )\n\n                get_filename_component(RAW_FILE_NAME ${spv_file} NAME)\n                set(FILE_NAME \"shader${RAW_FILE_NAME}\")\n                string(REPLACE \".comp.spv\" \".h\" HEADER_FILE ${FILE_NAME})\n                string(TOUPPER ${HEADER_FILE} HEADER_FILE_DEFINE)\n                string(REPLACE \".\" \"_\" HEADER_FILE_DEFINE \"${HEADER_FILE_DEFINE}\")\n                set(OUTPUT_HEADER_FILE \"${HEADER_FILE}\")\n                message(STATUS \"${HEADER_FILE} generating ${HEADER_FILE_DEFINE}\")\n                if(CMAKE_GENERATOR MATCHES \"Visual Studio\")\n                    add_custom_command(\n                        OUTPUT ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - 
DO NOT EDIT*/\" > ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \\\"\\#ifndef ${HEADER_FILE_DEFINE}\\\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \\\"\\#define ${HEADER_FILE_DEFINE}\\\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"namespace kp {\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"namespace shader_data {\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"}}\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \\\"\\#endif // define ${HEADER_FILE_DEFINE}\\\" >> ${OUTPUT_HEADER_FILE}\n                        DEPENDS ${spv_file} xxd\n                        COMMENT \"Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/xxd\"\n                        )\n                else()\n                    add_custom_command(\n                        OUTPUT ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/\" > ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \\\"\\#ifndef ${HEADER_FILE_DEFINE}\\\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \\\"\\#define ${HEADER_FILE_DEFINE}\\\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"namespace kp {\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"namespace shader_data {\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_BINARY_DIR}/bin/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo \"}}\" >> ${OUTPUT_HEADER_FILE}\n                        COMMAND ${CMAKE_COMMAND} -E echo 
\\\"\\#endif // define ${HEADER_FILE_DEFINE}\\\" >> ${OUTPUT_HEADER_FILE}\n                        DEPENDS ${spv_file} xxd\n                        COMMENT \"Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/xxd\"\n                        )\n                endif()\n            endforeach()\n        endfunction()\n\n        set(KOMPUTE_OPT_BUILT_IN_VULKAN_HEADER_TAG \"v1.3.239\" CACHE STRING \"Kompute Vulkan headers tag\")\n        set(KOMPUTE_OPT_LOG_LEVEL Critical CACHE STRING \"Kompute log level\")\n        set(FMT_INSTALL OFF)\n        add_subdirectory(${LLAMA_DIR}/ggml/src/kompute)\n\n        # Compile our shaders\n        compile_shader(SOURCES\n            op_scale.comp\n            op_scale_8.comp\n            op_add.comp\n            op_addrow.comp\n            op_mul.comp\n            op_silu.comp\n            op_relu.comp\n            op_gelu.comp\n            op_softmax.comp\n            op_norm.comp\n            op_rmsnorm.comp\n            op_diagmask.comp\n            op_mul_mat_mat_f32.comp\n            op_mul_mat_f16.comp\n            op_mul_mat_q8_0.comp\n            op_mul_mat_q4_0.comp\n            op_mul_mat_q4_1.comp\n            op_mul_mat_q6_k.comp\n            op_getrows_f32.comp\n            op_getrows_f16.comp\n            op_getrows_q4_0.comp\n            op_getrows_q4_1.comp\n            op_getrows_q6_k.comp\n            op_rope_f16.comp\n            op_rope_f32.comp\n            op_cpy_f16_f16.comp\n            op_cpy_f16_f32.comp\n            op_cpy_f32_f16.comp\n            op_cpy_f32_f32.comp\n        )\n\n        # Create a custom target for our generated shaders\n        add_custom_target(generated_shaders DEPENDS\n            shaderop_scale.h\n            shaderop_scale_8.h\n            shaderop_add.h\n            shaderop_addrow.h\n            shaderop_mul.h\n            shaderop_silu.h\n            shaderop_relu.h\n            shaderop_gelu.h\n            shaderop_softmax.h\n            shaderop_norm.h\n            
shaderop_rmsnorm.h\n            shaderop_diagmask.h\n            shaderop_mul_mat_mat_f32.h\n            shaderop_mul_mat_f16.h\n            shaderop_mul_mat_q8_0.h\n            shaderop_mul_mat_q4_0.h\n            shaderop_mul_mat_q4_1.h\n            shaderop_mul_mat_q6_k.h\n            shaderop_getrows_f32.h\n            shaderop_getrows_f16.h\n            shaderop_getrows_q4_0.h\n            shaderop_getrows_q4_1.h\n            shaderop_getrows_q6_k.h\n            shaderop_rope_f16.h\n            shaderop_rope_f32.h\n            shaderop_cpy_f16_f16.h\n            shaderop_cpy_f16_f32.h\n            shaderop_cpy_f32_f16.h\n            shaderop_cpy_f32_f32.h\n        )\n\n        # Create a custom command that depends on the generated_shaders\n        add_custom_command(\n            OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp\n            COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp\n            DEPENDS generated_shaders\n            COMMENT \"Ensuring shaders are generated before compiling ggml-kompute.cpp\"\n        )\n    endif()\n\n    if (GGML_KOMPUTE)\n        list(APPEND GGML_COMPILE_DEFS VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1)\n\n        # Add the stamp to the main sources to ensure dependency tracking\n        set(GGML_SOURCES_KOMPUTE ${LLAMA_DIR}/ggml/src/ggml-kompute.cpp ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp)\n        set(GGML_HEADERS_KOMPUTE ${LLAMA_DIR}/ggml/include/ggml-kompute.h)\n\n        list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_KOMPUTE)\n\n        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} kompute)\n    endif()\n\n    set(CUDA_CXX_FLAGS \"\")\n\n    if (GGML_CUDA)\n        set(CUDA_FLAGS -use_fast_math)\n\n        if (LLAMA_FATAL_WARNINGS)\n            list(APPEND CUDA_FLAGS -Werror all-warnings)\n        endif()\n\n        if (LLAMA_ALL_WARNINGS AND NOT MSVC)\n            set(NVCC_CMD ${CMAKE_CUDA_COMPILER} .c)\n            if (NOT CMAKE_CUDA_HOST_COMPILER STREQUAL \"\")\n                
list(APPEND NVCC_CMD -ccbin ${CMAKE_CUDA_HOST_COMPILER})\n            endif()\n\n            execute_process(\n                COMMAND ${NVCC_CMD} -Xcompiler --version\n                OUTPUT_VARIABLE CUDA_CCFULLVER\n                ERROR_QUIET\n            )\n\n            if (NOT CUDA_CCFULLVER MATCHES clang)\n                set(CUDA_CCID \"GNU\")\n                execute_process(\n                    COMMAND ${NVCC_CMD} -Xcompiler \"-dumpfullversion -dumpversion\"\n                    OUTPUT_VARIABLE CUDA_CCVER\n                    OUTPUT_STRIP_TRAILING_WHITESPACE\n                    ERROR_QUIET\n                )\n            else()\n                if (CUDA_CCFULLVER MATCHES Apple)\n                    set(CUDA_CCID \"AppleClang\")\n                else()\n                    set(CUDA_CCID \"Clang\")\n                endif()\n                string(REGEX REPLACE \"^.* version ([0-9.]*).*$\" \"\\\\1\" CUDA_CCVER ${CUDA_CCFULLVER})\n            endif()\n\n            message(\"-- CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}\")\n\n            get_flags(${CUDA_CCID} ${CUDA_CCVER})\n            list(APPEND CUDA_CXX_FLAGS ${CXX_FLAGS} ${GF_CXX_FLAGS})  # This is passed to -Xcompiler later\n        endif()\n\n        if (NOT MSVC)\n            list(APPEND CUDA_CXX_FLAGS -Wno-pedantic)\n        endif()\n    endif()\n\n    if (GGML_METAL)\n        find_library(FOUNDATION_LIBRARY Foundation REQUIRED)\n        find_library(METAL_FRAMEWORK    Metal      REQUIRED)\n        find_library(METALKIT_FRAMEWORK MetalKit   REQUIRED)\n\n        message(STATUS \"Metal framework found\")\n        set(GGML_HEADERS_METAL ${DIRECTORY}/ggml/include/ggml-metal.h)\n        set(GGML_SOURCES_METAL ${DIRECTORY}/ggml/src/ggml-metal.m)\n\n        list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_METAL)\n        if (GGML_METAL_NDEBUG)\n            list(APPEND GGML_COMPILE_DEFS GGML_METAL_NDEBUG)\n        endif()\n\n        # copy ggml-common.h and ggml-metal.metal to bin directory\n        
configure_file(${DIRECTORY}/ggml/src/ggml-common.h    ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h    COPYONLY)\n        configure_file(${DIRECTORY}/ggml/src/ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY)\n\n        if (GGML_METAL_SHADER_DEBUG)\n            # custom command to do the following:\n            #   xcrun -sdk macosx metal    -fno-fast-math -c ggml-metal.metal -o ggml-metal.air\n            #   xcrun -sdk macosx metallib                   ggml-metal.air   -o default.metallib\n            #\n            # note: this is the only way I found to disable fast-math in Metal. it's ugly, but at least it works\n            #       disabling fast math is needed in order to pass tests/test-backend-ops\n            # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1\n            # note: unfortunately, we have to call it default.metallib instead of ggml.metallib\n            #       ref: https://github.com/ggerganov/whisper.cpp/issues/1720\n            set(XC_FLAGS -fno-fast-math -fno-inline -g)\n        else()\n            set(XC_FLAGS -O3)\n        endif()\n\n        # Append macOS metal versioning flags\n        if (GGML_METAL_MACOSX_VERSION_MIN)\n            message(STATUS \"Adding -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN} flag to metal compilation\")\n            list(APPEND XC_FLAGS -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN})\n        endif()\n        if (GGML_METAL_STD)\n            message(STATUS \"Adding -std=${GGML_METAL_STD} flag to metal compilation\")\n            list(APPEND XC_FLAGS -std=${GGML_METAL_STD})\n        endif()\n\n        set(GGML_METALLIB \"${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib\")\n        set(GGML_METALLIB \"${GGML_METALLIB}\" PARENT_SCOPE)\n        add_custom_command(\n            OUTPUT ${GGML_METALLIB}\n            COMMAND xcrun -sdk macosx metal    ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o 
${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air\n            COMMAND xcrun -sdk macosx metallib                ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air   -o ${GGML_METALLIB}\n            COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air\n            COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h\n            COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal\n            DEPENDS ${DIRECTORY}/ggml/src/ggml-metal.metal ${DIRECTORY}/ggml/src/ggml-common.h\n            COMMENT \"Compiling Metal kernels\"\n            )\n\n        add_custom_target(\n            ggml-metal ALL\n            DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib\n            )\n\n        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS}\n            ${FOUNDATION_LIBRARY}\n            ${METAL_FRAMEWORK}\n            ${METALKIT_FRAMEWORK}\n            )\n    endif()\n\n    set(ARCH_FLAGS \"\")\n\n    if (CMAKE_OSX_ARCHITECTURES STREQUAL \"arm64\" OR CMAKE_GENERATOR_PLATFORM_LWR STREQUAL \"arm64\" OR\n        (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND\n         CMAKE_SYSTEM_PROCESSOR MATCHES \"^(aarch64|arm.*|ARM64)$\"))\n        message(STATUS \"ARM detected\")\n        if (MSVC)\n            # TODO: arm msvc?\n        else()\n            check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E)\n            if (NOT \"${COMPILER_SUPPORTS_FP16_FORMAT_I3E}\" STREQUAL \"\")\n                list(APPEND ARCH_FLAGS -mfp16-format=ieee)\n            endif()\n            if (${CMAKE_SYSTEM_PROCESSOR} MATCHES \"armv6\")\n                # Raspberry Pi 1, Zero\n                list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access)\n            endif()\n            if (${CMAKE_SYSTEM_PROCESSOR} MATCHES \"armv7\")\n                if (\"${CMAKE_SYSTEM_NAME}\" STREQUAL \"Android\")\n                    # Android armeabi-v7a\n                    list(APPEND ARCH_FLAGS -mfpu=neon-vfpv4 
-mno-unaligned-access -funsafe-math-optimizations)\n                else()\n                    # Raspberry Pi 2\n                    list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations)\n                endif()\n            endif()\n            if (${CMAKE_SYSTEM_PROCESSOR} MATCHES \"armv8\")\n                # Android arm64-v8a\n                # Raspberry Pi 3, 4, Zero 2 (32-bit)\n                list(APPEND ARCH_FLAGS -mno-unaligned-access)\n            endif()\n        endif()\n    elseif (CMAKE_OSX_ARCHITECTURES STREQUAL \"x86_64\" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES \"^(x86_64|i686|amd64|x64|win32)$\" OR\n            (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND\n             CMAKE_SYSTEM_PROCESSOR MATCHES \"^(x86_64|i686|AMD64)$\"))\n        message(STATUS \"x86 detected\")\n        if (MSVC)\n            if (GGML_AVX512)\n                list(APPEND ARCH_FLAGS /arch:AVX512)\n                # MSVC has no compile-time flags enabling specific\n                # AVX512 extensions, neither it defines the\n                # macros corresponding to the extensions.\n                # Do it manually.\n                if (GGML_AVX512_VBMI)\n                    list(APPEND GGML_COMPILE_DEFS $<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)\n                    list(APPEND GGML_COMPILE_DEFS $<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)\n                endif()\n                if (GGML_AVX512_VNNI)\n                    list(APPEND GGML_COMPILE_DEFS $<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)\n                    list(APPEND GGML_COMPILE_DEFS $<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)\n                endif()\n            elseif (GGML_AVX2)\n                list(APPEND ARCH_FLAGS /arch:AVX2)\n            elseif (GGML_AVX)\n                list(APPEND ARCH_FLAGS /arch:AVX)\n            endif()\n        else()\n            if (GGML_NATIVE)\n                list(APPEND ARCH_FLAGS -march=native)\n            endif()\n  
          if (GGML_F16C)\n                list(APPEND ARCH_FLAGS -mf16c)\n            endif()\n            if (GGML_FMA)\n                list(APPEND ARCH_FLAGS -mfma)\n            endif()\n            if (GGML_AVX)\n                list(APPEND ARCH_FLAGS -mavx)\n            endif()\n            if (GGML_AVX2)\n                list(APPEND ARCH_FLAGS -mavx2)\n            endif()\n            if (GGML_AVX512)\n                list(APPEND ARCH_FLAGS -mavx512f)\n                list(APPEND ARCH_FLAGS -mavx512bw)\n            endif()\n            if (GGML_AVX512_VBMI)\n                list(APPEND ARCH_FLAGS -mavx512vbmi)\n            endif()\n            if (GGML_AVX512_VNNI)\n                list(APPEND ARCH_FLAGS -mavx512vnni)\n            endif()\n        endif()\n    elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES \"ppc64\")\n        message(STATUS \"PowerPC detected\")\n        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES \"ppc64le\")\n            list(APPEND ARCH_FLAGS -mcpu=powerpc64le)\n        else()\n            list(APPEND ARCH_FLAGS -mcpu=native -mtune=native)\n            #TODO: Add  targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)\n        endif()\n    else()\n        message(STATUS \"Unknown architecture\")\n    endif()\n\n    list(APPEND GGML_COMPILE_OPTS \"$<$<COMPILE_LANGUAGE:CXX>:${ARCH_FLAGS}>\")\n    list(APPEND GGML_COMPILE_OPTS \"$<$<COMPILE_LANGUAGE:C>:${ARCH_FLAGS}>\")\n\n    if (GGML_CUDA)\n        list(APPEND CUDA_CXX_FLAGS ${ARCH_FLAGS})\n        list(JOIN CUDA_CXX_FLAGS \" \" CUDA_CXX_FLAGS_JOINED)  # pass host compiler flags as a single argument\n        if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL \"\")\n            list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED})\n        endif()\n        list(APPEND GGML_COMPILE_OPTS \"$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>\")\n    endif()\n\n    # ggml\n\n    add_library(ggml${SUFFIX} OBJECT\n                ${DIRECTORY}/ggml/include/ggml.h\n             
   ${DIRECTORY}/ggml/include/ggml-alloc.h\n                ${DIRECTORY}/ggml/include/ggml-backend.h\n                ${DIRECTORY}/ggml/src/ggml.c\n                ${DIRECTORY}/ggml/src/ggml-alloc.c\n                ${DIRECTORY}/ggml/src/ggml-backend.c\n                ${DIRECTORY}/ggml/src/ggml-quants.c\n                ${DIRECTORY}/ggml/src/ggml-quants.h\n                ${GGML_SOURCES_CUDA}      ${GGML_HEADERS_CUDA}\n                ${GGML_SOURCES_METAL}     ${GGML_HEADERS_METAL}\n                ${GGML_SOURCES_KOMPUTE}   ${GGML_HEADERS_KOMPUTE}\n                ${GGML_SOURCES_VULKAN}    ${GGML_HEADERS_VULKAN}\n                ${GGML_SOURCES_ROCM}      ${GGML_HEADERS_ROCM}\n                ${GGML_SOURCES_LLAMAFILE} ${GGML_HEADERS_LLAMAFILE}\n                ${DIRECTORY}/ggml/src/ggml-aarch64.c\n                ${DIRECTORY}/ggml/src/ggml-aarch64.h\n                )\n\n    target_include_directories(ggml${SUFFIX} PUBLIC ${DIRECTORY}/ggml/include ${LLAMA_EXTRA_INCLUDES})\n    target_include_directories(ggml${SUFFIX} PRIVATE ${DIRECTORY}/ggml/src)\n    target_compile_features(ggml${SUFFIX} PUBLIC c_std_11) # don't bump\n\n    target_link_libraries(ggml${SUFFIX} PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})\n\n    if (BUILD_SHARED_LIBS)\n        set_target_properties(ggml${SUFFIX} PROPERTIES POSITION_INDEPENDENT_CODE ON)\n    endif()\n\n    # llama\n\n    add_library(llama${SUFFIX} STATIC\n                ${DIRECTORY}/include/llama.h\n                ${DIRECTORY}/src/llama-grammar.cpp\n                ${DIRECTORY}/src/llama-sampling.cpp\n                ${DIRECTORY}/src/llama-vocab.cpp\n                ${DIRECTORY}/src/llama.cpp\n                ${DIRECTORY}/src/unicode-data.cpp\n                ${DIRECTORY}/src/unicode.cpp\n                ${DIRECTORY}/src/unicode.h\n                )\n\n    target_include_directories(llama${SUFFIX} PUBLIC  ${DIRECTORY}/include ${DIRECTORY}/ggml/include)\n    target_include_directories(llama${SUFFIX} PRIVATE ${DIRECTORY}/src)\n    
target_compile_features   (llama${SUFFIX} PUBLIC cxx_std_11) # don't bump\n\n    target_link_libraries(llama${SUFFIX} PRIVATE\n        ggml${SUFFIX}\n        ${LLAMA_EXTRA_LIBS}\n        )\n\n    if (BUILD_SHARED_LIBS)\n        set_target_properties(llama${SUFFIX} PROPERTIES POSITION_INDEPENDENT_CODE ON)\n        target_compile_definitions(llama${SUFFIX} PRIVATE LLAMA_SHARED LLAMA_BUILD)\n    endif()\n\n    # target options\n\n    set_target_properties(ggml${SUFFIX} llama${SUFFIX} PROPERTIES\n        CXX_STANDARD 11\n        CXX_STANDARD_REQUIRED true\n        C_STANDARD 11\n        C_STANDARD_REQUIRED true\n        )\n\n    target_compile_options(ggml${SUFFIX} PRIVATE \"${GGML_COMPILE_OPTS}\")\n    target_compile_options(llama${SUFFIX} PRIVATE \"${GGML_COMPILE_OPTS}\")\n\n    target_compile_definitions(ggml${SUFFIX} PRIVATE \"${GGML_COMPILE_DEFS}\")\n    target_compile_definitions(llama${SUFFIX} PRIVATE \"${GGML_COMPILE_DEFS}\")\n\n    target_compile_definitions(ggml${SUFFIX} PUBLIC \"${GGML_COMPILE_DEFS_PUBLIC}\")\n    target_compile_definitions(llama${SUFFIX} PUBLIC \"${GGML_COMPILE_DEFS_PUBLIC}\")\n\n    target_link_options(ggml${SUFFIX} PRIVATE \"${GGML_LINK_OPTS}\")\n    target_link_options(llama${SUFFIX} PRIVATE \"${GGML_LINK_OPTS}\")\nendfunction()\n"
  },
  {
    "path": "gpt4all-backend/src/dlhandle.cpp",
    "content": "#include \"dlhandle.h\"\n\n#include <string>\n\n#ifndef _WIN32\n#   include <dlfcn.h>\n#else\n#   include <cassert>\n#   include <sstream>\n#   define WIN32_LEAN_AND_MEAN\n#   ifndef NOMINMAX\n#       define NOMINMAX\n#   endif\n#   include <windows.h>\n#endif\n\nusing namespace std::string_literals;\nnamespace fs = std::filesystem;\n\n\n#ifndef _WIN32\n\nDlhandle::Dlhandle(const fs::path &fpath)\n{\n    chandle = dlopen(fpath.c_str(), RTLD_LAZY | RTLD_LOCAL);\n    if (!chandle) {\n        throw Exception(\"dlopen: \"s + dlerror());\n    }\n}\n\nDlhandle::~Dlhandle()\n{\n    if (chandle) dlclose(chandle);\n}\n\nvoid *Dlhandle::get_internal(const char *symbol) const\n{\n    return dlsym(chandle, symbol);\n}\n\n#else // defined(_WIN32)\n\nDlhandle::Dlhandle(const fs::path &fpath)\n{\n    fs::path afpath = fs::absolute(fpath);\n\n    // Suppress the \"Entry Point Not Found\" dialog, caused by outdated nvcuda.dll from the GPU driver\n    UINT lastErrorMode = GetErrorMode();\n    SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS);\n\n    chandle = LoadLibraryExW(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR);\n\n    SetErrorMode(lastErrorMode);\n\n    if (!chandle) {\n        DWORD err = GetLastError();\n        std::ostringstream ss;\n        ss << \"LoadLibraryExW failed with error 0x\" << std::hex << err;\n        throw Exception(ss.str());\n    }\n}\n\nDlhandle::~Dlhandle()\n{\n    if (chandle) FreeLibrary(HMODULE(chandle));\n}\n\nvoid *Dlhandle::get_internal(const char *symbol) const\n{\n    return GetProcAddress(HMODULE(chandle), symbol);\n}\n\n#endif // defined(_WIN32)\n"
  },
  {
    "path": "gpt4all-backend/src/dlhandle.h",
    "content": "#pragma once\n\n#include <filesystem>\n#include <stdexcept>\n#include <string>\n#include <utility>\n\nnamespace fs = std::filesystem;\n\n\nclass Dlhandle {\n    void *chandle = nullptr;\n\npublic:\n    class Exception : public std::runtime_error {\n    public:\n        using std::runtime_error::runtime_error;\n    };\n\n    Dlhandle() = default;\n    Dlhandle(const fs::path &fpath);\n    Dlhandle(const Dlhandle &o) = delete;\n    Dlhandle(Dlhandle &&o)\n        : chandle(o.chandle)\n    {\n        o.chandle = nullptr;\n    }\n\n    ~Dlhandle();\n\n    Dlhandle &operator=(Dlhandle &&o) {\n        chandle = std::exchange(o.chandle, nullptr);\n        return *this;\n    }\n\n    template <typename T>\n    T *get(const std::string &symbol) const {\n        return reinterpret_cast<T *>(get_internal(symbol.c_str()));\n    }\n\n    auto get_fnc(const std::string &symbol) const {\n        return get<void*(...)>(symbol);\n    }\n\nprivate:\n    void *get_internal(const char *symbol) const;\n};\n"
  },
  {
    "path": "gpt4all-backend/src/llamamodel.cpp",
    "content": "#define LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE\n#include \"llamamodel_impl.h\"\n\n#include \"llmodel.h\"\n#include \"utils.h\"\n\n#include <ggml.h>\n#include <llama.h>\n\n#include <algorithm>\n#include <cassert>\n#include <cmath>\n#include <cstdint>\n#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <fstream>\n#include <functional>\n#include <initializer_list>\n#include <iomanip>\n#include <iostream>\n#include <iterator>\n#include <memory>\n#include <numeric>\n#include <optional>\n#include <sstream>\n#include <stdexcept>\n#include <string>\n#include <thread>\n#include <vector>\n\n#ifdef GGML_USE_KOMPUTE\n#   include <ggml-kompute.h>\n#elif defined(GGML_USE_VULKAN)\n#   include <ggml-vulkan.h>\n#elif defined(GGML_USE_CUDA)\n#   include <ggml-cuda.h>\n#endif\n\nusing namespace std::string_literals;\n\n\n// Maximum supported GGUF version\nstatic constexpr int GGUF_VER_MAX = 3;\n\nstatic const char * const modelType_ = \"LLaMA\";\n\n// note: same order as LLM_ARCH_NAMES in llama.cpp\nstatic const std::vector<const char *> KNOWN_ARCHES {\n    \"llama\",\n    \"falcon\",\n    // \"grok\", -- 314B parameters\n    \"gpt2\",\n    // \"gptj\", -- no inference code\n    \"gptneox\",\n    \"granite\",\n    \"granitemoe\",\n    \"mpt\",\n    \"baichuan\",\n    \"starcoder\",\n    \"refact\",\n    \"bert\",\n    \"nomic-bert\",\n    // \"jina-bert-v2\", -- Assertion `i01 >= 0 && i01 < ne01' failed.\n    \"bloom\",\n    \"stablelm\",\n    \"qwen\",\n    \"qwen2\",\n    \"qwen2moe\",\n    \"phi2\",\n    \"phi3\",\n    // \"plamo\", -- https://github.com/ggerganov/llama.cpp/issues/5669\n    \"codeshell\",\n    \"orion\",\n    \"internlm2\",\n    // \"minicpm\", -- CUDA generates garbage\n    \"gemma\",\n    \"gemma2\",\n    \"starcoder2\",\n    // \"mamba\", -- CUDA missing SSM_CONV\n    \"xverse\",\n    \"command-r\",\n    // \"dbrx\", -- 16x12B parameters\n    \"olmo\",\n    \"olmoe\",\n    \"openelm\",\n    // \"arctic\", 
-- 10B+128x3.66B parameters\n    \"deepseek2\",\n    \"chatglm\",\n    // \"bitnet\", -- tensor not within file bounds?\n    // \"t5\", -- seq2seq model\n    \"jais\",\n};\n\nstatic const std::vector<const char *> EMBEDDING_ARCHES {\n    \"bert\", \"nomic-bert\",\n};\n\nstatic bool is_embedding_arch(const std::string &arch)\n{\n    return std::find(EMBEDDING_ARCHES.begin(), EMBEDDING_ARCHES.end(), arch) < EMBEDDING_ARCHES.end();\n}\n\nstatic bool llama_verbose()\n{\n    const char* var = getenv(\"GPT4ALL_VERBOSE_LLAMACPP\");\n    return var && *var;\n}\n\nstatic void llama_log_callback(ggml_log_level level, const char *text, void *userdata, bool warn)\n{\n    (void)userdata;\n\n    static ggml_log_level lastlevel = GGML_LOG_LEVEL_NONE;\n    if (!llama_verbose()) {\n        auto efflevel = level == GGML_LOG_LEVEL_CONT ? lastlevel : level;\n        lastlevel = efflevel;\n        switch (efflevel) {\n            case GGML_LOG_LEVEL_CONT:\n                UNREACHABLE();\n                break;\n            case GGML_LOG_LEVEL_WARN:\n                if (warn) break;\n                [[fallthrough]];\n            case GGML_LOG_LEVEL_NONE: // not used?\n            case GGML_LOG_LEVEL_INFO:\n            case GGML_LOG_LEVEL_DEBUG:\n                return; // suppress\n            case GGML_LOG_LEVEL_ERROR:\n                ;\n        }\n    }\n\n    fputs(text, stderr);\n}\n\nstruct gpt_params {\n    int32_t n_keep        = 0;    // number of tokens to keep from initial prompt\n\n    // sampling parameters\n    float   tfs_z         = 1.0f; // 1.0 = disabled\n    float   typical_p     = 1.0f; // 1.0 = disabled\n\n    std::string prompt = \"\";\n\n    enum ggml_type kv_type = GGML_TYPE_F16; // use f16 instead of f32 for memory kv\n\n    bool use_mmap          = true;  // use mmap for faster loads\n    bool use_mlock         = false; // use mlock to keep model in memory\n};\n\nconst char *get_arch_name(gguf_context *ctx_gguf)\n{\n    const int kid = gguf_find_key(ctx_gguf, 
\"general.architecture\");\n    if (kid == -1)\n        throw std::runtime_error(\"key not found in model: general.architecture\");\n\n    enum gguf_type ktype = gguf_get_kv_type(ctx_gguf, kid);\n    if (ktype != GGUF_TYPE_STRING)\n        throw std::runtime_error(\"key general.architecture has wrong type\");\n\n    return gguf_get_val_str(ctx_gguf, kid);\n}\n\nstatic gguf_context *load_gguf(const char *fname)\n{\n    struct gguf_init_params params = {\n        /*.no_alloc = */ true,\n        /*.ctx      = */ nullptr,\n    };\n    gguf_context *ctx = gguf_init_from_file(fname, params);\n    if (!ctx) {\n        std::cerr << __func__ << \": gguf_init_from_file failed\\n\";\n        return nullptr;\n    }\n\n    int gguf_ver = gguf_get_version(ctx);\n    if (gguf_ver > GGUF_VER_MAX) {\n        std::cerr << __func__ << \": unsupported gguf version: \" << gguf_ver << \"\\n\";\n        gguf_free(ctx);\n        return nullptr;\n    }\n\n    return ctx;\n}\n\nstatic int32_t get_arch_key_u32(std::string const &modelPath, std::string const &archKey)\n{\n    int32_t value = -1;\n    std::string arch;\n\n    auto * ctx = load_gguf(modelPath.c_str());\n    if (!ctx)\n        goto cleanup;\n\n    try {\n        arch = get_arch_name(ctx);\n    } catch (const std::runtime_error &) {\n        goto cleanup; // cannot read key\n    }\n\n    {\n        auto key = arch + \".\" + archKey;\n        int keyidx = gguf_find_key(ctx, key.c_str());\n        if (keyidx != -1) {\n            value = gguf_get_val_u32(ctx, keyidx);\n        } else {\n            std::cerr << __func__ << \": \" << key << \" not found in \" << modelPath << \"\\n\";\n        }\n    }\n\ncleanup:\n    gguf_free(ctx);\n    return value;\n}\n\nstruct LLamaPrivate {\n    bool                         modelLoaded  = false;\n    int                          device       = -1;\n    std::string                  deviceName;\n    int64_t                      n_threads    = 0;\n    std::vector<LLModel::Token>  end_tokens;\n    
const char                  *backend_name = nullptr;\n    std::vector<LLModel::Token>  inputTokens;\n\n    llama_model          *model        = nullptr;\n    llama_context        *ctx          = nullptr;\n    llama_model_params    model_params;\n    llama_context_params  ctx_params;\n    llama_sampler        *sampler_chain;\n};\n\nLLamaModel::LLamaModel()\n    : d_ptr(std::make_unique<LLamaPrivate>())\n{\n    auto sparams = llama_sampler_chain_default_params();\n    d_ptr->sampler_chain = llama_sampler_chain_init(sparams);\n}\n\n// default hparams (LLaMA 7B)\nstruct llama_file_hparams {\n    uint32_t n_vocab = 32000;\n    uint32_t n_embd  = 4096;\n    uint32_t n_mult  = 256;\n    uint32_t n_head  = 32;\n    uint32_t n_layer = 32;\n    uint32_t n_rot   = 64;\n    enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;\n};\n\nsize_t LLamaModel::requiredMem(const std::string &modelPath, int n_ctx, int ngl)\n{\n    // TODO(cebtenzzre): update to GGUF\n    (void)ngl; // FIXME(cetenzzre): use this value\n    auto fin = std::ifstream(modelPath, std::ios::binary);\n    fin.seekg(0, std::ios_base::end);\n    size_t filesize = fin.tellg();\n    fin.seekg(0, std::ios_base::beg);\n    uint32_t magic = 0;\n    fin.read(reinterpret_cast<char*>(&magic), sizeof(magic));\n    if (magic != 0x67676a74) return 0;\n    uint32_t version = 0;\n    fin.read(reinterpret_cast<char*>(&version), sizeof(version));\n    llama_file_hparams hparams;\n    fin.read(reinterpret_cast<char*>(&hparams.n_vocab), sizeof(hparams.n_vocab));\n    fin.read(reinterpret_cast<char*>(&hparams.n_embd), sizeof(hparams.n_embd));\n    fin.read(reinterpret_cast<char*>(&hparams.n_head), sizeof(hparams.n_head));\n    fin.read(reinterpret_cast<char*>(&hparams.n_layer), sizeof(hparams.n_layer));\n    fin.read(reinterpret_cast<char*>(&hparams.n_rot), sizeof(hparams.n_rot));\n    fin.read(reinterpret_cast<char*>(&hparams.ftype), sizeof(hparams.ftype));\n    const size_t kvcache_element_size = 2; // fp16\n    const size_t 
est_kvcache_size = hparams.n_embd * hparams.n_layer * 2u * n_ctx * kvcache_element_size;\n    return filesize + est_kvcache_size;\n}\n\nbool LLamaModel::isModelBlacklisted(const std::string &modelPath) const\n{\n    auto * ctx = load_gguf(modelPath.c_str());\n    if (!ctx) {\n        std::cerr << __func__ << \": failed to load \" << modelPath << \"\\n\";\n        return false;\n    }\n\n    auto get_key = [ctx, &modelPath](const char *name) {\n        int keyidx = gguf_find_key(ctx, name);\n        if (keyidx == -1) {\n            throw std::logic_error(name + \" not found in \"s + modelPath);\n        }\n        return keyidx;\n    };\n\n    bool res = false;\n    try {\n        std::string name(gguf_get_val_str(ctx, get_key(\"general.name\")));\n        int token_idx = get_key(\"tokenizer.ggml.tokens\");\n        int n_vocab = gguf_get_arr_n(ctx, token_idx);\n\n        // check for known bad models\n        if (name == \"open-orca_mistral-7b-openorca\"\n            && n_vocab == 32002\n            && gguf_get_arr_str(ctx, token_idx, 32000) == \"<dummy32000>\"s // should be <|im_end|>\n        ) {\n            res = true;\n        }\n    } catch (const std::logic_error &e) {\n        std::cerr << __func__ << \": \" << e.what() << \"\\n\";\n    }\n\n    gguf_free(ctx);\n    return res;\n}\n\nbool LLamaModel::isEmbeddingModel(const std::string &modelPath) const\n{\n    bool result = false;\n    std::string arch;\n\n    auto *ctx_gguf = load_gguf(modelPath.c_str());\n    if (!ctx_gguf) {\n        std::cerr << __func__ << \": failed to load GGUF from \" <<  modelPath << \"\\n\";\n        goto cleanup;\n    }\n\n    try {\n        arch = get_arch_name(ctx_gguf);\n    } catch (const std::runtime_error &) {\n        goto cleanup; // cannot read key\n    }\n\n    result = is_embedding_arch(arch);\n\ncleanup:\n    gguf_free(ctx_gguf);\n    return result;\n}\n\nbool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)\n{\n    d_ptr->modelLoaded = 
false;\n\n    // clean up after previous loadModel()\n    if (d_ptr->model) {\n        llama_free_model(d_ptr->model);\n        d_ptr->model = nullptr;\n    }\n    if (d_ptr->ctx) {\n        llama_free(d_ptr->ctx);\n        d_ptr->ctx = nullptr;\n    }\n\n    if (n_ctx < 8) {\n        std::cerr << \"warning: minimum context size is 8, using minimum size.\\n\";\n        n_ctx = 8;\n    }\n\n    // -- load the model --\n\n    gpt_params params;\n\n    d_ptr->model_params = llama_model_default_params();\n\n    d_ptr->model_params.use_mmap  = params.use_mmap;\n#if defined (__APPLE__)\n    d_ptr->model_params.use_mlock = true;\n#else\n    d_ptr->model_params.use_mlock = params.use_mlock;\n#endif\n\n    d_ptr->model_params.progress_callback = &LLModel::staticProgressCallback;\n    d_ptr->model_params.progress_callback_user_data = this;\n\n    d_ptr->backend_name = \"cpu\"; // default\n\n#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA)\n    if (d_ptr->device != -1) {\n        d_ptr->model_params.main_gpu = d_ptr->device;\n        d_ptr->model_params.n_gpu_layers = ngl;\n        d_ptr->model_params.split_mode = LLAMA_SPLIT_MODE_NONE;\n    } else {\n#ifdef GGML_USE_CUDA\n        std::cerr << \"Llama ERROR: CUDA loadModel was called without a device\\n\";\n        return false;\n#endif // GGML_USE_CUDA\n    }\n#elif defined(GGML_USE_METAL)\n    (void)ngl;\n\n    if (llama_verbose()) {\n        std::cerr << \"llama.cpp: using Metal\" << std::endl;\n    }\n    d_ptr->backend_name = \"metal\";\n\n    // always fully offload on Metal\n    // TODO(cebtenzzre): use this parameter to allow using more than 53% of system RAM to load a model\n    d_ptr->model_params.n_gpu_layers = 100;\n#else // !KOMPUTE && !VULKAN && !CUDA && !METAL\n    (void)ngl;\n#endif\n\n    d_ptr->model = llama_load_model_from_file(modelPath.c_str(), d_ptr->model_params);\n    if (!d_ptr->model) {\n        fflush(stdout);\n#ifndef GGML_USE_CUDA\n        d_ptr->device = -1;\n   
     d_ptr->deviceName.clear();\n#endif\n        std::cerr << \"LLAMA ERROR: failed to load model from \" << modelPath << std::endl;\n        return false;\n    }\n\n    // -- initialize the context --\n\n    d_ptr->ctx_params = llama_context_default_params();\n\n    bool isEmbedding = is_embedding_arch(llama_model_arch(d_ptr->model));\n    const int n_ctx_train = llama_n_ctx_train(d_ptr->model);\n    if (isEmbedding) {\n        d_ptr->ctx_params.n_batch  = n_ctx;\n        d_ptr->ctx_params.n_ubatch = n_ctx;\n    } else {\n        if (n_ctx > n_ctx_train) {\n            std::cerr << \"warning: model was trained on only \" << n_ctx_train << \" context tokens (\"\n                      << n_ctx << \" specified)\\n\";\n        }\n    }\n\n    d_ptr->ctx_params.n_ctx  = n_ctx;\n    d_ptr->ctx_params.type_k = params.kv_type;\n    d_ptr->ctx_params.type_v = params.kv_type;\n\n    // The new batch API provides space for n_vocab*n_tokens logits. Tell llama.cpp early\n    // that we want this many logits so the state serializes consistently.\n    d_ptr->ctx_params.logits_all = true;\n\n    d_ptr->n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());\n    d_ptr->ctx_params.n_threads       = d_ptr->n_threads;\n    d_ptr->ctx_params.n_threads_batch = d_ptr->n_threads;\n\n    if (isEmbedding)\n        d_ptr->ctx_params.embeddings = true;\n\n    d_ptr->ctx = llama_new_context_with_model(d_ptr->model, d_ptr->ctx_params);\n    if (!d_ptr->ctx) {\n        fflush(stdout);\n        std::cerr << \"LLAMA ERROR: failed to init context for model \" <<  modelPath << std::endl;\n        llama_free_model(d_ptr->model);\n        d_ptr->model = nullptr;\n#ifndef GGML_USE_CUDA\n        d_ptr->device = -1;\n        d_ptr->deviceName.clear();\n#endif\n        return false;\n    }\n\n    d_ptr->end_tokens = {llama_token_eos(d_ptr->model)};\n\n    if (usingGPUDevice()) {\n#ifdef GGML_USE_KOMPUTE\n        if (llama_verbose()) {\n            std::cerr << \"llama.cpp: using Vulkan 
on \" << d_ptr->deviceName << std::endl;\n        }\n        d_ptr->backend_name = \"kompute\";\n#elif defined(GGML_USE_VULKAN)\n        d_ptr->backend_name = \"vulkan\";\n#elif defined(GGML_USE_CUDA)\n        d_ptr->backend_name = \"cuda\";\n#endif\n    }\n\n    m_supportsEmbedding = isEmbedding;\n    m_supportsCompletion = !isEmbedding;\n\n    fflush(stdout);\n    d_ptr->modelLoaded = true;\n    return true;\n}\n\nvoid LLamaModel::setThreadCount(int32_t n_threads)\n{\n    d_ptr->n_threads = n_threads;\n    llama_set_n_threads(d_ptr->ctx, n_threads, n_threads);\n}\n\nint32_t LLamaModel::threadCount() const\n{\n    return d_ptr->n_threads;\n}\n\nLLamaModel::~LLamaModel()\n{\n    if (d_ptr->ctx) {\n        llama_free(d_ptr->ctx);\n    }\n    llama_free_model(d_ptr->model);\n    llama_sampler_free(d_ptr->sampler_chain);\n}\n\nbool LLamaModel::isModelLoaded() const\n{\n    return d_ptr->modelLoaded;\n}\n\nsize_t LLamaModel::stateSize() const\n{\n    return llama_state_get_size(d_ptr->ctx);\n}\n\nsize_t LLamaModel::saveState(std::span<uint8_t> stateOut, std::vector<Token> &inputTokensOut) const\n{\n    size_t bytesWritten = llama_state_get_data(d_ptr->ctx, stateOut.data(), stateOut.size());\n    if (bytesWritten)\n        inputTokensOut.assign(d_ptr->inputTokens.begin(), d_ptr->inputTokens.end());\n    return bytesWritten;\n}\n\nsize_t LLamaModel::restoreState(std::span<const uint8_t> state, std::span<const Token> inputTokens)\n{\n    size_t bytesRead = llama_state_set_data(d_ptr->ctx, state.data(), state.size());\n    if (bytesRead)\n        d_ptr->inputTokens.assign(inputTokens.begin(), inputTokens.end());\n    return bytesRead;\n}\n\nstd::vector<LLModel::Token> LLamaModel::tokenize(std::string_view str) const\n{\n    std::vector<LLModel::Token> fres(str.length() + 4);\n    int32_t fres_len = llama_tokenize(\n        d_ptr->model, str.data(), str.length(), fres.data(), fres.size(), /*add_special*/ true, /*parse_special*/ true\n    );\n    fres.resize(fres_len);\n    
return fres;\n}\n\nbool LLamaModel::isSpecialToken(Token id) const\n{\n    return llama_token_get_attr(d_ptr->model, id)\n        & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN);\n}\n\nstd::string LLamaModel::tokenToString(Token id) const\n{\n    std::vector<char> result(8, 0);\n    const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);\n    if (n_tokens < 0) {\n        result.resize(-n_tokens);\n        int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);\n        GGML_ASSERT(check == -n_tokens);\n    }\n    else {\n        result.resize(n_tokens);\n    }\n\n    return std::string(result.data(), result.size());\n}\n\nvoid LLamaModel::initSampler(const PromptContext &promptCtx)\n{\n    auto *model = d_ptr->model;\n    auto *chain = d_ptr->sampler_chain;\n\n    // clear sampler chain\n    for (int i = llama_sampler_chain_n(chain) - 1; i >= 0; i--) {\n        auto *smpl = llama_sampler_chain_remove(chain, i);\n        llama_sampler_free(smpl);\n    }\n\n    // build new chain\n    llama_sampler_chain_add(chain,\n        llama_sampler_init_penalties(\n            llama_n_vocab(model),\n            llama_token_eos(model),\n            llama_token_nl(model),\n            promptCtx.repeat_last_n,\n            promptCtx.repeat_penalty,\n            // TODO(jared): consider making the below configurable\n            /*penalty_freq*/    0.0f,\n            /*penalty_present*/ 0.0f,\n            /*penalize_nl*/     true,\n            /*ignore_eos*/      false\n        )\n    );\n    if (promptCtx.temp == 0.0f) {\n        llama_sampler_chain_add(chain, llama_sampler_init_greedy());\n    } else {\n        struct llama_sampler *samplers[] = {\n            llama_sampler_init_top_k(promptCtx.top_k),\n            llama_sampler_init_top_p(promptCtx.top_p, 1),\n            llama_sampler_init_min_p(promptCtx.min_p, 1),\n            
llama_sampler_init_temp(promptCtx.temp),\n            llama_sampler_init_softmax(),\n            llama_sampler_init_dist(LLAMA_DEFAULT_SEED),\n        };\n        for (auto *smpl : samplers)\n            llama_sampler_chain_add(chain, smpl);\n    }\n}\n\nLLModel::Token LLamaModel::sampleToken() const\n{\n    return llama_sampler_sample(d_ptr->sampler_chain, d_ptr->ctx, -1);\n}\n\nbool LLamaModel::evalTokens(int32_t nPast, std::span<const Token> tokens) const\n{\n    assert(!tokens.empty());\n\n    llama_kv_cache_seq_rm(d_ptr->ctx, 0, nPast, -1);\n\n    llama_batch batch = llama_batch_init(tokens.size(), 0, 1);\n\n    batch.n_tokens = tokens.size();\n\n    for (int32_t i = 0; i < batch.n_tokens; i++) {\n        batch.token   [i] = tokens[i];\n        batch.pos     [i] = nPast + i;\n        batch.n_seq_id[i] = 1;\n        batch.seq_id  [i][0] = 0;\n        batch.logits  [i] = false;\n    }\n\n    // llama_decode will output logits only for the last token of the prompt\n    batch.logits[batch.n_tokens - 1] = true;\n\n    int res = llama_decode(d_ptr->ctx, batch);\n    llama_batch_free(batch);\n    return res == 0;\n}\n\nvoid LLamaModel::shiftContext(const PromptContext &promptCtx, int32_t *nPast)\n{\n    // infinite text generation via context shifting\n\n    // erase up to n_ctx*contextErase tokens\n    int n_keep = shouldAddBOS();\n    int n_past = *nPast;\n    int n_discard = std::min(n_past - n_keep, int(contextLength() * promptCtx.contextErase));\n\n    assert(n_discard > 0);\n    if (n_discard <= 0)\n        return;\n\n    std::cerr << \"Llama: context full, swapping: n_past = \" << n_past << \", n_keep = \" << n_keep\n              << \", n_discard = \" << n_discard << \"\\n\";\n\n    // erase the first n_discard tokens from the context\n    llama_kv_cache_seq_rm (d_ptr->ctx, 0, n_keep,             n_keep + n_discard);\n    llama_kv_cache_seq_add(d_ptr->ctx, 0, n_keep + n_discard, n_past,             -n_discard);\n\n    auto &inp = d_ptr->inputTokens;\n    
inp.erase(inp.begin() + n_keep, inp.begin() + n_keep + n_discard);\n    *nPast = inp.size();\n}\n\nint32_t LLamaModel::contextLength() const\n{\n    return llama_n_ctx(d_ptr->ctx);\n}\n\nauto LLamaModel::specialTokens() -> std::unordered_map<std::string, std::string> const\n{\n    if (!d_ptr->model)\n        throw std::logic_error(\"model not loaded\");\n\n    std::unordered_map<std::string, std::string> tokens;\n    if (auto id = llama_token_bos(d_ptr->model); id != LLAMA_TOKEN_NULL)\n        tokens.emplace(\"bos_token\", tokenToString(id));\n    if (auto id = llama_token_eos(d_ptr->model); id != LLAMA_TOKEN_NULL)\n        tokens.emplace(\"eos_token\", tokenToString(id));\n    return tokens;\n}\n\nint32_t LLamaModel::inputLength() const\n{\n    return d_ptr->inputTokens.size();\n}\n\nint32_t LLamaModel::computeModelInputPosition(std::span<const Token> input) const\n{\n    // find common prefix\n    auto cacheIt = d_ptr->inputTokens.begin();\n    auto inputIt = input.begin();\n    while (cacheIt < d_ptr->inputTokens.end() && inputIt < input.end() && *cacheIt == *inputIt) {\n        ++cacheIt; ++inputIt;\n    }\n    // tell the caller to ignore the tokens between [begin, inputIt)\n    return inputIt - input.begin();\n}\n\nvoid LLamaModel::setModelInputPosition(int32_t pos)\n{\n    auto &inp = d_ptr->inputTokens;\n    assert(pos >= 0);\n    assert(pos <= inp.size());\n    // truncate token cache to end at the new n_past\n    if (pos < inp.size())\n        inp.resize(pos);\n}\n\nvoid LLamaModel::appendInputToken(Token tok)\n{\n    d_ptr->inputTokens.push_back(tok);\n}\n\nauto LLamaModel::inputTokens() const -> std::span<const Token>\n{\n    return d_ptr->inputTokens;\n}\n\nconst std::vector<LLModel::Token> &LLamaModel::endTokens() const\n{\n    return d_ptr->end_tokens;\n}\n\nbool LLamaModel::shouldAddBOS() const\n{\n    return llama_add_bos_token(d_ptr->model);\n}\n\nint32_t LLamaModel::maxContextLength(std::string const &modelPath) const\n{\n    return 
get_arch_key_u32(modelPath, \"context_length\");\n}\n\nint32_t LLamaModel::layerCount(std::string const &modelPath) const\n{\n    return get_arch_key_u32(modelPath, \"block_count\");\n}\n\n// TODO(jared): reduce redundant code and operations by combining all metadata getters for unloaded\n//              models into a class that keeps the model file open\nauto LLamaModel::chatTemplate(const char *modelPath) const -> std::expected<std::string, std::string>\n{\n    auto *ctx = load_gguf(modelPath);\n    if (!ctx)\n        return std::unexpected(\"failed to open model file\");\n\n    std::expected<std::string, std::string> result;\n    enum gguf_type ktype;\n    const int kid = gguf_find_key(ctx, \"tokenizer.chat_template\");\n    if (kid == -1) {\n        result = std::unexpected(\"key not found\");\n        goto cleanup;\n    }\n\n    ktype = gguf_get_kv_type(ctx, kid);\n    if (ktype != GGUF_TYPE_STRING) {\n        result = std::unexpected(\n            \"expected key type STRING (\" + std::to_string(GGUF_TYPE_STRING) + \"), got \" + std::to_string(ktype)\n        );\n        goto cleanup;\n    }\n\n    result = gguf_get_val_str(ctx, kid);\n\ncleanup:\n    gguf_free(ctx);\n    return result;\n}\n\n#ifdef GGML_USE_VULKAN\nstatic const char *getVulkanVendorName(uint32_t vendorID)\n{\n    switch (vendorID) {\n        case 0x10DE: return \"nvidia\";\n        case 0x1002: return \"amd\";\n        case 0x8086: return \"intel\";\n        default:     return \"unknown\";\n    }\n}\n#endif\n\nstd::vector<LLModel::GPUDevice> LLamaModel::availableGPUDevices(size_t memoryRequired) const\n{\n#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA)\n    size_t count = 0;\n\n#ifdef GGML_USE_KOMPUTE\n    auto *lcppDevices = ggml_vk_available_devices(memoryRequired, &count);\n#elif defined(GGML_USE_VULKAN)\n    (void)memoryRequired; // hasn't been used since GGUF was added\n    auto *lcppDevices = ggml_vk_available_devices(&count);\n#else // 
defined(GGML_USE_CUDA)\n    (void)memoryRequired;\n    auto *lcppDevices = ggml_cuda_available_devices(&count);\n#endif\n\n    if (lcppDevices) {\n        std::vector<LLModel::GPUDevice> devices;\n        devices.reserve(count);\n\n        for (size_t i = 0; i < count; ++i) {\n            auto & dev = lcppDevices[i];\n\n            devices.emplace_back(\n#ifdef GGML_USE_KOMPUTE\n                /* backend  = */ \"kompute\",\n                /* index    = */ dev.index,\n                /* type     = */ dev.type,\n                /* heapSize = */ dev.heapSize,\n                /* name     = */ dev.name,\n                /* vendor   = */ dev.vendor\n#elif defined(GGML_USE_VULKAN)\n                /* backend  = */ \"vulkan\",\n                /* index    = */ dev.index,\n                /* type     = */ dev.type,\n                /* heapSize = */ dev.heapSize,\n                /* name     = */ dev.name,\n                /* vendor   = */ getVulkanVendorName(dev.vendorID)\n#else // defined(GGML_USE_CUDA)\n                /* backend  = */ \"cuda\",\n                /* index    = */ dev.index,\n                /* type     = */ 2, // vk::PhysicalDeviceType::eDiscreteGpu\n                /* heapSize = */ dev.heapSize,\n                /* name     = */ dev.name,\n                /* vendor   = */ \"nvidia\"\n#endif\n            );\n\n#ifndef GGML_USE_CUDA\n            ggml_vk_device_destroy(&dev);\n#else\n            ggml_cuda_device_destroy(&dev);\n#endif\n        }\n\n        free(lcppDevices);\n        return devices;\n    }\n#else\n    (void)memoryRequired;\n    std::cerr << __func__ << \": built without a GPU backend\\n\";\n#endif\n\n    return {};\n}\n\nbool LLamaModel::initializeGPUDevice(size_t memoryRequired, const std::string &name) const\n{\n#if defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA)\n    auto devices = availableGPUDevices(memoryRequired);\n\n    auto dev_it = devices.begin();\n#ifndef GGML_USE_CUDA\n    if (name == \"amd\" || name == \"nvidia\" || name 
== \"intel\") {\n        dev_it = std::find_if(dev_it, devices.end(), [&name](auto &dev) { return dev.vendor == name; });\n    } else\n#endif\n    if (name != \"gpu\") {\n        dev_it = std::find_if(dev_it, devices.end(), [&name](auto &dev) { return dev.name == name; });\n    }\n\n    if (dev_it < devices.end()) {\n        d_ptr->device     = dev_it->index;\n        d_ptr->deviceName = dev_it->name;\n        return true;\n    }\n    return false;\n#elif defined(GGML_USE_KOMPUTE)\n    ggml_vk_device device;\n    bool ok = ggml_vk_get_device(&device, memoryRequired, name.c_str());\n    if (ok) {\n        d_ptr->device = device.index;\n        d_ptr->deviceName = device.name;\n        ggml_vk_device_destroy(&device);\n        return true;\n    }\n#else\n    (void)memoryRequired;\n    (void)name;\n#endif\n    return false;\n}\n\nbool LLamaModel::initializeGPUDevice(int device, std::string *unavail_reason) const\n{\n#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA)\n    (void)unavail_reason;\n    auto devices = availableGPUDevices();\n    auto it = std::find_if(devices.begin(), devices.end(), [device](auto &dev) { return dev.index == device; });\n    d_ptr->device = device;\n    d_ptr->deviceName = it < devices.end() ? 
it->name : \"(unknown)\";\n    return true;\n#else\n    (void)device;\n    if (unavail_reason) {\n        *unavail_reason = \"built without a GPU backend\";\n    }\n    return false;\n#endif\n}\n\nbool LLamaModel::usingGPUDevice() const\n{\n    if (!d_ptr->model)\n        return false;\n\n    bool usingGPU = llama_model_using_gpu(d_ptr->model);\n#ifdef GGML_USE_KOMPUTE\n    assert(!usingGPU || ggml_vk_has_device());\n#endif\n    return usingGPU;\n}\n\nconst char *LLamaModel::backendName() const\n{\n    return d_ptr->backend_name;\n}\n\nconst char *LLamaModel::gpuDeviceName() const\n{\n    if (usingGPUDevice()) {\n#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA)\n        return d_ptr->deviceName.c_str();\n#elif defined(GGML_USE_METAL)\n        return \"Metal\";\n#endif\n    }\n    return nullptr;\n}\n\nvoid llama_batch_add(\n                 struct llama_batch & batch,\n                        llama_token   id,\n                          llama_pos   pos,\n    const std::vector<llama_seq_id> & seq_ids,\n                               bool   logits) {\n    batch.token   [batch.n_tokens] = id;\n    batch.pos     [batch.n_tokens] = pos;\n    batch.n_seq_id[batch.n_tokens] = seq_ids.size();\n    for (size_t i = 0; i < seq_ids.size(); ++i) {\n        batch.seq_id[batch.n_tokens][i] = seq_ids[i];\n    }\n    batch.logits  [batch.n_tokens] = logits;\n\n    batch.n_tokens++;\n}\n\nstatic void batch_add_seq(llama_batch &batch, const std::vector<LLModel::Token> &tokens, int seq_id)\n{\n    for (unsigned i = 0; i < tokens.size(); i++) {\n        llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);\n    }\n}\n\nsize_t LLamaModel::embeddingSize() const\n{\n    return llama_n_embd(d_ptr->model);\n}\n\nstruct EmbModelSpec {\n    const char *docPrefix;\n    const char *queryPrefix;\n    std::vector<const char *> otherPrefixes = {};\n    bool matryoshkaCapable = false;\n    const char *recommendedDims = nullptr;\n};\n\nstruct 
EmbModelGroup {\n    EmbModelSpec spec;\n    std::vector<const char *> names;\n};\n\nstatic const EmbModelSpec NOPREFIX_SPEC {\"\", \"\"};\nstatic const EmbModelSpec NOMIC_SPEC    {\"search_document\", \"search_query\", {\"clustering\", \"classification\"}};\nstatic const EmbModelSpec E5_SPEC       {\"passage\", \"query\"};\n\nstatic const EmbModelSpec NOMIC_1_5_SPEC {\n    \"search_document\", \"search_query\", {\"clustering\", \"classification\"}, true, \"[768, 512, 384, 256, 128]\",\n};\nstatic const EmbModelSpec LLM_EMBEDDER_SPEC {\n    \"Represent this document for retrieval\",\n    \"Represent this query for retrieving relevant documents\",\n};\nstatic const EmbModelSpec BGE_SPEC {\n    \"\", \"Represent this sentence for searching relevant passages\",\n};\nstatic const EmbModelSpec E5_MISTRAL_SPEC {\n    \"\", \"Instruct: Given a query, retrieve relevant passages that answer the query\\nQuery\",\n};\n\nstatic const EmbModelGroup EMBEDDING_MODEL_SPECS[] {\n    {NOPREFIX_SPEC,     {\"all-MiniLM-L6-v1\", \"all-MiniLM-L12-v1\", \"all-MiniLM-L6-v2\", \"all-MiniLM-L12-v2\"}},\n    {NOMIC_SPEC,        {\"nomic-embed-text-v1\", \"nomic-embed-text-v1-ablated\", \"nomic-embed-text-v1-unsupervised\"}},\n    {NOMIC_1_5_SPEC,    {\"nomic-embed-text-v1.5\"}},\n    {LLM_EMBEDDER_SPEC, {\"llm-embedder\"}},\n    {BGE_SPEC,          {\"bge-small-en\", \"bge-base-en\", \"bge-large-en\",\n                         \"bge-small-en-v1.5\", \"bge-base-en-v1.5\", \"bge-large-en-v1.5\"}},\n    // NOTE: E5 Mistral is not yet implemented in llama.cpp, so it's not in EMBEDDING_ARCHES\n    {E5_SPEC,           {\"e5-small\", \"e5-base\", \"e5-large\",\n                         \"e5-small-unsupervised\", \"e5-base-unsupervised\", \"e5-large-unsupervised\",\n                         \"e5-small-v2\", \"e5-base-v2\", \"e5-large-v2\"}},\n    {E5_MISTRAL_SPEC,   {\"e5-mistral-7b-instruct\",\n                         \"multilingual-e5-small\", \"multilingual-e5-base\", 
\"multilingual-e5-large\",\n                         \"multilingual-e5-large-instruct\"}},\n};\n\nstatic const EmbModelSpec *getEmbedSpec(const std::string &modelName) {\n    static const auto &specs = EMBEDDING_MODEL_SPECS;\n    auto it = std::find_if(specs, std::end(specs),\n        [&modelName](auto &spec) {\n            auto &names = spec.names;\n            return std::find(names.begin(), names.end(), modelName) < names.end();\n        }\n    );\n    return it < std::end(specs) ? &it->spec : nullptr;\n}\n\nvoid LLamaModel::embed(\n    const std::vector<std::string> &texts, float *embeddings, bool isRetrieval, int dimensionality, size_t *tokenCount,\n    bool doMean, bool atlas\n) {\n    const EmbModelSpec *spec;\n    std::optional<std::string> prefix;\n    if (d_ptr->model && (spec = getEmbedSpec(llama_model_name(d_ptr->model))))\n        prefix = isRetrieval ? spec->queryPrefix : spec->docPrefix;\n\n    embed(texts, embeddings, prefix, dimensionality, tokenCount, doMean, atlas);\n}\n\nvoid LLamaModel::embed(\n    const std::vector<std::string> &texts, float *embeddings, std::optional<std::string> prefix, int dimensionality,\n    size_t *tokenCount, bool doMean, bool atlas, LLModel::EmbedCancelCallback *cancelCb\n) {\n    if (!d_ptr->model)\n        throw std::logic_error(\"no model is loaded\");\n\n    const char *modelName = llama_model_name(d_ptr->model);\n    if (!m_supportsEmbedding)\n        throw std::logic_error(\"not an embedding model: \"s + modelName);\n\n    auto *spec = getEmbedSpec(modelName);\n    if (!spec)\n        std::cerr << __func__ << \": warning: unknown model \" << modelName << \"\\n\";\n\n    const int32_t n_embd = llama_n_embd(d_ptr->model);\n    if (dimensionality < 0) {\n        dimensionality = n_embd;\n    } else if (spec && dimensionality != n_embd) {\n        auto msg = [dimensionality, modelName]() {\n            return \"unsupported dimensionality \" + std::to_string(dimensionality) + \" for model \" + modelName;\n        };\n 
       if (!spec->matryoshkaCapable)\n            throw std::out_of_range(msg() + \" (supported: \" + std::to_string(n_embd) + \")\");\n        if (dimensionality == 0 || dimensionality > n_embd)\n            throw std::out_of_range(msg() + \" (recommended: \" + spec->recommendedDims + \")\");\n    }\n\n    if (!prefix) {\n        if (!spec)\n            throw std::invalid_argument(\"unknown model \"s + modelName + \", specify a prefix if applicable or an empty string\");\n        prefix = spec->docPrefix;\n    } else if (spec && prefix != spec->docPrefix && prefix != spec->queryPrefix &&\n               std::find(spec->otherPrefixes.begin(), spec->otherPrefixes.end(), *prefix) == spec->otherPrefixes.end())\n    {\n        std::stringstream ss;\n        ss << std::quoted(*prefix) << \" is not a valid task type for model \" << modelName;\n        throw std::invalid_argument(ss.str());\n    }\n\n    embedInternal(texts, embeddings, *prefix, dimensionality, tokenCount, doMean, atlas, cancelCb, spec);\n}\n\n// MD5 hash of \"nomic empty\"\nstatic const char EMPTY_PLACEHOLDER[] = \"24df574ea1c998de59d5be15e769658e\";\n\nauto product(double a) -> std::function<double(double)>\n{\n    return [a](double b) { return a * b; };\n}\n\ntemplate <typename T>\ndouble getL2NormScale(T *start, T *end)\n{\n    double magnitude = std::sqrt(std::inner_product(start, end, start, 0.0));\n    return 1.0 / std::max(magnitude, 1e-12);\n}\n\nvoid LLamaModel::embedInternal(\n    const std::vector<std::string> &texts, float *embeddings, std::string prefix, int dimensionality,\n    size_t *tokenCount, bool doMean, bool atlas, LLModel::EmbedCancelCallback *cancelCb, const EmbModelSpec *spec\n) {\n    typedef std::vector<LLModel::Token> TokenString;\n    static constexpr int32_t atlasMaxLength = 8192;\n    static constexpr int chunkOverlap = 8; // Atlas overlaps chunks of input by 8 tokens\n\n    const llama_token bos_token = llama_token_bos(d_ptr->model);\n    const llama_token eos_token = 
llama_token_eos(d_ptr->model);\n\n    bool useBOS = llama_add_bos_token(d_ptr->model);\n    bool useEOS = llama_vocab_type(d_ptr->model) == LLAMA_VOCAB_TYPE_WPM;\n\n    // no EOS, optional BOS\n    auto tokenize = [this, useBOS, useEOS, eos_token](std::string text, TokenString &tokens, bool wantBOS) {\n        if (!text.empty() && text[0] != ' ') {\n            text = ' ' + text; // normalize for SPM - our fork of llama.cpp doesn't add a space prefix\n        }\n\n        tokens.resize(text.length()+4);\n        int32_t n_tokens = llama_tokenize_gpt4all(\n            d_ptr->model, text.c_str(), text.length(), tokens.data(), tokens.size(), /*add_special*/ wantBOS,\n            /*parse_special*/ false, /*insert_space*/ false\n        );\n        if (n_tokens) {\n            (void)eos_token;\n            (void)useBOS;\n            assert((useEOS && wantBOS && useBOS) == (eos_token != -1 && tokens[n_tokens - 1] == eos_token));\n            if (useEOS && wantBOS)\n                n_tokens--; // erase EOS/SEP\n        }\n        tokens.resize(n_tokens);\n    };\n\n    // tokenize the texts\n    std::vector<TokenString> inputs;\n    for (unsigned i = 0; i < texts.size(); i++) {\n        auto &text = texts[i];\n        auto &inp = inputs.emplace_back();\n        tokenize(text, inp, false);\n        if (atlas && inp.size() > atlasMaxLength) {\n            if (doMean) {\n                throw std::length_error(\n                    \"length of text at index \" + std::to_string(i) + \" is \" + std::to_string(inp.size()) +\n                    \" tokens which exceeds limit of \" + std::to_string(atlasMaxLength)\n                );\n            }\n            inp.resize(atlasMaxLength);\n        } else if (inp.empty()) {\n            if (!atlas || !text.empty()) {\n                std::cerr << __func__ << \": warning: chunking tokenized text at index \" << std::to_string(i)\n                          << \" into zero tokens\\n\";\n            }\n            
tokenize(EMPTY_PLACEHOLDER, inp, false);\n        }\n    }\n\n    // tokenize the prefix\n    TokenString prefixTokens;\n    if (prefix.empty()) {\n        prefixTokens.push_back(bos_token);\n    } else {\n        tokenize(prefix + ':', prefixTokens, true);\n    }\n\n    // n_ctx_train: max sequence length of model (RoPE scaling not implemented)\n    const uint32_t n_ctx_train = llama_n_ctx_train(d_ptr->model);\n    // n_batch (equals n_ctx): max tokens per call to llama_decode (one more more sequences)\n    const uint32_t n_batch = llama_n_batch(d_ptr->ctx);\n\n    // effective sequence length minus prefix and SEP token\n    const uint32_t max_len = std::min(n_ctx_train, n_batch) - (prefixTokens.size() + useEOS);\n    if (max_len <= chunkOverlap) {\n        throw std::logic_error(\"max chunk length of \" + std::to_string(max_len) + \" is smaller than overlap of \" +\n                               std::to_string(chunkOverlap) + \" tokens\");\n    }\n\n    // split into max_len-sized chunks\n    struct split_batch { unsigned idx; TokenString batch; };\n    std::vector<split_batch> batches;\n    size_t totalTokens = 0;\n    for (unsigned i = 0; i < inputs.size(); i++) {\n        auto &input = inputs[i];\n        for (unsigned j = 0; j < input.size(); j += max_len) {\n            if (j) { j -= chunkOverlap; }\n            unsigned end = std::min(j + max_len, unsigned(input.size()));\n            batches.push_back({ i, {} });\n            auto &batch = batches.back().batch;\n            batch = prefixTokens;\n            batch.insert(batch.end(), input.begin() + j, input.begin() + end);\n            totalTokens += end - j;\n            batch.push_back(eos_token);\n            if (!doMean) { break; /* limit text to one chunk */ }\n        }\n    }\n    inputs.clear();\n\n    if (cancelCb) {\n        // copy of batching code below, but just count tokens instead of running inference\n        unsigned nBatchTokens = 0;\n        std::vector<unsigned> batchSizes;\n        
for (const auto &inp: batches) {\n            if (nBatchTokens + inp.batch.size() > n_batch) {\n                batchSizes.push_back(nBatchTokens);\n                nBatchTokens = 0;\n            }\n            nBatchTokens += inp.batch.size();\n        }\n        batchSizes.push_back(nBatchTokens);\n        if (cancelCb(batchSizes.data(), batchSizes.size(), d_ptr->backend_name)) {\n            throw std::runtime_error(\"operation was canceled\");\n        }\n    }\n\n    // initialize batch\n    struct llama_batch batch = llama_batch_init(n_batch, 0, 1);\n\n    // n_texts x n_embd matrix\n    const int32_t n_embd = llama_n_embd(d_ptr->model);\n    std::vector<double> embeddingsSum(texts.size() * n_embd);\n    std::vector<int> embeddingsSumTotal(texts.size());\n    std::vector<int> queued_indices; // text indices of batches to be processed\n\n    auto decode = [this, &queued_indices, n_embd, &batch, &embeddingsSum, &embeddingsSumTotal, spec, dimensionality]() {\n        if (llama_decode(d_ptr->ctx, batch) < 0)\n            throw std::runtime_error(\"llama_decode failed\");\n\n        for (int i = 0; i < batch.n_tokens; ++i) {\n            if (!batch.logits[i]) { continue; }\n            int i_prompt = queued_indices[batch.seq_id[i][0]];\n            auto *out = &embeddingsSum[i_prompt * n_embd];\n\n            // sequence embeddings aren't available when pooling_type is NONE\n            auto *embd = llama_get_embeddings_seq(d_ptr->ctx, batch.seq_id[i][0]);\n            if (!embd) { embd = llama_get_embeddings_ith(d_ptr->ctx, i); }\n            assert(embd);\n\n            auto *embd_end = embd + n_embd;\n\n            // layer normalization for nomic-embed-text-v1.5\n            if (spec && spec->matryoshkaCapable) {\n                // normalize mean\n                double mean = std::accumulate(embd, embd_end, 0.0) / n_embd;\n                std::transform(embd, embd_end, embd, [mean](double f){ return f - mean; });\n\n                // unbiased sample 
variance, with Bessel's correction\n                double variance = std::inner_product(embd, embd_end, embd, 0.0) / (n_embd - 1);\n\n                // trim to matryoshka dim\n                embd_end = embd + dimensionality;\n\n                // normalize variance\n                std::transform(embd, embd_end, embd, product(1.0 / std::sqrt(variance + 1e-5)));\n            }\n\n            // L2 norm\n            auto scale = getL2NormScale(embd, embd_end);\n            std::transform(embd, embd_end, out, out, [scale](double e, double o){ return o + scale * e; });\n            embeddingsSumTotal[i_prompt]++;\n        }\n    };\n\n    // break into batches\n    for (const auto &inp: batches) {\n        // encode if at capacity\n        if (batch.n_tokens + inp.batch.size() > n_batch) {\n            decode();\n            batch.n_tokens = 0;\n            queued_indices.clear();\n        }\n\n        // add to batch\n        batch_add_seq(batch, inp.batch, queued_indices.size());\n        queued_indices.push_back(inp.idx);\n    }\n\n    // final batch\n    decode();\n\n    for (unsigned i = 0; i < texts.size(); i++) {\n        auto *embd = &embeddingsSum[i * n_embd];\n        auto *embd_end = embd + dimensionality;\n        int total = embeddingsSumTotal[i];\n\n        // average over chunks\n        std::transform(embd, embd_end, embd, product(1.0 / total));\n\n        // L2 norm and copy\n        auto scale = getL2NormScale(embd, embd_end);\n        std::transform(embd, embd_end, embeddings, product(scale));\n        embeddings += dimensionality;\n    }\n\n    if (tokenCount) { *tokenCount = totalTokens; }\n\n    llama_batch_free(batch);\n}\n\n#if defined(_WIN32)\n#define DLL_EXPORT __declspec(dllexport)\n#else\n#define DLL_EXPORT __attribute__ ((visibility (\"default\")))\n#endif\n\nextern \"C\" {\nDLL_EXPORT bool is_g4a_backend_model_implementation()\n{\n    return true;\n}\n\nDLL_EXPORT const char *get_model_type()\n{\n    return modelType_;\n}\n\nDLL_EXPORT 
const char *get_build_variant()\n{\n    return GGML_BUILD_VARIANT;\n}\n\nDLL_EXPORT char *get_file_arch(const char *fname)\n{\n    char *arch = nullptr;\n    std::string archStr;\n\n    auto *ctx = load_gguf(fname);\n    if (!ctx)\n        goto cleanup;\n\n    try {\n        archStr = get_arch_name(ctx);\n    } catch (const std::runtime_error &) {\n        goto cleanup; // cannot read key\n    }\n\n    if (is_embedding_arch(archStr) && gguf_find_key(ctx, (archStr + \".pooling_type\").c_str()) < 0) {\n        // old bert.cpp embedding model\n    } else {\n        arch = strdup(archStr.c_str());\n    }\n\ncleanup:\n    gguf_free(ctx);\n    return arch;\n}\n\nDLL_EXPORT bool is_arch_supported(const char *arch)\n{\n    return std::find(KNOWN_ARCHES.begin(), KNOWN_ARCHES.end(), std::string(arch)) < KNOWN_ARCHES.end();\n}\n\nDLL_EXPORT LLModel *construct()\n{\n    llama_log_set([](auto l, auto t, auto u) { llama_log_callback(l, t, u, false); }, nullptr);\n#ifdef GGML_USE_CUDA\n    ggml_backend_cuda_log_set_callback([](auto l, auto t, auto u) { llama_log_callback(l, t, u, true); }, nullptr);\n#endif\n    return new LLamaModel;\n}\n}\n"
  },
  {
    "path": "gpt4all-backend/src/llamamodel_impl.h",
    "content": "#ifndef LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE\n#error This file is NOT meant to be included outside of llamamodel.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE\n#endif\n#ifndef LLAMAMODEL_H\n#define LLAMAMODEL_H\n\n#include \"llmodel.h\"\n\n#include <memory>\n#include <span>\n#include <string>\n#include <string_view>\n#include <vector>\n#include <unordered_map>\n\nstruct LLamaPrivate;\nstruct EmbModelSpec;\n\nclass LLamaModel : public LLModel {\npublic:\n    LLamaModel();\n    ~LLamaModel();\n\n    bool supportsEmbedding() const override { return m_supportsEmbedding; }\n    bool supportsCompletion() const override { return m_supportsCompletion; }\n    bool loadModel(const std::string &modelPath, int n_ctx, int ngl) override;\n    bool isModelBlacklisted(const std::string &modelPath) const override;\n    bool isEmbeddingModel(const std::string &modelPath) const override;\n    bool isModelLoaded() const override;\n    size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) override;\n    size_t stateSize() const override;\n    size_t saveState(std::span<uint8_t> stateOut, std::vector<Token> &inputTokensOut) const override;\n    size_t restoreState(std::span<const uint8_t> state, std::span<const Token> inputTokens) override;\n    void setThreadCount(int32_t n_threads) override;\n    int32_t threadCount() const override;\n    std::vector<GPUDevice> availableGPUDevices(size_t memoryRequired = 0) const override;\n    bool initializeGPUDevice(size_t memoryRequired, const std::string &name) const override;\n    bool initializeGPUDevice(int device, std::string *unavail_reason = nullptr) const override;\n    bool usingGPUDevice() const override;\n    const char *backendName() const override;\n    const char *gpuDeviceName() const override;\n\n    size_t embeddingSize() const override;\n    // user-specified prefix\n    
void embed(const std::vector<std::string> &texts, float *embeddings, std::optional<std::string> prefix,\n               int dimensionality = -1, size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false,\n               EmbedCancelCallback *cancelCb = nullptr) override;\n    // automatic prefix\n    void embed(const std::vector<std::string> &texts, float *embeddings, bool isRetrieval, int dimensionality = -1,\n               size_t *tokenCount = nullptr, bool doMean = true, bool atlas = false) override;\n\n    int32_t contextLength() const override;\n    auto specialTokens() -> std::unordered_map<std::string, std::string> const override;\n\nprotected:\n    std::vector<Token> tokenize(std::string_view str) const override;\n    bool isSpecialToken(Token id) const override;\n    std::string tokenToString(Token id) const override;\n    void initSampler(const PromptContext &ctx) override;\n    Token sampleToken() const override;\n    bool evalTokens(int32_t nPast, std::span<const Token> tokens) const override;\n    void shiftContext(const PromptContext &promptCtx, int32_t *nPast) override;\n    int32_t inputLength() const override;\n    int32_t computeModelInputPosition(std::span<const Token> input) const override;\n    void setModelInputPosition(int32_t pos) override;\n    void appendInputToken(Token tok) override;\n    std::span<const Token> inputTokens() const override;\n    const std::vector<Token> &endTokens() const override;\n    bool shouldAddBOS() const override;\n    int32_t maxContextLength(std::string const &modelPath) const override;\n    int32_t layerCount(std::string const &modelPath) const override;\n    auto chatTemplate(const char *modelPath) const -> std::expected<std::string, std::string> override;\n\n    void embedInternal(const std::vector<std::string> &texts, float *embeddings, std::string prefix, int dimensionality,\n                       size_t *tokenCount, bool doMean, bool atlas, EmbedCancelCallback *cancelCb,\n                      
 const EmbModelSpec *spec);\n\nprivate:\n    std::unique_ptr<LLamaPrivate> d_ptr;\n    bool m_supportsEmbedding = false;\n    bool m_supportsCompletion = false;\n};\n\n#endif // LLAMAMODEL_H\n"
  },
  {
    "path": "gpt4all-backend/src/llmodel.cpp",
    "content": "#include \"llmodel.h\"\n\n#include \"dlhandle.h\"\n\n#include <cassert>\n#include <cstdlib>\n#include <filesystem>\n#include <fstream>\n#include <iostream>\n#include <iterator>\n#include <memory>\n#include <optional>\n#include <regex>\n#include <sstream>\n#include <string>\n#include <unordered_map>\n#include <vector>\n\n#ifdef _WIN32\n#   define WIN32_LEAN_AND_MEAN\n#   ifndef NOMINMAX\n#       define NOMINMAX\n#   endif\n#   include <windows.h>\n#endif\n\n#ifdef _MSC_VER\n#   include <intrin.h>\n#endif\n\n#if defined(__APPLE__) && defined(__aarch64__)\n#   include \"sysinfo.h\" // for getSystemTotalRAMInBytes\n#endif\n\nnamespace fs = std::filesystem;\n\n#ifndef __APPLE__\nstatic const std::string DEFAULT_BACKENDS[] = {\"kompute\", \"cpu\"};\n#elif defined(__aarch64__)\nstatic const std::string DEFAULT_BACKENDS[] = {\"metal\", \"cpu\"};\n#else\nstatic const std::string DEFAULT_BACKENDS[] = {\"cpu\"};\n#endif\n\nstd::string s_implementations_search_path = \".\";\n\n#if !(defined(__x86_64__) || defined(_M_X64))\n    // irrelevant on non-x86_64\n    #define cpu_supports_avx()  -1\n    #define cpu_supports_avx2() -1\n#elif defined(_MSC_VER)\n    // MSVC\n    static int get_cpu_info(int func_id, int reg_id) {\n        int info[4];\n        __cpuid(info, func_id);\n        return info[reg_id];\n    }\n\n    // AVX via EAX=1: Processor Info and Feature Bits, bit 28 of ECX\n    #define cpu_supports_avx()  !!(get_cpu_info(1, 2) & (1 << 28))\n    // AVX2 via EAX=7, ECX=0: Extended Features, bit 5 of EBX\n    #define cpu_supports_avx2() !!(get_cpu_info(7, 1) & (1 <<  5))\n#else\n    // gcc/clang\n    #define cpu_supports_avx()  !!__builtin_cpu_supports(\"avx\")\n    #define cpu_supports_avx2() !!__builtin_cpu_supports(\"avx2\")\n#endif\n\nLLModel::Implementation::Implementation(Dlhandle &&dlhandle_)\n    : m_dlhandle(new Dlhandle(std::move(dlhandle_))) {\n    auto get_model_type = m_dlhandle->get<const char *()>(\"get_model_type\");\n    
assert(get_model_type);\n    m_modelType = get_model_type();\n    auto get_build_variant = m_dlhandle->get<const char *()>(\"get_build_variant\");\n    assert(get_build_variant);\n    m_buildVariant = get_build_variant();\n    m_getFileArch = m_dlhandle->get<char *(const char *)>(\"get_file_arch\");\n    assert(m_getFileArch);\n    m_isArchSupported = m_dlhandle->get<bool(const char *)>(\"is_arch_supported\");\n    assert(m_isArchSupported);\n    m_construct = m_dlhandle->get<LLModel *()>(\"construct\");\n    assert(m_construct);\n}\n\nLLModel::Implementation::Implementation(Implementation &&o)\n    : m_getFileArch(o.m_getFileArch)\n    , m_isArchSupported(o.m_isArchSupported)\n    , m_construct(o.m_construct)\n    , m_modelType(o.m_modelType)\n    , m_buildVariant(o.m_buildVariant)\n    , m_dlhandle(o.m_dlhandle) {\n    o.m_dlhandle = nullptr;\n}\n\nLLModel::Implementation::~Implementation()\n{\n    delete m_dlhandle;\n}\n\nstatic bool isImplementation(const Dlhandle &dl)\n{\n    return dl.get<bool(uint32_t)>(\"is_g4a_backend_model_implementation\");\n}\n\n// Add the CUDA Toolkit to the DLL search path on Windows.\n// This is necessary for chat.exe to find CUDA when started from Qt Creator.\nstatic void addCudaSearchPath()\n{\n#ifdef _WIN32\n    if (const auto *cudaPath = _wgetenv(L\"CUDA_PATH\")) {\n        auto libDir = std::wstring(cudaPath) + L\"\\\\bin\";\n        if (!AddDllDirectory(libDir.c_str())) {\n            auto err = GetLastError();\n            std::wcerr << L\"AddDllDirectory(\\\"\" << libDir << L\"\\\") failed with error 0x\" << std::hex << err << L\"\\n\";\n        }\n    }\n#endif\n}\n\nconst std::vector<LLModel::Implementation> &LLModel::Implementation::implementationList()\n{\n    if (cpu_supports_avx() == 0) {\n        throw std::runtime_error(\"CPU does not support AVX\");\n    }\n\n    // NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the\n    // individual models without the cleanup of the static 
list interfering\n    static auto* libs = new std::vector<Implementation>([] () {\n        std::vector<Implementation> fres;\n\n        addCudaSearchPath();\n\n        std::string impl_name_re = \"llamamodel-mainline-(cpu|metal|kompute|vulkan|cuda)\";\n        if (cpu_supports_avx2() == 0) {\n            impl_name_re += \"-avxonly\";\n        }\n        std::regex re(impl_name_re);\n        auto search_in_directory = [&](const std::string& paths) {\n            std::stringstream ss(paths);\n            std::string path;\n            // Split the paths string by the delimiter and process each path.\n            while (std::getline(ss, path, ';')) {\n                fs::directory_iterator iter;\n                try {\n                    iter = fs::directory_iterator(std::u8string(path.begin(), path.end()));\n                } catch (const fs::filesystem_error &) {\n                    continue; // skip nonexistent path\n                }\n                // Iterate over all libraries\n                for (const auto &f : iter) {\n                    const fs::path &p = f.path();\n\n                    if (p.extension() != LIB_FILE_EXT) continue;\n                    if (!std::regex_search(p.stem().string(), re)) continue;\n\n                    // Add to list if model implementation\n                    Dlhandle dl;\n                    try {\n                        dl = Dlhandle(p);\n                    } catch (const Dlhandle::Exception &e) {\n                        std::cerr << \"Failed to load \" << p.filename().string() << \": \" << e.what() << \"\\n\";\n                        continue;\n                    }\n                    if (!isImplementation(dl)) {\n                        std::cerr << \"Not an implementation: \" << p.filename().string() << \"\\n\";\n                        continue;\n                    }\n                    fres.emplace_back(Implementation(std::move(dl)));\n                }\n            }\n        };\n\n        
search_in_directory(s_implementations_search_path);\n\n        return fres;\n    }());\n    // Return static result\n    return *libs;\n}\n\nstatic std::string applyCPUVariant(const std::string &buildVariant)\n{\n    if (buildVariant != \"metal\" && cpu_supports_avx2() == 0) {\n        return buildVariant + \"-avxonly\";\n    }\n    return buildVariant;\n}\n\nconst LLModel::Implementation* LLModel::Implementation::implementation(const char *fname, const std::string& buildVariant)\n{\n    bool buildVariantMatched = false;\n    std::optional<std::string> archName;\n    for (const auto& i : implementationList()) {\n        if (buildVariant != i.m_buildVariant) continue;\n        buildVariantMatched = true;\n\n        char *arch = i.m_getFileArch(fname);\n        if (!arch) continue;\n        archName = arch;\n\n        bool archSupported = i.m_isArchSupported(arch);\n        free(arch);\n        if (archSupported) return &i;\n    }\n\n    if (!buildVariantMatched)\n        return nullptr;\n    if (!archName)\n        throw UnsupportedModelError(\"Unsupported file format\");\n\n    throw BadArchError(std::move(*archName));\n}\n\nLLModel *LLModel::Implementation::construct(const std::string &modelPath, const std::string &backend, int n_ctx)\n{\n    std::vector<std::string> desiredBackends;\n    if (backend != \"auto\") {\n        desiredBackends.push_back(backend);\n    } else {\n        desiredBackends.insert(desiredBackends.end(), DEFAULT_BACKENDS, std::end(DEFAULT_BACKENDS));\n    }\n\n    for (const auto &desiredBackend: desiredBackends) {\n        const auto *impl = implementation(modelPath.c_str(), applyCPUVariant(desiredBackend));\n\n        if (impl) {\n            // Construct llmodel implementation\n            auto *fres = impl->m_construct();\n            fres->m_implementation = impl;\n\n#if defined(__APPLE__) && defined(__aarch64__) // FIXME: See if metal works for intel macs\n            /* TODO(cebtenzzre): after we fix requiredMem, we should change this 
to happen at\n             * load time, not construct time. right now n_ctx is incorrectly hardcoded 2048 in\n             * most (all?) places where this is called, causing underestimation of required\n             * memory. */\n            if (backend == \"auto\" && desiredBackend == \"metal\") {\n                // on a 16GB M2 Mac a 13B q4_0 (0.52) works for me but a 13B q4_K_M (0.55) does not\n                size_t req_mem = fres->requiredMem(modelPath, n_ctx, 100);\n                if (req_mem >= size_t(0.53f * getSystemTotalRAMInBytes())) {\n                    delete fres;\n                    continue;\n                }\n            }\n#else\n            (void)n_ctx;\n#endif\n\n            return fres;\n        }\n    }\n\n    throw MissingImplementationError(\"Could not find any implementations for backend: \" + backend);\n}\n\nLLModel *LLModel::Implementation::constructGlobalLlama(const std::optional<std::string> &backend)\n{\n    static std::unordered_map<std::string, std::unique_ptr<LLModel>> implCache;\n\n    const std::vector<Implementation> *impls;\n    try {\n        impls = &implementationList();\n    } catch (const std::runtime_error &e) {\n        std::cerr << __func__ << \": implementationList failed: \" << e.what() << \"\\n\";\n        return nullptr;\n    }\n\n    std::vector<std::string> desiredBackends;\n    if (backend) {\n        desiredBackends.push_back(backend.value());\n    } else {\n        desiredBackends.insert(desiredBackends.end(), DEFAULT_BACKENDS, std::end(DEFAULT_BACKENDS));\n    }\n\n    const Implementation *impl = nullptr;\n\n    for (const auto &desiredBackend: desiredBackends) {\n        auto cacheIt = implCache.find(desiredBackend);\n        if (cacheIt != implCache.end())\n            return cacheIt->second.get(); // cached\n\n        for (const auto &i: *impls) {\n            if (i.m_modelType == \"LLaMA\" && i.m_buildVariant == applyCPUVariant(desiredBackend)) {\n                impl = &i;\n                break;\n  
          }\n        }\n\n        if (impl) {\n            auto *fres = impl->m_construct();\n            fres->m_implementation = impl;\n            implCache[desiredBackend] = std::unique_ptr<LLModel>(fres);\n            return fres;\n        }\n    }\n\n    std::cerr << __func__ << \": could not find Llama implementation for backend: \" << backend.value_or(\"default\") << \"\\n\";\n    return nullptr;\n}\n\nstd::vector<LLModel::GPUDevice> LLModel::Implementation::availableGPUDevices(size_t memoryRequired)\n{\n    std::vector<LLModel::GPUDevice> devices;\n#ifndef __APPLE__\n    static const std::string backends[] = {\"kompute\", \"cuda\"};\n    for (const auto &backend: backends) {\n        auto *llama = constructGlobalLlama(backend);\n        if (llama) {\n            auto backendDevs = llama->availableGPUDevices(memoryRequired);\n            devices.insert(devices.end(), backendDevs.begin(), backendDevs.end());\n        }\n    }\n#endif\n    return devices;\n}\n\nint32_t LLModel::Implementation::maxContextLength(const std::string &modelPath)\n{\n    auto *llama = constructGlobalLlama();\n    return llama ? llama->maxContextLength(modelPath) : -1;\n}\n\nint32_t LLModel::Implementation::layerCount(const std::string &modelPath)\n{\n    auto *llama = constructGlobalLlama();\n    return llama ? llama->layerCount(modelPath) : -1;\n}\n\nbool LLModel::Implementation::isEmbeddingModel(const std::string &modelPath)\n{\n    auto *llama = constructGlobalLlama();\n    return llama && llama->isEmbeddingModel(modelPath);\n}\n\nauto LLModel::Implementation::chatTemplate(const char *modelPath) -> std::expected<std::string, std::string>\n{\n    auto *llama = constructGlobalLlama();\n    return llama ? 
llama->chatTemplate(modelPath) : std::unexpected(\"backend not available\");\n}\n\nvoid LLModel::Implementation::setImplementationsSearchPath(const std::string& path)\n{\n    s_implementations_search_path = path;\n}\n\nconst std::string& LLModel::Implementation::implementationsSearchPath()\n{\n    return s_implementations_search_path;\n}\n\nbool LLModel::Implementation::hasSupportedCPU()\n{\n    return cpu_supports_avx() != 0;\n}\n\nint LLModel::Implementation::cpuSupportsAVX2()\n{\n    return cpu_supports_avx2();\n}\n"
  },
  {
    "path": "gpt4all-backend/src/llmodel_c.cpp",
    "content": "#include \"llmodel_c.h\"\n\n#include \"llmodel.h\"\n\n#include <algorithm>\n#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <exception>\n#include <iostream>\n#include <memory>\n#include <optional>\n#include <string>\n#include <string_view>\n#include <vector>\n#include <span>\n\nnamespace ranges = std::ranges;\n\nstatic_assert(sizeof(token_t) == sizeof(LLModel::Token));\n\nstruct LLModelWrapper {\n    LLModel *llModel = nullptr;\n    ~LLModelWrapper() { delete llModel; }\n};\n\nllmodel_model llmodel_model_create(const char *model_path)\n{\n    const char *error;\n    auto fres = llmodel_model_create2(model_path, \"auto\", &error);\n    if (!fres) {\n        fprintf(stderr, \"Unable to instantiate model: %s\\n\", error);\n    }\n    return fres;\n}\n\nstatic void llmodel_set_error(const char **errptr, const char *message)\n{\n    thread_local static std::string last_error_message;\n    if (errptr) {\n        last_error_message = message;\n        *errptr = last_error_message.c_str();\n    }\n}\n\nllmodel_model llmodel_model_create2(const char *model_path, const char *backend, const char **error)\n{\n    LLModel *llModel;\n    try {\n        llModel = LLModel::Implementation::construct(model_path, backend);\n    } catch (const std::exception& e) {\n        llmodel_set_error(error, e.what());\n        return nullptr;\n    }\n\n    auto wrapper = new LLModelWrapper;\n    wrapper->llModel = llModel;\n    return wrapper;\n}\n\nvoid llmodel_model_destroy(llmodel_model model)\n{\n    delete static_cast<LLModelWrapper *>(model);\n}\n\nsize_t llmodel_required_mem(llmodel_model model, const char *model_path, int n_ctx, int ngl)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->requiredMem(model_path, n_ctx, ngl);\n}\n\nbool llmodel_loadModel(llmodel_model model, const char *model_path, int n_ctx, int ngl)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n\n    std::string 
modelPath(model_path);\n    if (wrapper->llModel->isModelBlacklisted(modelPath)) {\n        size_t slash = modelPath.find_last_of(\"/\\\\\");\n        auto basename = slash == std::string::npos ? modelPath : modelPath.substr(slash + 1);\n        std::cerr << \"warning: model '\" << basename << \"' is out-of-date, please check for an updated version\\n\";\n    }\n    return wrapper->llModel->loadModel(modelPath, n_ctx, ngl);\n}\n\nbool llmodel_isModelLoaded(llmodel_model model)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->isModelLoaded();\n}\n\nuint64_t llmodel_state_get_size(llmodel_model model)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->stateSize();\n}\n\nuint64_t llmodel_state_get_data(llmodel_model model, uint8_t *state_out, uint64_t state_size,\n                                token_t **input_tokens_out, uint64_t *n_input_tokens)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    std::vector<LLModel::Token> inputTokens;\n    auto bytesWritten = wrapper->llModel->saveState({state_out, size_t(state_size)}, inputTokens);\n    if (bytesWritten) {\n        auto *buf = new LLModel::Token[inputTokens.size()];\n        ranges::copy(inputTokens, buf);\n        *input_tokens_out = buf;\n        *n_input_tokens = uint64_t(inputTokens.size());\n    } else {\n        *input_tokens_out = nullptr;\n        *n_input_tokens = 0;\n    }\n    return bytesWritten;\n}\n\nvoid llmodel_state_free_input_tokens(LLModel::Token *input_tokens)\n{\n    delete[] input_tokens;\n}\n\nuint64_t llmodel_state_set_data(llmodel_model model, const uint8_t *state, uint64_t state_size,\n                                const token_t *input_tokens, uint64_t n_input_tokens)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->restoreState({state, size_t(state_size)}, {input_tokens, size_t(n_input_tokens)});\n}\n\nbool llmodel_prompt(llmodel_model          
     model,\n                    const char                 *prompt,\n                    llmodel_prompt_callback     prompt_callback,\n                    llmodel_response_callback   response_callback,\n                    llmodel_prompt_context     *ctx,\n                    const char                **error)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n\n    // Copy the C prompt context\n    LLModel::PromptContext promptContext {\n        .n_predict      = ctx->n_predict,\n        .top_k          = ctx->top_k,\n        .top_p          = ctx->top_p,\n        .min_p          = ctx->min_p,\n        .temp           = ctx->temp,\n        .n_batch        = ctx->n_batch,\n        .repeat_penalty = ctx->repeat_penalty,\n        .repeat_last_n  = ctx->repeat_last_n,\n        .contextErase   = ctx->context_erase,\n    };\n\n    auto prompt_func = [prompt_callback](std::span<const LLModel::Token> token_ids, bool cached) {\n        return prompt_callback(token_ids.data(), token_ids.size(), cached);\n    };\n    auto response_func = [response_callback](LLModel::Token token_id, std::string_view piece) {\n        return response_callback(token_id, piece.data());\n    };\n\n    // Call the C++ prompt method\n    try {\n        wrapper->llModel->prompt(prompt, prompt_func, response_func, promptContext);\n    } catch (std::exception const &e) {\n        llmodel_set_error(error, e.what());\n        return false;\n    }\n\n    return true;\n}\n\nfloat *llmodel_embed(\n    llmodel_model model, const char **texts, size_t *embedding_size, const char *prefix, int dimensionality,\n    size_t *token_count, bool do_mean, bool atlas, llmodel_emb_cancel_callback cancel_cb, const char **error\n) {\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n\n    if (!texts || !*texts) {\n        llmodel_set_error(error, \"'texts' is NULL or empty\");\n        return nullptr;\n    }\n\n    std::vector<std::string> textsVec;\n    while (*texts) { 
textsVec.emplace_back(*texts++); }\n\n    size_t embd_size;\n    float *embedding;\n\n    try {\n        embd_size = wrapper->llModel->embeddingSize();\n        if (dimensionality > 0 && dimensionality < int(embd_size))\n            embd_size = dimensionality;\n\n        embd_size *= textsVec.size();\n\n        std::optional<std::string> prefixStr;\n        if (prefix) { prefixStr = prefix; }\n\n        embedding = new float[embd_size];\n        wrapper->llModel->embed(textsVec, embedding, prefixStr, dimensionality, token_count, do_mean, atlas, cancel_cb);\n    } catch (std::exception const &e) {\n        llmodel_set_error(error, e.what());\n        return nullptr;\n    }\n\n    *embedding_size = embd_size;\n    return embedding;\n}\n\nvoid llmodel_free_embedding(float *ptr)\n{\n    delete[] ptr;\n}\n\nvoid llmodel_setThreadCount(llmodel_model model, int32_t n_threads)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    wrapper->llModel->setThreadCount(n_threads);\n}\n\nint32_t llmodel_threadCount(llmodel_model model)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->threadCount();\n}\n\nvoid llmodel_set_implementation_search_path(const char *path)\n{\n    LLModel::Implementation::setImplementationsSearchPath(path);\n}\n\nconst char *llmodel_get_implementation_search_path()\n{\n    return LLModel::Implementation::implementationsSearchPath().c_str();\n}\n\n// RAII wrapper around a C-style struct\nstruct llmodel_gpu_device_cpp: llmodel_gpu_device {\n    llmodel_gpu_device_cpp() = default;\n\n    llmodel_gpu_device_cpp(const llmodel_gpu_device_cpp  &) = delete;\n    llmodel_gpu_device_cpp(      llmodel_gpu_device_cpp &&) = delete;\n\n    const llmodel_gpu_device_cpp &operator=(const llmodel_gpu_device_cpp  &) = delete;\n          llmodel_gpu_device_cpp &operator=(      llmodel_gpu_device_cpp &&) = delete;\n\n    ~llmodel_gpu_device_cpp() {\n        free(const_cast<char *>(name));\n        free(const_cast<char 
*>(vendor));\n    }\n};\n\nstatic_assert(sizeof(llmodel_gpu_device_cpp) == sizeof(llmodel_gpu_device));\n\nstruct llmodel_gpu_device *llmodel_available_gpu_devices(size_t memoryRequired, int *num_devices)\n{\n    static thread_local std::unique_ptr<llmodel_gpu_device_cpp[]> c_devices;\n\n    auto devices = LLModel::Implementation::availableGPUDevices(memoryRequired);\n    *num_devices = devices.size();\n\n    if (devices.empty()) { return nullptr; /* no devices */ }\n\n    c_devices = std::make_unique<llmodel_gpu_device_cpp[]>(devices.size());\n    for (unsigned i = 0; i < devices.size(); i++) {\n        const auto &dev  =   devices[i];\n              auto &cdev = c_devices[i];\n        cdev.backend  = dev.backend;\n        cdev.index    = dev.index;\n        cdev.type     = dev.type;\n        cdev.heapSize = dev.heapSize;\n        cdev.name     = strdup(dev.name.c_str());\n        cdev.vendor   = strdup(dev.vendor.c_str());\n    }\n\n    return c_devices.get();\n}\n\nbool llmodel_gpu_init_gpu_device_by_string(llmodel_model model, size_t memoryRequired, const char *device)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->initializeGPUDevice(memoryRequired, std::string(device));\n}\n\nbool llmodel_gpu_init_gpu_device_by_struct(llmodel_model model, const llmodel_gpu_device *device)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->initializeGPUDevice(device->index);\n}\n\nbool llmodel_gpu_init_gpu_device_by_int(llmodel_model model, int device)\n{\n    auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->initializeGPUDevice(device);\n}\n\nconst char *llmodel_model_backend_name(llmodel_model model)\n{\n    const auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return wrapper->llModel->backendName();\n}\n\nconst char *llmodel_model_gpu_device_name(llmodel_model model)\n{\n    const auto *wrapper = static_cast<LLModelWrapper *>(model);\n    return 
wrapper->llModel->gpuDeviceName();\n}\n\nint32_t llmodel_count_prompt_tokens(llmodel_model model, const char *prompt, const char **error)\n{\n    auto *wrapper = static_cast<const LLModelWrapper *>(model);\n    try {\n        return wrapper->llModel->countPromptTokens(prompt);\n    } catch (const std::exception& e) {\n        llmodel_set_error(error, e.what());\n        return -1;\n    }\n}\n\nvoid llmodel_model_foreach_special_token(llmodel_model model, llmodel_special_token_callback callback)\n{\n    auto *wrapper = static_cast<const LLModelWrapper *>(model);\n    for (auto &[name, token] : wrapper->llModel->specialTokens())\n        callback(name.c_str(), token.c_str());\n}\n"
  },
  {
    "path": "gpt4all-backend/src/llmodel_shared.cpp",
    "content": "#include \"llmodel.h\"\n\n#include <algorithm>\n#include <cassert>\n#include <cstddef>\n#include <cstdint>\n#include <iostream>\n#include <iterator>\n#include <optional>\n#include <ranges>\n#include <stdexcept>\n#include <string>\n#include <string_view>\n#include <vector>\n\nnamespace ranges = std::ranges;\nnamespace views  = std::ranges::views;\n\nvoid LLModel::prompt(\n    std::string_view        prompt,\n    const PromptCallback   &promptCallback,\n    const ResponseCallback &responseCallback,\n    const PromptContext    &promptCtx\n) {\n    if (!isModelLoaded())\n        throw std::invalid_argument(\"Attempted to prompt an unloaded model.\");\n    if (!supportsCompletion())\n        throw std::invalid_argument(\"Not a text completion model.\");\n    if (!promptCtx.n_batch)\n        throw std::invalid_argument(\"Batch size cannot be zero.\");\n    if (!promptCtx.n_predict)\n        return; // nothing requested\n\n    auto embd_inp = tokenize(prompt);\n    if (embd_inp.empty())\n        throw std::invalid_argument(\"Prompt tokenized to zero tokens.\");\n\n    if (auto res = decodePrompt(promptCallback, promptCtx, std::move(embd_inp)))\n        generateResponse(responseCallback, promptCtx, /*n_past*/ *res);\n}\n\nint32_t LLModel::countPromptTokens(std::string_view prompt) const\n{\n    if (!isModelLoaded())\n        throw std::invalid_argument(\"Attempted to tokenize with an unloaded model.\");\n    return int32_t(tokenize(prompt).size());\n}\n\nauto LLModel::decodePrompt(\n    const PromptCallback &promptCallback,\n    const PromptContext  &promptCtx,\n    std::vector<Token>    embd_inp\n) -> std::optional<int32_t>\n{\n    assert(!embd_inp.empty());\n\n    int32_t nCtx = contextLength();\n    int32_t n_batch = std::min(promptCtx.n_batch, LLMODEL_MAX_PROMPT_BATCH);\n\n    // Find the greatest n_past where the beginning of embd_inp matches the end of the token cache, starting at the\n    // requested n_past.\n    // This is used to skip unnecessary 
work when the prompt shares a common prefix with the previous result.\n    int32_t nPast = computeModelInputPosition(embd_inp);\n\n    // always decode up to a full batch before generating, even if cached\n    nPast -= std::min(n_batch, nPast);\n\n    // TODO(jared): generalize this to find the smallest new_embd_inp.size() - nPast given the cache\n    if (!nPast && int32_t(embd_inp.size()) > nCtx) {\n        // no cache hit -> shift the input before even processing\n\n        int32_t nKeep     = shouldAddBOS();\n        auto    newLength = int32_t(nCtx * (1.f - promptCtx.contextErase));\n        int32_t nDiscard  = int32_t(embd_inp.size()) - std::max(1, std::min(nCtx, newLength));\n\n        // execute the callback even for skipped tokens. this misrepresents the position of BOS but we don't care\n        auto discardedTokens = embd_inp | views::drop(nKeep) | views::take(nDiscard);\n        if (!promptCallback(discardedTokens, true))\n            return std::nullopt;\n\n        // erase nDiscard tokens\n        embd_inp.erase(discardedTokens.begin(), discardedTokens.end());\n        assert(int32_t(embd_inp.size()) <= nCtx);\n\n        // check the cache again, just in case\n        nPast = computeModelInputPosition(embd_inp);\n        nPast -= std::min(n_batch, nPast);\n    }\n\n    setModelInputPosition(nPast);\n\n    // execute the callback even for skipped tokens\n    if (!promptCallback(embd_inp | views::take(nPast), true))\n        return std::nullopt;\n\n    // process the prompt in batches\n    for (int32_t i = nPast; i < embd_inp.size();) {\n        auto batch_end = std::min(i + n_batch, int32_t(embd_inp.size()));\n        std::span batch(embd_inp.begin() + i, embd_inp.begin() + batch_end);\n\n        // Check if the context has run out...\n        if (nPast + int32_t(batch.size()) > nCtx) {\n            shiftContext(promptCtx, &nPast);\n            assert(nPast + int32_t(batch.size()) <= nCtx);\n        }\n\n        // FIXME(Adam): We should find a way to 
bubble these strings to the UI level to allow for translation\n        if (!evalTokens(nPast, batch))\n            throw std::runtime_error(\"An internal error was encountered during prompt processing.\");\n\n        for (auto &tok : batch) {\n            appendInputToken(tok);\n            nPast++;\n            if (!promptCallback({ &tok, 1 }, false))\n                return std::nullopt;\n        }\n        i = batch_end;\n    }\n\n    return nPast;\n}\n\n/*\n * If string s overlaps with the string key such that some prefix of the key is at the end\n * of the string, return the position in s where the first match starts. Otherwise, return\n * std::string::npos. Examples:\n * s = \"bfo\",  key = \"foo\" -> 1\n * s = \"fooa\", key = \"foo\" -> npos\n */\nstatic std::string::size_type stringsOverlap(const std::string &s, const std::string &key)\n{\n    if (s.empty() || key.empty())\n        throw std::invalid_argument(\"arguments to stringsOverlap must not be empty\");\n\n    for (int start = std::max(0, int(s.size()) - int(key.size())); start < s.size(); start++) {\n        if (s.compare(start, s.size(), key, 0, s.size() - start) == 0)\n            return start;\n    }\n    return std::string::npos;\n}\n\nvoid LLModel::generateResponse(\n    const ResponseCallback &responseCallback,\n    const PromptContext    &promptCtx,\n    int32_t                 nPast\n) {\n    static const char *stopSequences[] {\n        \"### System\", \"### Instruction\", \"### Human\", \"### User\", \"### Response\", \"### Assistant\", \"### Context\",\n        \"<|im_start|>\", \"<|im_end|>\", \"<|endoftext|>\",\n    };\n\n    initSampler(promptCtx);\n\n    std::string cachedResponse;\n    std::vector<Token> cachedTokens;\n    int n_predicted = 0;\n\n    // Predict next tokens\n    for (bool stop = false; !stop;) {\n        // Sample next token\n        std::optional<Token> new_tok = sampleToken();\n        std::string new_piece = tokenToString(new_tok.value());\n        
cachedTokens.push_back(new_tok.value());\n        cachedResponse += new_piece;\n\n        auto accept = [this, &promptCtx, &new_tok, &nPast] {\n            // Shift context if out of space\n            if (nPast >= contextLength()) {\n                shiftContext(promptCtx, &nPast);\n                assert(nPast < contextLength());\n            }\n\n            // Accept the token\n            Token tok = std::exchange(new_tok, std::nullopt).value();\n            if (!evalTokens(nPast, { &tok, 1 }))\n                throw std::runtime_error(\"An internal error was encountered during response generation.\");\n\n            appendInputToken(tok);\n            nPast++;\n        };\n\n        // Check for EOS\n        auto lengthLimit = std::string::npos;\n        for (const auto token : endTokens()) {\n            if (new_tok == token) {\n                stop = true;\n                lengthLimit = cachedResponse.size() - new_piece.size();\n            }\n        }\n\n        if (lengthLimit != std::string::npos) {\n            // EOS matched\n        } else if (!isSpecialToken(new_tok.value())) {\n            // Check if the response contains a stop sequence\n            for (const auto &p : stopSequences) {\n                auto match = cachedResponse.find(p);\n                if (match != std::string::npos) stop = true;\n                lengthLimit = std::min(lengthLimit, match);\n                if (match == 0) break;\n            }\n\n            // Check if the response matches the start of a stop sequence\n            if (lengthLimit == std::string::npos) {\n                for (const auto &p : stopSequences) {\n                    auto match = stringsOverlap(cachedResponse, p);\n                    lengthLimit = std::min(lengthLimit, match);\n                    if (match == 0) break;\n                }\n            }\n        } else if (ranges::find(stopSequences, new_piece) < std::end(stopSequences)) {\n            // Special tokens must exactly match a stop 
sequence\n            stop = true;\n            lengthLimit = cachedResponse.size() - new_piece.size();\n        }\n\n        // Empty the cache, up to the length limit\n        std::string::size_type responseLength = 0;\n        while (!cachedTokens.empty()) {\n            Token tok = cachedTokens.front();\n            std::string piece = tokenToString(tok);\n\n            // Stop if the piece (or part of it) does not fit within the length limit\n            if (responseLength + (stop ? 1 : piece.size()) > lengthLimit)\n                break;\n\n            // Remove token from cache\n            assert(cachedResponse.starts_with(piece));\n            cachedTokens.erase(cachedTokens.begin(), cachedTokens.begin() + 1);\n            cachedResponse.erase(cachedResponse.begin(), cachedResponse.begin() + piece.size());\n\n            // Accept the token, if needed (not cached)\n            if (cachedTokens.empty() && new_tok)\n                accept();\n\n            // Send the token\n            if (!responseCallback(tok, piece) || ++n_predicted >= promptCtx.n_predict) {\n                stop = true;\n                break;\n            }\n\n            // FIXME(jared): we could avoid printing partial stop sequences if we didn't have to\n            // output token IDs and could cache a partial token for the next prompt call\n            responseLength += piece.size();\n        }\n        assert(cachedTokens.empty() == cachedResponse.empty());\n\n        // Accept the token, if needed (in cache)\n        if (new_tok) {\n            assert(!cachedTokens.empty() && cachedTokens.back() == new_tok);\n            if (stop) {\n                cachedTokens.pop_back();\n            } else {\n                accept();\n            }\n        }\n    }\n\n    if (inputLength() < cachedTokens.size()) {\n        /* This is theoretically possible if the longest stop sequence is greater than\n         * n_ctx * contextErase tokens. 
*/\n        throw std::runtime_error(\"shifted too much context, can't go back\");\n    }\n\n#ifndef NDEBUG\n    auto inp = inputTokens();\n    auto discard_start = inp.end() - cachedTokens.size();\n    assert(std::equal(discard_start, inp.end(), cachedTokens.begin()));\n#endif\n}\n\nvoid LLModel::embed(\n    const std::vector<std::string> &texts, float *embeddings, std::optional<std::string> prefix, int dimensionality,\n    size_t *tokenCount, bool doMean, bool atlas, EmbedCancelCallback *cancelCb\n) {\n    (void)texts;\n    (void)embeddings;\n    (void)prefix;\n    (void)dimensionality;\n    (void)tokenCount;\n    (void)doMean;\n    (void)atlas;\n    (void)cancelCb;\n    throw std::logic_error(std::string(implementation().modelType()) + \" does not support embeddings\");\n}\n\nvoid LLModel::embed(\n    const std::vector<std::string> &texts, float *embeddings, bool isRetrieval, int dimensionality, size_t *tokenCount,\n    bool doMean, bool atlas\n) {\n    (void)texts;\n    (void)embeddings;\n    (void)isRetrieval;\n    (void)dimensionality;\n    (void)tokenCount;\n    (void)doMean;\n    (void)atlas;\n    throw std::logic_error(std::string(implementation().modelType()) + \" does not support embeddings\");\n}\n"
  },
  {
    "path": "gpt4all-backend/src/utils.h",
    "content": "#pragma once\n\n#include <cassert>\n\n#ifdef NDEBUG\n#   ifdef __has_builtin\n#       if __has_builtin(__builtin_unreachable)\n#           define UNREACHABLE() __builtin_unreachable()\n#       else\n#           define UNREACHABLE() do {} while (0)\n#       endif\n#   else\n#       define UNREACHABLE() do {} while (0)\n#   endif\n#else\n#   define UNREACHABLE() assert(!\"Unreachable statement was reached\")\n#endif\n"
  },
  {
    "path": "gpt4all-bindings/README.md",
    "content": "# GPT4All Language Bindings\nThese are the language bindings for the GPT4All backend. They provide functionality to load GPT4All models (and other llama.cpp models), generate text, and (in the case of the Python bindings) embed text as a vector representation.\n\nSee their respective folders for language-specific documentation.\n\n### Languages\n- [Python](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python) (Nomic official, maintained by [@cebtenzzre](https://github.com/cebtenzzre))\n- [Node.js/Typescript](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/typescript) (community, maintained by [@jacoobes](https://github.com/jacoobes) and [@iimez](https://github.com/iimez))\n\n<br/>\n<br/>\n\n<details><summary><b>Archived Bindings</b></summary>\n<br/>\n\nThe following bindings have been removed from this repository due to lack of maintenance. If adopted, they can be brought back&mdash;feel free to message a developer on Discord if you are interested in maintaining one of them. Below are links to their last available version (not necessarily the last working version).\n- C#: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/csharp)\n- Java: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/java)\n- Go: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/golang)\n\n</details>\n"
  },
  {
    "path": "gpt4all-bindings/cli/README.md",
    "content": "# GPT4All Command-Line Interface (CLI)\r\n\r\nGPT4All on the command-line.\r\n\r\nMore details on the [wiki](https://github.com/nomic-ai/gpt4all/wiki/Python-CLI).\r\n\r\n## Quickstart\r\n\r\nThe CLI is based on the `gpt4all` Python bindings and the `typer` package.\r\n\r\nThe following shows one way to get started with the CLI, the documentation has more information.\r\nTypically, you will want to replace `python` with `python3` on _Unix-like_ systems and `py -3` on\r\n_Windows_. Also, it's assumed you have all the necessary Python components already installed.\r\n\r\nThe CLI is a self-contained Python script named [app.py] ([download][app.py-download]). As long as\r\nits package dependencies are present, you can download and run it from wherever you like.\r\n\r\n[app.py]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-bindings/cli/app.py\r\n[app.py-download]: https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-bindings/cli/app.py\r\n\r\n```shell\r\n# optional but recommended: create and use a virtual environment\r\npython -m venv gpt4all-cli\r\n```\r\n_Windows_ and _Unix-like_ systems differ slightly in how you activate a _virtual environment_:\r\n- _Unix-like_, typically: `. gpt4all-cli/bin/activate`\r\n- _Windows_: `gpt4all-cli\\Scripts\\activate`\r\n\r\nThen:\r\n```shell\r\n# pip-install the necessary packages; omit '--user' if using a virtual environment\r\npython -m pip install --user --upgrade gpt4all typer\r\n# run the CLI\r\npython app.py repl\r\n```\r\nBy default, it will automatically download the `Mistral Instruct` model to `.cache/gpt4all/` in your\r\nuser directory, if necessary.\r\n\r\nIf you have already saved a model beforehand, specify its path with the `-m`/`--model` argument,\r\nfor example:\r\n```shell\r\npython app.py repl --model /home/user/my-gpt4all-models/mistral-7b-instruct-v0.1.Q4_0.gguf\r\n```\r\n"
  },
  {
    "path": "gpt4all-bindings/cli/app.py",
    "content": "#!/usr/bin/env python3\n\"\"\"GPT4All CLI\n\nThe GPT4All CLI is a self-contained script based on the `gpt4all` and `typer` packages. It offers a\nREPL to communicate with a language model similar to the chat GUI application, but more basic.\n\"\"\"\n\nimport importlib.metadata\nimport io\nimport sys\nfrom collections import namedtuple\nfrom typing_extensions import Annotated\n\nimport typer\nfrom gpt4all import GPT4All\n\n\nMESSAGES = [\n    {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n    {\"role\": \"user\", \"content\": \"Hello there.\"},\n    {\"role\": \"assistant\", \"content\": \"Hi, how can I help you?\"},\n]\n\nSPECIAL_COMMANDS = {\n    \"/reset\": lambda messages: messages.clear(),\n    \"/exit\": lambda _: sys.exit(),\n    \"/clear\": lambda _: print(\"\\n\" * 100),\n    \"/help\": lambda _: print(\"Special commands: /reset, /exit, /help and /clear\"),\n}\n\nVersionInfo = namedtuple('VersionInfo', ['major', 'minor', 'micro'])\nVERSION_INFO = VersionInfo(1, 0, 2)\nVERSION = '.'.join(map(str, VERSION_INFO))  # convert to string form, like: '1.2.3'\n\nCLI_START_MESSAGE = f\"\"\"\n    \n ██████  ██████  ████████ ██   ██  █████  ██      ██      \n██       ██   ██    ██    ██   ██ ██   ██ ██      ██      \n██   ███ ██████     ██    ███████ ███████ ██      ██      \n██    ██ ██         ██         ██ ██   ██ ██      ██      \n ██████  ██         ██         ██ ██   ██ ███████ ███████ \n                                                          \n\nWelcome to the GPT4All CLI! 
Version {VERSION}\nType /help for special commands.\n                                                    \n\"\"\"\n\n# create typer app\napp = typer.Typer()\n\n@app.command()\ndef repl(\n    model: Annotated[\n        str,\n        typer.Option(\"--model\", \"-m\", help=\"Model to use for chatbot\"),\n    ] = \"mistral-7b-instruct-v0.1.Q4_0.gguf\",\n    n_threads: Annotated[\n        int,\n        typer.Option(\"--n-threads\", \"-t\", help=\"Number of threads to use for chatbot\"),\n    ] = None,\n    device: Annotated[\n        str,\n        typer.Option(\"--device\", \"-d\", help=\"Device to use for chatbot, e.g. gpu, amd, nvidia, intel. Defaults to CPU.\"),\n    ] = None,\n):\n    \"\"\"The CLI read-eval-print loop.\"\"\"\n    gpt4all_instance = GPT4All(model, device=device)\n\n    # if threads are passed, set them\n    if n_threads is not None:\n        num_threads = gpt4all_instance.model.thread_count()\n        print(f\"\\nAdjusted: {num_threads} →\", end=\"\")\n\n        # set number of threads\n        gpt4all_instance.model.set_thread_count(n_threads)\n\n        num_threads = gpt4all_instance.model.thread_count()\n        print(f\" {num_threads} threads\", end=\"\", flush=True)\n    else:\n        print(f\"\\nUsing {gpt4all_instance.model.thread_count()} threads\", end=\"\")\n\n    print(CLI_START_MESSAGE)\n\n    use_new_loop = False\n    try:\n        version = importlib.metadata.version('gpt4all')\n        version_major = int(version.split('.')[0])\n        if version_major >= 1:\n            use_new_loop = True\n    except:\n        pass  # fall back to old loop\n    if use_new_loop:\n        _new_loop(gpt4all_instance)\n    else:\n        _old_loop(gpt4all_instance)\n\n\ndef _old_loop(gpt4all_instance):\n    while True:\n        message = input(\" ⇢  \")\n\n        # Check if special command and take action\n        if message in SPECIAL_COMMANDS:\n            SPECIAL_COMMANDS[message](MESSAGES)\n            continue\n\n        # if regular message, 
append to messages\n        MESSAGES.append({\"role\": \"user\", \"content\": message})\n\n        # execute chat completion and ignore the full response since \n        # we are outputting it incrementally\n        full_response = gpt4all_instance.chat_completion(\n            MESSAGES,\n            # preferential kwargs for chat ux\n            n_past=0,\n            n_predict=200,\n            top_k=40,\n            top_p=0.9,\n            min_p=0.0,\n            temp=0.9,\n            n_batch=9,\n            repeat_penalty=1.1,\n            repeat_last_n=64,\n            context_erase=0.0,\n            # required kwargs for cli ux (incremental response)\n            verbose=False,\n            streaming=True,\n        )\n        # record assistant's response to messages\n        MESSAGES.append(full_response.get(\"choices\")[0].get(\"message\"))\n        print() # newline before next prompt\n\n\ndef _new_loop(gpt4all_instance):\n    with gpt4all_instance.chat_session():\n        while True:\n            message = input(\" ⇢  \")\n\n            # Check if special command and take action\n            if message in SPECIAL_COMMANDS:\n                SPECIAL_COMMANDS[message](MESSAGES)\n                continue\n\n            # if regular message, append to messages\n            MESSAGES.append({\"role\": \"user\", \"content\": message})\n\n            # execute chat completion and ignore the full response since \n            # we are outputting it incrementally\n            response_generator = gpt4all_instance.generate(\n                message,\n                # preferential kwargs for chat ux\n                max_tokens=200,\n                temp=0.9,\n                top_k=40,\n                top_p=0.9,\n                min_p=0.0,\n                repeat_penalty=1.1,\n                repeat_last_n=64,\n                n_batch=9,\n                # required kwargs for cli ux (incremental response)\n                streaming=True,\n            )\n            
response = io.StringIO()\n            for token in response_generator:\n                print(token, end='', flush=True)\n                response.write(token)\n\n            # record assistant's response to messages\n            response_message = {'role': 'assistant', 'content': response.getvalue()}\n            response.close()\n            gpt4all_instance.current_chat_session.append(response_message)\n            MESSAGES.append(response_message)\n            print() # newline before next prompt\n\n\n@app.command()\ndef version():\n    \"\"\"The CLI version command.\"\"\"\n    print(f\"gpt4all-cli v{VERSION}\")\n\n\nif __name__ == \"__main__\":\n    app()\n"
  },
  {
    "path": "gpt4all-bindings/cli/developer_notes.md",
    "content": "# Developing the CLI\r\n## Documentation\r\nDocumentation can be found in three places:\r\n- `app.py` docstrings & comments\r\n- a Readme: `gpt4all-bindings/cli/README.md`\r\n- the actual CLI documentation: `gpt4all-bindings/python/docs/gpt4all_cli.md`\r\n\r\nThe _docstrings_ are meant for programmatic use. Since the CLI is primarily geared towards users and\r\nnot to build on top, they're kept terse.\r\n\r\nThe _Readme_ is mostly meant for users and includes:\r\n- a link to the _CLI documentation_ (on the [website])\r\n- a Quickstart section with some guidance on how to get started with a sane setup\r\n\r\nThe _CLI documentation_ and other documentation are located in the above mentioned `docs/` folder.\r\nThey're in Markdown format and built for the [website]. Of the three, they should be the most\r\ndetailed.\r\n\r\n[website]: https://docs.gpt4all.io/gpt4all_cli.html\r\n\r\n\r\n## Versioning\r\nThe version number should now follow the `gpt4all` PyPI package, so compatibility is more clear.\r\n\r\nThe one place to change it is the `namedtuple` called `VERSION_INFO`.\r\n"
  },
  {
    "path": "gpt4all-bindings/python/.gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# poetry\n#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\n#   commonly ignored for libraries.\n#   
https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\n#poetry.lock\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/#use-with-ide\n.pdm.toml\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n# PyCharm\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n#.idea/\n\n# Cython\n/*.c\n*DO_NOT_MODIFY/"
  },
  {
    "path": "gpt4all-bindings/python/.isort.cfg",
    "content": "[settings]\nknown_third_party=geopy,nltk,np,numpy,pandas,pysbd,fire,torch\n\nline_length=120\ninclude_trailing_comma=True\nmulti_line_output=3\nuse_parentheses=True"
  },
  {
    "path": "gpt4all-bindings/python/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).\n\n## [Unreleased]\n\n### Added\n- Warn on Windows if the Microsoft Visual C++ runtime libraries are not found ([#2920](https://github.com/nomic-ai/gpt4all/pull/2920))\n- Basic cache for faster prefill when the input shares a prefix with previous context ([#3073](https://github.com/nomic-ai/gpt4all/pull/3073))\n- Add ability to modify or replace the history of an active chat session ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n\n### Changed\n- Rebase llama.cpp on latest upstream as of September 26th ([#2998](https://github.com/nomic-ai/gpt4all/pull/2998))\n- Change the error message when a message is too long ([#3004](https://github.com/nomic-ai/gpt4all/pull/3004))\n- Fix CalledProcessError on Intel Macs since v2.8.0 ([#3045](https://github.com/nomic-ai/gpt4all/pull/3045))\n- Use Jinja for chat templates instead of per-message QString.arg-style templates ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n\n## [2.8.2] - 2024-08-14\n\n### Fixed\n- Fixed incompatibility with Python 3.8 since v2.7.0 and Python <=3.11 since v2.8.1 ([#2871](https://github.com/nomic-ai/gpt4all/pull/2871))\n\n## [2.8.1] - 2024-08-13\n\n### Added\n- Use greedy sampling when temperature is set to zero ([#2854](https://github.com/nomic-ai/gpt4all/pull/2854))\n\n### Changed\n- Search for pip-installed CUDA 11 as well as CUDA 12 ([#2802](https://github.com/nomic-ai/gpt4all/pull/2802))\n- Stop shipping CUBINs to reduce wheel size ([#2802](https://github.com/nomic-ai/gpt4all/pull/2802))\n- Use llama\\_kv\\_cache ops to shift context faster ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))\n- Don't stop generating at end of context ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))\n\n### Fixed\n- Make reverse prompt detection work more reliably and prevent it from breaking output 
([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))\n- Explicitly target macOS 12.6 in CI to fix Metal compatibility on older macOS ([#2849](https://github.com/nomic-ai/gpt4all/pull/2849))\n- Do not initialize Vulkan driver when only using CPU ([#2843](https://github.com/nomic-ai/gpt4all/pull/2843))\n- Fix a segfault on exit when using CPU mode on Linux with NVIDIA and EGL ([#2843](https://github.com/nomic-ai/gpt4all/pull/2843))\n\n## [2.8.0] - 2024-08-05\n\n### Added\n- Support GPT-NeoX, Gemma 2, OpenELM, ChatGLM, and Jais architectures (all with Vulkan support) ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))\n- Enable Vulkan support for StarCoder2, XVERSE, Command R, and OLMo ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))\n- Support DeepSeek-V2 architecture (no Vulkan support) ([#2702](https://github.com/nomic-ai/gpt4all/pull/2702))\n- Add Llama 3.1 8B Instruct to models3.json (by [@3Simplex](https://github.com/3Simplex) in [#2731](https://github.com/nomic-ai/gpt4all/pull/2731) and [#2732](https://github.com/nomic-ai/gpt4all/pull/2732))\n- Support Llama 3.1 RoPE scaling ([#2758](https://github.com/nomic-ai/gpt4all/pull/2758))\n- Add Qwen2-1.5B-Instruct to models3.json (by [@ThiloteE](https://github.com/ThiloteE) in [#2759](https://github.com/nomic-ai/gpt4all/pull/2759))\n- Detect use of a Python interpreter under Rosetta for a clearer error message ([#2793](https://github.com/nomic-ai/gpt4all/pull/2793))\n\n### Changed\n- Build against CUDA 11.8 instead of CUDA 12 for better compatibility with older drivers ([#2639](https://github.com/nomic-ai/gpt4all/pull/2639))\n- Update llama.cpp to commit 87e397d00 from July 19th ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))\n\n### Removed\n- Remove unused internal llmodel\\_has\\_gpu\\_device ([#2409](https://github.com/nomic-ai/gpt4all/pull/2409))\n- Remove support for GPT-J models ([#2676](https://github.com/nomic-ai/gpt4all/pull/2676), 
[#2693](https://github.com/nomic-ai/gpt4all/pull/2693))\n\n### Fixed\n- Fix debug mode crash on Windows and undefined behavior in LLamaModel::embedInternal ([#2467](https://github.com/nomic-ai/gpt4all/pull/2467))\n- Fix CUDA PTX errors with some GPT4All builds ([#2421](https://github.com/nomic-ai/gpt4all/pull/2421))\n- Fix mishandling of inputs greater than n\\_ctx tokens after [#1970](https://github.com/nomic-ai/gpt4all/pull/1970) ([#2498](https://github.com/nomic-ai/gpt4all/pull/2498))\n- Fix crash when Kompute falls back to CPU ([#2640](https://github.com/nomic-ai/gpt4all/pull/2640))\n- Fix several Kompute resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))\n- Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701))\n- Fix several backend issues ([#2778](https://github.com/nomic-ai/gpt4all/pull/2778))\n  - Restore leading space removal logic that was incorrectly removed in [#2694](https://github.com/nomic-ai/gpt4all/pull/2694)\n  - CUDA: Cherry-pick llama.cpp DMMV cols requirement fix that caused a crash with long conversations since [#2694](https://github.com/nomic-ai/gpt4all/pull/2694)\n\n[Unreleased]: https://github.com/nomic-ai/gpt4all/compare/python-v2.8.2...HEAD\n[2.8.2]: https://github.com/nomic-ai/gpt4all/compare/python-v2.8.1...python-v2.8.2\n[2.8.1]: https://github.com/nomic-ai/gpt4all/compare/python-v2.8.0...python-v2.8.1\n[2.8.0]: https://github.com/nomic-ai/gpt4all/compare/python-v2.7.0...python-v2.8.0\n"
  },
  {
    "path": "gpt4all-bindings/python/LICENSE.txt",
    "content": "Copyright (c) 2023 Nomic, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE."
  },
  {
    "path": "gpt4all-bindings/python/MANIFEST.in",
    "content": "recursive-include gpt4all/llmodel_DO_NOT_MODIFY *"
  },
  {
    "path": "gpt4all-bindings/python/README.md",
    "content": "# Python GPT4All\n\nThis package contains a set of Python bindings around the `llmodel` C-API.\n\nPackage on PyPI: https://pypi.org/project/gpt4all/\n\n## Documentation\nhttps://docs.gpt4all.io/gpt4all_python.html\n\n## Installation\n\nThe easiest way to install the Python bindings for GPT4All is to use pip:\n\n```\npip install gpt4all\n```\n\nThis will download the latest version of the `gpt4all` package from PyPI.\n\n## Local Build\n\nAs an alternative to downloading via pip, you may build the Python bindings from source.\n\n### Prerequisites\n\nYou will need a compiler. On Windows, you should install Visual Studio with the C++ Development components. On macOS, you will need the full version of Xcode&mdash;Xcode Command Line Tools lacks certain required tools. On Linux, you will need a GCC or Clang toolchain with C++ support.\n\nOn Windows and Linux, building GPT4All with full GPU support requires the [Vulkan SDK](https://vulkan.lunarg.com/sdk/home) and the latest [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).\n\n### Building the python bindings\n\n1. Clone GPT4All and change directory:\n```\ngit clone --recurse-submodules https://github.com/nomic-ai/gpt4all.git\ncd gpt4all/gpt4all-backend\n```\n\n2. Build the backend.\n\nIf you are using Windows and have Visual Studio installed:\n```\ncmake -B build\ncmake --build build --parallel --config RelWithDebInfo\n```\n\nFor all other platforms:\n```\ncmake -B build -DCMAKE_BUILD_TYPE=RelWithDebInfo\ncmake --build build --parallel\n```\n\n`RelWithDebInfo` is a good default, but you can also use `Release` or `Debug` depending on the situation.\n\n3. Install the Python package:\n```\ncd ../gpt4all-bindings/python\npip install -e .\n```\n\n## Usage\n\nTest it out! 
In a Python script or console:\n\n```python\nfrom gpt4all import GPT4All\nmodel = GPT4All(\"orca-mini-3b-gguf2-q4_0.gguf\")\noutput = model.generate(\"The capital of France is \", max_tokens=3)\nprint(output)\n```\n\n\nGPU Usage\n```python\nfrom gpt4all import GPT4All\nmodel = GPT4All(\"orca-mini-3b-gguf2-q4_0.gguf\", device='gpu') # device='amd', device='intel'\noutput = model.generate(\"The capital of France is \", max_tokens=3)\nprint(output)\n```\n\n## Troubleshooting a Local Build\n- If you're on Windows and have compiled with a MinGW toolchain, you might run into an error like:\n  ```\n  FileNotFoundError: Could not find module '<...>\\gpt4all-bindings\\python\\gpt4all\\llmodel_DO_NOT_MODIFY\\build\\libllmodel.dll'\n  (or one of its dependencies). Try using the full path with constructor syntax.\n  ```\n  The key phrase in this case is _\"or one of its dependencies\"_. The Python interpreter you're using\n  probably doesn't see the MinGW runtime dependencies. At the moment, the following three are required:\n  `libgcc_s_seh-1.dll`, `libstdc++-6.dll` and `libwinpthread-1.dll`. You should copy them from MinGW\n  into a folder where Python will see them, preferably next to `libllmodel.dll`.\n\n- Note regarding the Microsoft toolchain: Compiling with MSVC is possible, but not the official way to\n  go about it at the moment. MSVC doesn't produce DLLs with a `lib` prefix, which the bindings expect.\n  You'd have to amend that yourself.\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/css/custom.css",
    "content": ".md-content h1,\n.md-content h2 {\n  margin-top: 0.5em;\n  margin-bottom: 0.5em;\n}\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_api_server/home.md",
    "content": "# GPT4All API Server\n\nGPT4All provides a local API server that allows you to run LLMs over an HTTP API. \n\n## Key Features\n\n- **Local Execution**: Run models on your own hardware for privacy and offline use.\n- **LocalDocs Integration**: Run the API with relevant text snippets provided to your LLM from a [LocalDocs collection](../gpt4all_desktop/localdocs.md).\n- **OpenAI API Compatibility**: Use existing OpenAI-compatible clients and tools with your local models.\n\n## Activating the API Server\n\n1. Open the GPT4All Chat Desktop Application.\n2. Go to `Settings` > `Application` and scroll down to `Advanced`.\n3. Check the box for the `\"Enable Local API Server\"` setting.\n4. The server listens on port 4891 by default. You can choose another port number in the `\"API Server Port\"` setting.\n\n## Connecting to the API Server\n\nThe base URL used for the API server is `http://localhost:4891/v1` (or `http://localhost:<PORT_NUM>/v1` if you are using a different port number). \n\nThe server only accepts HTTP connections (not HTTPS) and only listens on localhost (127.0.0.1) (i.e. not the IPv6 localhost address `::1`).\n\n## Examples\n\n!!! 
note \"Example GPT4All API calls\"\n\n    === \"cURL\"\n\n        ```bash\n        curl -X POST http://localhost:4891/v1/chat/completions -d '{\n        \"model\": \"Phi-3 Mini Instruct\",\n        \"messages\": [{\"role\":\"user\",\"content\":\"Who is Lionel Messi?\"}],\n        \"max_tokens\": 50,\n        \"temperature\": 0.28\n        }'\n        ```\n\n    === \"PowerShell\"\n\n        ```powershell\n        Invoke-WebRequest -URI http://localhost:4891/v1/chat/completions -Method POST -ContentType application/json -Body '{\n        \"model\": \"Phi-3 Mini Instruct\",\n        \"messages\": [{\"role\":\"user\",\"content\":\"Who is Lionel Messi?\"}],\n        \"max_tokens\": 50,\n        \"temperature\": 0.28\n        }'\n        ```\n\n## API Endpoints\n\n| Method | Path | Description |\n|--------|------|-------------|\n| GET | `/v1/models` | List available models |\n| GET | `/v1/models/<name>` | Get details of a specific model |\n| POST | `/v1/completions` | Generate text completions |\n| POST | `/v1/chat/completions` | Generate chat completions |\n\n## LocalDocs Integration\n\nYou can use LocalDocs with the API server:\n\n1. Open the Chats view in the GPT4All application.\n2. Scroll to the bottom of the chat history sidebar.\n3. Select the server chat (it has a different background color).\n4. 
Activate LocalDocs collections in the right sidebar.\n\n(Note: LocalDocs can currently only be activated through the GPT4All UI, not via the API itself).\n\nNow, your API calls to your local LLM will have relevant references from your LocalDocs collection retrieved and placed in the input message for the LLM to respond to.\n\nThe references retrieved for your API call can be accessed in the API response object at \n\n`response[\"choices\"][0][\"references\"]`\n\nThe data included in the `references` are:\n\n- `text`: the actual text content from the snippet that was extracted from the reference document\n\n- `author`: the author of the reference document (if available)\n\n- `date`: the date of creation of the reference document (if available)\n\n- `page`: the page number the snippet is from (only available for PDF documents for now)\n\n- `title`: the title of the reference document (if available)\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/chat_templates.md",
    "content": "## What are chat templates?\nNatively, large language models only know how to complete plain text and do not know the difference between their input and their output. In order to support a chat with a person, LLMs are designed to use a template to convert the conversation to plain text using a specific format.\n\nFor a given model, it is important to use an appropriate chat template, as each model is designed to work best with a specific format. The chat templates included with the built-in models should be sufficient for most purposes.\n\nThere are two reasons you would want to alter the chat template:\n\n- You are sideloading a model and there is no chat template available,\n- You would like to have greater control over the input to the LLM than a system message provides.\n\n\n## What is a system message?\nA system message is a message that controls the responses from the LLM in a way that affects the entire conversation. System messages can be short, such as \"Speak like a pirate.\", or they can be long and contain a lot of context for the LLM to keep in mind.\n\nNot all models are designed to use a system message, so they work with some models better than others.\n\n\n## How do I customize the chat template or system message?\nTo customize the chat template or system message, go to Settings > Model. Make sure to select the correct model at the top. If you clone a model, you can use a different chat template or system message from the base model, enabling you to use different settings for each conversation.\n\nThese settings take effect immediately. After changing them, you can click \"Redo last response\" in the chat view, and the response will take the new settings into account.\n\n\n## Do I need to write a chat template?\nYou typically do not need to write your own chat template. The exception is models that are not in the official model list and do not come with a chat template built-in. 
These will show a \"Clear\" option above the chat template field in the Model Settings page instead of a \"Reset\" option. See the section on [finding] or [creating] a chat template.\n\n[finding]: #how-do-i-find-a-chat-template\n[creating]: #advanced-how-do-chat-templates-work\n\n\n## What changed in GPT4All v3.5?\nGPT4All v3.5 overhauled the chat template system. There are three crucial differences:\n\n- The chat template now formats an entire conversation instead of a single pair of messages,\n- The chat template now uses Jinja syntax instead of `%1` and `%2` placeholders,\n- And the system message should no longer contain control tokens or trailing whitespace.\n\nIf you are using any chat templates or system messages that had been added or altered from the default before upgrading to GPT4All v3.5 or newer, these will no longer work. See below for how to solve common errors you may see after upgrading.\n\n\n## Error/Warning: System message is not plain text.\nThis is easy to fix. Go to the model's settings and look at the system prompt. There are three things to look for:\n\n- Control tokens such as `<|im_start|>`, `<|start_header_id|>`, or `<|system|>`\n- A prefix such as `### System` or `SYSTEM:`\n- Trailing whitespace, such as a space character or blank line.\n\nIf you see any of these things, remove them. For example, this legacy system prompt:\n```\n<|start_header_id|>system<|end_header_id|>\nYou are a helpful assistant.<|eot_id|>\n```\n\nShould become this:\n```\nYou are a helpful assistant.\n```\n\nIf you do not see anything that needs to be changed, you can dismiss the error by making a minor modification to the message and then changing it back.\n\nIf you see a warning, your system message does not appear to be plain text. If you believe this warning is incorrect, it can be safely ignored. 
If in doubt, ask on the [Discord].\n\n[Discord]: https://discord.gg/mGZE39AS3e\n\n\n## Error: Legacy system prompt needs to be updated in Settings.\nThis is the same as [above][above-1], but appears on the chat page.\n\n[above-1]: #errorwarning-system-message-is-not-plain-text\n\n\n## Error/Warning: Chat template is not in Jinja format.\nThis is the result of attempting to use an old-style template (possibly from a previous version) in GPT4All 3.5+.\n\nGo to the Model Settings page and select the affected model. If you see a \"Reset\" button, and you have not intentionally modified the prompt template, you can click \"Reset\". Otherwise, this is what you can do:\n\n1. Back up your chat template by copying it safely to a text file and saving it. In the next step, it will be removed from GPT4All.\n2. Click \"Reset\" or \"Clear\".\n3. If you clicked \"Clear\", the chat template is now gone. Follow the steps to [find][finding] or [create][creating] a basic chat template for your model.\n4. Customize the chat template to suit your needs. For help, read the section about [creating] a chat template.\n\n\n## Error: Legacy prompt template needs to be updated in Settings.\nThis is the same as [above][above-2], but appears on the chat page.\n\n[above-2]: #errorwarning-chat-template-is-not-in-jinja-format\n\n\n## The chat template has a syntax error.\nIf there is a syntax error while editing the chat template, the details will be displayed in an error message above the input box. This could be because the chat template is not actually in Jinja format (see [above][above-2]).\n\nOtherwise, you have either typed something incorrectly, or the model comes with a template that is incompatible with GPT4All. See [the below section][creating] on creating chat templates and make sure that everything is correct. 
When in doubt, ask on the [Discord].\n\n\n## Error: No chat template configured.\nThis may appear for models that are not from the official model list and do not include a chat template. Older versions of GPT4All picked a poor default in this case. You will get much better results if you follow the steps to [find][finding] or [create][creating] a chat template for your model.\n\n\n## Error: The chat template cannot be blank.\nIf the button above the chat template on the Model Settings page says \"Clear\", see [above][above-3]. If you see \"Reset\", click that button to restore a reasonable default. Also see the section on [syntax errors][chat-syntax-error].\n\n[above-3]: #error-no-chat-template-configured\n[chat-syntax-error]: #the-chat-template-has-a-syntax-error\n\n\n## How do I find a chat template?\nWhen in doubt, you can always ask the [Discord] community for help. Below are the instructions to find one on your own.\n\nThe authoritative source for a model's chat template is the HuggingFace repo that the original (non-GGUF) model came from. First, you should find this page. If you just have a model file, you can try a google search for the model's name. If you know the page you downloaded the GGUF model from, its README usually links to the original non-GGUF model.\n\nOnce you have located the original model, there are two methods you can use to extract its chat template. Pick whichever one you are most comfortable with.\n\n### Using the CLI (all models)\n1. Install `jq` using your preferred package manager - e.g. Chocolatey (Windows), Homebrew (macOS), or apt (Ubuntu).\n2. Download `tokenizer_config.json` from the model's \"Files and versions\" tab.\n3. Open a command prompt in the directory which you have downloaded the model file.\n4. Run `jq -r \".chat_template\" tokenizer_config.json`. This shows the chat template in a human-readable form. You can copy this and paste it into the settings page.\n5. 
(Optional) You can save the output to a text file like this: `jq -r \".chat_template\" tokenizer_config.json >chat_template.txt`\n\nIf the output is \"null\", the model does not provide a chat template. See the [below instructions][creating] on creating a chat template.\n\n### Python (open models)\n1. Install `transformers` using your preferred python package manager, e.g. `pip install transformers`. Make sure it is at least version v4.43.0.\n2. Copy the ID of the HuggingFace model, using the clipboard icon next to the name. For example, if the URL is `https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B`, the ID is `NousResearch/Hermes-2-Pro-Llama-3-8B`.\n3. Open a python interpreter (`python`) and run the following commands. Change the model ID in the example to the one you copied.\n```\n>>> from transformers import AutoTokenizer\n>>> tokenizer = AutoTokenizer.from_pretrained('NousResearch/Hermes-2-Pro-Llama-3-8B')\n>>> print(tokenizer.get_chat_template())\n```\nYou can copy the output and paste it into the settings page.\n4. (Optional) You can save the output to a text file like this:\n```\n>>> open('chat_template.txt', 'w').write(tokenizer.get_chat_template())\n```\n\nIf you get a ValueError exception, this model does not provide a chat template. See the [below instructions][creating] on creating a chat template.\n\n\n### Python (gated models)\nSome models, such as Llama and Mistral, do not allow public access to their chat template. You must either use the CLI method above, or follow the following instructions to use Python:\n\n1. For these steps, you must have git and git-lfs installed.\n2. You must have a HuggingFace account and be logged in.\n3. You must already have access to the gated model. Otherwise, request access.\n4. You must have an SSH key configured for git access to HuggingFace.\n5. `git clone` the model's HuggingFace repo using the SSH clone URL. There is no need to download the entire model, which is very large. 
A good way to do this on Linux is:\n```console\n$ GIT_LFS_SKIP_SMUDGE=1 git clone hf.co:meta-llama/Llama-3.1-8B-Instruct.git\n$ cd Llama-3.1-8B-Instruct\n$ git lfs pull -I \"tokenizer.*\"\n```\n6. Follow the above instructions for open models, but replace the model ID with the path to the directory containing `tokenizer\\_config.json`:\n```\n>>> tokenizer = AutoTokenizer.from_pretrained('.')\n```\n\n\n## Advanced: How do chat templates work?\nThe chat template is applied to the entire conversation you see in the chat window. The template loops over the list of messages, each containing `role` and `content` fields. `role` is either `user`, `assistant`, or `system`.\n\nGPT4All also supports the special variables `bos_token`, `eos_token`, and `add_generation_prompt`. See the [HuggingFace docs] for what those do.\n\n[HuggingFace docs]: https://huggingface.co/docs/transformers/v4.46.3/en/chat_templating#special-variables\n\n\n## Advanced: How do I make a chat template?\nThe best way to create a chat template is to start by using an existing one as a reference. Then, modify it to use the format documented for the given model. Its README page may explicitly give an example of its template. Or, it may mention the name of a well-known standard template, such as ChatML, Alpaca, Vicuna. GPT4All does not yet include presets for these templates, so they will have to be found in other models or taken from the community.\n\nFor more information, see the very helpful [HuggingFace guide]. Some of this is not applicable, such as the information about tool calling and RAG - GPT4All implements those features differently.\n\nSome models use a prompt template that does not intuitively map to a multi-turn chat, because it is more intended for single instructions. 
The [FastChat] implementation of these templates is a useful reference for the correct way to extend them to multiple messages.\n\n[HuggingFace guide]: https://huggingface.co/docs/transformers/v4.46.3/en/chat_templating#advanced-template-writing-tips\n[FastChat]: https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py\n\n\n# Advanced: What are GPT4All v1 templates?\nGPT4All supports its own template syntax, which is nonstandard but provides complete control over the way LocalDocs sources and file attachments are inserted into the conversation. These templates begin with `{# gpt4all v1 #}` and look similar to the example below.\n\nFor standard templates, GPT4All combines the user message, sources, and attachments into the `content` field. For GPT4All v1 templates, this is not done, so they must be used directly in the template for those features to work correctly.\n\n```jinja\n{# gpt4all v1 #}\n{%- for message in messages %}\n    {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' }}\n    {%- if message['role'] == 'user' %}\n        {%- for source in message['sources'] %}\n            {%- if loop.first %}\n                {{- '### Context:\\n' }}\n            {%- endif %}\n            {{- 'Collection: ' + source['collection'] + '\\n'   +\n                'Path: '       + source['path']       + '\\n'   +\n                'Excerpt: '    + source['text']       + '\\n\\n' }}\n        {%- endfor %}\n    {%- endif %}\n    {%- for attachment in message['prompt_attachments'] %}\n        {{- attachment['processed_content'] + '\\n\\n' }}\n    {%- endfor %}\n    {{- message['content'] | trim }}\n    {{- '<|eot_id|>' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n```\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/chats.md",
    "content": "# Chats\n\nChats are conversations with [language models](models.md) that run locally on your device.\n\n!!! note \"Example Chats\"\n\n    === \"Llama 3\"\n        Download `Llama 3` and prompt:\n        ```\n        explain why the sky is blue in a way that is correct and makes sense to a child\n        ```\n\n        With the default sampling settings, you should see text resembling the following:\n\n        ```\n        What a great question!\n        So, you know how we can see different colors like red, yellow, green, and orange? Well, when sunlight enters Earth's atmosphere, it starts to interact with tiny particles called molecules of gases like nitrogen (N2) and oxygen (02). These molecules are really small, but they're all around us!\n        Now, here's the cool part: these molecules scatter, or bounce off, shorter wavelengths of light more than longer wavelengths. Think of it like a game of pool where the cue ball hits other balls with different sizes and speeds.\n        The shortest wavelength of visible light is violet (about 400 nanometers), followed by blue (around 450-495 nanometers). These shorter wavelengths are scattered in all directions, kind of like how a basketball player might bounce the ball around on the court. This scattering effect makes the sky appear blue to our eyes!\n        Here's why it doesn't look as bright or intense as a blue crayon: there are many more molecules in the atmosphere that scatter longer wavelengths (like red and orange) than shorter ones, so those colors get spread out and become less noticeable.\n        To sum it up:\n        1. Sunlight enters Earth's atmosphere.\n        2. Tiny gas molecule particles interact with light, scattering shorter wavelengths like blue and violet.\n        3. 
This scattered blue light reaches our eyes from all directions, making the sky appear blue!\n        Now, go outside and enjoy that beautiful blue sky!\n        ```\n\n    === \"Nous Hermes 2 Mistral DPO\"\n        Download `Nous Hermes 2 Mistral DPO` and prompt:\n        ```\n        write me a react app i can run from the command line to play a quick game\n        ```\n\n        With the default sampling settings, you should see text and code blocks resembling the following:\n        \n        ```\n        I'll provide you with a simple React app that allows you to play a guessing game. This app will generate a random number between 1 and 10, and ask you to guess it. You can run this app locally using Create React App. Follow the steps below:\n        Open your terminal or command line interface.\n        Navigate to the directory where you want to create the project (e.g., cd Documents/Projects).\n        Run the following commands:\n        ```\n\n        ```bash\n        npx create-react-app guessing-game\n        cd guessing-game\n        ```\n\n        ```\n        Open the project in your preferred code editor by running code . (for VS Code) or any other command to open the folder with your text editor.\n        Replace the content of the src/App.js file with the following code:\n        ```\n\n        ```javascript\n        import React, { useState } from 'react';\n\n        function App() {\n        const [guess, setGuess] = useState('');\n        const [message, setMessage] = useState('');\n        const [numberToGuess, setNumberToGuess] = useState(Math.floor(Math.random() * 10) + 1);\n\n        function handleChange(e) {\n            setGuess(e.target.value);\n        }\n\n        function handleSubmit(e) {\n            e.preventDefault();\n            if (parseInt(guess) === numberToGuess) {\n            setMessage('You guessed it right!');\n            } else {\n            setMessage(`Wrong guess, the correct number was ${numberToGuess}. 
Try again next time.`);\n            }\n        }\n\n        return (\n            <div>\n            <h1>Guess a Number</h1>\n            <p>{message}</p>\n            <form onSubmit={handleSubmit}>\n                <input type=\"number\" value={guess} onChange={handleChange} />\n                <button type=\"submit\">Submit</button>\n            </form>\n            </div>\n        );\n        }\n\n        export default App;\n        ```\n\n        ```\n        Save the file and go back to your terminal or command line interface. Run npm start. This will start a local development server, and you can access the app in your browser at http://localhost:3000.\n        Play the guessing game by entering a number between 1 and 10 into the input field and clicking \"Submit\". The app will tell you if your guess is correct or not.\n        Remember that this is just a simple example, and you can expand upon it to make the game more interesting with additional features like high scores, multiple difficulty levels, etc.\n        ```\n\n## New Chat\n\nChoose a model with the dropdown at the top of the Chats page\n\nIf you don't have any models, [download one](models.md#download-models). Once you have models, you can start chats by loading your default model, which you can configure in [settings](settings.md#application-settings)\n\n![Choose a model](../assets/three_model_options.png)\n\n## LocalDocs\n\nOpen the [LocalDocs](localdocs.md) panel with the button in the top-right corner to bring your files into the chat. 
With LocalDocs, your chats are enhanced with semantically related snippets from your files included in the model's context.\n\n![Open LocalDocs](../assets/open_local_docs.png)\n\n## Chat History\n\nView your chat history with the button in the top-left corner of the Chats page.\n\n<table>\n<tr>\n    <td>\n    <img src=\"../assets/closed_chat_panel.png\" alt=\"Close chats\" style=\"width:100%\">\n    </td>\n    <td>\n    <img src=\"../assets/open_chat_panel.png\" alt=\"Open chats\" style=\"width:100%\">\n    </td>\n</tr>\n</table>\n\nYou can change a chat name or delete it from your chat history at any time.\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md",
    "content": "# Using GPT4All to Privately Chat with your Obsidian Vault\n\nObsidian for Desktop is a powerful management and note-taking software designed to create and organize markdown notes. This tutorial allows you to sync and access your Obsidian note files directly on your computer. By connecting it to LocalDocs, you can integrate these files into your LLM chats for private access and enhanced context.\n\n## Download Obsidian for Desktop\n\n!!! note \"Download Obsidian for Desktop\"\n\n      1. **Download Obsidian for Desktop**:\n         - Visit the [Obsidian website](https://obsidian.md) and create an account.\n         - Click the Download button in the center of the homepage\n         - For more help with installing Obsidian see [Getting Started with Obsidian](https://help.obsidian.md/Getting+started/Download+and+install+Obsidian)\n      \n      2. **Set Up Obsidian**:\n         - Launch Obsidian from your Applications folder (macOS), Start menu (Windows), or equivalent location (Linux).\n         - On the welcome screen, you can either create a new vault (a collection of notes) or open an existing one.\n         - To create a new vault, click Create a new vault, name your vault, choose a location on your computer, and click Create.\n   \n   \n      3. **Sign in and Sync**:\n            - Once installed, you can start adding and organizing notes.\n            - Choose the folders you want to sync to your computer.\n   \n\n\n## Connect Obsidian to LocalDocs\n\n!!! note \"Connect Obsidian to LocalDocs\"\n\n      1. 
**Open LocalDocs**:\n         - Navigate to the LocalDocs feature within GPT4All.\n\n         <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of LocalDocs interface -->\n                  <img width=\"1348\" alt=\"LocalDocs interface\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/d8fb2d79-2063-45d4-bcce-7299fb75b144\">\n               </td>\n            </tr>\n         </table>\n   \n      2. **Add Collection**:\n         - Click on **+ Add Collection** to begin linking your Obsidian Vault.\n      \n         <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of adding collection in LocalDocs -->\n                  <img width=\"1348\" alt=\"Screenshot of adding collection\" src=\"https://raw.githubusercontent.com/nomic-ai/gpt4all/124ef867a9d9afd9e14d3858cd77bce858f79773/gpt4all-bindings/python/docs/assets/obsidian_adding_collection.png\">\n               </td>\n            </tr>\n         </table>\n   \n         - Name your collection\n   \n   \n      3. **Create Collection**:\n         - Click **Create Collection** to initiate the embedding process. Progress will be displayed within the LocalDocs interface.\n   \n      4. **Access Files in Chats**:\n         - Load a model to chat with your files (Llama 3 Instruct is the fastest)\n         - In your chat, open 'LocalDocs' with the button in the top-right corner to provide context from your synced Obsidian notes.\n      \n         <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of accessing LocalDocs in chats -->\n                  <img width=\"1447\" alt=\"Accessing LocalDocs in chats\" src=\"https://raw.githubusercontent.com/nomic-ai/gpt4all/124ef867a9d9afd9e14d3858cd77bce858f79773/gpt4all-bindings/python/docs/assets/obsidian_docs.png\">\n               </td>\n            </tr>\n         </table>\n   \n      5. 
**Interact With Your Notes:**\n         - Use the model to interact with your files\n         <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of interacting sources -->\n                  <img width=\"662\" alt=\"osbsidian user interaction\" src=\"https://raw.githubusercontent.com/nomic-ai/gpt4all/124ef867a9d9afd9e14d3858cd77bce858f79773/gpt4all-bindings/python/docs/assets/osbsidian_user_interaction.png\">\n               </td>\n            </tr>\n         </table>\n         <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of viewing sources -->\n                  <img width=\"662\" alt=\"osbsidian GPT4ALL response\" src=\"https://raw.githubusercontent.com/nomic-ai/gpt4all/124ef867a9d9afd9e14d3858cd77bce858f79773/gpt4all-bindings/python/docs/assets/obsidian_response.png\">\n               </td>\n            </tr>\n         </table>\n   \n      6. **View Referenced Files**:\n         - Click on **Sources** below LLM responses to see which Obsidian Notes were referenced.\n      \n         <table>\n            <tr>\n               <td>\n                  <!-- Referenced Files  -->\n                  <img width=\"643\" alt=\"Referenced Files\" src=\"https://raw.githubusercontent.com/nomic-ai/gpt4all/124ef867a9d9afd9e14d3858cd77bce858f79773/gpt4all-bindings/python/docs/assets/obsidian_sources.png\">\n               </td>\n            </tr>\n         </table>\n\n## How It Works\n\nObsidian for Desktop syncs your Obsidian notes to your computer, while LocalDocs integrates these files into your LLM chats using embedding models. These models find semantically similar snippets from your files to enhance the context of your interactions.\n\nTo learn more about embedding models and explore further, refer to the [Nomic Python SDK documentation](https://docs.nomic.ai/atlas/capabilities/embeddings).\n\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-One-Drive.md",
    "content": "# Using GPT4All to Privately Chat with your OneDrive Data\n\nLocal and Private AI Chat with your OneDrive Data\n\nOneDrive for Desktop allows you to sync and access your OneDrive files directly on your computer. By connecting your synced directory to LocalDocs, you can start using GPT4All to privately chat with data stored in your OneDrive.\n\n## Download OneDrive for Desktop\n\n!!! note \"Download OneDrive for Desktop\"\n\n    1. **Download OneDrive for Desktop**:\n    - Visit [Microsoft OneDrive](https://www.microsoft.com/en-us/microsoft-365/onedrive/download).\n    - Press 'download' for your respective device type.\n    - Download the OneDrive for Desktop application.\n    \n    2. **Install OneDrive for Desktop**\n    - Run the installer file you downloaded.\n    - Follow the prompts to complete the installation process.\n    \n    3. **Sign in and Sync**\n    - Once installed, sign in to OneDrive for Desktop with your Microsoft account credentials.\n    - Choose the folders you want to sync to your computer.\n\n## Connect OneDrive to LocalDocs\n\n!!! note \"Connect OneDrive to LocalDocs\"\n\n    1. **Install GPT4All and Open LocalDocs**:\n    \n        - Go to [nomic.ai/gpt4all](https://nomic.ai/gpt4all) to install GPT4All for your operating system.\n        \n        - Navigate to the LocalDocs feature within GPT4All to configure it to use your synced OneDrive directory.\n\n        <table>\n        <tr>\n            <td>\n                <!-- Placeholder for screenshot of LocalDocs interface -->\n                <img width=\"1348\" alt=\"Screenshot 2024-07-10 at 10 55 41 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/54254bc0-d9a0-40c4-9fd1-5059abaad583\">\n            </td>\n        </tr>\n        </table>\n\n    2. 
**Add Collection**:\n    \n        - Click on **+ Add Collection** to begin linking your OneDrive folders.\n\n        <table>\n        <tr>\n            <td>\n                <!-- Placeholder for screenshot of adding collection in LocalDocs -->\n               <img width=\"1348\" alt=\"Screenshot 2024-07-10 at 10 56 29 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/7f12969a-753a-4757-bb9e-9b607cf315ca\">\n            </td>\n        </tr>\n        </table>\n\n        - Name the Collection and specify the OneDrive folder path.\n\n    3. **Create Collection**:\n    \n        - Click **Create Collection** to initiate the embedding process. Progress will be displayed within the LocalDocs interface.\n\n    4. **Access Files in Chats**:\n    \n        - Load a model within GPT4All to chat with your files.\n        \n        - In your chat, open 'LocalDocs' using the button in the top-right corner to provide context from your synced OneDrive files.\n\n        <table>\n        <tr>\n            <td>\n                <!-- Placeholder for screenshot of accessing LocalDocs in chats -->\n                <img width=\"1447\" alt=\"Screenshot 2024-07-10 at 10 58 55 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/b5a67fe6-0d6a-42ae-b3b8-cc0f91cbf5b1\">\n            </td>\n        </tr>\n        </table>\n\n    5. 
**Interact With Your OneDrive**:\n    \n        - Use the model to interact with your files directly from OneDrive.\n        \n        <table>\n        <tr>\n            <td>\n                <!-- Placeholder for screenshot of interacting with sources -->\n                <img width=\"662\" alt=\"Screenshot 2024-07-10 at 11 04 55 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/2c9815b8-3d1c-4179-bf76-3ddbafb193bf\">\n            </td>\n        </tr>\n        </table>\n        \n        <table>\n        <tr>\n            <td>\n                <img width=\"662\" alt=\"Screenshot 2024-07-11 at 11 21 46 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/ce8be292-b025-415a-bd54-f11868e0cd0a\">\n            </td>\n        </tr>\n        </table>\n\n    6. **View Referenced Files**:\n    \n        - Click on **Sources** below responses to see which OneDrive files were referenced.\n\n        <table>\n        <tr>\n            <td>\n              <img width=\"643\" alt=\"Screenshot 2024-07-11 at 11 22 49 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/6fe3f10d-2791-4153-88a7-2198ab3ac945\">\n            </td>\n        </tr>\n        </table>\n\n## How It Works\n\nOneDrive for Desktop syncs your OneDrive files to your computer, while LocalDocs maintains a database of these synced files for use by your local GPT4All model. As your OneDrive updates, LocalDocs will automatically detect file changes and stay up to date. LocalDocs leverages [Nomic Embedding](https://docs.nomic.ai/atlas/capabilities/embeddings) models to find semantically similar snippets from your files, enhancing the context of your interactions.\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-google-drive.md",
    "content": "# Using GPT4All to Privately Chat with your Google Drive Data\nLocal and Private AI Chat with your Google Drive Data\n\nGoogle Drive for Desktop allows you to sync and access your Google Drive files directly on your computer. By connecting your synced directory to LocalDocs, you can start using GPT4All to privately chat with data stored in your Google Drive.\n\n## Download Google Drive for Desktop\n\n!!! note \"Download Google Drive for Desktop\"\n\n    1. **Download Google Drive for Desktop**:\n    - Visit [drive.google.com](https://drive.google.com) and sign in with your Google account.\n    - Navigate to the **Settings** (gear icon) and select **Settings** from the dropdown menu.\n    - Scroll down to **Google Drive for desktop** and click **Download**.\n\n    2. **Install Google Drive for Desktop**\n    - Run the installer file you downloaded.\n    - Follow the prompts to complete the installation process.\n\n    3. **Sign in and Sync**\n    - Once installed, sign in to Google Drive for Desktop with your Google account credentials.\n    - Choose the folders you want to sync to your computer.\n\nFor advanced help, see [Setting up Google Drive for Desktop](https://support.google.com/drive/answer/10838124?hl=en)\n## Connect Google Drive to LocalDocs\n\n!!! note \"Connect Google Drive to LocalDocs\"\n\n    1. **Install GPT4All and Open LocalDocs**:\n    \n        - Go to [nomic.ai/gpt4all](https://nomic.ai/gpt4all) to install GPT4All for your operating system.\n        \n        - Navigate to the LocalDocs feature within GPT4All to configure it to use your synced directory.\n\n        <table>\n        <tr>\n            <td>\n                <!-- Screenshot of LocalDocs interface -->\n                <img width=\"1348\" alt=\"Screenshot 2024-07-09 at 3 15 35 PM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/d8fb2d79-2063-45d4-bcce-7299fb75b144\">\n            </td>\n        </tr>\n        </table>\n\n    2. 
**Add Collection**:\n    \n        - Click on **+ Add Collection** to begin linking your Google Drive folders.\n\n        <table>\n        <tr>\n            <td>\n                <!-- Screenshot of adding collection in LocalDocs -->\n                <img width=\"1348\" alt=\"Screenshot 2024-07-09 at 3 17 24 PM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/39063615-9eb6-4c47-bde7-c9f04f9b168b\">\n            </td>\n        </tr>\n        </table>\n\n        - Name Collection\n\n\n    3. **Create Collection**:\n    \n        - Click **Create Collection** to initiate the embedding process. Progress will be displayed within the LocalDocs interface.\n\n    4. **Access Files in Chats**:\n    \n        - Load a model to chat with your files (Llama 3 Instruct performs best)\n        \n        - In your chat, open 'LocalDocs' with the button in the top-right corner to provide context from your synced Google Drive files.\n\n        <table>\n        <tr>\n            <td>\n                <!-- Screenshot of accessing LocalDocs in chats -->\n                <img width=\"1447\" alt=\"Screenshot 2024-07-09 at 3 20 53 PM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/ce68811f-9abd-451b-ac0a-fb941e185d7a\">\n            </td>\n        </tr>\n        </table>\n\n    5. 
**Interact With Your Drive:**\n    \n        - Use the model to interact with your files\n        \n        <table>\n        <tr>\n            <td>\n                <!-- Screenshot of interacting sources -->\n                <img width=\"662\" alt=\"Screenshot 2024-07-09 at 3 36 51 PM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/bc55bc36-e613-419d-a568-adb1cd993854\">\n            </td>\n        </tr>\n        </table>\n\n        <table>\n        <tr>\n            <td>\n              <img width=\"662\" alt=\"Screenshot 2024-07-11 at 11 34 00 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/1c0fd19a-5a22-4726-a841-d26c1bea81fc\">\n            </td>\n        </tr>\n        </table>\n    \n    6. **View Referenced Files**:\n    \n        - Click on **Sources** below LLM responses to see which Google Drive files were referenced.\n\n        <table>\n        <tr>\n            <td>  \n           <img width=\"643\" alt=\"Screenshot 2024-07-11 at 11 34 37 AM\" src=\"https://github.com/nomic-ai/gpt4all/assets/132290469/78527d30-8d24-4b4c-8311-b611a2d66fcd\">\n            </td>\n        </tr>\n        </table>\n\n## How It Works\n\nGoogle Drive for Desktop syncs your Google Drive files to your computer, while LocalDocs maintains a database of these synced files for use by your local LLM. As your Google Drive updates, LocalDocs will automatically detect file changes and get up to date. LocalDocs is powered by [Nomic Embedding](https://docs.nomic.ai/atlas/capabilities/embeddings) models which find semantically similar snippets from your files to enhance the context of your interactions.\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-microsoft-excel.md",
    "content": "# Using GPT4All to Privately Chat with your Microsoft Excel Spreadsheets\nLocal and Private AI Chat with your Microsoft Excel Spreadsheets\n\nMicrosoft Excel allows you to create, manage, and analyze data in spreadsheet format. By attaching your spreadsheets directly to GPT4All, you can privately chat with the AI to query and explore the data, enabling you to summarize, generate reports, and glean insights from your files—all within your conversation.\n\n<div style=\"position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden;\">\n  <iframe src=\"../../assets/gpt4all_xlsx_attachment.mp4\" style=\"position: absolute; top: 0; left: 0; width: 100%; height: 100%; border:0;\" allowfullscreen title=\"YouTube Video\"></iframe>\n</div>\n\n\n## Attach Microsoft Excel to your GPT4All Conversation\n\n!!! note \"Attach Microsoft Excel to your GPT4All Conversation\"\n\n    1. **Install GPT4All and Open **:\n\n        - Go to [nomic.ai/gpt4all](https://nomic.ai/gpt4all) to install GPT4All for your operating system.\n\n        - Navigate to the Chats view within GPT4All.\n\n        <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of Chat view -->\n                  <img width=\"1348\" alt=\"Chat view\" src=\"../../assets/chat_window.png\">\n               </td>\n            </tr>\n         </table>\n\n    2. **Example Spreadsheet **:\n\n        <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of Spreadsheet view -->\n                  <img width=\"1348\" alt=\"Spreadsheet view\" src=\"../../assets/disney_spreadsheet.png\">\n               </td>\n            </tr>\n         </table>\n\n    3. 
**Attach to GPT4All conversation**\n        <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of Attach view -->\n                  <img width=\"1348\" alt=\"Attach view\" src=\"../../assets/attach_spreadsheet.png\">\n               </td>\n            </tr>\n         </table>\n\n    4. **Have GPT4All Summarize and Generate a Report**\n        <table>\n            <tr>\n               <td>\n                  <!-- Screenshot of Attach view -->\n                  <img width=\"1348\" alt=\"Attach view\" src=\"../../assets/spreadsheet_chat.png\">\n               </td>\n            </tr>\n         </table>\n\n\n## How It Works\n\nGPT4All parses your attached excel spreadsheet into Markdown, a format understandable to LLMs, and adds the markdown text to the context for your LLM chat. You can view the code that converts `.xlsx` to Markdown [here](https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-chat/src/xlsxtomd.cpp) in the GPT4All github repo.\n\nFor example, the above spreadsheet titled `disney_income_stmt.xlsx` would be formatted the following way:\n\n```markdown\n## disney_income_stmt\n\n|Walt Disney Co.|||||||\n|---|---|---|---|---|---|---|\n|Consolidated Income Statement|||||||\n|||||||||\n|US$ in millions|||||||\n|12 months ended:|2023-09-30 00:00:00|2022-10-01 00:00:00|2021-10-02 00:00:00|2020-10-03 00:00:00|2019-09-28 00:00:00|2018-09-29 00:00:00|\n|Services|79562|74200|61768|59265|60542|50869|\n...\n...\n...\n```\n\n## Limitations\n\nIt is important to double-check the claims LLMs make about the spreadsheets you provide. LLMs can make mistakes about the data they are presented, particularly for the LLMs with smaller parameter counts (~8B) that fit within the memory of consumer hardware."
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/localdocs.md",
    "content": "# LocalDocs\n\nLocalDocs brings the information you have from files on-device into your LLM chats - **privately**.\n\n## Create LocalDocs\n\n!!! note \"Create LocalDocs\"\n\n    1. Click `+ Add Collection`.\n    \n    2. Name your collection and link it to a folder.\n\n        <table>\n        <tr>\n            <td>\n            <img src=\"../assets/new_docs_annotated.png\" alt=\"new GOT Docs\" style=\"width:100%\">\n            </td>\n            <td>\n            <img src=\"../assets/new_docs_annotated_filled.png\" alt=\"new GOT Docs filled out\" style=\"width:100%\">\n            </td>\n        </tr>\n        </table>\n\n    3. Click `Create Collection`. Progress for the collection is displayed on the LocalDocs page. \n\n        ![Embedding in progress](../assets/baelor.png)\n\n        You will see a green `Ready` indicator when the entire collection is ready. \n\n        Note: you can still chat with the files that are ready before the entire collection is ready.\n\n        ![Embedding complete](../assets/got_done.png)\n\n        Later on if you modify your LocalDocs settings you can rebuild your collections with your new settings.\n\n    4. In your chats, open `LocalDocs` with button in top-right corner to give your LLM context from those files.\n\n        ![LocalDocs result](../assets/syrio_snippets.png)\n\n    5. See which files were referenced by clicking `Sources` below the LLM responses.\n\n        ![Sources](../assets/open_sources.png)\n\n## How It Works\n\nA LocalDocs collection uses Nomic AI's free and fast on-device embedding models to index your folder into text snippets that each get an **embedding vector**. These vectors allow us to find snippets from your files that are semantically similar to the questions and prompts you enter in your chats. 
We then include those semantically similar snippets in the prompt to the LLM.\n\nTo try the embedding models yourself, we recommend using the [Nomic Python SDK](https://docs.nomic.ai/atlas/capabilities/embeddings)\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/models.md",
    "content": "# Models\n\nGPT4All is optimized to run LLMs in the 3-13B parameter range on consumer-grade hardware.\n\nLLMs are downloaded to your device so you can run them locally and privately. With our backend anyone can interact with LLMs efficiently and securely on their own hardware.\n\n## Download Models\n\n!!! note \"Download Models\"\n\n    <div style=\"text-align: center; margin-top: 20px;\">\n        <table style=\"margin-left: auto; margin-right: auto;\">\n            <tr>\n                <td style=\"text-align: right; padding-right: 10px;\">1.</td>\n                <td style=\"text-align: left;\">Click `Models` in the menu on the left (below `Chats` and above `LocalDocs`)</td>\n                <td><img src=\"../assets/models_page_icon.png\" alt=\"Models Page Icon\" style=\"width: 80px; height: auto;\"></td>\n            </tr>\n            <tr>\n                <td style=\"text-align: right; padding-right: 10px;\">2.</td>\n                <td style=\"text-align: left;\">Click `+ Add Model` to navigate to the `Explore Models` page</td>\n                <td><img src=\"../assets/add.png\" alt=\"Add Model button\" style=\"width: 100px; height: auto;\"></td>\n            </tr>\n            <tr>\n                <td style=\"text-align: right; padding-right: 10px;\">3.</td>\n                <td style=\"text-align: left;\">Search for models available online</td>\n                <td><img src=\"../assets/explore.png\" alt=\"Explore Models search\" style=\"width: 120px; height: auto;\"></td>\n            </tr>\n            <tr>\n                <td style=\"text-align: right; padding-right: 10px;\">4.</td>\n                <td style=\"text-align: left;\">Hit `Download` to save a model to your device</td>\n                <td><img src=\"../assets/download.png\" alt=\"Download Models button\" style=\"width: 120px; height: auto;\"></td>\n            </tr>\n            <tr>\n                <td style=\"text-align: right; padding-right: 10px;\">5.</td>\n            
    <td style=\"text-align: left;\">Once the model is downloaded you will see it in `Models`.</td>\n                <td><img src=\"../assets/installed_models.png\" alt=\"Download Models button\" style=\"width: 120px; height: auto;\"></td>\n            </tr>\n        </table>\n    </div>\n\n## Explore Models\n\nGPT4All connects you with LLMs from HuggingFace with a [`llama.cpp`](https://github.com/ggerganov/llama.cpp) backend so that they will run efficiently on your hardware. Many of these models can be identified by the file type `.gguf`.\n\n![Explore models](../assets/search_mistral.png)\n\n## Example Models\n\nMany LLMs are available at various sizes, quantizations, and licenses. \n\n- LLMs with more parameters tend to be better at coherently responding to instructions\n\n- LLMs with a smaller quantization (e.g. 4bit instead of 16bit) are much faster and less memory intensive, and tend to have slightly worse performance\n\n- Licenses vary in their terms for personal and commercial use\n\nHere are a few examples:\n\n| Model| Filesize| RAM Required| Parameters| Quantization| Developer| License| MD5 Sum (Unique Hash)|\n|------|---------|-------------|-----------|-------------|----------|--------|----------------------|\n| Llama 3 Instruct  | 4.66 GB| 8 GB| 8 Billion| q4_0| Meta| [Llama 3 License](https://llama.meta.com/llama3/license/)| c87ad09e1e4c8f9c35a5fcef52b6f1c9|\n| Nous Hermes 2 Mistral DPO| 4.11 GB| 8 GB| 7 Billion| q4_0| Mistral & Nous Research | [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)| Coa5f6b4eabd3992da4d7fb7f020f921eb|\n| Phi-3 Mini Instruct | 2.18 GB| 4 GB| 4 billion| q4_0| Microsoft| [MIT](https://opensource.org/license/mit)| f8347badde9bfc2efbe89124d78ddaf5|\n| Mini Orca (Small)| 1.98 GB| 4 GB| 3 billion| q4_0| Microsoft | [CC-BY-NC-SA-4.0](https://spdx.org/licenses/CC-BY-NC-SA-4.0)| 0e769317b90ac30d6e09486d61fefa26|\n| GPT4All Snoozy| 7.37 GB| 16 GB| 13 billion| q4_0| Nomic AI| [GPL](https://www.gnu.org/licenses/gpl-3.0.en.html)| 
40388eb2f8d16bb5d08c96fdfaac6b2c|\n\n### Search Results\n\nYou can click the gear icon in the search bar to sort search results by their # of likes, # of downloads, or date of upload (all from HuggingFace).\n\n![Sort search results](../assets/search_settings.png)\n\n## Connect Model APIs\n\nYou can add your API key for remote model providers.\n\n**Note**: this does not download a model file to your computer to use securely. Instead, this way of interacting with models has your prompts leave your computer to the API provider and returns the response to your computer.\n\n![Connect APIs](../assets/add_model_gpt4.png)\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/quickstart.md",
    "content": "# GPT4All Desktop\n\nThe GPT4All Desktop Application allows you to download and run large language models (LLMs) locally & privately on your device.\n\nWith GPT4All, you can chat with models, turn your local files into information sources for models [(LocalDocs)](localdocs.md), or browse models available online to download onto your device.\n\n[Official Video Tutorial](https://www.youtube.com/watch?v=gQcZDXRVJok)\n\n## Quickstart\n\n!!! note \"Quickstart\"\n\n    1. Install GPT4All for your operating system and open the application.\n\n        <div style=\"text-align: center; margin-top: 20px;\">\n            [Download for Windows](https://gpt4all.io/installers/gpt4all-installer-win64.exe) &nbsp;&nbsp;&nbsp;&nbsp;\n            [Download for Mac](https://gpt4all.io/installers/gpt4all-installer-darwin.dmg) &nbsp;&nbsp;&nbsp;&nbsp;\n            [Download for Linux](https://gpt4all.io/installers/gpt4all-installer-linux.run)\n        </div>\n\n    2. Hit `Start Chatting`. ![GPT4All home page](../assets/gpt4all_home.png)\n\n    3. Click `+ Add Model`.\n\n    4. Download a model. We recommend starting with Llama 3, but you can [browse more models](models.md). ![Download a model](../assets/download_llama.png)\n\n    5. Once downloaded, go to Chats (below Home and above Models in the menu on the left).  \n\n    6. Click \"Load Default Model\" (will be Llama 3 or whichever model you downloaded). \n\n        <table>\n        <tr>\n            <td>\n            <img src=\"../assets/before_first_chat.png\" alt=\"Before first chat\" style=\"width:100%\">\n            </td>\n            <td>\n            <img src=\"../assets/new_first_chat.png\" alt=\"New first chat\" style=\"width:100%\">\n            </td>\n        </tr>\n        </table>\n\n    7. Try the [example chats](chats.md) or your own prompts!\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_desktop/settings.md",
    "content": "# Settings\n\n## Application Settings\n\n!!! note \"General Application Settings\"\n\n    | Setting | Description | Default Value |\n    | --- | --- | --- |\n    | **Theme** | Color theme for the application. Options are `Light`, `Dark`, and `LegacyDark` | `Light` |\n    | **Font Size** | Font size setting for text throughout the application. Options are Small, Medium, and Large | Small |\n    | **Language and Locale** | The language and locale of that language you wish to use | System Locale |\n    | **Device** | Device that will run your models. Options are `Auto` (GPT4All chooses), `Metal` (Apple Silicon M1+), `CPU`, and `GPU` | `Auto` |\n    | **Default Model** | Choose your preferred LLM to load by default on startup| Auto |\n    | **Suggestion Mode** | Generate suggested follow up questions at the end of responses | When chatting with LocalDocs | \n    | **Download Path** | Select a destination on your device to save downloaded models | Windows: `C:\\Users\\{username}\\AppData\\Local\\nomic.ai\\GPT4All`<br><br>Mac: `/Users/{username}/Library/Application Support/nomic.ai/GPT4All/`<br><br>Linux: `/home/{username}/.local/share/nomic.ai/GPT4All` |\n    | **Enable Datalake** | Opt-in to sharing interactions with GPT4All community (**anonymous** and **optional**) | Off |\n\n!!! note \"Advanced Application Settings\"\n\n    | Setting | Description | Default Value |\n    | --- | --- | --- |\n    | **CPU Threads** | Number of concurrently running CPU threads (more can speed up responses) | 4 |\n    | **Enable System Tray** | The application will minimize to the system tray / taskbar when the window is closed | Off |\n    | **Enable Local Server** | Allow any application on your device to use GPT4All via an OpenAI-compatible GPT4All API | Off |\n    | **API Server Port** | Local HTTP port for the local API server | 4891 |\n\n## Model Settings\n\n!!! 
note \"Model / Character Settings\"\n\n    | Setting | Description | Default Value |\n    | --- | --- | --- |\n    | **Name** | Unique name of this model / character| set by model uploader |\n    | **Model File** | Filename (.gguf) of the model | set by model uploader |\n    | **System Message** | General instructions for the chats this model will be used for | set by model uploader |\n    | **Chat Template** | Format of user <-> assistant interactions for the chats this model will be used for | set by model uploader |\n    | **Chat Name Prompt** | Prompt used to automatically generate chat names | Describe the above conversation in seven words or less. |\n    | **Suggested FollowUp Prompt** | Prompt used to automatically generate follow up questions after a chat response | Suggest three very short factual follow-up questions that have not been answered yet or cannot be found inspired by the previous conversation and excerpts. |\n\n### Clone\n\nYou can **clone** an existing model, which allows you to save a configuration of a model file with different prompt templates and sampling settings.\n\n### Sampling Settings\n\n!!! 
note \"Model Sampling Settings\"\n\n    | Setting             | Description                          | Default Value |\n    |----------------------------|------------------------------------------|-----------|\n    | **Context Length**         | Maximum length of input sequence in tokens        | 2048      |\n    | **Max Length**             | Maximum length of response in tokens     | 4096      |\n    | **Prompt Batch Size**      | Token batch size for parallel processing | 128      |\n    | **Temperature**            | Lower temperature gives more likely generations | 0.7       |\n    | **Top P**                  | Prevents choosing highly unlikely tokens  | 0.4       |\n    | **Top K**                  | Size of selection pool for tokens         | 40        |\n    | **Min P**                  | Minimum relative probability              | 0         |\n    | **Repeat Penalty Tokens**  | Length to apply penalty                   | 64        |\n    | **Repeat Penalty**         | Penalize repetitiveness                   | 1.18      |\n    | **GPU Layers**             | How many model layers to load into VRAM     | 32        |\n\n## LocalDocs Settings\n\n!!! note \"General LocalDocs Settings\"\n\n    | Setting | Description | Default Value |\n    | --- | --- | --- |\n    | **Allowed File Extensions** | Choose which file types will be indexed into LocalDocs collections as text snippets with embedding vectors | `.txt`, `.pdf`, `.md`, `.rst` |\n    | **Use Nomic Embed API** | Use Nomic API to create LocalDocs collections fast and off-device; [Nomic API Key](https://atlas.nomic.ai/) required | Off |\n    | **Embeddings Device** | Device that will run embedding models. Options are `Auto` (GPT4All chooses), `Metal` (Apple Silicon M1+), `CPU`, and `GPU` | `Auto` |\n    | **Show Sources** | Titles of source files retrieved by LocalDocs will be displayed directly in your chats.| On |\n\n!!! 
note \"Advanced LocalDocs Settings\"\n\n    Note that increasing these settings can increase the likelihood of factual responses, but may result in slower generation times.\n\n    | Setting | Description | Default Value |\n    | --- | --- | --- |\n    | **Document Snippet Size** | Number of string characters per document snippet | 512 |\n    | **Maximum Document Snippets Per Prompt** | Upper limit for the number of snippets from your files LocalDocs can retrieve for LLM context | 3 |\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_help/faq.md",
    "content": "# Frequently Asked Questions\n\n## Models\n\n### Which language models are supported?\n\nWe support models with a `llama.cpp` implementation which have been uploaded to [HuggingFace](https://huggingface.co/).\n\n### Which embedding models are supported?\n\nWe support SBert and Nomic Embed Text v1 & v1.5.\n\n## Software\n\n### What software do I need?\n\nAll you need is to [install GPT4all](../index.md) onto you Windows, Mac, or Linux computer.\n\n### Which SDK languages are supported?\n\nOur SDK is in Python for usability, but these are light bindings around [`llama.cpp`](https://github.com/ggerganov/llama.cpp) implementations that we contribute to for efficiency and accessibility on everyday computers.\n\n### Is there an API?\n\nYes, you can run your model in server-mode with our [OpenAI-compatible API](https://platform.openai.com/docs/api-reference/completions), which you can configure in [settings](../gpt4all_desktop/settings.md#application-settings)\n\n### Can I monitor a GPT4All deployment?\n\nYes, GPT4All [integrates](../gpt4all_python/monitoring.md) with [OpenLIT](https://github.com/openlit/openlit) so you can deploy LLMs with user interactions and hardware usage automatically monitored for full observability.\n\n### Is there a command line interface (CLI)?\n\n[Yes](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/cli), we have a lightweight use of the Python client as a CLI. We welcome further contributions!\n\n## Hardware\n\n### What hardware do I need?\n\nGPT4All can run on CPU, Metal (Apple Silicon M1+), and GPU.\n\n### What are the system requirements?\n\nYour CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) and you need enough RAM to load a model into memory.\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_help/troubleshooting.md",
    "content": "# Troubleshooting\n\n## Error Loading Models\n\nIt is possible you are trying to load a model from HuggingFace whose weights are not compatible with our [backend](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings).\n\nTry downloading one of the officially supported models listed on the main models page in the application. If the problem persists, please share your experience on our [Discord](https://discord.com/channels/1076964370942267462).\n\n## Bad Responses \n\nTry the [example chats](../gpt4all_desktop/chats.md) to double check that your system is implementing models correctly.\n\n### Responses Incoherent\n\nIf you are seeing something **not at all** resembling the [example chats](../gpt4all_desktop/chats.md) - for example, if the responses you are seeing look nonsensical - try [downloading a different model](../gpt4all_desktop/models.md), and please share your experience on our [Discord](https://discord.com/channels/1076964370942267462).\n\n### Responses Incorrect\n\nLLMs can be unreliable. It's helpful to know what their training data was - they are less likely to be correct when asking about data they were not trained on unless you give the necessary information in the prompt as **context**.\n\nGiving LLMs additional context, like chatting using [LocalDocs](../gpt4all_desktop/localdocs.md), can help merge the language model's ability to understand text with the files that you trust to contain the information you need. \n\nIncluding information in a prompt is not a guarantee that it will be used correctly, but the more clear and concise your prompts, and the more relevant your prompts are to your files, the better.\n\n### LocalDocs Issues\n\nOccasionally a model - particularly a smaller or overall weaker LLM - may not use the relevant text snippets from the files that were referenced via LocalDocs. If you are seeing this, it can help to use phrases like \"in the docs\" or \"from the provided files\" when prompting your model.\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_python/home.md",
    "content": "# GPT4All Python SDK\n\n## Installation\n\nTo get started, pip-install the `gpt4all` package into your python environment.\n\n```bash\npip install gpt4all\n```\n\nWe recommend installing `gpt4all` into its own virtual environment using `venv` or `conda`\n\n## Load LLM\n\nModels are loaded by name via the `GPT4All` class. If it's your first time loading a model, it will be downloaded to your device and saved so it can be quickly reloaded next time you create a `GPT4All` model with the same name.\n\n!!! note \"Load LLM\"\n\n    ```python\n    from gpt4all import GPT4All\n    model = GPT4All(\"Meta-Llama-3-8B-Instruct.Q4_0.gguf\") # downloads / loads a 4.66GB LLM\n    with model.chat_session():\n        print(model.generate(\"How can I run LLMs efficiently on my laptop?\", max_tokens=1024))\n    ```\n\n| `GPT4All` model name| Filesize| RAM Required| Parameters| Quantization| Developer| License| MD5 Sum (Unique Hash)|\n|------|---------|-------|-------|-----------|----------|--------|----------------------|\n|  `Meta-Llama-3-8B-Instruct.Q4_0.gguf`| 4.66 GB| 8 GB| 8 Billion| q4_0| Meta| [Llama 3 License](https://llama.meta.com/llama3/license/)| c87ad09e1e4c8f9c35a5fcef52b6f1c9|\n| `Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf`| 4.11 GB| 8 GB| 7 Billion| q4_0| Mistral & Nous Research | [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)| Coa5f6b4eabd3992da4d7fb7f020f921eb|\n| `Phi-3-mini-4k-instruct.Q4_0.gguf` | 2.18 GB| 4 GB| 3.8 billion| q4_0| Microsoft| [MIT](https://opensource.org/license/mit)| f8347badde9bfc2efbe89124d78ddaf5|\n| `orca-mini-3b-gguf2-q4_0.gguf`| 1.98 GB| 4 GB| 3 billion| q4_0| Microsoft | [CC-BY-NC-SA-4.0](https://spdx.org/licenses/CC-BY-NC-SA-4.0)| 0e769317b90ac30d6e09486d61fefa26|\n| `gpt4all-13b-snoozy-q4_0.gguf`| 7.37 GB| 16 GB| 13 billion| q4_0| Nomic AI| [GPL](https://www.gnu.org/licenses/gpl-3.0.en.html)| 40388eb2f8d16bb5d08c96fdfaac6b2c|\n\n\n## Chat Session Generation\n\nMost of the language models you will be able to access 
from HuggingFace have been trained as assistants. This guides language models to not just answer with relevant text, but *helpful* text.\n\nIf you want your LLM's responses to be helpful in the typical sense, we recommend you apply the chat templates the models were finetuned with. Information about specific prompt templates is typically available on the official HuggingFace page for the model.\n\n!!! note \"Example LLM Chat Session Generation\"\n\n    === \"Code\"\n\n        Load `Llama 3` and enter the following prompt in a chat session:\n\n        ```python\n        from gpt4all import GPT4All\n        model = GPT4All(\"Meta-Llama-3-8B-Instruct.Q4_0.gguf\")\n        with model.chat_session():\n            print(model.generate(\"quadratic formula\"))\n        ```\n\n    === \"Output\"\n   \n        With the default sampling settings, you should see something resembling the following:\n        ```\n        The quadratic formula!\n\n        The quadratic formula is a mathematical formula that provides the solutions to a quadratic equation of the form:\n\n        ax^2 + bx + c = 0\n\n        where a, b, and c are constants. The formula is:\n\n        x = (-b ± √(b^2 - 4ac)) / 2a\n\n        Let's break it down:\n\n        * x is the variable we're trying to solve for.\n        * a, b, and c are the coefficients of the quadratic equation.\n        * ± means \"plus or minus\".\n        * √ denotes the square root.\n\n        To use the formula, simply plug in the values of a, b, and c into the expression above. The resulting value(s) will be the solutions to the original quadratic equation!\n\n        For example, let's say we have the quadratic equation:\n\n        x^2 + 5x + 6 = 0\n\n        We can plug these values into the formula as follows:\n\n        a = 1\n        b = 5\n        ```\n\n## Direct Generation\n\nDirectly calling `model.generate()` prompts the model without applying any templates. 
\n\nNote: this can result in responses that are less like helpful responses and more like mirroring the tone of your prompt. In general, a language model outside of a chat session is less of a helpful assistant and more of a lens into the distribution of the model's training data.\n\nAs an example, see how the model's response changes when we give the same prompt as above without applying a chat session:\n\n!!! note \"Example LLM Direct Generation\"\n\n    === \"Code\"\n    \n        Load `Llama 3` and enter the following prompt:\n\n        ```python\n        from gpt4all import GPT4All\n        model = GPT4All(\"Meta-Llama-3-8B-Instruct.Q4_0.gguf\")\n        print(model.generate(\"quadratic formula\"))\n        ```\n    === \"Output\"\n\n        With the default sampling settings, you should see something resembling the following:\n        ```\n        . The equation is in the form of a + bx = c, where a and b are constants.\n        The solution to this problem involves using the quadratic formula which states that for any quadratic equation ax^2+bx+c=0, its solutions can be found by:\n        x = (-b ± √(b^2-4ac)) / 2a\n        In your case, since you have a + bx = c, we need to rewrite it in the form of ax^2+bx+c=0. To do this, subtract both sides from c, so that:\n        c - (a + bx) = 0\n        Now, combine like terms on the left side and simplify:\n        ax^2 + (-b)x + (c-a) = 0\\n\\nSo now we have a quadratic equation in standard form: ax^2+bx+c=0. We can use this to find its solutions using the quadratic formula:\n        \n        x = ((-b ± √((-b)^2\n        ```\n\nWhy did it respond differently? Because language models, before being fine-tuned as assistants, are trained to be more like a data mimic than a helpful assistant. Therefore our responses ends up more like a typical continuation of math-style text rather than a helpful answer in dialog. 
\n\n## Embeddings\n\nNomic trains and open-sources free embedding models that will run very fast on your hardware.\n\nThe easiest way to run the text embedding model locally uses the [`nomic`](https://github.com/nomic-ai/nomic) python library to interface with our fast [C/C++ implementations](ref.md#gpt4all.gpt4all.Embed4All).\n\n!!! note \"Example Embeddings Generation\"\n\n    === \"Code\"\n\n        Importing `embed` from the [`nomic`](https://github.com/nomic-ai/nomic) library, you can call `embed.text()` with `inference_mode=\"local\"`. This downloads an embedding model and saves it for later.\n\n        ```python\n        from nomic import embed\n        embeddings = embed.text([\"String 1\", \"String 2\"], inference_mode=\"local\")['embeddings']\n        print(\"Number of embeddings created:\", len(embeddings))\n        print(\"Number of dimensions per embedding:\", len(embeddings[0]))\n        ```\n    \n    === \"Output\"\n\n        ```\n        Number of embeddings created: 2\n        Number of dimensions per embedding: 768\n        ```\n\n![Nomic embed text local inference](../assets/local_embed.gif)\n\nTo learn more about making embeddings locally with `nomic`, visit our [embeddings guide](https://docs.nomic.ai/atlas/guides/embeddings#local-inference).\n\nThe following embedding models can be used within the application and with the `Embed4All` class from the `gpt4all` Python library. 
The default context length of these GGUF files is 2048 but can be [extended](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5-GGUF#description).
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_python/monitoring.md",
    "content": "# GPT4All Monitoring\n\nGPT4All integrates with [OpenLIT](https://github.com/openlit/openlit) OpenTelemetry auto-instrumentation to perform real-time monitoring of your LLM application and GPU hardware.\n\nMonitoring can enhance your GPT4All deployment with auto-generated traces and metrics for\n\n- **Performance Optimization:** Analyze latency, cost and token usage to ensure your LLM application runs efficiently, identifying and resolving performance bottlenecks swiftly.\n  \n- **User Interaction Insights:** Capture each prompt and response to understand user behavior and usage patterns better, improving user experience and engagement.\n  \n- **Detailed GPU Metrics:** Monitor essential GPU parameters such as utilization, memory consumption, temperature, and power usage to maintain optimal hardware performance and avert potential issues.\n\n## Setup Monitoring\n\n!!! note \"Setup Monitoring\"\n\n    With [OpenLIT](https://github.com/openlit/openlit), you can automatically monitor traces and metrics for your LLM deployment:\n\n    ```shell\n    pip install openlit\n    ```\n\n    ```python\n    from gpt4all import GPT4All\n    import openlit\n\n    openlit.init()  # start\n    # openlit.init(collect_gpu_stats=True)  # Optional: To configure GPU monitoring\n\n    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')\n\n    # Start a chat session and send queries\n    with model.chat_session():\n        response1 = model.generate(prompt='hello', temp=0)\n        response2 = model.generate(prompt='write me a short poem', temp=0)\n        response3 = model.generate(prompt='thank you', temp=0)\n\n        print(model.current_chat_session)\n    ```\n\n## Visualization\n\n### OpenLIT UI\n\nConnect to OpenLIT's UI to start exploring the collected LLM performance metrics and traces. 
Visit the OpenLIT [Quickstart Guide](https://docs.openlit.io/latest/quickstart) for step-by-step details.\n\n### Grafana, DataDog, & Other Integrations\n\nYou can also send the data collected by OpenLIT to popular monitoring tools like Grafana and DataDog. For detailed instructions on setting up these connections, please refer to the OpenLIT [Connections Guide](https://docs.openlit.io/latest/connections/intro).\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/gpt4all_python/ref.md",
    "content": "# GPT4All Python SDK Reference\n::: gpt4all.gpt4all.GPT4All\n\n::: gpt4all.gpt4all.Embed4All"
  },
  {
    "path": "gpt4all-bindings/python/docs/index.md",
    "content": "# GPT4All Documentation\n\nGPT4All runs large language models (LLMs) privately on everyday desktops & laptops. \n\nNo API calls or GPUs required - you can just download the application and [get started](gpt4all_desktop/quickstart.md#quickstart).\n\n!!! note \"Desktop Application\"\n    GPT4All runs LLMs as an application on your computer. Nomic's embedding models can bring information from your local documents and files into your chats. It's fast, on-device, and completely **private**.\n\n    <div style=\"text-align: center; margin-top: 20px;\">\n        [Download for Windows](https://gpt4all.io/installers/gpt4all-installer-win64.exe) &nbsp;&nbsp;&nbsp;&nbsp;\n        [Download for Mac](https://gpt4all.io/installers/gpt4all-installer-darwin.dmg) &nbsp;&nbsp;&nbsp;&nbsp;\n        [Download for Linux](https://gpt4all.io/installers/gpt4all-installer-linux.run)\n    </div>\n\n!!! note \"Python SDK\"\n    Use GPT4All in Python to program with LLMs implemented with the [`llama.cpp`](https://github.com/ggerganov/llama.cpp) backend and [Nomic's C backend](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-backend). Nomic contributes to open source software like [`llama.cpp`](https://github.com/ggerganov/llama.cpp) to make LLMs accessible and efficient **for all**.\n\n    ```bash\n    pip install gpt4all\n    ```\n\n    ```python\n    from gpt4all import GPT4All\n    model = GPT4All(\"Meta-Llama-3-8B-Instruct.Q4_0.gguf\") # downloads / loads a 4.66GB LLM\n    with model.chat_session():\n        print(model.generate(\"How can I run LLMs efficiently on my laptop?\", max_tokens=1024))\n    ```\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/gpt4all_chat.md",
    "content": "# GPT4All Chat UI\n\nThe [GPT4All Chat Client](https://gpt4all.io) lets you easily interact with any local large language model.\n\nIt is optimized to run 7-13B parameter LLMs on the CPU's of any computer running OSX/Windows/Linux.\n\n## Running LLMs on CPU\nThe GPT4All Chat UI supports models from all newer versions of `llama.cpp` with `GGUF` models including the `Mistral`, `LLaMA2`, `LLaMA`, `OpenLLaMa`, `Falcon`, `MPT`, `Replit`, `Starcoder`, and `Bert` architectures\n\nGPT4All maintains an official list of recommended models located in [models3.json](https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-chat/metadata/models3.json). You can pull request new models to it and if accepted they will show up in the official download dialog.\n\n#### Sideloading any GGUF model\nIf a model is compatible with the gpt4all-backend, you can sideload it into GPT4All Chat by:\n\n1. Downloading your model in GGUF format. It should be a 3-8 GB file similar to the ones [here](https://huggingface.co/TheBloke/Orca-2-7B-GGUF/tree/main).\n2. Identifying your GPT4All model downloads folder. This is the path listed at the bottom of the downloads dialog.\n3. Placing your downloaded model inside GPT4All's model downloads folder.\n4. Restarting your GPT4ALL app. Your model should appear in the model selection list.\n\n## Plugins\nGPT4All Chat Plugins allow you to expand the capabilities of Local LLMs.\n\n### LocalDocs Plugin (Chat With Your Data)\nLocalDocs is a GPT4All feature that allows you to chat with your local files and data.\nIt allows you to utilize powerful local LLMs to chat with private data without any data leaving your computer or server.\nWhen using LocalDocs, your LLM will cite the sources that most likely contributed to a given output. Note, even an LLM equipped with LocalDocs can hallucinate. 
The LocalDocs plugin will utilize your documents to help answer prompts and you will see references appear below the response.\n\n<p align=\"center\">\n  <img width=\"70%\" src=\"https://github.com/nomic-ai/gpt4all/assets/10168/fe5dd3c0-b3cc-4701-98d3-0280dfbcf26f\">\n</p>\n\n#### Enabling LocalDocs\n1. Install the latest version of GPT4All Chat from [GPT4All Website](https://gpt4all.io).\n2. Go to `Settings > LocalDocs tab`.\n3. Download the SBert model\n4. Configure a collection (folder) on your computer that contains the files your LLM should have access to. You can alter the contents of the folder/directory at anytime. As you\nadd more files to your collection, your LLM will dynamically be able to access them.\n5. Spin up a chat session with any LLM (including external ones like ChatGPT but warning data will leave your machine!)\n6. At the top right, click the database icon and select which collection you want your LLM to know about during your chat session.\n7. You can begin searching with your localdocs even before the collection has completed indexing, but note the search will not include those parts of the collection yet to be indexed.\n\n#### LocalDocs Capabilities\nLocalDocs allows your LLM to have context about the contents of your documentation collection.\n\nLocalDocs **can**:\n\n- Query your documents based upon your prompt / question. Your documents will be searched for snippets that can be used to provide context for an answer. The most relevant snippets will be inserted into your prompts context, but it will be up to the underlying model to decide how best to use the provided context.\n\nLocalDocs **cannot**:\n\n- Answer general metadata queries (e.g. `What documents do you know about?`, `Tell me about my documents`)\n- Summarize a single document (e.g. 
`Summarize my magna carta PDF.`)\n\nSee the Troubleshooting section for common issues.\n\n#### How LocalDocs Works\nLocalDocs works by maintaining an index of all data in the directory your collection is linked to. This index\nconsists of small chunks of each document that the LLM can receive as additional input when you ask it a question.\nThe general technique this plugin uses is called [Retrieval Augmented Generation](https://arxiv.org/abs/2005.11401).\n\nThese document chunks help your LLM respond to queries with knowledge about the contents of your data.\nThe number of chunks and the size of each chunk can be configured in the LocalDocs plugin settings tab.\n\nLocalDocs currently supports plain text files (`.txt`, `.md`, and `.rst`) and PDF files (`.pdf`).\n\n#### Troubleshooting and FAQ\n*My LocalDocs plugin isn't using my documents*\n\n- Make sure LocalDocs is enabled for your chat session (the DB icon on the top-right should have a border)\n- If your document collection is large, wait 1-2 minutes for it to finish indexing.\n\n\n#### LocalDocs Roadmap\n- Customize model fine-tuned with retrieval in the loop.\n- Plugin compatibility with chat client server mode.\n\n## Server Mode\n\nGPT4All Chat comes with a built-in server mode allowing you to programmatically interact\nwith any supported local LLM through a *very familiar* HTTP API. You can find the API documentation [here](https://platform.openai.com/docs/api-reference/completions).\n\nEnabling server mode in the chat client will spin up an HTTP server running on `localhost` port\n`4891` (the reverse of 1984). 
You can enable the webserver via `GPT4All Chat > Settings > Enable web server`.\n\nBegin using local LLMs in your AI powered apps by changing a single line of code: the base path for requests.\n\n```python\nimport openai\n\nopenai.api_base = \"http://localhost:4891/v1\"\n#openai.api_base = \"https://api.openai.com/v1\"\n\nopenai.api_key = \"not needed for a local LLM\"\n\n# Set up the prompt and other parameters for the API request\nprompt = \"Who is Michael Jordan?\"\n\n# model = \"gpt-3.5-turbo\"\n#model = \"mpt-7b-chat\"\nmodel = \"gpt4all-j-v1.3-groovy\"\n\n# Make the API request\nresponse = openai.Completion.create(\n    model=model,\n    prompt=prompt,\n    max_tokens=50,\n    temperature=0.28,\n    top_p=0.95,\n    n=1,\n    echo=True,\n    stream=False\n)\n\n# Print the generated completion\nprint(response)\n```\n\nwhich gives the following response\n\n```json\n{\n  \"choices\": [\n    {\n      \"finish_reason\": \"stop\",\n      \"index\": 0,\n      \"logprobs\": null,\n      \"text\": \"Who is Michael Jordan?\\nMichael Jordan is a former professional basketball player who played for the Chicago Bulls in the NBA. He was born on December 30, 1963, and retired from playing basketball in 1998.\"\n    }\n  ],\n  \"created\": 1684260896,\n  \"id\": \"foobarbaz\",\n  \"model\": \"gpt4all-j-v1.3-groovy\",\n  \"object\": \"text_completion\",\n  \"usage\": {\n    \"completion_tokens\": 35,\n    \"prompt_tokens\": 39,\n    \"total_tokens\": 74\n  }\n}\n```\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/gpt4all_cli.md",
    "content": "# GPT4All CLI\r\n\r\nThe GPT4All command-line interface (CLI) is a Python script which is built on top of the\r\n[Python bindings][docs-bindings-python] ([repository][repo-bindings-python]) and the [typer]\r\npackage. The source code, README, and local build instructions can be found\r\n[here][repo-bindings-cli].\r\n\r\n[docs-bindings-python]: gpt4all_python.md\r\n[repo-bindings-python]: https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python\r\n[repo-bindings-cli]: https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/cli\r\n[typer]: https://typer.tiangolo.com/\r\n\r\n## Installation\r\n### The Short Version\r\n\r\nThe CLI is a Python script called [app.py]. If you're already familiar with Python best practices,\r\nthe short version is to [download app.py][app.py-download] into a folder of your choice, install\r\nthe two required dependencies with some variant of:\r\n```shell\r\npip install gpt4all typer\r\n```\r\n\r\nThen run it with a variant of:\r\n```shell\r\npython app.py repl\r\n```\r\nIn case you're wondering, _REPL_ is an acronym for [read-eval-print loop][wiki-repl].\r\n\r\n[app.py]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-bindings/cli/app.py\r\n[app.py-download]: https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-bindings/cli/app.py\r\n[wiki-repl]: https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop\r\n\r\n### Recommendations & The Long Version\r\n\r\nEspecially if you have several applications/libraries which depend on Python, to avoid descending\r\ninto dependency hell at some point, you should:\r\n- Consider to always install into some kind of [_virtual environment_][venv].\r\n- On a _Unix-like_ system, don't use `sudo` for anything other than packages provided by the system\r\n  package manager, i.e. 
never with `pip`.\r\n\r\n[venv]: https://docs.python.org/3/library/venv.html\r\n\r\nThere are several ways and tools available to do this, so below are descriptions on how to install\r\nwith a _virtual environment_ (recommended) or a user installation on all three main platforms.\r\n\r\nDifferent platforms can have slightly different ways to start the Python interpreter itself.\r\n\r\nNote: _Typer_ has an optional dependency for more fanciful output. If you want that, replace `typer`\r\nwith `typer[all]` in the pip-install instructions below.\r\n\r\n#### Virtual Environment Installation\r\nYou can name your _virtual environment_ folder for the CLI whatever you like. In the following,\r\n`gpt4all-cli` is used throughout.\r\n\r\n##### macOS\r\n\r\nThere are at least three ways to have a Python installation on _macOS_, and possibly not all of them\r\nprovide a full installation of Python and its tools. When in doubt, try the following:\r\n```shell\r\npython3 -m venv --help\r\npython3 -m pip --help\r\n```\r\nBoth should print the help for the `venv` and `pip` commands, respectively. If they don't, consult\r\nthe documentation of your Python installation on how to enable them, or download a separate Python\r\nvariant, for example try a [unified installer package from python.org][python.org-downloads].\r\n\r\n[python.org-downloads]: https://www.python.org/downloads/\r\n\r\nOnce ready, do:\r\n```shell\r\npython3 -m venv gpt4all-cli\r\n. gpt4all-cli/bin/activate\r\npython3 -m pip install gpt4all typer\r\n```\r\n\r\n##### Windows\r\n\r\nDownload the [official installer from python.org][python.org-downloads] if Python isn't already\r\npresent on your system.\r\n\r\nA _Windows_ installation should already provide all the components for a _virtual environment_. 
Run:\r\n```shell\r\npy -3 -m venv gpt4all-cli\r\ngpt4all-cli\\Scripts\\activate\r\npy -m pip install gpt4all typer\r\n```\r\n\r\n##### Linux\r\n\r\nOn Linux, a Python installation is often split into several packages and not all are necessarily\r\ninstalled by default. For example, on Debian/Ubuntu and derived distros, you will want to ensure\r\ntheir presence with the following:\r\n```shell\r\nsudo apt-get install python3-venv python3-pip\r\n```\r\nThe next steps are similar to the other platforms:\r\n```shell\r\npython3 -m venv gpt4all-cli\r\n. gpt4all-cli/bin/activate\r\npython3 -m pip install gpt4all typer\r\n```\r\nOn other distros, the situation might be different. Especially the package names can vary a lot.\r\nYou'll have to look it up in the documentation, software directory, or package search.\r\n\r\n#### User Installation\r\n##### macOS\r\n\r\nThere are at least three ways to have a Python installation on _macOS_, and possibly not all of them\r\nprovide a full installation of Python and its tools. When in doubt, try the following:\r\n```shell\r\npython3 -m pip --help\r\n```\r\nThat should print the help for the `pip` command. If it doesn't, consult the documentation of your\r\nPython installation on how to enable them, or download a separate Python variant, for example try a\r\n[unified installer package from python.org][python.org-downloads].\r\n\r\nOnce ready, do:\r\n```shell\r\npython3 -m pip install --user --upgrade gpt4all typer\r\n```\r\n\r\n##### Windows\r\n\r\nDownload the [official installer from python.org][python.org-downloads] if Python isn't already\r\npresent on your system. It includes all the necessary components. Run:\r\n```shell\r\npy -3 -m pip install --user --upgrade gpt4all typer\r\n```\r\n\r\n##### Linux\r\n\r\nOn Linux, a Python installation is often split into several packages and not all are necessarily\r\ninstalled by default. 
For example, on Debian/Ubuntu and derived distros, you will want to ensure\r\ntheir presence with the following:\r\n```shell\r\nsudo apt-get install python3-pip\r\n```\r\nThe next steps are similar to the other platforms:\r\n```shell\r\npython3 -m pip install --user --upgrade gpt4all typer\r\n```\r\nOn other distros, the situation might be different. Especially the package names can vary a lot.\r\nYou'll have to look it up in the documentation, software directory, or package search.\r\n\r\n## Running the CLI\r\n\r\nThe CLI is a self-contained script called [app.py]. As such, you can [download][app.py-download]\r\nand save it anywhere you like, as long as the Python interpreter has access to the mentioned\r\ndependencies.\r\n\r\nNote: different platforms can have slightly different ways to start Python. Whereas below the\r\ninterpreter command is written as `python` you typically want to type instead:\r\n- On _Unix-like_ systems: `python3`\r\n- On _Windows_: `py -3`\r\n\r\nThe simplest way to start the CLI is:\r\n```shell\r\npython app.py repl\r\n```\r\nThis automatically selects the [groovy] model and downloads it into the `.cache/gpt4all/` folder\r\nof your home directory, if not already present.\r\n\r\n[groovy]: https://huggingface.co/nomic-ai/gpt4all-j#model-details\r\n\r\nIf you want to use a different model, you can do so with the `-m`/`--model` parameter. 
If only a\r\nmodel file name is provided, it will again check in `.cache/gpt4all/` and might start downloading.\r\nIf instead given a path to an existing model, the command could for example look like this:\r\n```shell\r\npython app.py repl --model /home/user/my-gpt4all-models/gpt4all-13b-snoozy-q4_0.gguf\r\n```\r\n\r\nWhen you're done and want to end a session, simply type `/exit`.\r\n\r\nTo get help and information on all the available commands and options on the command-line, run:\r\n```shell\r\npython app.py --help\r\n```\r\nAnd while inside the running _REPL_, write `/help`.\r\n\r\nNote that if you've installed the required packages into a _virtual environment_, you don't need\r\nto activate that every time you want to run the CLI. Instead, you can just start it with the Python\r\ninterpreter in the folder `gpt4all-cli/bin/` (_Unix-like_) or `gpt4all-cli/Scripts/` (_Windows_).\r\n\r\nThat also makes it easy to set an alias e.g. in [Bash][bash-aliases] or [PowerShell][posh-aliases]:\r\n- Bash: `alias gpt4all=\"'/full/path/to/gpt4all-cli/bin/python' '/full/path/to/app.py' repl\"`\r\n- PowerShell:\r\n  ```posh\r\n  Function GPT4All-Venv-CLI {\"C:\\full\\path\\to\\gpt4all-cli\\Scripts\\python.exe\" \"C:\\full\\path\\to\\app.py\" repl}\r\n  Set-Alias -Name gpt4all -Value GPT4All-Venv-CLI\r\n  ```\r\n\r\nDon't forget to save these in the start-up file of your shell.\r\n\r\n[bash-aliases]: https://www.gnu.org/software/bash/manual/html_node/Aliases.html\r\n[posh-aliases]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/set-alias\r\n\r\nFinally, if on _Windows_ you see a box instead of an arrow `⇢` as the prompt character, you should\r\nchange the console font to one which offers better Unicode support.\r\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/gpt4all_faq.md",
    "content": "# GPT4All FAQ\n\n## What models are supported by the GPT4All ecosystem?\n\nCurrently, there are six different model architectures that are supported:\n\n1. GPT-J - Based off of the GPT-J architecture with examples found [here](https://huggingface.co/EleutherAI/gpt-j-6b)\n2. LLaMA - Based off of the LLaMA architecture with examples found [here](https://huggingface.co/models?sort=downloads&search=llama)\n3. MPT - Based off of Mosaic ML's MPT architecture with examples found [here](https://huggingface.co/mosaicml/mpt-7b)\n4. Replit - Based off of Replit Inc.'s Replit architecture with examples found [here](https://huggingface.co/replit/replit-code-v1-3b)\n5. Falcon - Based off of TII's Falcon architecture with examples found [here](https://huggingface.co/tiiuae/falcon-40b)\n6. StarCoder - Based off of BigCode's StarCoder architecture with examples found [here](https://huggingface.co/bigcode/starcoder)\n\n## Why so many different architectures? What differentiates them?\n\nOne of the major differences is license. Currently, the LLaMA based models are subject to a non-commercial license, whereas the GPTJ and MPT base\nmodels allow commercial usage. However, its successor [Llama 2 is commercially licensable](https://ai.meta.com/llama/license/), too. In the early\nadvent of the recent explosion of activity in open source local models, the LLaMA models have generally been seen as performing better, but that is\nchanging quickly. Every week - even every day! - new models are released with some of the GPTJ and MPT models competitive in performance/quality with\nLLaMA. What's more, there are some very nice architectural innovations with the MPT models that could lead to new performance/quality gains.\n\n## How does GPT4All make these models available for CPU inference?\n\nBy leveraging the ggml library written by Georgi Gerganov and a growing community of developers. There are currently multiple different versions of\nthis library. 
The original GitHub repo can be found [here](https://github.com/ggerganov/ggml), but the developer of the library has also created a\nLLaMA based version [here](https://github.com/ggerganov/llama.cpp). Currently, this backend is using the latter as a submodule.\n\n## Does that mean GPT4All is compatible with all llama.cpp models and vice versa?\n\nYes!\n\nThe upstream [llama.cpp](https://github.com/ggerganov/llama.cpp) project has introduced several [compatibility breaking] quantization methods recently.\nThis is a breaking change that renders all previous models (including the ones that GPT4All uses) inoperative with newer versions of llama.cpp since\nthat change.\n\nFortunately, we have engineered a submoduling system allowing us to dynamically load different versions of the underlying library so that\nGPT4All just works.\n\n[compatibility breaking]: https://github.com/ggerganov/llama.cpp/commit/b9fd7eee57df101d4a3e3eabc9fd6c2cb13c9ca1\n\n## What are the system requirements?\n\nYour CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) and you need enough RAM to load a model into memory.\n\n## What about GPU inference?\n\nIn newer versions of llama.cpp, there has been some added support for NVIDIA GPUs for inference. We're investigating how to incorporate this into our downloadable installers.\n\n## Ok, so bottom line... how do I make my model on Hugging Face compatible with GPT4All ecosystem right now?\n\n1. Check to make sure the Hugging Face model is available in one of our supported architectures\n2. If it is, then you can use the conversion script inside of our pinned llama.cpp submodule for GPTJ and LLaMA based models\n3. Or if your model is an MPT model you can use the conversion script located directly in this backend directory under the scripts subdirectory \n\n## Language Bindings\n\n#### There's a problem with the download\n\nSome bindings can download a model, if allowed to do so. 
For example, in Python or TypeScript if `allow_download=True`\nor `allowDownload=true` (default), a model is automatically downloaded into `.cache/gpt4all/` in the user's home folder,\nunless it already exists.\n\nIn case of connection issues or errors during the download, you might want to manually verify the model file's MD5\nchecksum by comparing it with the one listed in [models3.json].\n\nAs an alternative to the basic downloader built into the bindings, you can choose to download from the \n<https://gpt4all.io/> website instead. Scroll down to 'Model Explorer' and pick your preferred model.\n\n[models3.json]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-chat/metadata/models3.json\n\n#### I need the chat GUI and bindings to behave the same\n\nThe chat GUI and bindings are based on the same backend. You can make them behave the same way by following these steps:\n\n- First of all, ensure that all parameters in the chat GUI settings match those passed to the generating API, e.g.:\n\n    === \"Python\"\n        ``` py\n        from gpt4all import GPT4All\n        model = GPT4All(...)\n        model.generate(\"prompt text\", temp=0, ...)  # adjust parameters\n        ```\n    === \"TypeScript\"\n        ``` ts\n        import { createCompletion, loadModel } from '../src/gpt4all.js'\n        const ll = await loadModel(...);\n        const messages = ...\n        const re = await createCompletion(ll, messages, { temp: 0, ... });  // adjust parameters\n        ```\n\n- To make comparing the output easier, set _Temperature_ in both to 0 for now. This will make the output deterministic.\n\n- Next you'll have to compare the templates, adjusting them as necessary, based on how you're using the bindings.\n    - Specifically, in Python:\n        - With simple `generate()` calls, the input has to be surrounded with system and prompt templates.\n        - When using a chat session, it depends on whether the bindings are allowed to download [models3.json]. 
If yes,\n          and in the chat GUI the default templates are used, it'll be handled automatically. If no, use\n          `chat_session()` template parameters to customize them.\n\n- Once you're done, remember to reset _Temperature_ to its previous value in both chat GUI and your custom code.\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/gpt4all_monitoring.md",
    "content": "# Monitoring\n\nLeverage OpenTelemetry to perform real-time monitoring of your LLM application and GPUs using [OpenLIT](https://github.com/openlit/openlit). This tool helps you easily collect data on user interactions, performance metrics, along with GPU Performance metrics, which can assist in enhancing the functionality and dependability of your GPT4All based LLM application.\n\n## How it works?\n\nOpenLIT adds automatic OTel instrumentation to the GPT4All SDK. It covers the `generate` and `embedding` functions, helping to track LLM usage by gathering inputs and outputs. This allows users to monitor and evaluate the performance and behavior of their LLM application in different environments. OpenLIT also provides OTel auto-instrumentation for monitoring GPU metrics like utilization, temperature, power usage, and memory usage.\n\nAdditionally, you have the flexibility to view and analyze the generated traces and metrics either in the OpenLIT UI or by exporting them to widely used observability tools like Grafana and DataDog for more comprehensive analysis and visualization.\n\n## Getting Started\n\nHere’s a straightforward guide to help you set up and start monitoring your application:\n\n### 1. Install the OpenLIT SDK\nOpen your terminal and run:\n\n```shell\npip install openlit\n```\n\n### 2. 
Setup Monitoring for your Application\nIn your application, initiate OpenLIT as outlined below:\n\n```python\nfrom gpt4all import GPT4All\nimport openlit\n\nopenlit.init()  # Initialize OpenLIT monitoring\n\nmodel = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')\n\n# Start a chat session and send queries\nwith model.chat_session():\n    response1 = model.generate(prompt='hello', temp=0)\n    response2 = model.generate(prompt='write me a short poem', temp=0)\n    response3 = model.generate(prompt='thank you', temp=0)\n\n    print(model.current_chat_session)\n```\nThis setup wraps your gpt4all model interactions, capturing valuable data about each request and response.\n\n### 3. (Optional) Enable GPU Monitoring\n\nIf your application runs on NVIDIA GPUs, you can enable GPU stats collection in the OpenLIT SDK by adding `collect_gpu_stats=True`. This collects GPU metrics like utilization, temperature, power usage, and memory-related performance metrics. The collected metrics are OpenTelemetry gauges.\n\n```python\nfrom gpt4all import GPT4All\nimport openlit\n\nopenlit.init(collect_gpu_stats=True)  # Initialize OpenLIT monitoring\n\nmodel = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')\n\n# Start a chat session and send queries\nwith model.chat_session():\n    response1 = model.generate(prompt='hello', temp=0)\n    response2 = model.generate(prompt='write me a short poem', temp=0)\n    response3 = model.generate(prompt='thank you', temp=0)\n\n    print(model.current_chat_session)\n```\n\n### Visualize\n\nOnce you've set up data collection with [OpenLIT](https://github.com/openlit/openlit), you can visualize and analyze this information to better understand your application's performance:\n\n- **Using OpenLIT UI:** Connect to OpenLIT's UI to start exploring performance metrics. 
Visit the OpenLIT [Quickstart Guide](https://docs.openlit.io/latest/quickstart) for step-by-step details.\n\n- **Integrate with existing Observability Tools:** If you use tools like Grafana or DataDog, you can integrate the data collected by OpenLIT. For instructions on setting up these connections, check the OpenLIT [Connections Guide](https://docs.openlit.io/latest/connections/intro).\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/gpt4all_nodejs.md",
    "content": "# GPT4All Node.js API\n\nNative Node.js LLM bindings for all.\n\n```sh\nyarn add gpt4all@latest\n\nnpm install gpt4all@latest\n\npnpm install gpt4all@latest\n\n```\n\n## Contents\n\n*   See [API Reference](#api-reference)\n*   See [Examples](#api-example)\n*   See [Developing](#develop)\n*   GPT4ALL nodejs bindings created by [jacoobes](https://github.com/jacoobes), [limez](https://github.com/iimez) and the [nomic ai community](https://home.nomic.ai), for all to use.\n\n## Api Example\n\n### Chat Completion\n\n```js\nimport { LLModel, createCompletion, DEFAULT_DIRECTORY, DEFAULT_LIBRARIES_DIRECTORY, loadModel } from '../src/gpt4all.js'\n\nconst model = await loadModel( 'mistral-7b-openorca.gguf2.Q4_0.gguf', { verbose: true, device: 'gpu' });\n\nconst completion1 = await createCompletion(model, 'What is 1 + 1?', { verbose: true, })\nconsole.log(completion1.message)\n\nconst completion2 = await createCompletion(model, 'And if we add two?', {  verbose: true  })\nconsole.log(completion2.message)\n\nmodel.dispose()\n```\n\n### Embedding\n\n```js\nimport { loadModel, createEmbedding } from '../src/gpt4all.js'\n\nconst embedder = await loadModel(\"all-MiniLM-L6-v2-f16.gguf\", { verbose: true, type: 'embedding'})\n\nconsole.log(createEmbedding(embedder, \"Maybe Minecraft was the friends we made along the way\"));\n```\n\n### Chat Sessions\n\n```js\nimport { loadModel, createCompletion } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n});\n\nconst chat = await model.createChatSession();\n\nawait createCompletion(\n    chat,\n    \"Why are bananas rather blue than bread at night sometimes?\",\n    {\n        verbose: true,\n    }\n);\nawait createCompletion(chat, \"Are you sure?\", { verbose: true, });\n\n```\n\n### Streaming responses\n\n```js\nimport gpt from \"../src/gpt4all.js\";\n\nconst model = await gpt.loadModel(\"mistral-7b-openorca.gguf2.Q4_0.gguf\", {\n    
device: \"gpu\",\n});\n\nprocess.stdout.write(\"### Stream:\");\nconst stream = gpt.createCompletionStream(model, \"How are you?\");\nstream.tokens.on(\"data\", (data) => {\n    process.stdout.write(data);\n});\n//wait till stream finishes. We cannot continue until this one is done.\nawait stream.result;\nprocess.stdout.write(\"\\n\");\n\nprocess.stdout.write(\"### Stream with pipe:\");\nconst stream2 = gpt.createCompletionStream(\n    model,\n    \"Please say something nice about node streams.\"\n);\nstream2.tokens.pipe(process.stdout);\nawait stream2.result;\nprocess.stdout.write(\"\\n\");\n\nconsole.log(\"done\");\nmodel.dispose();\n```\n\n### Async Generators\n\n```js\nimport gpt from \"../src/gpt4all.js\";\n\nconst model = await gpt.loadModel(\"mistral-7b-openorca.gguf2.Q4_0.gguf\", {\n    device: \"gpu\",\n});\n\nprocess.stdout.write(\"### Generator:\");\nconst gen = gpt.createCompletionGenerator(model, \"Redstone in Minecraft is Turing Complete. Let that sink in. (let it in!)\");\nfor await (const chunk of gen) {\n    process.stdout.write(chunk);\n}\n\nprocess.stdout.write(\"\\n\");\nmodel.dispose();\n```\n\n## Develop\n\n### Build Instructions\n\n*   binding.gyp is compile config\n*   Tested on Ubuntu. Everything seems to work fine\n*   Tested on Windows. Everything works fine.\n*   Sparse testing on mac os.\n*   MingW works as well to build the gpt4all-backend. **HOWEVER**, this package works only with MSVC built dlls.\n\n### Requirements\n\n*   git\n*   [node.js >= 18.0.0](https://nodejs.org/en)\n*   [yarn](https://yarnpkg.com/)\n*   [node-gyp](https://github.com/nodejs/node-gyp)\n    *   all of its requirements.\n*   (unix) gcc version 12\n*   (win) msvc version 143\n    *   Can be obtained with visual studio 2022 build tools\n*   python 3\n*   On Windows and Linux, building GPT4All requires the complete Vulkan SDK. 
You may download it from here: https://vulkan.lunarg.com/sdk/home\n*   macOS users do not need Vulkan, as GPT4All will use Metal instead.\n\n### Build (from source)\n\n```sh\ngit clone https://github.com/nomic-ai/gpt4all.git\ncd gpt4all-bindings/typescript\n```\n\n*   The below shell commands assume the current working directory is `typescript`.\n\n*   To Build and Rebuild:\n\n```sh\nyarn\n```\n\n*   llama.cpp git submodule for gpt4all can be possibly absent. If this is the case, make sure to run in llama.cpp parent directory\n\n```sh\ngit submodule update --init --depth 1 --recursive\n```\n\n```sh\nyarn build:backend\n```\n\nThis will build platform-dependent dynamic libraries, and will be located in runtimes/(platform)/native. The only current way to use them is to put them in the current working directory of your application. That is, **WHEREVER YOU RUN YOUR NODE APPLICATION**\n\n*   llama-xxxx.dll is required.\n*   According to whatever model you are using, you'll need to select the proper model loader.\n    *   For example, if you are running a Mosaic MPT model, you will need to select the mpt-(buildvariant).(dynamiclibrary)\n\n### Test\n\n```sh\nyarn test\n```\n\n### Source Overview\n\n#### src/\n\n*   Extra functions to help aid devex\n*   Typings for the native node addon\n*   the javascript interface\n\n#### test/\n\n*   simple unit testings for some functions exported.\n*   more advanced ai testing is not handled\n\n#### spec/\n\n*   Average look and feel of the api\n*   Should work assuming a model and libraries are installed locally in working directory\n\n#### index.cc\n\n*   The bridge between nodejs and c. 
Where the bindings are.\n\n#### prompt.cc\n\n*   Handling prompting and inference of models in a threadsafe, asynchronous way.\n\n### Known Issues\n\n*   why your model may be spewing bull 💩\n    *   The downloaded model is broken (just reinstall or download from official site)\n*   Your model is hanging after a call to generate tokens.\n    *   Is `nPast` set too high? This may cause your model to hang (03/16/2024), Linux Mint, Ubuntu 22.04\n*   Your GPU usage is still high after node.js exits.\n    *   Make sure to call `model.dispose()`!!!\n\n### Roadmap\n\nThis package has been stabilizing over time development, and breaking changes may happen until the api stabilizes. Here's what's the todo list:\n\n*   \\[ ] Purely offline. Per the gui, which can be run completely offline, the bindings should be as well.\n*   \\[ ] NPM bundle size reduction via optionalDependencies strategy (need help)\n    *   Should include prebuilds to avoid painful node-gyp errors\n*   \\[x] createChatSession ( the python equivalent to create\\_chat\\_session )\n*   \\[x] generateTokens, the new name for createTokenStream. As of 3.2.0, this is released but not 100% tested. Check spec/generator.mjs!\n*   \\[x] ~~createTokenStream, an async iterator that streams each token emitted from the model. Planning on following this [example](https://github.com/nodejs/node-addon-examples/tree/main/threadsafe-async-iterator)~~ May not implement unless someone else can complete\n*   \\[x] prompt models via a threadsafe function in order to have proper non blocking behavior in nodejs\n*   \\[x] generateTokens is the new name for this^\n*   \\[x] proper unit testing (integrate with circle ci)\n*   \\[x] publish to npm under alpha tag `gpt4all@alpha`\n*   \\[x] have more people test on other platforms (mac tester needed)\n*   \\[x] switch to new pluggable backend\n\n### API Reference\n\n<!-- Generated by documentation.js. Update this documentation by updating the source code. 
-->\n\n##### Table of Contents\n\n*   [type](#type)\n*   [TokenCallback](#tokencallback)\n*   [ChatSessionOptions](#chatsessionoptions)\n    *   [systemPrompt](#systemprompt)\n    *   [messages](#messages)\n*   [initialize](#initialize)\n    *   [Parameters](#parameters)\n*   [generate](#generate)\n    *   [Parameters](#parameters-1)\n*   [InferenceModel](#inferencemodel)\n    *   [createChatSession](#createchatsession)\n        *   [Parameters](#parameters-2)\n    *   [generate](#generate-1)\n        *   [Parameters](#parameters-3)\n    *   [dispose](#dispose)\n*   [EmbeddingModel](#embeddingmodel)\n    *   [dispose](#dispose-1)\n*   [InferenceResult](#inferenceresult)\n*   [LLModel](#llmodel)\n    *   [constructor](#constructor)\n        *   [Parameters](#parameters-4)\n    *   [type](#type-1)\n    *   [name](#name)\n    *   [stateSize](#statesize)\n    *   [threadCount](#threadcount)\n    *   [setThreadCount](#setthreadcount)\n        *   [Parameters](#parameters-5)\n    *   [infer](#infer)\n        *   [Parameters](#parameters-6)\n    *   [embed](#embed)\n        *   [Parameters](#parameters-7)\n    *   [isModelLoaded](#ismodelloaded)\n    *   [setLibraryPath](#setlibrarypath)\n        *   [Parameters](#parameters-8)\n    *   [getLibraryPath](#getlibrarypath)\n    *   [initGpuByString](#initgpubystring)\n        *   [Parameters](#parameters-9)\n    *   [hasGpuDevice](#hasgpudevice)\n    *   [listGpu](#listgpu)\n        *   [Parameters](#parameters-10)\n    *   [dispose](#dispose-2)\n*   [GpuDevice](#gpudevice)\n    *   [type](#type-2)\n*   [LoadModelOptions](#loadmodeloptions)\n    *   [modelPath](#modelpath)\n    *   [librariesPath](#librariespath)\n    *   [modelConfigFile](#modelconfigfile)\n    *   [allowDownload](#allowdownload)\n    *   [verbose](#verbose)\n    *   [device](#device)\n    *   [nCtx](#nctx)\n    *   [ngl](#ngl)\n*   [loadModel](#loadmodel)\n    *   [Parameters](#parameters-11)\n*   [InferenceProvider](#inferenceprovider)\n*   
[createCompletion](#createcompletion)\n    *   [Parameters](#parameters-12)\n*   [createCompletionStream](#createcompletionstream)\n    *   [Parameters](#parameters-13)\n*   [createCompletionGenerator](#createcompletiongenerator)\n    *   [Parameters](#parameters-14)\n*   [createEmbedding](#createembedding)\n    *   [Parameters](#parameters-15)\n*   [CompletionOptions](#completionoptions)\n    *   [verbose](#verbose-1)\n    *   [onToken](#ontoken)\n*   [Message](#message)\n    *   [role](#role)\n    *   [content](#content)\n*   [prompt\\_tokens](#prompt_tokens)\n*   [completion\\_tokens](#completion_tokens)\n*   [total\\_tokens](#total_tokens)\n*   [n\\_past\\_tokens](#n_past_tokens)\n*   [CompletionReturn](#completionreturn)\n    *   [model](#model)\n    *   [usage](#usage)\n    *   [message](#message-1)\n*   [CompletionStreamReturn](#completionstreamreturn)\n*   [LLModelPromptContext](#llmodelpromptcontext)\n    *   [logitsSize](#logitssize)\n    *   [tokensSize](#tokenssize)\n    *   [nPast](#npast)\n    *   [nPredict](#npredict)\n    *   [promptTemplate](#prompttemplate)\n    *   [nCtx](#nctx-1)\n    *   [topK](#topk)\n    *   [topP](#topp)\n    *   [minP](#minp)\n    *   [temperature](#temperature)\n    *   [nBatch](#nbatch)\n    *   [repeatPenalty](#repeatpenalty)\n    *   [repeatLastN](#repeatlastn)\n    *   [contextErase](#contexterase)\n*   [DEFAULT\\_DIRECTORY](#default_directory)\n*   [DEFAULT\\_LIBRARIES\\_DIRECTORY](#default_libraries_directory)\n*   [DEFAULT\\_MODEL\\_CONFIG](#default_model_config)\n*   [DEFAULT\\_PROMPT\\_CONTEXT](#default_prompt_context)\n*   [DEFAULT\\_MODEL\\_LIST\\_URL](#default_model_list_url)\n*   [downloadModel](#downloadmodel)\n    *   [Parameters](#parameters-16)\n    *   [Examples](#examples)\n*   [DownloadModelOptions](#downloadmodeloptions)\n    *   [modelPath](#modelpath-1)\n    *   [verbose](#verbose-2)\n    *   [url](#url)\n    *   [md5sum](#md5sum)\n*   [DownloadController](#downloadcontroller)\n    *   
[cancel](#cancel)\n    *   [promise](#promise)\n\n#### type\n\nModel architecture. This argument currently does not have any functionality and is just used as descriptive identifier for user.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n#### TokenCallback\n\nCallback for controlling token generation. Return false to stop token generation.\n\nType: function (tokenId: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), token: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String), total: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)): [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)\n\n#### ChatSessionOptions\n\n**Extends Partial\\<LLModelPromptContext>**\n\nOptions for the chat session.\n\n##### systemPrompt\n\nSystem prompt to ingest on initialization.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### messages\n\nMessages to ingest on initialization.\n\nType: [Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array)<[Message](#message)>\n\n#### initialize\n\nIngests system prompt and initial messages.\nSets this chat session as the active chat session of the model.\n\n##### Parameters\n\n*   `options` **[ChatSessionOptions](#chatsessionoptions)** The options for the chat session.\n\nReturns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)\\<void>**&#x20;\n\n#### generate\n\nPrompts the model in chat-session context.\n\n##### Parameters\n\n*   `prompt` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The prompt input.\n*   `options` **[CompletionOptions](#completionoptions)?** Prompt context and other options.\n*   `callback` 
**[TokenCallback](#tokencallback)?** Token generation callback.\n\n<!---->\n\n*   Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the chat session is not the active chat session of the model.\n\nReturns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[CompletionReturn](#completionreturn)>** The model's response to the prompt.\n\n#### InferenceModel\n\nInferenceModel represents an LLM which can make chat predictions, similar to GPT transformers.\n\n##### createChatSession\n\nCreate a chat session with the model.\n\n###### Parameters\n\n*   `options` **[ChatSessionOptions](#chatsessionoptions)?** The options for the chat session.\n\nReturns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)\\<ChatSession>** The chat session.\n\n##### generate\n\nPrompts the model with a given input and optional parameters.\n\n###### Parameters\n\n*   `prompt` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**&#x20;\n*   `options` **[CompletionOptions](#completionoptions)?** Prompt context and other options.\n*   `callback` **[TokenCallback](#tokencallback)?** Token generation callback.\n*   `input`  The prompt input.\n\nReturns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[CompletionReturn](#completionreturn)>** The model's response to the prompt.\n\n##### dispose\n\ndelete and cleanup the native model\n\nReturns **void**&#x20;\n\n#### EmbeddingModel\n\nEmbeddingModel represents an LLM which can create embeddings, which are float arrays\n\n##### dispose\n\ndelete and cleanup the native model\n\nReturns **void**&#x20;\n\n#### InferenceResult\n\nShape of LLModel's inference result.\n\n#### LLModel\n\nLLModel class representing a language model.\nThis is a base class that provides common functionality for different types of language 
models.\n\n##### constructor\n\nInitialize a new LLModel.\n\n###### Parameters\n\n*   `path` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** Absolute path to the model file.\n\n<!---->\n\n*   Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model file does not exist.\n\n##### type\n\nundefined or user supplied\n\nReturns **([string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String) | [undefined](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/undefined))**&#x20;\n\n##### name\n\nThe name of the model.\n\nReturns **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**&#x20;\n\n##### stateSize\n\nGet the size of the internal state of the model.\nNOTE: This state data is specific to the type of model you have created.\n\nReturns **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** the size in bytes of the internal state of the model\n\n##### threadCount\n\nGet the number of threads used for model inference.\nThe default is the number of physical cores your computer has.\n\nReturns **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** The number of threads used for model inference.\n\n##### setThreadCount\n\nSet the number of threads used for model inference.\n\n###### Parameters\n\n*   `newNumber` **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** The new number of threads.\n\nReturns **void**&#x20;\n\n##### infer\n\nPrompt the model with a given input and optional parameters.\nThis is the raw output from model.\nUse the prompt function exported for a value\n\n###### Parameters\n\n*   `prompt` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The prompt input.\n*   `promptContext` 
**Partial<[LLModelPromptContext](#llmodelpromptcontext)>** Optional parameters for the prompt context.\n*   `callback` **[TokenCallback](#tokencallback)?** optional callback to control token generation.\n\nReturns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[InferenceResult](#inferenceresult)>** The result of the model prompt.\n\n##### embed\n\nEmbed text with the model. This is the raw output from the model.\nUse the exported createEmbedding function for a more convenient interface.\n\n###### Parameters\n\n*   `text` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The text to embed.\n\nReturns **[Float32Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)** The embedding of the text.\n\n##### isModelLoaded\n\nWhether the model is loaded or not.\n\nReturns **[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)**&#x20;\n\n##### setLibraryPath\n\nWhere to search for the pluggable backend libraries\n\n###### Parameters\n\n*   `s` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**&#x20;\n\nReturns **void**&#x20;\n\n##### getLibraryPath\n\nWhere to get the pluggable backend libraries\n\nReturns **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**&#x20;\n\n##### initGpuByString\n\nInitiate a GPU by a string identifier.\n\n###### Parameters\n\n*   `memory_required` **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** Should be in the range size\_t or will throw\n*   `device_name` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** 'amd' | 'nvidia' | 'intel' | 'gpu' | gpu name.\n    read LoadModelOptions.device for more information\n\nReturns **[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)**&#x20;\n\n##### 
hasGpuDevice\n\nFrom C documentation\n\nReturns **[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)** True if a GPU device is successfully initialized, false otherwise.\n\n##### listGpu\n\nGPUs that are usable for this LLModel\n\n###### Parameters\n\n*   `nCtx` **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** Maximum size of context window\n\n<!---->\n\n*   Throws **any** if hasGpuDevice returns false (i think)\n\nReturns **[Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array)<[GpuDevice](#gpudevice)>**&#x20;\n\n##### dispose\n\ndelete and cleanup the native model\n\nReturns **void**&#x20;\n\n#### GpuDevice\n\nan object that contains gpu data on this machine.\n\n##### type\n\nsame as VkPhysicalDeviceType\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n#### LoadModelOptions\n\nOptions that configure a model's behavior.\n\n##### modelPath\n\nWhere to look for model files.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### librariesPath\n\nWhere to look for the backend libraries.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### modelConfigFile\n\nThe path to the model configuration file, useful for offline usage or custom model configurations.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### allowDownload\n\nWhether to allow downloading the model if it is not present at the specified path.\n\nType: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)\n\n##### verbose\n\nEnable verbose logging.\n\nType: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)\n\n##### device\n\nThe processing unit on which the model will run. 
It can be set to\n\n*   \"cpu\": Model will run on the central processing unit.\n*   \"gpu\": Model will run on the best available graphics processing unit, irrespective of its vendor.\n*   \"amd\", \"nvidia\", \"intel\": Model will run on the best available GPU from the specified vendor.\n*   \"gpu name\": Model will run on the GPU that matches the name if it's available.\n    Note: If a GPU device lacks sufficient RAM to accommodate the model, an error will be thrown, and the GPT4All\n    instance will be rendered invalid. It's advised to ensure the device has enough memory before initiating the\n    model.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### nCtx\n\nThe Maximum window size of this model\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### ngl\n\nNumber of gpu layers needed\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n#### loadModel\n\nLoads a machine learning model with the specified name. 
The de facto way to create a model.\nBy default this will download a model from the official GPT4All website, if a model is not present at given path.\n\n##### Parameters\n\n*   `modelName` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The name of the model to load.\n*   `options` **([LoadModelOptions](#loadmodeloptions) | [undefined](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/undefined))?** (Optional) Additional options for loading the model.\n\nReturns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<([InferenceModel](#inferencemodel) | [EmbeddingModel](#embeddingmodel))>** A promise that resolves to an instance of the loaded LLModel.\n\n#### InferenceProvider\n\nInterface for inference, implemented by InferenceModel and ChatSession.\n\n#### createCompletion\n\nThe nodejs equivalent to python binding's chat\_completion\n\n##### Parameters\n\n*   `provider` **[InferenceProvider](#inferenceprovider)** The inference model object or chat session\n*   `message` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The user input message\n*   `options` **[CompletionOptions](#completionoptions)** The options for creating the completion.\n\nReturns **[CompletionReturn](#completionreturn)** The completion result.\n\n#### createCompletionStream\n\nStreaming variant of createCompletion, returns a stream of tokens and a promise that resolves to the completion result.\n\n##### Parameters\n\n*   `provider` **[InferenceProvider](#inferenceprovider)** The inference model object or chat session\n*   `message` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The user input message.\n*   `options` **[CompletionOptions](#completionoptions)** The options for creating the completion.\n\nReturns **[CompletionStreamReturn](#completionstreamreturn)** An object of token 
stream and the completion result promise.\n\n#### createCompletionGenerator\n\nCreates an async generator of tokens\n\n##### Parameters\n\n*   `provider` **[InferenceProvider](#inferenceprovider)** The inference model object or chat session\n*   `message` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The user input message.\n*   `options` **[CompletionOptions](#completionoptions)** The options for creating the completion.\n\nReturns **AsyncGenerator<[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)>** The stream of generated tokens\n\n#### createEmbedding\n\nThe nodejs moral equivalent to python binding's Embed4All().embed()\n\n##### Parameters\n\n*   `model` **[EmbeddingModel](#embeddingmodel)** The language model object.\n*   `text` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** text to embed\n\nReturns **[Float32Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)** The embedding of the text.\n\n#### CompletionOptions\n\n**Extends Partial\\<LLModelPromptContext>**\n\nThe options for creating the completion.\n\n##### verbose\n\nIndicates if verbose logging is enabled.\n\nType: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)\n\n##### onToken\n\nCallback for controlling token generation. Return false to stop processing.\n\nType: [TokenCallback](#tokencallback)\n\n#### Message\n\nA message in the conversation.\n\n##### role\n\nThe role of the message.\n\nType: (`\"system\"` | `\"assistant\"` | `\"user\"`)\n\n##### content\n\nThe message content.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n#### prompt\_tokens\n\nThe number of tokens used in the prompt. 
Currently not available and always 0.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n#### completion\\_tokens\n\nThe number of tokens used in the completion.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n#### total\\_tokens\n\nThe total number of tokens used. Currently not available and always 0.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n#### n\\_past\\_tokens\n\nNumber of tokens used in the conversation.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n#### CompletionReturn\n\nThe result of a completion.\n\n##### model\n\nThe model used for the completion.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### usage\n\nToken usage report.\n\nType: {prompt\\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), completion\\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), total\\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), n\\_past\\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)}\n\n##### message\n\nThe generated completion.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n#### CompletionStreamReturn\n\nThe result of a streamed completion, containing a stream of tokens and a promise that resolves to the completion result.\n\n#### LLModelPromptContext\n\nModel inference arguments for generating completions.\n\n##### logitsSize\n\nThe size of the raw logits vector.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### tokensSize\n\nThe size of the raw tokens 
vector.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### nPast\n\nThe number of tokens in the past conversation.\nThis controls how far back the model looks when generating completions.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### nPredict\n\nThe maximum number of tokens to predict.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### promptTemplate\n\nTemplate for user / assistant message pairs.\n%1 is required and will be replaced by the user input.\n%2 is optional and will be replaced by the assistant response.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### nCtx\n\nThe context window size. Do not use, it has no effect. See loadModel options.\nTHIS IS DEPRECATED!!!\nUse loadModel's nCtx option instead.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### topK\n\nThe top-k logits to sample from.\nTop-K sampling selects the next token only from the top K most likely tokens predicted by the model.\nIt helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit\nthe diversity of the output. A higher value for top-K (eg., 100) will consider more tokens and lead\nto more diverse text, while a lower value (eg., 10) will focus on the most probable tokens and generate\nmore conservative text. 30 - 60 is a good range for most tasks.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### topP\n\nThe nucleus sampling probability threshold.\nTop-P limits the selection of the next token to a subset of tokens with a cumulative probability\nabove a threshold P. 
This method, also known as nucleus sampling, finds a balance between diversity\nand quality by considering both token probabilities and the number of tokens available for sampling.\nWhen using a higher value for top-P (eg., 0.95), the generated text becomes more diverse.\nOn the other hand, a lower value (eg., 0.1) produces more focused and conservative text.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### minP\n\nThe minimum probability of a token to be considered.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### temperature\n\nThe temperature to adjust the model's output distribution.\nTemperature is like a knob that adjusts how creative or focused the output becomes. Higher temperatures\n(eg., 1.2) increase randomness, resulting in more imaginative and diverse text. Lower temperatures (eg., 0.5)\nmake the output more focused, predictable, and conservative. When the temperature is set to 0, the output\nbecomes completely deterministic, always selecting the most probable next token and producing identical results\neach time. A safe range would be around 0.6 - 0.85, but you are free to search what value fits best for you.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### nBatch\n\nThe number of predictions to generate in parallel.\nBy splitting the prompt every N tokens, prompt-batch-size reduces RAM usage during processing. However,\nthis can increase the processing time as a trade-off. 
If the N value is set too low (e.g., 10), long prompts\nwith 500+ tokens will be most affected, requiring numerous processing runs to complete the prompt processing.\nTo ensure optimal performance, setting the prompt-batch-size to 2048 allows processing of all tokens in a single run.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### repeatPenalty\n\nThe penalty factor for repeated tokens.\nRepeat-penalty can help penalize tokens based on how frequently they occur in the text, including the input prompt.\nA token that has already appeared five times is penalized more heavily than a token that has appeared only one time.\nA value of 1 means that there is no penalty and values larger than 1 discourage repeated tokens.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### repeatLastN\n\nThe number of last tokens to penalize.\nThe repeat-penalty-tokens N option controls the number of tokens in the history to consider for penalizing repetition.\nA larger value will look further back in the generated text to prevent repetitions, while a smaller value will only\nconsider recent tokens.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n##### contextErase\n\nThe percentage of context to erase if the context window is exceeded.\n\nType: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)\n\n#### DEFAULT\\_DIRECTORY\n\nFrom python api:\nmodels will be stored in (homedir)/.cache/gpt4all/\\`\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n#### DEFAULT\\_LIBRARIES\\_DIRECTORY\n\nFrom python api:\nThe default path for dynamic libraries to be stored.\nYou may separate paths by a semicolon to search in multiple areas.\nThis searches DEFAULT\\_DIRECTORY/libraries, cwd/libraries, and finally cwd.\n\nType: 
[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n#### DEFAULT\\_MODEL\\_CONFIG\n\nDefault model configuration.\n\nType: ModelConfig\n\n#### DEFAULT\\_PROMPT\\_CONTEXT\n\nDefault prompt context.\n\nType: [LLModelPromptContext](#llmodelpromptcontext)\n\n#### DEFAULT\\_MODEL\\_LIST\\_URL\n\nDefault model list url.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n#### downloadModel\n\nInitiates the download of a model file.\nBy default this downloads without waiting. use the controller returned to alter this behavior.\n\n##### Parameters\n\n*   `modelName` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The model to be downloaded.\n*   `options` **[DownloadModelOptions](#downloadmodeloptions)** to pass into the downloader. Default is { location: (cwd), verbose: false }.\n\n##### Examples\n\n```javascript\nconst download = downloadModel('ggml-gpt4all-j-v1.3-groovy.bin')\ndownload.promise.then(() => console.log('Downloaded!'))\n```\n\n*   Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model already exists in the specified location.\n*   Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model cannot be found at the specified url.\n\nReturns **[DownloadController](#downloadcontroller)** object that allows controlling the download process.\n\n#### DownloadModelOptions\n\nOptions for the model download process.\n\n##### modelPath\n\nlocation to download the model.\nDefault is process.cwd(), or the current working directory\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### verbose\n\nDebug mode -- check how long it took to download in seconds\n\nType: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)\n\n##### 
url\n\nRemote download url. Defaults to `https://gpt4all.io/models/gguf/<modelName>`\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n##### md5sum\n\nMD5 sum of the model file. If this is provided, the downloaded file will be checked against this sum.\nIf the sums do not match, an error will be thrown and the file will be deleted.\n\nType: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)\n\n#### DownloadController\n\nModel download controller.\n\n##### cancel\n\nCancel the request to download if this is called.\n\nType: function (): void\n\n##### promise\n\nA promise resolving to the downloaded models config once the download is done\n\nType: [Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)\\<ModelConfig>\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/gpt4all_python.md",
    "content": "# GPT4All Python Generation API\nThe `GPT4All` python package provides bindings to our C/C++ model backend libraries.\nThe source code and local build instructions can be found [here](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python).\n\n\n## Quickstart\n```bash\npip install gpt4all\n```\n\n``` py\nfrom gpt4all import GPT4All\nmodel = GPT4All(\"orca-mini-3b-gguf2-q4_0.gguf\")\n```\n\nThis will:\n\n- Instantiate `GPT4All`,  which is the primary public API to your large language model (LLM).\n- Automatically download the given model to `~/.cache/gpt4all/` if not already present.\n\nRead further to see how to chat with this model.\n\n\n### Chatting with GPT4All\nTo start chatting with a local LLM, you will need to start a chat session. Within a chat session, the model will be\nprompted with the appropriate template, and history will be preserved between successive calls to `generate()`.\n\n=== \"GPT4All Example\"\n    ``` py\n    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')\n    with model.chat_session():\n        response1 = model.generate(prompt='hello', temp=0)\n        response2 = model.generate(prompt='write me a short poem', temp=0)\n        response3 = model.generate(prompt='thank you', temp=0)\n        print(model.current_chat_session)\n    ```\n=== \"Output\"\n    ``` json\n    [\n       {\n          'role': 'user',\n          'content': 'hello'\n       },\n       {\n          'role': 'assistant',\n          'content': 'What is your name?'\n       },\n       {\n          'role': 'user',\n          'content': 'write me a short poem'\n       },\n       {\n          'role': 'assistant',\n          'content': \"I would love to help you with that! 
Here's a short poem I came up with:\\nBeneath the autumn leaves,\\nThe wind whispers through the trees.\\nA gentle breeze, so at ease,\\nAs if it were born to play.\\nAnd as the sun sets in the sky,\\nThe world around us grows still.\"\n       },\n       {\n          'role': 'user',\n          'content': 'thank you'\n       },\n       {\n          'role': 'assistant',\n          'content': \"You're welcome! I hope this poem was helpful or inspiring for you. Let me know if there is anything else I can assist you with.\"\n       }\n    ]\n    ```\n\nWhen using GPT4All models in the `chat_session()` context:\n\n- Consecutive chat exchanges are taken into account and not discarded until the session ends; as long as the model has capacity.\n- A system prompt is inserted into the beginning of the model's context.\n- Each prompt passed to `generate()` is wrapped in the appropriate prompt template. If you pass `allow_download=False`\n  to GPT4All or are using a model that is not from the official models list, you must pass a prompt template using the\n  `prompt_template` parameter of `chat_session()`.\n\nNOTE: If you do not use `chat_session()`, calls to `generate()` will not be wrapped in a prompt template. This will\ncause the model to *continue* the prompt instead of *answering* it. 
When in doubt, use a chat session, as many newer\nmodels are designed to be used exclusively with a prompt template.\n\n[models3.json]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-chat/metadata/models3.json\n\n\n### Streaming Generations\nTo interact with GPT4All responses as the model generates, use the `streaming=True` flag during generation.\n\n=== \"GPT4All Streaming Example\"\n    ``` py\n    from gpt4all import GPT4All\n    model = GPT4All(\"orca-mini-3b-gguf2-q4_0.gguf\")\n    tokens = []\n    with model.chat_session():\n        for token in model.generate(\"What is the capital of France?\", streaming=True):\n            tokens.append(token)\n    print(tokens)\n    ```\n=== \"Output\"\n    ```\n    [' The', ' capital', ' of', ' France', ' is', ' Paris', '.']\n    ```\n\n\n### The Generate Method API\n::: gpt4all.gpt4all.GPT4All.generate\n\n\n## Examples & Explanations\n### Influencing Generation\nThe three most influential parameters in generation are _Temperature_ (`temp`), _Top-p_ (`top_p`) and _Top-K_ (`top_k`).\nIn a nutshell, during the process of selecting the next token, not just one or a few are considered, but every single\ntoken in the vocabulary is given a probability. The parameters can change the field of candidate tokens.\n\n- **Temperature** makes the process either more or less random. A _Temperature_ above 1 increasingly \"levels the playing\n  field\", while at a _Temperature_ between 0 and 1 the likelihood of the best token candidates grows even more. A\n  _Temperature_ of 0 results in selecting the best token, making the output deterministic. A _Temperature_ of 1\n  represents a neutral setting with regard to randomness in the process.\n\n- _Top-p_ and _Top-K_ both narrow the field:\n    - **Top-K** limits candidate tokens to a fixed number after sorting by probability. Setting it higher than the\n      vocabulary size deactivates this limit.\n    - **Top-p** selects tokens based on their total probabilities. 
For example, a value of 0.8 means \"include the best\n      tokens, whose accumulated probabilities reach or just surpass 80%\". Setting _Top-p_ to 1, which is 100%,\n      effectively disables it.\n\nThe recommendation is to keep at least one of _Top-K_ and _Top-p_ active. Other parameters can also influence\ngeneration; be sure to review all their descriptions.\n\n\n### Specifying the Model Folder\nThe model folder can be set with the `model_path` parameter when creating a `GPT4All` instance. The example below\nis the same as if it weren't provided; that is, `~/.cache/gpt4all/` is the default folder.\n\n``` py\nfrom pathlib import Path\nfrom gpt4all import GPT4All\nmodel = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf', model_path=Path.home() / '.cache' / 'gpt4all')\n```\n\nIf you want to point it at the chat GUI's default folder, it should be:\n=== \"macOS\"\n    ``` py\n    from pathlib import Path\n    from gpt4all import GPT4All\n\n    model_name = 'orca-mini-3b-gguf2-q4_0.gguf'\n    model_path = Path.home() / 'Library' / 'Application Support' / 'nomic.ai' / 'GPT4All'\n    model = GPT4All(model_name, model_path)\n    ```\n=== \"Windows\"\n    ``` py\n    from pathlib import Path\n    from gpt4all import GPT4All\n    import os\n    model_name = 'orca-mini-3b-gguf2-q4_0.gguf'\n    model_path = Path(os.environ['LOCALAPPDATA']) / 'nomic.ai' / 'GPT4All'\n    model = GPT4All(model_name, model_path)\n    ```\n=== \"Linux\"\n    ``` py\n    from pathlib import Path\n    from gpt4all import GPT4All\n\n    model_name = 'orca-mini-3b-gguf2-q4_0.gguf'\n    model_path = Path.home() / '.local' / 'share' / 'nomic.ai' / 'GPT4All'\n    model = GPT4All(model_name, model_path)\n    ```\n\nAlternatively, you could also change the module's default model directory:\n\n``` py\nfrom pathlib import Path\nfrom gpt4all import GPT4All, gpt4all\ngpt4all.DEFAULT_MODEL_DIRECTORY = Path.home() / 'my' / 'models-directory'\nmodel = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')\n```\n\n\n### 
Managing Templates\nWhen using a `chat_session()`, you may customize the system prompt, and set the prompt template if necessary:\n\n=== \"GPT4All Custom Session Templates Example\"\n    ``` py\n    from gpt4all import GPT4All\n    model = GPT4All('wizardlm-13b-v1.2.Q4_0.gguf')\n    system_template = 'A chat between a curious user and an artificial intelligence assistant.\\n'\n    # many models use triple hash '###' for keywords, Vicunas are simpler:\n    prompt_template = 'USER: {0}\\nASSISTANT: '\n    with model.chat_session(system_template, prompt_template):\n        response1 = model.generate('why is the grass green?')\n        print(response1)\n        print()\n        response2 = model.generate('why is the sky blue?')\n        print(response2)\n    ```\n=== \"Possible Output\"\n    ```\n    The color of grass can be attributed to its chlorophyll content, which allows it\n    to absorb light energy from sunlight through photosynthesis. Chlorophyll absorbs\n    blue and red wavelengths of light while reflecting other colors such as yellow\n    and green. This is why the leaves appear green to our eyes.\n\n    The color of the sky appears blue due to a phenomenon called Rayleigh scattering,\n    which occurs when sunlight enters Earth's atmosphere and interacts with air\n    molecules such as nitrogen and oxygen. Blue light has shorter wavelength than\n    other colors in the visible spectrum, so it is scattered more easily by these\n    particles, making the sky appear blue to our eyes.\n    ```\n\n\n### Without Online Connectivity\nTo prevent GPT4All from accessing online resources, instantiate it with `allow_download=False`. 
When using this flag,\nthere will be no system prompt by default, and you must specify the prompt template yourself.\n\nYou can retrieve a model's default system prompt and prompt template with an online instance of GPT4All:\n\n=== \"Prompt Template Retrieval\"\n    ``` py\n    from gpt4all import GPT4All\n    model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')\n    print(repr(model.config['systemPrompt']))\n    print(repr(model.config['promptTemplate']))\n    ```\n=== \"Output\"\n    ```py\n    '### System:\\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\\n\\n'\n    '### User:\\n{0}\\n### Response:\\n'\n    ```\n\nThen you can pass them explicitly when creating an offline instance:\n\n``` py\nfrom gpt4all import GPT4All\nmodel = GPT4All('orca-mini-3b-gguf2-q4_0.gguf', allow_download=False)\n\nsystem_prompt = '### System:\\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\\n\\n'\nprompt_template = '### User:\\n{0}\\n\\n### Response:\\n'\n\nwith model.chat_session(system_prompt=system_prompt, prompt_template=prompt_template):\n    ...\n```\n\n### Interrupting Generation\nThe simplest way to stop generation is to set a fixed upper limit with the `max_tokens` parameter.\n\nIf you know exactly when a model should stop responding, you can add a custom callback, like so:\n\n=== \"GPT4All Custom Stop Callback\"\n    ``` py\n    from gpt4all import GPT4All\n    model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')\n\n    def stop_on_token_callback(token_id, token_string):\n        # one sentence is enough:\n        if '.' 
in token_string:\n            return False\n        else:\n            return True\n\n    response = model.generate('Blue Whales are the biggest animal to ever inhabit the Earth.',\n                              temp=0, callback=stop_on_token_callback)\n    print(response)\n    ```\n=== \"Output\"\n    ```\n     They can grow up to 100 feet (30 meters) long and weigh as much as 20 tons (18 metric tons).\n    ```\n\n\n## API Documentation\n::: gpt4all.gpt4all.GPT4All\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/gpt4all_python_embedding.md",
    "content": "# Embeddings\nGPT4All supports generating high quality embeddings of arbitrary length text using any embedding model supported by llama.cpp.\n\nAn embedding is a vector representation of a piece of text. Embeddings are useful for tasks such as retrieval for\nquestion answering (including retrieval augmented generation or *RAG*), semantic similarity search, classification, and\ntopic clustering.\n\n## Supported Embedding Models\n\nThe following models have built-in support in Embed4All:\n\n| Name               | Embed4All `model_name`                               | Context Length | Embedding Length | File Size |\n|--------------------|------------------------------------------------------|---------------:|-----------------:|----------:|\n| [SBert]            | all&#x2011;MiniLM&#x2011;L6&#x2011;v2.gguf2.f16.gguf |            512 |              384 |    44 MiB |\n| [Nomic Embed v1]   | nomic&#x2011;embed&#x2011;text&#x2011;v1.f16.gguf    |           2048 |              768 |   262 MiB |\n| [Nomic Embed v1.5] | nomic&#x2011;embed&#x2011;text&#x2011;v1.5.f16.gguf  |           2048 |           64-768 |   262 MiB |\n\nThe context length is the maximum number of word pieces, or *tokens*, that a model can embed at once. 
Embedding texts\nlonger than a model's context length requires some kind of strategy; see [Embedding Longer Texts] for more information.\n\nThe embedding length is the size of the vector returned by `Embed4All.embed`.\n\n[SBert]: https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2\n[Nomic Embed v1]: https://huggingface.co/nomic-ai/nomic-embed-text-v1\n[Nomic Embed v1.5]: https://huggingface.co/nomic-ai/nomic-embed-text-v1.5\n[Embedding Longer Texts]: #embedding-longer-texts\n\n## Quickstart\n```bash\npip install gpt4all\n```\n\n### Generating Embeddings\nBy default, embeddings will be generated on the CPU using all-MiniLM-L6-v2.\n\n=== \"Embed4All Example\"\n    ```py\n    from gpt4all import Embed4All\n    text = 'The quick brown fox jumps over the lazy dog'\n    embedder = Embed4All()\n    output = embedder.embed(text)\n    print(output)\n    ```\n=== \"Output\"\n    ```\n    [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...]\n    ```\n\nYou can also use the GPU to accelerate the embedding model by specifying the `device` parameter. See the [GPT4All\nconstructor] for more information.\n\n=== \"GPU Example\"\n    ```py\n    from gpt4all import Embed4All\n    text = 'The quick brown fox jumps over the lazy dog'\n    embedder = Embed4All(device='gpu')\n    output = embedder.embed(text)\n    print(output)\n    ```\n=== \"Output\"\n    ```\n    [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...]\n    ```\n\n[GPT4All constructor]: gpt4all_python.md#gpt4all.gpt4all.GPT4All.__init__\n\n### Nomic Embed\n\nEmbed4All has built-in support for Nomic's open-source embedding model, [Nomic Embed]. When using this model, you must\nspecify the task type using the `prefix` argument. This may be one of `search_query`, `search_document`,\n`classification`, or `clustering`. For retrieval applications, you should prepend `search_document` for all of your\ndocuments and `search_query` for your queries. 
See the [Nomic Embedding Guide] for more info.\n\n=== \"Nomic Embed Example\"\n    ```py\n    from gpt4all import Embed4All\n    text = 'Who is Laurens van der Maaten?'\n    embedder = Embed4All('nomic-embed-text-v1.f16.gguf')\n    output = embedder.embed(text, prefix='search_query')\n    print(output)\n    ```\n=== \"Output\"\n    ```\n    [-0.013357644900679588, 0.027070969343185425, -0.0232995692640543, ...]\n    ```\n\n[Nomic Embed]: https://blog.nomic.ai/posts/nomic-embed-text-v1\n[Nomic Embedding Guide]: https://docs.nomic.ai/atlas/guides/embeddings#embedding-task-types\n\n### Embedding Longer Texts\n\nEmbed4All accepts a parameter called `long_text_mode`. This controls the behavior of Embed4All for texts longer than the\ncontext length of the embedding model.\n\nIn the default mode of \"mean\", Embed4All will break long inputs into chunks and average their embeddings to compute the\nfinal result.\n\nTo change this behavior, you can set the `long_text_mode` parameter to \"truncate\", which will truncate the input to the\nsequence length of the model before generating a single embedding.\n\n=== \"Truncation Example\"\n    ```py\n    from gpt4all import Embed4All\n    text = 'The ' * 512 + 'The quick brown fox jumps over the lazy dog'\n    embedder = Embed4All()\n    output = embedder.embed(text, long_text_mode=\"mean\")\n    print(output)\n    print()\n    output = embedder.embed(text, long_text_mode=\"truncate\")\n    print(output)\n    ```\n=== \"Output\"\n    ```\n    [0.0039850445464253426, 0.04558328539133072, 0.0035536508075892925, ...]\n\n    [-0.009771130047738552, 0.034792833030223846, -0.013273917138576508, ...]\n    ```\n\n\n### Batching\n\nYou can send multiple texts to Embed4All in a single call. This can give faster results when individual texts are\nsignificantly smaller than `n_ctx` tokens. 
(`n_ctx` defaults to 2048.)\n\n=== \"Batching Example\"\n    ```py\n    from gpt4all import Embed4All\n    texts = ['The quick brown fox jumps over the lazy dog', 'Foo bar baz']\n    embedder = Embed4All()\n    output = embedder.embed(texts)\n    print(output[0])\n    print()\n    print(output[1])\n    ```\n=== \"Output\"\n    ```\n    [0.03551332652568817, 0.06137588247656822, 0.05281158909201622, ...]\n\n    [-0.03879690542817116, 0.00013223080895841122, 0.023148687556385994, ...]\n    ```\n\nThe number of texts that can be embedded in one pass of the model is proportional to the `n_ctx` parameter of Embed4All.\nIncreasing it may increase batched embedding throughput if you have a fast GPU, at the cost of VRAM.\n```py\nembedder = Embed4All(n_ctx=4096, device='gpu')\n```\n\n\n### Resizable Dimensionality\n\nThe embedding dimension of Nomic Embed v1.5 can be resized using the `dimensionality` parameter. This parameter supports\nany value between 64 and 768.\n\nShorter embeddings use less storage, memory, and bandwidth with a small performance cost. See the [blog post] for more\ninfo.\n\n[blog post]: https://blog.nomic.ai/posts/nomic-embed-matryoshka\n\n=== \"Matryoshka Example\"\n    ```py\n    from gpt4all import Embed4All\n    text = 'The quick brown fox jumps over the lazy dog'\n    embedder = Embed4All('nomic-embed-text-v1.5.f16.gguf')\n    output = embedder.embed(text, dimensionality=64)\n    print(len(output))\n    print(output)\n    ```\n=== \"Output\"\n    ```\n    64\n    [-0.03567073494195938, 0.1301717758178711, -0.4333043396472931, ...]\n    ```\n\n\n### API documentation\n::: gpt4all.gpt4all.Embed4All\n"
  },
  {
    "path": "gpt4all-bindings/python/docs/old/index.md",
    "content": "# GPT4All\nWelcome to the GPT4All documentation!\n\nGPT4All is an open-source software ecosystem for anyone to run large language models (LLMs) **privately** on **everyday laptop & desktop computers**. No API calls or GPUs required.\n\nThe GPT4All Desktop Application is a touchpoint to interact with LLMs and integrate them with your local docs & local data for RAG (retrieval-augmented generation). No coding is required, just install the application, download the models of your choice, and you are ready to use your LLM.\n\nYour local data is **yours**. GPT4All handles the retrieval privately and on-device to fetch relevant data to support your queries to your LLM.\n\nNomic AI oversees contributions to GPT4All to ensure quality, security, and maintainability. Additionally, Nomic AI has open-sourced code for training and deploying your own customized LLMs internally.\n\nGPT4All software is optimized to run inference of 3-13 billion parameter large language models on the CPUs of laptops, desktops and servers.\n\n=== \"GPT4All Example\"\n    ``` py\n    from gpt4all import GPT4All\n    model = GPT4All(\"orca-mini-3b-gguf2-q4_0.gguf\")\n    output = model.generate(\"The capital of France is \", max_tokens=3)\n    print(output)\n    ```\n=== \"Output\"\n    ```\n    1. 
Paris\n    ```\nSee [Python Bindings](gpt4all_python.md) to use GPT4All.\n\n### Navigating the Documentation\nIn an effort to ensure cross-operating-system and cross-language compatibility, the [GPT4All software ecosystem](https://github.com/nomic-ai/gpt4all)\nis organized as a monorepo with the following structure:\n\n- **gpt4all-backend**: The GPT4All backend maintains and exposes a universal, performance optimized C API for running inference with multi-billion parameter Transformer Decoders.\nThis C API is then bound to any higher level programming language such as C++, Python, Go, etc.\n- **gpt4all-bindings**: GPT4All bindings contain a variety of high-level programming languages that implement the C API. Each directory is a bound programming language. The [CLI](gpt4all_cli.md) is included here, as well.\n- **gpt4all-chat**: GPT4All Chat is an OS native chat application that runs on macOS, Windows and Linux. It is the easiest way to run local, privacy aware chat assistants on everyday hardware. You can download it on the [GPT4All Website](https://gpt4all.io) and read its source code in the monorepo.\n\nExplore detailed documentation for the backend, bindings and chat client in the sidebar.\n## Models\nThe GPT4All software ecosystem is compatible with the following Transformer architectures:\n\n- `Falcon`\n- `LLaMA` (including `OpenLLaMA`)\n- `MPT` (including `Replit`)\n- `GPT-J`\n\nYou can find an exhaustive list of supported models on the [website](https://gpt4all.io) or in the [models directory](https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json)\n\n\nGPT4All models are artifacts produced through a process known as neural network quantization.\nA multi-billion parameter Transformer Decoder usually takes 30+ GB of VRAM to execute a forward pass.\nMost people do not have such a powerful computer or access to GPU hardware. 
By running trained LLMs through quantization algorithms, \nsome GPT4All models can run on your laptop using only 4-8GB of RAM enabling their wide-spread usage.\nBigger models might still require more RAM, however.\n\nAny model trained with one of these architectures can be quantized and run locally with all GPT4All bindings and in the\nchat client. You can add new variants by contributing to the gpt4all-backend.\n\n## Frequently Asked Questions\nFind answers to frequently asked questions by searching the [Github issues](https://github.com/nomic-ai/gpt4all/issues) or in the [documentation FAQ](gpt4all_faq.md).\n\n## Getting the most out of your local LLM\n\n**Inference Speed**\nof a local LLM depends on two factors: model size and the number of tokens given as input. \nIt is not advised to prompt local LLMs with large chunks of context as their inference speed will heavily degrade.\nYou will likely want to run GPT4All models on GPU if you would like to utilize context windows larger than 750 tokens. Native GPU support for GPT4All models is planned.\n\n**Inference Performance:**\nWhich model is best? That question depends on your use-case. The ability of an LLM to faithfully follow instructions is conditioned\non the quantity and diversity of the pre-training data it trained on and the diversity, quality and factuality of the data the LLM\nwas fine-tuned on. A goal of GPT4All is to bring the most powerful local assistant model to your desktop and Nomic AI is actively\nworking on efforts to improve their performance and quality.\n"
  },
  {
    "path": "gpt4all-bindings/python/gpt4all/__init__.py",
    "content": "from .gpt4all import CancellationError as CancellationError, Embed4All as Embed4All, GPT4All as GPT4All\n"
  },
  {
    "path": "gpt4all-bindings/python/gpt4all/_pyllmodel.py",
    "content": "from __future__ import annotations\n\nimport ctypes\nimport os\nimport platform\nimport subprocess\nimport sys\nimport textwrap\nimport threading\nfrom enum import Enum\nfrom queue import Queue\nfrom typing import TYPE_CHECKING, Any, Callable, Generic, Iterable, Iterator, Literal, NoReturn, TypeVar, overload\n\nif sys.version_info >= (3, 9):\n    import importlib.resources as importlib_resources\nelse:\n    import importlib_resources\n\nif (3, 9) <= sys.version_info < (3, 11):\n    # python 3.9 broke generic TypedDict, python 3.11 fixed it\n    from typing_extensions import TypedDict\nelse:\n    from typing import TypedDict\n\nif TYPE_CHECKING:\n    from typing_extensions import ParamSpec, TypeAlias\n    T = TypeVar(\"T\")\n    P = ParamSpec(\"P\")\n\nEmbeddingsType = TypeVar('EmbeddingsType', bound='list[Any]')\n\ncuda_found: bool = False\n\n\n# TODO(jared): use operator.call after we drop python 3.10 support\ndef _operator_call(obj: Callable[P, T], /, *args: P.args, **kwargs: P.kwargs) -> T:\n    return obj(*args, **kwargs)\n\n\n# Detect Rosetta 2\n@_operator_call\ndef check_rosetta() -> None:\n    if platform.system() == \"Darwin\" and platform.processor() == \"i386\":\n        p = subprocess.run(\"sysctl -n sysctl.proc_translated\".split(), capture_output=True, text=True)\n        if p.returncode == 0 and p.stdout.strip() == \"1\":\n            raise RuntimeError(textwrap.dedent(\"\"\"\\\n                Running GPT4All under Rosetta is not supported due to CPU feature requirements.\n                Please install GPT4All in an environment that uses a native ARM64 Python interpreter.\n            \"\"\").strip())\n\n\n# Check for C++ runtime libraries\nif platform.system() == \"Windows\":\n    try:\n        ctypes.CDLL(\"msvcp140.dll\")\n        ctypes.CDLL(\"vcruntime140.dll\")\n        ctypes.CDLL(\"vcruntime140_1.dll\")\n    except OSError as e:\n        print(textwrap.dedent(f\"\"\"\\\n            {e!r}\n            The Microsoft Visual C++ 
runtime libraries were not found. Please install them from\n            https://aka.ms/vs/17/release/vc_redist.x64.exe\n        \"\"\"), file=sys.stderr)\n\n\n@_operator_call\ndef find_cuda() -> None:\n    global cuda_found\n\n    def _load_cuda(rtver: str, blasver: str) -> None:\n        if platform.system() == \"Linux\":\n            cudalib   = f\"lib/libcudart.so.{rtver}\"\n            cublaslib = f\"lib/libcublas.so.{blasver}\"\n        else:  # Windows\n            cudalib   = fr\"bin\\cudart64_{rtver.replace('.', '')}.dll\"\n            cublaslib = fr\"bin\\cublas64_{blasver}.dll\"\n\n        # preload the CUDA libs so the backend can find them\n        ctypes.CDLL(os.path.join(cuda_runtime.__path__[0], cudalib), mode=ctypes.RTLD_GLOBAL)\n        ctypes.CDLL(os.path.join(cublas.__path__[0], cublaslib), mode=ctypes.RTLD_GLOBAL)\n\n    # Find CUDA libraries from the official packages\n    if platform.system() in (\"Linux\", \"Windows\"):\n        try:\n            from nvidia import cuda_runtime, cublas\n        except ImportError:\n            pass  # CUDA is optional\n        else:\n            for rtver, blasver in [(\"12\", \"12\"), (\"11.0\", \"11\")]:\n                try:\n                    _load_cuda(rtver, blasver)\n                    cuda_found = True\n                except OSError:  # dlopen() does not give specific error codes\n                    pass  # try the next one\n\n\n# TODO: provide a config file to make this more robust\nMODEL_LIB_PATH = importlib_resources.files(\"gpt4all\") / \"llmodel_DO_NOT_MODIFY\" / \"build\"\n\n\ndef load_llmodel_library():\n    ext = {\"Darwin\": \"dylib\", \"Linux\": \"so\", \"Windows\": \"dll\"}[platform.system()]\n\n    try:\n        # macOS, Linux, MinGW\n        lib = ctypes.CDLL(str(MODEL_LIB_PATH / f\"libllmodel.{ext}\"))\n    except FileNotFoundError:\n        if ext != 'dll':\n            raise\n        # MSVC\n        lib = ctypes.CDLL(str(MODEL_LIB_PATH / \"llmodel.dll\"))\n\n    return 
lib\n\n\nllmodel = load_llmodel_library()\n\n\nclass LLModelPromptContext(ctypes.Structure):\n    _fields_ = [\n        (\"n_predict\",      ctypes.c_int32),\n        (\"top_k\",          ctypes.c_int32),\n        (\"top_p\",          ctypes.c_float),\n        (\"min_p\",          ctypes.c_float),\n        (\"temp\",           ctypes.c_float),\n        (\"n_batch\",        ctypes.c_int32),\n        (\"repeat_penalty\", ctypes.c_float),\n        (\"repeat_last_n\",  ctypes.c_int32),\n        (\"context_erase\",  ctypes.c_float),\n    ]\n\n\nclass LLModelGPUDevice(ctypes.Structure):\n    _fields_ = [\n        (\"backend\", ctypes.c_char_p),\n        (\"index\", ctypes.c_int32),\n        (\"type\", ctypes.c_int32),\n        (\"heapSize\", ctypes.c_size_t),\n        (\"name\", ctypes.c_char_p),\n        (\"vendor\", ctypes.c_char_p),\n    ]\n\n\n# Define C function signatures using ctypes\nllmodel.llmodel_model_create.argtypes = [ctypes.c_char_p]\nllmodel.llmodel_model_create.restype = ctypes.c_void_p\n\nllmodel.llmodel_model_create2.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)]\nllmodel.llmodel_model_create2.restype = ctypes.c_void_p\n\nllmodel.llmodel_model_destroy.argtypes = [ctypes.c_void_p]\nllmodel.llmodel_model_destroy.restype = None\n\nllmodel.llmodel_loadModel.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int]\nllmodel.llmodel_loadModel.restype = ctypes.c_bool\nllmodel.llmodel_required_mem.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int]\nllmodel.llmodel_required_mem.restype = ctypes.c_size_t\nllmodel.llmodel_isModelLoaded.argtypes = [ctypes.c_void_p]\nllmodel.llmodel_isModelLoaded.restype = ctypes.c_bool\n\nPromptCallback       = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int32), ctypes.c_size_t, ctypes.c_bool)\nResponseCallback     = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_int32, ctypes.c_char_p)\nEmbCancelCallback    = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_uint), 
ctypes.c_uint, ctypes.c_char_p)\nSpecialTokenCallback = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_char_p)\n\nllmodel.llmodel_prompt.argtypes = [\n    ctypes.c_void_p,\n    ctypes.c_char_p,\n    PromptCallback,\n    ResponseCallback,\n    ctypes.POINTER(LLModelPromptContext),\n    ctypes.POINTER(ctypes.c_char_p),\n]\n\nllmodel.llmodel_prompt.restype = ctypes.c_bool\n\nllmodel.llmodel_embed.argtypes = [\n    ctypes.c_void_p,\n    ctypes.POINTER(ctypes.c_char_p),\n    ctypes.POINTER(ctypes.c_size_t),\n    ctypes.c_char_p,\n    ctypes.c_int,\n    ctypes.POINTER(ctypes.c_size_t),\n    ctypes.c_bool,\n    ctypes.c_bool,\n    EmbCancelCallback,\n    ctypes.POINTER(ctypes.c_char_p),\n]\n\nllmodel.llmodel_embed.restype = ctypes.POINTER(ctypes.c_float)\n\nllmodel.llmodel_free_embedding.argtypes = [ctypes.POINTER(ctypes.c_float)]\nllmodel.llmodel_free_embedding.restype = None\n\nllmodel.llmodel_setThreadCount.argtypes = [ctypes.c_void_p, ctypes.c_int32]\nllmodel.llmodel_setThreadCount.restype = None\n\nllmodel.llmodel_set_implementation_search_path.argtypes = [ctypes.c_char_p]\nllmodel.llmodel_set_implementation_search_path.restype = None\n\nllmodel.llmodel_threadCount.argtypes = [ctypes.c_void_p]\nllmodel.llmodel_threadCount.restype = ctypes.c_int32\n\nllmodel.llmodel_set_implementation_search_path(str(MODEL_LIB_PATH).encode())\n\nllmodel.llmodel_available_gpu_devices.argtypes = [ctypes.c_size_t, ctypes.POINTER(ctypes.c_int32)]\nllmodel.llmodel_available_gpu_devices.restype = ctypes.POINTER(LLModelGPUDevice)\n\nllmodel.llmodel_gpu_init_gpu_device_by_string.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_char_p]\nllmodel.llmodel_gpu_init_gpu_device_by_string.restype = ctypes.c_bool\n\nllmodel.llmodel_gpu_init_gpu_device_by_struct.argtypes = [ctypes.c_void_p, ctypes.POINTER(LLModelGPUDevice)]\nllmodel.llmodel_gpu_init_gpu_device_by_struct.restype = ctypes.c_bool\n\nllmodel.llmodel_gpu_init_gpu_device_by_int.argtypes = [ctypes.c_void_p, 
ctypes.c_int32]\nllmodel.llmodel_gpu_init_gpu_device_by_int.restype = ctypes.c_bool\n\nllmodel.llmodel_model_backend_name.argtypes = [ctypes.c_void_p]\nllmodel.llmodel_model_backend_name.restype = ctypes.c_char_p\n\nllmodel.llmodel_model_gpu_device_name.argtypes = [ctypes.c_void_p]\nllmodel.llmodel_model_gpu_device_name.restype = ctypes.c_char_p\n\nllmodel.llmodel_count_prompt_tokens.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_char_p)]\nllmodel.llmodel_count_prompt_tokens.restype = ctypes.c_int32\n\nllmodel.llmodel_model_foreach_special_token.argtypes = [ctypes.c_void_p, SpecialTokenCallback]\nllmodel.llmodel_model_foreach_special_token.restype = None\n\nResponseCallbackType = Callable[[int, str], bool]\nRawResponseCallbackType = Callable[[int, bytes], bool]\nEmbCancelCallbackType: TypeAlias = 'Callable[[list[int], str], bool]'\n\n\ndef empty_response_callback(token_id: int, response: str) -> bool:\n    return True\n\n\n# Symbol to terminate from generator\nclass Sentinel(Enum):\n    TERMINATING_SYMBOL = 0\n\n\nclass EmbedResult(Generic[EmbeddingsType], TypedDict):\n    embeddings: EmbeddingsType\n    n_prompt_tokens: int\n\n\nclass CancellationError(Exception):\n    \"\"\"raised when embedding is canceled\"\"\"\n\n\nclass LLModel:\n    \"\"\"\n    Base class and universal wrapper for GPT4All language models\n    built around llmodel C-API.\n\n    Parameters\n    ----------\n    model_path : str\n        Path to the model.\n    n_ctx : int\n        Maximum size of context window\n    ngl : int\n        Number of GPU layers to use (Vulkan)\n    backend : str\n        Backend to use. 
One of 'auto', 'cpu', 'metal', 'kompute', or 'cuda'.\n    \"\"\"\n\n    def __init__(self, model_path: str, n_ctx: int, ngl: int, backend: str):\n        self.model_path = model_path.encode()\n        self.n_ctx = n_ctx\n        self.ngl = ngl\n        self.buffer = bytearray()\n        self.buff_expecting_cont_bytes: int = 0\n\n        # Construct a model implementation\n        err = ctypes.c_char_p()\n        model = llmodel.llmodel_model_create2(self.model_path, backend.encode(), ctypes.byref(err))\n        if model is None:\n            s = err.value\n            errmsg = 'null' if s is None else s.decode()\n\n            if (\n                backend == 'cuda'\n                and not cuda_found\n                and errmsg.startswith('Could not find any implementations for backend')\n            ):\n                print('WARNING: CUDA runtime libraries not found. Try `pip install \"gpt4all[cuda]\"`\\n', file=sys.stderr)\n\n            raise RuntimeError(f\"Unable to instantiate model: {errmsg}\")\n        self.model: ctypes.c_void_p | None = model\n        self.special_tokens_map: dict[str, str] = {}\n        llmodel.llmodel_model_foreach_special_token(\n            self.model, lambda n, t: self.special_tokens_map.__setitem__(n.decode(), t.decode()),\n        )\n\n    def __del__(self, llmodel=llmodel):\n        if hasattr(self, 'model'):\n            self.close()\n\n    def close(self) -> None:\n        if self.model is not None:\n            llmodel.llmodel_model_destroy(self.model)\n            self.model = None\n\n    def _raise_closed(self) -> NoReturn:\n        raise ValueError(\"Attempted operation on a closed LLModel\")\n\n    @property\n    def backend(self) -> Literal[\"cpu\", \"kompute\", \"cuda\", \"metal\"]:\n        if self.model is None:\n            self._raise_closed()\n        return llmodel.llmodel_model_backend_name(self.model).decode()\n\n    @property\n    def device(self) -> str | None:\n        if self.model is None:\n            
self._raise_closed()\n        dev = llmodel.llmodel_model_gpu_device_name(self.model)\n        return None if dev is None else dev.decode()\n\n    def count_prompt_tokens(self, prompt: str) -> int:\n        if self.model is None:\n            self._raise_closed()\n        err = ctypes.c_char_p()\n        n_tok = llmodel.llmodel_count_prompt_tokens(self.model, prompt, ctypes.byref(err))\n        if n_tok < 0:\n            s = err.value\n            errmsg = 'null' if s is None else s.decode()\n            raise RuntimeError(f'Unable to count prompt tokens: {errmsg}')\n        return n_tok\n\n    llmodel.llmodel_count_prompt_tokens.argtypes = [ctypes.c_void_p, ctypes.c_char_p]\n\n    @staticmethod\n    def list_gpus(mem_required: int = 0) -> list[str]:\n        \"\"\"\n        List the names of the available GPU devices with at least `mem_required` bytes of VRAM.\n\n        Args:\n            mem_required: The minimum amount of VRAM, in bytes\n\n        Returns:\n            A list of strings representing the names of the available GPU devices.\n        \"\"\"\n        num_devices = ctypes.c_int32(0)\n        devices_ptr = llmodel.llmodel_available_gpu_devices(mem_required, ctypes.byref(num_devices))\n        if not devices_ptr:\n            raise ValueError(\"Unable to retrieve available GPU devices\")\n        return [f'{d.backend.decode()}:{d.name.decode()}' for d in devices_ptr[:num_devices.value]]\n\n    def init_gpu(self, device: str):\n        if self.model is None:\n            self._raise_closed()\n\n        mem_required = llmodel.llmodel_required_mem(self.model, self.model_path, self.n_ctx, self.ngl)\n\n        if llmodel.llmodel_gpu_init_gpu_device_by_string(self.model, mem_required, device.encode()):\n            return\n\n        all_gpus = self.list_gpus()\n        available_gpus = self.list_gpus(mem_required)\n        unavailable_gpus = [g for g in all_gpus if g not in available_gpus]\n\n        error_msg = (f\"Unable to initialize model on GPU: 
{device!r}\" +\n                     f\"\\nAvailable GPUs: {available_gpus}\")\n        if unavailable_gpus:\n            error_msg += f\"\\nUnavailable GPUs due to insufficient memory: {unavailable_gpus}\"\n        raise ValueError(error_msg)\n\n    def load_model(self) -> bool:\n        \"\"\"\n        Load model from a file.\n\n        Returns\n        -------\n        True if model loaded successfully, False otherwise\n        \"\"\"\n        if self.model is None:\n            self._raise_closed()\n\n        return llmodel.llmodel_loadModel(self.model, self.model_path, self.n_ctx, self.ngl)\n\n    def set_thread_count(self, n_threads):\n        if self.model is None:\n            self._raise_closed()\n        if not llmodel.llmodel_isModelLoaded(self.model):\n            raise Exception(\"Model not loaded\")\n        llmodel.llmodel_setThreadCount(self.model, n_threads)\n\n    def thread_count(self):\n        if self.model is None:\n            self._raise_closed()\n        if not llmodel.llmodel_isModelLoaded(self.model):\n            raise Exception(\"Model not loaded\")\n        return llmodel.llmodel_threadCount(self.model)\n\n    @overload\n    def generate_embeddings(\n        self, text: str, prefix: str | None, dimensionality: int, do_mean: bool, atlas: bool,\n        cancel_cb: EmbCancelCallbackType | None,\n    ) -> EmbedResult[list[float]]: ...\n    @overload\n    def generate_embeddings(\n        self, text: list[str], prefix: str | None, dimensionality: int, do_mean: bool, atlas: bool,\n        cancel_cb: EmbCancelCallbackType | None,\n    ) -> EmbedResult[list[list[float]]]: ...\n    @overload\n    def generate_embeddings(\n        self, text: str | list[str], prefix: str | None, dimensionality: int, do_mean: bool, atlas: bool,\n        cancel_cb: EmbCancelCallbackType | None,\n    ) -> EmbedResult[list[Any]]: ...\n\n    def generate_embeddings(\n        self, text: str | list[str], prefix: str | None, dimensionality: int, do_mean: bool, atlas: 
bool,\n        cancel_cb: EmbCancelCallbackType | None,\n    ) -> EmbedResult[list[Any]]:\n        if not text:\n            raise ValueError(\"text must not be None or empty\")\n\n        if self.model is None:\n            self._raise_closed()\n\n        if single_text := isinstance(text, str):\n            text = [text]\n\n        # prepare input\n        embedding_size = ctypes.c_size_t()\n        token_count = ctypes.c_size_t()\n        error = ctypes.c_char_p()\n        c_prefix = ctypes.c_char_p() if prefix is None else prefix.encode()\n        c_texts = (ctypes.c_char_p * (len(text) + 1))()\n        for i, t in enumerate(text):\n            c_texts[i] = t.encode()\n\n        def wrap_cancel_cb(batch_sizes: Any, n_batch: int, backend: bytes) -> bool:\n            assert cancel_cb is not None\n            return cancel_cb(batch_sizes[:n_batch], backend.decode())\n\n        cancel_cb_wrapper = EmbCancelCallback() if cancel_cb is None else EmbCancelCallback(wrap_cancel_cb)\n\n        # generate the embeddings\n        embedding_ptr = llmodel.llmodel_embed(\n            self.model, c_texts, ctypes.byref(embedding_size), c_prefix, dimensionality, ctypes.byref(token_count),\n            do_mean, atlas, cancel_cb_wrapper, ctypes.byref(error),\n        )\n\n        if not embedding_ptr:\n            msg = \"(unknown error)\" if error.value is None else error.value.decode()\n            if msg == \"operation was canceled\":\n                raise CancellationError(msg)\n            raise RuntimeError(f'Failed to generate embeddings: {msg}')\n\n        # extract output\n        n_embd = embedding_size.value // len(text)\n        embedding_array = [\n            embedding_ptr[i:i + n_embd]\n            for i in range(0, embedding_size.value, n_embd)\n        ]\n        llmodel.llmodel_free_embedding(embedding_ptr)\n\n        embeddings = embedding_array[0] if single_text else embedding_array\n        return {'embeddings': embeddings, 'n_prompt_tokens': 
token_count.value}\n\n    def prompt_model(\n        self,\n        prompt          : str,\n        callback        : ResponseCallbackType,\n        n_predict       : int                  = 4096,\n        top_k           : int                  = 40,\n        top_p           : float                = 0.9,\n        min_p           : float                = 0.0,\n        temp            : float                = 0.1,\n        n_batch         : int                  = 8,\n        repeat_penalty  : float                = 1.2,\n        repeat_last_n   : int                  = 10,\n        context_erase   : float                = 0.75,\n        reset_context   : bool                 = False,\n    ):\n        \"\"\"\n        Generate response from model from a prompt.\n\n        Parameters\n        ----------\n        prompt: str\n            Question, task, or conversation for model to respond to\n        callback(token_id:int, response:str): bool\n            The model sends response tokens to callback\n\n        Returns\n        -------\n        None\n        \"\"\"\n\n        if self.model is None:\n            self._raise_closed()\n\n        self.buffer.clear()\n        self.buff_expecting_cont_bytes = 0\n\n        context = LLModelPromptContext(\n            n_predict      = n_predict,\n            top_k          = top_k,\n            top_p          = top_p,\n            min_p          = min_p,\n            temp           = temp,\n            n_batch        = n_batch,\n            repeat_penalty = repeat_penalty,\n            repeat_last_n  = repeat_last_n,\n            context_erase  = context_erase,\n        )\n\n        error_msg: bytes | None = None\n        def error_callback(msg: bytes) -> None:\n            nonlocal error_msg\n            error_msg = msg\n\n        err = ctypes.c_char_p()\n        if not llmodel.llmodel_prompt(\n            self.model,\n            ctypes.c_char_p(prompt.encode()),\n            PromptCallback(self._prompt_callback),\n            
ResponseCallback(self._callback_decoder(callback)),\n            context,\n            ctypes.byref(err),\n        ):\n            s = err.value\n            raise RuntimeError(f\"prompt error: {'null' if s is None else s.decode()}\")\n\n    def prompt_model_streaming(\n        self, prompt: str, callback: ResponseCallbackType = empty_response_callback, **kwargs: Any,\n    ) -> Iterator[str]:\n        if self.model is None:\n            self._raise_closed()\n\n        output_queue: Queue[str | Sentinel] = Queue()\n\n        # Put response tokens into an output queue\n        def _generator_callback_wrapper(callback: ResponseCallbackType) -> ResponseCallbackType:\n            def _generator_callback(token_id: int, response: str):\n                nonlocal callback\n\n                if callback(token_id, response):\n                    output_queue.put(response)\n                    return True\n\n                return False\n\n            return _generator_callback\n\n        def run_llmodel_prompt(prompt: str, callback: ResponseCallbackType, **kwargs):\n            self.prompt_model(prompt, callback, **kwargs)\n            output_queue.put(Sentinel.TERMINATING_SYMBOL)\n\n        # Kick off llmodel_prompt in separate thread so we can return generator\n        # immediately\n        thread = threading.Thread(\n            target=run_llmodel_prompt,\n            args=(prompt, _generator_callback_wrapper(callback)),\n            kwargs=kwargs,\n        )\n        thread.start()\n\n        # Generator\n        while True:\n            response = output_queue.get()\n            if isinstance(response, Sentinel):\n                break\n            yield response\n\n    def _callback_decoder(self, callback: ResponseCallbackType) -> RawResponseCallbackType:\n        def _raw_callback(token_id: int, response: bytes) -> bool:\n            nonlocal self, callback\n\n            decoded = []\n\n            for byte in response:\n\n                bits = 
\"{:08b}\".format(byte)\n                (high_ones, _, _) = bits.partition('0')\n\n                if len(high_ones) == 1:\n                    # continuation byte\n                    self.buffer.append(byte)\n                    self.buff_expecting_cont_bytes -= 1\n\n                else:\n                    # beginning of a byte sequence\n                    if len(self.buffer) > 0:\n                        decoded.append(self.buffer.decode(errors='replace'))\n\n                        self.buffer.clear()\n\n                    self.buffer.append(byte)\n                    self.buff_expecting_cont_bytes = max(0, len(high_ones) - 1)\n\n                if self.buff_expecting_cont_bytes <= 0:\n                    # received the whole sequence or an out of place continuation byte\n                    decoded.append(self.buffer.decode(errors='replace'))\n\n                    self.buffer.clear()\n                    self.buff_expecting_cont_bytes = 0\n\n            if len(decoded) == 0 and self.buff_expecting_cont_bytes > 0:\n                # wait for more continuation bytes\n                return True\n\n            return callback(token_id, ''.join(decoded))\n\n        return _raw_callback\n\n    # Empty prompt callback\n    @staticmethod\n    def _prompt_callback(token_ids: ctypes._Pointer[ctypes.c_int32], n_token_ids: int, cached: bool) -> bool:\n        return True\n"
  },
  {
    "path": "gpt4all-bindings/python/gpt4all/gpt4all.py",
    "content": "\"\"\"\nPython only API for running all GPT4All models.\n\"\"\"\nfrom __future__ import annotations\n\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport sys\nimport warnings\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom pathlib import Path\nfrom types import TracebackType\nfrom typing import TYPE_CHECKING, Any, Iterable, Iterator, Literal, NamedTuple, NoReturn, Protocol, TypedDict, overload\n\nimport jinja2\nimport requests\nfrom jinja2.sandbox import ImmutableSandboxedEnvironment\nfrom requests.exceptions import ChunkedEncodingError\nfrom tqdm import tqdm\nfrom urllib3.exceptions import IncompleteRead, ProtocolError\n\nfrom ._pyllmodel import (CancellationError as CancellationError, EmbCancelCallbackType, EmbedResult as EmbedResult,\n                         LLModel, ResponseCallbackType, _operator_call, empty_response_callback)\n\nif TYPE_CHECKING:\n    from typing_extensions import Self, TypeAlias\n\nif sys.platform == \"darwin\":\n    import fcntl\n\n# TODO: move to config\nDEFAULT_MODEL_DIRECTORY = Path.home() / \".cache\" / \"gpt4all\"\n\nConfigType: TypeAlias = \"dict[str, Any]\"\n\n# Environment setup adapted from HF transformers\n@_operator_call\ndef _jinja_env() -> ImmutableSandboxedEnvironment:\n    def raise_exception(message: str) -> NoReturn:\n        raise jinja2.exceptions.TemplateError(message)\n\n    def tojson(obj: Any, indent: int | None = None) -> str:\n        return json.dumps(obj, ensure_ascii=False, indent=indent)\n\n    def strftime_now(fmt: str) -> str:\n        return datetime.now().strftime(fmt)\n\n    env = ImmutableSandboxedEnvironment(trim_blocks=True, lstrip_blocks=True)\n    env.filters[\"tojson\"         ] = tojson\n    env.globals[\"raise_exception\"] = raise_exception\n    env.globals[\"strftime_now\"   ] = strftime_now\n    return env\n\n\nclass MessageType(TypedDict):\n    role: str\n    content: str\n\n\nclass ChatSession(NamedTuple):\n    template: 
jinja2.Template\n    history: list[MessageType]\n\n\nclass Embed4All:\n    \"\"\"\n    Python class that handles embeddings for GPT4All.\n    \"\"\"\n\n    MIN_DIMENSIONALITY = 64\n\n    def __init__(self, model_name: str | None = None, *, n_threads: int | None = None, device: str | None = None, **kwargs: Any):\n        \"\"\"\n        Constructor\n\n        Args:\n            n_threads: number of CPU threads used by GPT4All. Default is None, then the number of threads are determined automatically.\n            device: The processing unit on which the embedding model will run. See the `GPT4All` constructor for more info.\n            kwargs: Remaining keyword arguments are passed to the `GPT4All` constructor.\n        \"\"\"\n        if model_name is None:\n            model_name = \"all-MiniLM-L6-v2.gguf2.f16.gguf\"\n        self.gpt4all = GPT4All(model_name, n_threads=n_threads, device=device, **kwargs)\n\n    def __enter__(self) -> Self:\n        return self\n\n    def __exit__(\n        self, typ: type[BaseException] | None, value: BaseException | None, tb: TracebackType | None,\n    ) -> None:\n        self.close()\n\n    def close(self) -> None:\n        \"\"\"Delete the model instance and free associated system resources.\"\"\"\n        self.gpt4all.close()\n\n    # return_dict=False\n    @overload\n    def embed(\n        self, text: str, *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,\n        return_dict: Literal[False] = ..., atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,\n    ) -> list[float]: ...\n    @overload\n    def embed(\n        self, text: list[str], *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,\n        return_dict: Literal[False] = ..., atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,\n    ) -> list[list[float]]: ...\n    @overload\n    def embed(\n        self, text: str | list[str], *, prefix: str | None = ..., 
dimensionality: int | None = ...,\n        long_text_mode: str = ..., return_dict: Literal[False] = ..., atlas: bool = ...,\n        cancel_cb: EmbCancelCallbackType | None = ...,\n    ) -> list[Any]: ...\n\n    # return_dict=True\n    @overload\n    def embed(\n        self, text: str, *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,\n        return_dict: Literal[True], atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,\n    ) -> EmbedResult[list[float]]: ...\n    @overload\n    def embed(\n        self, text: list[str], *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,\n        return_dict: Literal[True], atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,\n    ) -> EmbedResult[list[list[float]]]: ...\n    @overload\n    def embed(\n        self, text: str | list[str], *, prefix: str | None = ..., dimensionality: int | None = ...,\n        long_text_mode: str = ..., return_dict: Literal[True], atlas: bool = ...,\n        cancel_cb: EmbCancelCallbackType | None = ...,\n    ) -> EmbedResult[list[Any]]: ...\n\n    # return type unknown\n    @overload\n    def embed(\n        self, text: str | list[str], *, prefix: str | None = ..., dimensionality: int | None = ...,\n        long_text_mode: str = ..., return_dict: bool = ..., atlas: bool = ...,\n        cancel_cb: EmbCancelCallbackType | None = ...,\n    ) -> Any: ...\n\n    def embed(\n        self, text: str | list[str], *, prefix: str | None = None, dimensionality: int | None = None,\n        long_text_mode: str = \"mean\", return_dict: bool = False, atlas: bool = False,\n        cancel_cb: EmbCancelCallbackType | None = None,\n    ) -> Any:\n        \"\"\"\n        Generate one or more embeddings.\n\n        Args:\n            text: A text or list of texts to generate embeddings for.\n            prefix: The model-specific prefix representing the embedding task, without the trailing colon. 
For Nomic\n                Embed, this can be `search_query`, `search_document`, `classification`, or `clustering`. Defaults to\n                `search_document` or equivalent if known; otherwise, you must explicitly pass a prefix or an empty\n                string if none applies.\n            dimensionality: The embedding dimension, for use with Matryoshka-capable models. Defaults to full-size.\n            long_text_mode: How to handle texts longer than the model can accept. One of `mean` or `truncate`.\n            return_dict: Return the result as a dict that includes the number of prompt tokens processed.\n            atlas: Try to be fully compatible with the Atlas API. Currently, this means texts longer than 8192 tokens\n                with long_text_mode=\"mean\" will raise an error. Disabled by default.\n            cancel_cb: Called with arguments (batch_sizes, backend_name). Return true to cancel embedding.\n\n        Returns:\n            With return_dict=False, an embedding or list of embeddings of your text(s).\n            With return_dict=True, a dict with keys 'embeddings' and 'n_prompt_tokens'.\n\n        Raises:\n            CancellationError: If cancel_cb returned True and embedding was canceled.\n        \"\"\"\n        if dimensionality is None:\n            dimensionality = -1\n        else:\n            if dimensionality <= 0:\n                raise ValueError(f\"Dimensionality must be None or a positive integer, got {dimensionality}\")\n            if dimensionality < self.MIN_DIMENSIONALITY:\n                warnings.warn(\n                    f\"Dimensionality {dimensionality} is less than the suggested minimum of {self.MIN_DIMENSIONALITY}.\"\n                    \" Performance may be degraded.\"\n                )\n        try:\n            do_mean = {\"mean\": True, \"truncate\": False}[long_text_mode]\n        except KeyError:\n            raise ValueError(f\"Long text mode must be one of 'mean' or 'truncate', got 
{long_text_mode!r}\")\n        result = self.gpt4all.model.generate_embeddings(text, prefix, dimensionality, do_mean, atlas, cancel_cb)\n        return result if return_dict else result[\"embeddings\"]\n\n\nclass GPT4All:\n    \"\"\"\n    Python class that handles instantiation, downloading, generation and chat with GPT4All models.\n    \"\"\"\n\n    def __init__(\n        self,\n        model_name: str,\n        *,\n        model_path: str | os.PathLike[str] | None = None,\n        model_type: str | None = None,\n        allow_download: bool = True,\n        n_threads: int | None = None,\n        device: str | None = None,\n        n_ctx: int = 2048,\n        ngl: int = 100,\n        verbose: bool = False,\n    ):\n        \"\"\"\n        Constructor\n\n        Args:\n            model_name: Name of GPT4All or custom model. Including \".gguf\" file extension is optional but encouraged.\n            model_path: Path to directory containing model file or, if file does not exist, where to download model.\n                Default is None, in which case models will be stored in `~/.cache/gpt4all/`.\n            model_type: Model architecture. This argument currently does not have any functionality and is just used as\n                descriptive identifier for user. Default is None.\n            allow_download: Allow API to download models from gpt4all.io. Default is True.\n            n_threads: number of CPU threads used by GPT4All. Default is None, then the number of threads are determined automatically.\n            device: The processing unit on which the GPT4All model will run. 
It can be set to:\n                - \"cpu\": Model will run on the central processing unit.\n                - \"gpu\": Use Metal on ARM64 macOS, otherwise the same as \"kompute\".\n                - \"kompute\": Use the best GPU provided by the Kompute backend.\n                - \"cuda\": Use the best GPU provided by the CUDA backend.\n                - \"amd\", \"nvidia\": Use the best GPU provided by the Kompute backend from this vendor.\n                - A specific device name from the list returned by `GPT4All.list_gpus()`.\n                Default is Metal on ARM64 macOS, \"cpu\" otherwise.\n\n                Note: If a selected GPU device does not have sufficient RAM to accommodate the model, an error will be thrown, and the GPT4All instance will be rendered invalid. It's advised to ensure the device has enough memory before initiating the model.\n            n_ctx: Maximum size of context window\n            ngl: Number of GPU layers to use (Vulkan)\n            verbose: If True, print debug messages.\n        \"\"\"\n\n        self.model_type = model_type\n        self._chat_session: ChatSession | None = None\n\n        device_init = None\n        if sys.platform == \"darwin\":\n            if device is None:\n                backend = \"auto\"  # \"auto\" is effectively \"metal\" due to currently non-functional fallback\n            elif device == \"cpu\":\n                backend = \"cpu\"\n            else:\n                if platform.machine() != \"arm64\" or device != \"gpu\":\n                    raise ValueError(f\"Unknown device for this platform: {device}\")\n                backend = \"metal\"\n        else:\n            backend = \"kompute\"\n            if device is None or device == \"cpu\":\n                pass  # use kompute with no device\n            elif device in (\"cuda\", \"kompute\"):\n                backend = device\n                device_init = \"gpu\"\n            elif device.startswith(\"cuda:\"):\n                backend = 
\"cuda\"\n                device_init = _remove_prefix(device, \"cuda:\")\n            else:\n                device_init = _remove_prefix(device, \"kompute:\")\n\n        # Retrieve model and download if allowed\n        self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose)\n        self.model = LLModel(self.config[\"path\"], n_ctx, ngl, backend)\n        if device_init is not None:\n            self.model.init_gpu(device_init)\n        self.model.load_model()\n        # Set n_threads\n        if n_threads is not None:\n            self.model.set_thread_count(n_threads)\n\n    def __enter__(self) -> Self:\n        return self\n\n    def __exit__(\n        self, typ: type[BaseException] | None, value: BaseException | None, tb: TracebackType | None,\n    ) -> None:\n        self.close()\n\n    def close(self) -> None:\n        \"\"\"Delete the model instance and free associated system resources.\"\"\"\n        self.model.close()\n\n    @property\n    def backend(self) -> Literal[\"cpu\", \"kompute\", \"cuda\", \"metal\"]:\n        \"\"\"The name of the llama.cpp backend currently in use. 
One of \"cpu\", \"kompute\", \"cuda\", or \"metal\".\"\"\"\n        return self.model.backend\n\n    @property\n    def device(self) -> str | None:\n        \"\"\"The name of the GPU device currently in use, or None for backends other than Kompute or CUDA.\"\"\"\n        return self.model.device\n\n    @property\n    def current_chat_session(self) -> list[MessageType] | None:\n        return None if self._chat_session is None else self._chat_session.history\n\n    @current_chat_session.setter\n    def current_chat_session(self, history: list[MessageType]) -> None:\n        if self._chat_session is None:\n            raise ValueError(\"current_chat_session may only be set when there is an active chat session\")\n        self._chat_session.history[:] = history\n\n    @staticmethod\n    def list_models() -> list[ConfigType]:\n        \"\"\"\n        Fetch model list from https://gpt4all.io/models/models3.json.\n\n        Returns:\n            Model list in JSON format.\n        \"\"\"\n        resp = requests.get(\"https://gpt4all.io/models/models3.json\")\n        if resp.status_code != 200:\n            raise ValueError(f\"Request failed: HTTP {resp.status_code} {resp.reason}\")\n        return resp.json()\n\n    @classmethod\n    def retrieve_model(\n        cls,\n        model_name: str,\n        model_path: str | os.PathLike[str] | None = None,\n        allow_download: bool = True,\n        verbose: bool = False,\n    ) -> ConfigType:\n        \"\"\"\n        Find model file, and if it doesn't exist, download the model.\n\n        Args:\n            model_name: Name of model.\n            model_path: Path to find model. Default is None in which case path is set to\n                ~/.cache/gpt4all/.\n            allow_download: Allow API to download model from gpt4all.io. 
Default is True.\n            verbose: If True (default), print debug messages.\n\n        Returns:\n            Model config.\n        \"\"\"\n\n        model_filename = append_extension_if_missing(model_name)\n\n        # get the config for the model\n        config: ConfigType = {}\n        if allow_download:\n            models = cls.list_models()\n            if (model := next((m for m in models if m[\"filename\"] == model_filename), None)) is not None:\n                config.update(model)\n\n        # Validate download directory\n        if model_path is None:\n            try:\n                os.makedirs(DEFAULT_MODEL_DIRECTORY, exist_ok=True)\n            except OSError as e:\n                raise RuntimeError(\"Failed to create model download directory\") from e\n            model_path = DEFAULT_MODEL_DIRECTORY\n        else:\n            model_path = Path(model_path)\n\n        if not model_path.exists():\n            raise FileNotFoundError(f\"Model directory does not exist: {model_path!r}\")\n\n        model_dest = model_path / model_filename\n        if model_dest.exists():\n            config[\"path\"] = str(model_dest)\n            if verbose:\n                print(f\"Found model file at {str(model_dest)!r}\", file=sys.stderr)\n        elif allow_download:\n            # If model file does not exist, download\n            filesize = config.get(\"filesize\")\n            config[\"path\"] = str(cls.download_model(\n                model_filename, model_path, verbose=verbose, url=config.get(\"url\"),\n                expected_size=None if filesize is None else int(filesize), expected_md5=config.get(\"md5sum\"),\n            ))\n        else:\n            raise FileNotFoundError(f\"Model file does not exist: {model_dest!r}\")\n\n        return config\n\n    @staticmethod\n    def download_model(\n        model_filename: str,\n        model_path: str | os.PathLike[str],\n        verbose: bool = True,\n        url: str | None = None,\n        
expected_size: int | None = None,\n        expected_md5: str | None = None,\n    ) -> str | os.PathLike[str]:\n        \"\"\"\n        Download model from gpt4all.io.\n\n        Args:\n            model_filename: Filename of model (with .gguf extension).\n            model_path: Path to download model to.\n            verbose: If True (default), print debug messages.\n            url: the models remote url (e.g. may be hosted on HF)\n            expected_size: The expected size of the download.\n            expected_md5: The expected MD5 hash of the download.\n\n        Returns:\n            Model file destination.\n        \"\"\"\n\n        # Download model\n        if url is None:\n            url = f\"https://gpt4all.io/models/gguf/{model_filename}\"\n\n        def make_request(offset=None):\n            headers = {}\n            if offset:\n                print(f\"\\nDownload interrupted, resuming from byte position {offset}\", file=sys.stderr)\n                headers[\"Range\"] = f\"bytes={offset}-\"  # resume incomplete response\n                headers[\"Accept-Encoding\"] = \"identity\"  # Content-Encoding changes meaning of ranges\n            response = requests.get(url, stream=True, headers=headers)\n            if response.status_code not in (200, 206):\n                raise ValueError(f\"Request failed: HTTP {response.status_code} {response.reason}\")\n            if offset and (response.status_code != 206 or str(offset) not in response.headers.get(\"Content-Range\", \"\")):\n                raise ValueError(\"Connection was interrupted and server does not support range requests\")\n            if (enc := response.headers.get(\"Content-Encoding\")) is not None:\n                raise ValueError(f\"Expected identity Content-Encoding, got {enc}\")\n            return response\n\n        response = make_request()\n\n        total_size_in_bytes = int(response.headers.get(\"content-length\", 0))\n        block_size = 2**20  # 1 MB\n\n        partial_path 
= Path(model_path) / (model_filename + \".part\")\n\n        with open(partial_path, \"w+b\") as partf:\n            try:\n                with tqdm(desc=\"Downloading\", total=total_size_in_bytes, unit=\"iB\", unit_scale=True) as progress_bar:\n                    while True:\n                        last_progress = progress_bar.n\n                        try:\n                            for data in response.iter_content(block_size):\n                                partf.write(data)\n                                progress_bar.update(len(data))\n                        except ChunkedEncodingError as cee:\n                            if cee.args and isinstance(pe := cee.args[0], ProtocolError):\n                                if len(pe.args) >= 2 and isinstance(ir := pe.args[1], IncompleteRead):\n                                    assert progress_bar.n <= ir.partial  # urllib3 may be ahead of us but never behind\n                                    # the socket was closed during a read - retry\n                                    response = make_request(progress_bar.n)\n                                    continue\n                            raise\n                        if total_size_in_bytes != 0 and progress_bar.n < total_size_in_bytes:\n                            if progress_bar.n == last_progress:\n                                raise RuntimeError(\"Download not making progress, aborting.\")\n                            # server closed connection prematurely - retry\n                            response = make_request(progress_bar.n)\n                            continue\n                        break\n\n                # verify file integrity\n                file_size = partf.tell()\n                if expected_size is not None and file_size != expected_size:\n                    raise ValueError(f\"Expected file size of {expected_size} bytes, got {file_size}\")\n                if expected_md5 is not None:\n                    partf.seek(0)\n       
             hsh = hashlib.md5()\n                    with tqdm(desc=\"Verifying\", total=file_size, unit=\"iB\", unit_scale=True) as bar:\n                        while chunk := partf.read(block_size):\n                            hsh.update(chunk)\n                            bar.update(len(chunk))\n                    if hsh.hexdigest() != expected_md5.lower():\n                        raise ValueError(f\"Expected MD5 hash of {expected_md5!r}, got {hsh.hexdigest()!r}\")\n            except:\n                if verbose:\n                    print(\"Cleaning up the interrupted download...\", file=sys.stderr)\n                try:\n                    os.remove(partial_path)\n                except OSError:\n                    pass\n                raise\n\n            # flush buffers and sync the inode\n            partf.flush()\n            _fsync(partf)\n\n        # move to final destination\n        download_path = Path(model_path) / model_filename\n        try:\n            os.rename(partial_path, download_path)\n        except FileExistsError:\n            try:\n                os.remove(partial_path)\n            except OSError:\n                pass\n            raise\n\n        if verbose:\n            print(f\"Model downloaded to {str(download_path)!r}\", file=sys.stderr)\n        return download_path\n\n    @overload\n    def generate(\n        self, prompt: str, *, max_tokens: int = ..., temp: float = ..., top_k: int = ..., top_p: float = ...,\n        min_p: float = ..., repeat_penalty: float = ..., repeat_last_n: int = ..., n_batch: int = ...,\n        n_predict: int | None = ..., streaming: Literal[False] = ..., callback: ResponseCallbackType = ...,\n    ) -> str: ...\n    @overload\n    def generate(\n        self, prompt: str, *, max_tokens: int = ..., temp: float = ..., top_k: int = ..., top_p: float = ...,\n        min_p: float = ..., repeat_penalty: float = ..., repeat_last_n: int = ..., n_batch: int = ...,\n        n_predict: int | None = ..., 
streaming: Literal[True], callback: ResponseCallbackType = ...,\n    ) -> Iterable[str]: ...\n    @overload\n    def generate(\n        self, prompt: str, *, max_tokens: int = ..., temp: float = ..., top_k: int = ..., top_p: float = ...,\n        min_p: float = ..., repeat_penalty: float = ..., repeat_last_n: int = ..., n_batch: int = ...,\n        n_predict: int | None = ..., streaming: bool, callback: ResponseCallbackType = ...,\n    ) -> Any: ...\n\n    def generate(\n        self,\n        prompt         : str,\n        *,\n        max_tokens     : int                  = 200,\n        temp           : float                = 0.7,\n        top_k          : int                  = 40,\n        top_p          : float                = 0.4,\n        min_p          : float                = 0.0,\n        repeat_penalty : float                = 1.18,\n        repeat_last_n  : int                  = 64,\n        n_batch        : int                  = 8,\n        n_predict      : int | None           = None,\n        streaming      : bool                 = False,\n        callback       : ResponseCallbackType = empty_response_callback,\n    ) -> Any:\n        \"\"\"\n        Generate outputs from any GPT4All model.\n\n        Args:\n            prompt: The prompt for the model to complete.\n            max_tokens: The maximum number of tokens to generate.\n            temp: The model temperature. Larger values increase creativity but decrease factuality.\n            top_k: Randomly sample from the top_k most likely tokens at each generation step. Set this to 1 for greedy decoding.\n            top_p: Randomly sample at each generation step from the top most likely tokens whose probabilities add up to top_p.\n            min_p: Randomly sample at each generation step from the top most likely tokens whose probabilities are at least min_p.\n            repeat_penalty: Penalize the model for repetition. 
Higher values result in less repetition.\n            repeat_last_n: How far in the models generation history to apply the repeat penalty.\n            n_batch: Number of prompt tokens processed in parallel. Larger values decrease latency but increase resource requirements.\n            n_predict: Equivalent to max_tokens, exists for backwards compatibility.\n            streaming: If True, this method will instead return a generator that yields tokens as the model generates them.\n            callback: A function with arguments token_id:int and response:str, which receives the tokens from the model as they are generated and stops the generation by returning False.\n\n        Returns:\n            Either the entire completion or a generator that yields the completion token by token.\n        \"\"\"\n\n        # Preparing the model request\n        generate_kwargs: dict[str, Any] = dict(\n            temp           = temp,\n            top_k          = top_k,\n            top_p          = top_p,\n            min_p          = min_p,\n            repeat_penalty = repeat_penalty,\n            repeat_last_n  = repeat_last_n,\n            n_batch        = n_batch,\n            n_predict      = n_predict if n_predict is not None else max_tokens,\n        )\n\n        # Prepare the callback, process the model response\n        full_response = \"\"\n\n        def _callback_wrapper(token_id: int, response: str) -> bool:\n            nonlocal full_response\n            full_response += response\n            return callback(token_id, response)\n\n        last_msg_rendered = prompt\n        if self._chat_session is not None:\n            session = self._chat_session\n            def render(messages: list[MessageType]) -> str:\n                return session.template.render(\n                    messages=messages,\n                    add_generation_prompt=True,\n                    **self.model.special_tokens_map,\n                )\n            
session.history.append(MessageType(role=\"user\", content=prompt))\n            prompt = render(session.history)\n            if len(session.history) > 1:\n                last_msg_rendered = render(session.history[-1:])\n\n        # Check request length\n        last_msg_len = self.model.count_prompt_tokens(last_msg_rendered)\n        if last_msg_len > (limit := self.model.n_ctx - 4):\n            raise ValueError(f\"Your message was too long and could not be processed ({last_msg_len} > {limit}).\")\n\n        # Send the request to the model\n        if streaming:\n            def stream() -> Iterator[str]:\n                yield from self.model.prompt_model_streaming(prompt, _callback_wrapper, **generate_kwargs)\n                if self._chat_session is not None:\n                    self._chat_session.history.append(MessageType(role=\"assistant\", content=full_response))\n            return stream()\n\n        self.model.prompt_model(prompt, _callback_wrapper, **generate_kwargs)\n        if self._chat_session is not None:\n            self._chat_session.history.append(MessageType(role=\"assistant\", content=full_response))\n        return full_response\n\n    @contextmanager\n    def chat_session(\n        self,\n        system_message: str | Literal[False] | None = None,\n        chat_template: str | None = None,\n    ):\n        \"\"\"\n        Context manager to hold an inference optimized chat session with a GPT4All model.\n\n        Args:\n            system_message: An initial instruction for the model, None to use the model default, or False to disable. Defaults to None.\n            chat_template: Jinja template for the conversation, or None to use the model default. 
Defaults to None.\n        \"\"\"\n\n        if system_message is None:\n            system_message = self.config.get(\"systemMessage\", False)\n\n        if chat_template is None:\n            if \"name\" not in self.config:\n                raise ValueError(\"For sideloaded models or with allow_download=False, you must specify a chat template.\")\n            if \"chatTemplate\" not in self.config:\n                raise NotImplementedError(\"This model appears to have a built-in chat template, but loading it is not \"\n                                          \"currently implemented. Please pass a template to chat_session() directly.\")\n            if (tmpl := self.config[\"chatTemplate\"]) is None:\n                raise ValueError(f\"The model {self.config['name']!r} does not support chat.\")\n            chat_template = tmpl\n\n        history = []\n        if system_message is not False:\n            history.append(MessageType(role=\"system\", content=system_message))\n        self._chat_session = ChatSession(\n            template=_jinja_env.from_string(chat_template),\n            history=history,\n        )\n        try:\n            yield self\n        finally:\n            self._chat_session = None\n\n    @staticmethod\n    def list_gpus() -> list[str]:\n        \"\"\"\n        List the names of the available GPU devices.\n\n        Returns:\n            A list of strings representing the names of the available GPU devices.\n        \"\"\"\n        return LLModel.list_gpus()\n\n\ndef append_extension_if_missing(model_name):\n    if not model_name.endswith((\".bin\", \".gguf\")):\n        model_name += \".gguf\"\n    return model_name\n\n\nclass _HasFileno(Protocol):\n    def fileno(self) -> int: ...\n\n\ndef _fsync(fd: int | _HasFileno) -> None:\n    if sys.platform == \"darwin\":\n        # Apple's fsync does not flush the drive write cache\n        try:\n            fcntl.fcntl(fd, fcntl.F_FULLFSYNC)\n        except OSError:\n            pass  # 
fall back to fsync\n        else:\n            return\n    os.fsync(fd)\n\n\ndef _remove_prefix(s: str, prefix: str) -> str:\n    return s[len(prefix):] if s.startswith(prefix) else s\n"
  },
  {
    "path": "gpt4all-bindings/python/gpt4all/tests/__init__.py",
    "content": ""
  },
  {
    "path": "gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py",
    "content": "#!/usr/bin/env python3\nimport sys\nimport time\nfrom io import StringIO\n\nfrom gpt4all import Embed4All, GPT4All\n\n\ndef time_embedding(i, embedder):\n    text = 'foo bar ' * i\n    start_time = time.time()\n    output = embedder.embed(text)\n    end_time = time.time()\n    elapsed_time = end_time - start_time\n    print(f\"Time report: {2 * i / elapsed_time} tokens/second with {2 * i} tokens taking {elapsed_time} seconds\")\n\n\nif __name__ == \"__main__\":\n    embedder = Embed4All(n_threads=8)\n    for i in [2**n for n in range(6, 14)]:\n        time_embedding(i, embedder)\n"
  },
  {
    "path": "gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py",
    "content": "import sys\nfrom io import StringIO\nfrom pathlib import Path\n\nfrom gpt4all import GPT4All, Embed4All\nimport time\nimport pytest\n\n\ndef test_inference():\n    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')\n    output_1 = model.generate('hello', top_k=1)\n\n    with model.chat_session():\n        response = model.generate(prompt='hello', top_k=1)\n        response = model.generate(prompt='write me a short poem', top_k=1)\n        response = model.generate(prompt='thank you', top_k=1)\n        print(model.current_chat_session)\n\n    output_2 = model.generate('hello', top_k=1)\n\n    assert output_1 == output_2\n\n    tokens = []\n    for token in model.generate('hello', streaming=True):\n        tokens.append(token)\n\n    assert len(tokens) > 0\n\n    with model.chat_session():\n        model.generate(prompt='hello', top_k=1, streaming=True)\n        model.generate(prompt='write me a poem about dogs', top_k=1, streaming=True)\n        print(model.current_chat_session)\n\n\ndef do_long_input(model):\n    long_input = \" \".join([\"hello how are you\"] * 40)\n\n    with model.chat_session():\n        # llmodel should limit us to 128 even if we ask for more\n        model.generate(long_input, n_batch=512)\n        print(model.current_chat_session)\n\n\ndef test_inference_long_orca_3b():\n    model = GPT4All(model_name=\"orca-mini-3b-gguf2-q4_0.gguf\")\n    do_long_input(model)\n\n\ndef test_inference_long_falcon():\n    model = GPT4All(model_name='gpt4all-falcon-q4_0.gguf')\n    do_long_input(model)\n\n\ndef test_inference_long_llama_7b():\n    model = GPT4All(model_name=\"mistral-7b-openorca.Q4_0.gguf\")\n    do_long_input(model)\n\n\ndef test_inference_long_llama_13b():\n    model = GPT4All(model_name='nous-hermes-llama2-13b.Q4_0.gguf')\n    do_long_input(model)\n\n\ndef test_inference_long_mpt():\n    model = GPT4All(model_name='mpt-7b-chat-q4_0.gguf')\n    do_long_input(model)\n\n\ndef test_inference_long_replit():\n    model = 
GPT4All(model_name='replit-code-v1_5-3b-q4_0.gguf')\n    do_long_input(model)\n\n\ndef test_inference_hparams():\n    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')\n\n    output = model.generate(\"The capital of france is \", max_tokens=3)\n    assert 'Paris' in output\n\n\ndef test_inference_falcon():\n    model = GPT4All(model_name='gpt4all-falcon-q4_0.gguf')\n    prompt = 'hello'\n    output = model.generate(prompt)\n    assert isinstance(output, str)\n    assert len(output) > 0\n\n\ndef test_inference_mpt():\n    model = GPT4All(model_name='mpt-7b-chat-q4_0.gguf')\n    prompt = 'hello'\n    output = model.generate(prompt)\n    assert isinstance(output, str)\n    assert len(output) > 0\n\n\ndef test_embedding():\n    text = 'The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown 
fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox'\n    embedder = Embed4All()\n    output = embedder.embed(text)\n    #for i, value in enumerate(output):\n        #print(f'Value at index {i}: {value}')\n    assert len(output) == 384\n\n\ndef test_empty_embedding():\n    text = ''\n    embedder = Embed4All()\n    with pytest.raises(ValueError):\n        output = embedder.embed(text)\n\ndef test_download_model(tmp_path: Path):\n    from gpt4all import gpt4all\n    old_default_dir = gpt4all.DEFAULT_MODEL_DIRECTORY\n    gpt4all.DEFAULT_MODEL_DIRECTORY = tmp_path  # temporary pytest directory to ensure a download happens\n    try:\n        model = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin')\n        model_path = tmp_path / model.config['filename']\n        assert model_path.absolute() == Path(model.config['path']).absolute()\n        assert 
model_path.stat().st_size == int(model.config['filesize'])\n    finally:\n        gpt4all.DEFAULT_MODEL_DIRECTORY = old_default_dir\n"
  },
  {
    "path": "gpt4all-bindings/python/makefile",
    "content": "SHELL:=/bin/bash -o pipefail\nROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))\nPYTHON:=python3\n\nenv:\n\tif [ ! -d $(ROOT_DIR)/env ]; then $(PYTHON) -m venv $(ROOT_DIR)/env; fi\n\ndev: env\n\tsource env/bin/activate; pip install black isort pytest; pip install -e .\n\ndocumentation:\n\trm -rf ./site && mkdocs build\n\nwheel:\n\trm -rf dist/ build/ gpt4all/llmodel_DO_NOT_MODIFY; python setup.py bdist_wheel;\n\nclean:\n\trm -rf {.pytest_cache,env,gpt4all.egg-info}\n\tfind . | grep -E \"(__pycache__|\\.pyc|\\.pyo$\\)\" | xargs rm -rf\n\nblack:\n\tsource env/bin/activate; black -l 120 -S --target-version py36 gpt4all\n\nisort:\n\tsource env/bin/activate; isort  --ignore-whitespace --atomic -w 120 gpt4all\n\ntest:\n\tsource env/bin/activate;  pytest -s gpt4all/tests -k \"not test_inference_long\"\n\ntest_all:\n\tsource env/bin/activate;  pytest -s gpt4all/tests\n"
  },
  {
    "path": "gpt4all-bindings/python/mkdocs.yml",
    "content": "site_name: GPT4All\nrepo_url: https://github.com/nomic-ai/gpt4all\nrepo_name: nomic-ai/gpt4all\nsite_url: https://docs.gpt4all.io\nedit_uri: edit/main/docs/\nsite_description: GPT4All Docs - run LLMs efficiently on your hardware\ncopyright: Copyright &copy; 2024 Nomic, Inc\nuse_directory_urls: false\n\nnav:\n    - 'index.md' \n    - 'Quickstart' : 'gpt4all_desktop/quickstart.md'\n    - 'Chats' : 'gpt4all_desktop/chats.md'\n    - 'Models' : 'gpt4all_desktop/models.md'\n    - 'LocalDocs' : 'gpt4all_desktop/localdocs.md'\n    - 'Settings' : 'gpt4all_desktop/settings.md'\n    - 'Chat Templates' : 'gpt4all_desktop/chat_templates.md'\n    - 'Cookbook':\n      - 'Local AI Chat with Microsoft Excel': 'gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-microsoft-excel.md'\n      - 'Local AI Chat with your Google Drive': 'gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-google-drive.md'\n      - 'Local AI Chat with your Obsidian Vault': 'gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md'\n      - 'Local AI Chat with your OneDrive': 'gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-One-Drive.md'\n    - 'API Server':\n      - 'gpt4all_api_server/home.md'\n    - 'Python SDK':\n      - 'gpt4all_python/home.md'\n      - 'Monitoring': 'gpt4all_python/monitoring.md'\n      - 'SDK Reference': 'gpt4all_python/ref.md'\n    - 'Help':\n      - 'FAQ': 'gpt4all_help/faq.md'\n      - 'Troubleshooting': 'gpt4all_help/troubleshooting.md'\n\n\ntheme:\n  name: material\n  palette:\n    primary: white\n  logo: assets/nomic.png\n  favicon: assets/favicon.ico\n  features:\n    - content.code.copy\n    - navigation.instant\n    - navigation.tracking\n    - navigation.sections\n#    - navigation.tabs\n#    - navigation.tabs.sticky\n\nmarkdown_extensions:\n  - pymdownx.highlight:\n      anchor_linenums: true\n  - pymdownx.inlinehilite\n  - pymdownx.snippets\n  - pymdownx.details\n  - 
pymdownx.superfences\n  - pymdownx.tabbed:\n      alternate_style: true\n  - pymdownx.emoji:\n      emoji_index: !!python/name:material.extensions.emoji.twemoji\n      emoji_generator: !!python/name:material.extensions.emoji.to_svg\n      options:\n        custom_icons:\n          - docs/overrides/.icons\n  - tables\n  - admonition\n  - codehilite:\n      css_class: highlight\n  - markdown_captions\n\nextra_css:\n    - css/custom.css\n\n\n\nplugins:\n  - search\n  - mkdocstrings:\n      handlers:\n        python:\n          options:\n            show_root_heading: True\n            heading_level: 4\n            show_root_full_path: false\n            docstring_section_style: list\n  - material/social:\n      cards_layout_options:\n          font_family: Roboto\n          description: GPT4All runs LLMs efficiently on your hardware\n\nextra:\n  generator: false\n  analytics:\n    provider: google\n    property: G-NPXC8BYHJV\n"
  },
  {
    "path": "gpt4all-bindings/python/setup.py",
    "content": "from setuptools import setup, find_packages\nimport os\nimport pathlib\nimport platform\nimport shutil\n\npackage_name = \"gpt4all\"\n\n# Define the location of your prebuilt C library files\nSRC_CLIB_DIRECTORY = os.path.join(\"..\", \"..\", \"gpt4all-backend\")\nSRC_CLIB_BUILD_DIRECTORY = os.path.join(\"..\", \"..\", \"gpt4all-backend\", \"build\") \n\nLIB_NAME = \"llmodel\"\n\nDEST_CLIB_DIRECTORY = os.path.join(package_name, f\"{LIB_NAME}_DO_NOT_MODIFY\")\nDEST_CLIB_BUILD_DIRECTORY = os.path.join(DEST_CLIB_DIRECTORY, \"build\")\n\nsystem = platform.system()\n\ndef get_c_shared_lib_extension():\n    \n    if system == \"Darwin\":\n        return \"dylib\"\n    elif system == \"Linux\":\n        return \"so\"\n    elif system == \"Windows\":\n        return \"dll\"\n    else:\n        raise Exception(\"Operating System not supported\")\n    \nlib_ext = get_c_shared_lib_extension()\n\ndef copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir):\n    files_copied = 0\n\n    if not os.path.exists(dest_dir):\n        os.mkdir(dest_dir)\n        os.mkdir(dest_build_dir)\n\n    for dirpath, _, filenames in os.walk(src_dir):\n        for item in filenames:\n            # copy over header files to dest dir\n            s = os.path.join(dirpath, item)\n            if item.endswith(\".h\"):\n                d = os.path.join(dest_dir, item)\n                shutil.copy2(s, d)\n                files_copied += 1\n            if item.endswith(lib_ext) or item.endswith('.metallib'):\n                s = os.path.join(dirpath, item)\n                d = os.path.join(dest_build_dir, item)\n                shutil.copy2(s, d)\n                files_copied += 1\n    \n    return files_copied\n\n\n# NOTE: You must provide correct path to the prebuilt llmodel C library. 
\n# Specifically, the llmodel.h and C shared library are needed.\ncopy_prebuilt_C_lib(SRC_CLIB_DIRECTORY,\n                    DEST_CLIB_DIRECTORY,\n                    DEST_CLIB_BUILD_DIRECTORY)\n\n\ndef get_long_description():\n    with open(pathlib.Path(__file__).parent / \"README.md\", encoding=\"utf-8\") as fp:\n        return fp.read()\n\n\nsetup(\n    name=package_name,\n    version=\"2.8.3.dev0\",\n    description=\"Python bindings for GPT4All\",\n    long_description=get_long_description(),\n    long_description_content_type=\"text/markdown\",\n    author=\"Nomic and the Open Source Community\",\n    author_email=\"support@nomic.ai\",\n    url=\"https://www.nomic.ai/gpt4all\",\n    project_urls={\n        \"Documentation\": \"https://docs.gpt4all.io/gpt4all_python.html\",\n        \"Source code\": \"https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python\",\n        \"Changelog\": \"https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-bindings/python/CHANGELOG.md\",\n    },\n    classifiers = [\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.8',\n    packages=find_packages(),\n    install_requires=[\n        'importlib_resources; python_version < \"3.9\"',\n        'jinja2~=3.1',\n        'requests',\n        'tqdm',\n        'typing-extensions>=4.3.0; python_version >= \"3.9\" and python_version < \"3.11\"',\n    ],\n    extras_require={\n        'cuda': [\n            'nvidia-cuda-runtime-cu11',\n            'nvidia-cublas-cu11',\n        ],\n        'all': [\n            'gpt4all[cuda]; platform_system == \"Windows\" or platform_system == \"Linux\"',\n        ],\n        'dev': [\n            'gpt4all[all]',\n            'pytest',\n            'twine',\n            'wheel',\n            'setuptools',\n            'mkdocs-material',\n            'mkdocs-material[imaging]',\n            'mkautodoc',\n     
       'mkdocstrings[python]',\n            'mkdocs-jupyter',\n            'black',\n            'isort',\n            'typing-extensions>=3.10',\n        ]\n    },\n    package_data={'llmodel': [os.path.join(DEST_CLIB_DIRECTORY, \"*\")]},\n    include_package_data=True\n)\n"
  },
  {
    "path": "gpt4all-bindings/typescript/.clang-format",
    "content": "---\nLanguage: Cpp\nBasedOnStyle: Microsoft\nColumnLimit: 120"
  },
  {
    "path": "gpt4all-bindings/typescript/.gitignore",
    "content": "node_modules/\nbuild/\nprebuilds/\n.yarn/*\n!.yarn/patches\n!.yarn/plugins\n!.yarn/releases\n!.yarn/sdks\n!.yarn/versions\nruntimes/\ncompile_flags.txt\n"
  },
  {
    "path": "gpt4all-bindings/typescript/.npmignore",
    "content": "test/\nspec/\nscripts/\nbuild"
  },
  {
    "path": "gpt4all-bindings/typescript/.yarnrc.yml",
    "content": "nodeLinker: node-modules\n"
  },
  {
    "path": "gpt4all-bindings/typescript/README.md",
    "content": "# GPT4All Node.js API\n\nNative Node.js LLM bindings for all.\n\n```sh\nyarn add gpt4all@latest\n\nnpm install gpt4all@latest\n\npnpm install gpt4all@latest\n\n```\n## Breaking changes in version 4!!\n*   See [Transition](#changes)\n## Contents\n*   See [API Reference](#api-reference)\n*   See [Examples](#api-example)\n*   See [Developing](#develop)\n*   GPT4ALL nodejs bindings created by [jacoobes](https://github.com/jacoobes), [limez](https://github.com/iimez) and the [nomic ai community](https://home.nomic.ai), for all to use.\n*   [spare change](https://github.com/sponsors/jacoobes) for a college student? 🤑\n## Api Examples\n### Chat Completion\n\nUse a chat session to keep context between completions. This is useful for efficient back and forth conversations.\n\n```js\nimport { createCompletion, loadModel } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\", {\n    verbose: true, // logs loaded model configuration\n    device: \"gpu\", // defaults to 'cpu'\n    nCtx: 2048, // the maximum sessions context window size.\n});\n\n// initialize a chat session on the model. a model instance can have only one chat session at a time.\nconst chat = await model.createChatSession({\n    // any completion options set here will be used as default for all completions in this chat session\n    temperature: 0.8,\n    // a custom systemPrompt can be set here. 
note that the template depends on the model.\n    // if unset, the systemPrompt that comes with the model will be used.\n    systemPrompt: \"### System:\\nYou are an advanced mathematician.\\n\\n\",\n});\n\n// create a completion using a string as input\nconst res1 = await createCompletion(chat, \"What is 1 + 1?\");\nconsole.debug(res1.choices[0].message);\n\n// multiple messages can be input to the conversation at once.\n// note that if the last message is not of role 'user', an empty message will be returned.\nawait createCompletion(chat, [\n    {\n        role: \"user\",\n        content: \"What is 2 + 2?\",\n    },\n    {\n        role: \"assistant\",\n        content: \"It's 5.\",\n    },\n]);\n\nconst res3 = await createCompletion(chat, \"Could you recalculate that?\");\nconsole.debug(res3.choices[0].message);\n\nmodel.dispose();\n```\n\n### Stateless usage\nYou can use the model without a chat session. This is useful for one-off completions.\n\n```js\nimport { createCompletion, loadModel } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\");\n\n// createCompletion methods can also be used on the model directly.\n// context is not maintained between completions.\nconst res1 = await createCompletion(model, \"What is 1 + 1?\");\nconsole.debug(res1.choices[0].message);\n\n// a whole conversation can be input as well.\n// note that if the last message is not of role 'user', an error will be thrown.\nconst res2 = await createCompletion(model, [\n    {\n        role: \"user\",\n        content: \"What is 2 + 2?\",\n    },\n    {\n        role: \"assistant\",\n        content: \"It's 5.\",\n    },\n    {\n        role: \"user\",\n        content: \"Could you recalculate that?\",\n    },\n]);\nconsole.debug(res2.choices[0].message);\n\n```\n\n### Embedding\n\n```js\nimport { loadModel, createEmbedding } from '../src/gpt4all.js'\n\nconst embedder = await loadModel(\"nomic-embed-text-v1.5.f16.gguf\", { verbose: true, type: 
'embedding'})\n\nconsole.log(createEmbedding(embedder, \"Maybe Minecraft was the friends we made along the way\"));\n```\n\n### Streaming responses\n```js\nimport { loadModel, createCompletionStream } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"mistral-7b-openorca.gguf2.Q4_0.gguf\", {\n    device: \"gpu\",\n});\n\nprocess.stdout.write(\"Output: \");\nconst stream = createCompletionStream(model, \"How are you?\");\nstream.tokens.on(\"data\", (data) => {\n    process.stdout.write(data);\n});\n//wait till stream finishes. We cannot continue until this one is done.\nawait stream.result;\nprocess.stdout.write(\"\\n\");\nmodel.dispose();\n\n```\n\n### Async Generators\n```js\nimport { loadModel, createCompletionGenerator } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"mistral-7b-openorca.gguf2.Q4_0.gguf\");\n\nprocess.stdout.write(\"Output: \");\nconst gen = createCompletionGenerator(\n    model,\n    \"Redstone in Minecraft is Turing Complete. Let that sink in. (let it in!)\"\n);\nfor await (const chunk of gen) {\n    process.stdout.write(chunk);\n}\n\nprocess.stdout.write(\"\\n\");\nmodel.dispose();\n\n```\n### Offline usage\ndo this b4 going offline\n```sh\ncurl -L https://gpt4all.io/models/models3.json -o ./models3.json\n```\n```js\nimport { createCompletion, loadModel } from 'gpt4all'\n\n//make sure u downloaded the models before going offline!\nconst model = await loadModel('mistral-7b-openorca.gguf2.Q4_0.gguf', {\n    verbose: true,\n    device: 'gpu',\n    modelConfigFile: \"./models3.json\"\n});\n\nawait createCompletion(model, 'What is 1 + 1?', { verbose: true })\n\nmodel.dispose();\n```\n\n## Develop\n### Build Instructions\n\n*   `binding.gyp` is compile config\n*   Tested on Ubuntu. Everything seems to work fine\n*   Tested on Windows. Everything works fine.\n*   Sparse testing on mac os.\n*   MingW script works to build the gpt4all-backend. We left it there just in case. 
**HOWEVER**, this package works only with MSVC built dlls.\n\n### Requirements\n\n*   git\n*   [node.js >= 18.0.0](https://nodejs.org/en)\n*   [yarn](https://yarnpkg.com/)\n*   [node-gyp](https://github.com/nodejs/node-gyp)\n    *   all of its requirements.\n*   (unix) gcc version 12\n*   (win) msvc version 143\n    *   Can be obtained with visual studio 2022 build tools\n*   python 3\n*   On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home\n*   macOS users do not need Vulkan, as GPT4All will use Metal instead.\n\n### Build (from source)\n\n```sh\ngit clone https://github.com/nomic-ai/gpt4all.git\ncd gpt4all-bindings/typescript\n```\n\n*   The below shell commands assume the current working directory is `typescript`.\n\n*   To Build and Rebuild:\n\n```sh\nnode scripts/prebuild.js\n```\n*   llama.cpp git submodule for gpt4all can be possibly absent. If this is the case, make sure to run in llama.cpp parent directory\n\n```sh\ngit submodule update --init --recursive\n```\n\n```sh\nyarn build:backend\n```\nThis will build platform-dependent dynamic libraries, and will be located in runtimes/(platform)/native\n\n### Test\n\n```sh\nyarn test\n```\n\n### Source Overview\n\n#### src/\n\n*   Extra functions to help aid devex\n*   Typings for the native node addon\n*   the javascript interface\n\n#### test/\n\n*   simple unit testings for some functions exported.\n*   more advanced ai testing is not handled\n\n#### spec/\n\n*   Average look and feel of the api\n*   Should work assuming a model and libraries are installed locally in working directory\n\n#### index.cc\n\n*   The bridge between nodejs and c. 
Where the bindings are.\n\n#### prompt.cc\n\n*   Handling prompting and inference of models in a threadsafe, asynchronous way.\n\n### Known Issues\n\n*   why your model may be spewing bull 💩\n    *   The downloaded model is broken (just reinstall or download from official site)\n*   Your model is hanging after a call to generate tokens.\n    * Is `nPast` set too high? This may cause your model to hang (03/16/2024), Linux Mint, Ubuntu 22.04\n*  Your GPU usage is still high after node.js exits.\n    * Make sure to call `model.dispose()`!!!\n\n### Roadmap\n\nThis package has been stabilizing over time development, and breaking changes may happen until the api stabilizes. Here's what's the todo list:\n\n*   \\[ ] Purely offline. Per the gui, which can be run completely offline, the bindings should be as well.\n*   \\[ ] NPM bundle size reduction via optionalDependencies strategy (need help)\n    *   Should include prebuilds to avoid painful node-gyp errors\n*   \\[x] createChatSession ( the python equivalent to create\\_chat\\_session )\n*   \\[x] generateTokens, the new name for createTokenStream. As of 3.2.0, this is released but not 100% tested. Check spec/generator.mjs!\n*   \\[x] ~~createTokenStream, an async iterator that streams each token emitted from the model. 
Planning on following this [example](https://github.com/nodejs/node-addon-examples/tree/main/threadsafe-async-iterator)~~ May not implement unless someone else can complete\n*   \\[x] prompt models via a threadsafe function in order to have proper non blocking behavior in nodejs\n*   \\[x] generateTokens is the new name for this^\n*   \\[x] proper unit testing (integrate with circle ci)\n*   \\[x] publish to npm under alpha tag `gpt4all@alpha`\n*   \\[x] have more people test on other platforms (mac tester needed)\n*   \\[x] switch to new pluggable backend\n\n## Changes\nThis repository serves as the new bindings for nodejs users.\n- If you were a user of [these bindings](https://github.com/nomic-ai/gpt4all-ts), they are outdated.\n- Version 4 includes the follow breaking changes\n    * `createEmbedding` & `EmbeddingModel.embed()` returns an object, `EmbeddingResult`, instead of a float32array.\n    * Removed deprecated types `ModelType` and `ModelFile`\n    * Removed deprecated initiation of model by string path only\n\n\n### API Reference\n"
  },
  {
    "path": "gpt4all-bindings/typescript/binding.ci.gyp",
    "content": "{\n  \"targets\": [\n    {\n      \"target_name\": \"gpt4all\", # gpt4all-ts will cause compile error\n      \"include_dirs\": [\n        \"<!@(node -p \\\"require('node-addon-api').include\\\")\",\n        \"gpt4all-backend\",\n      ],\n      \"sources\": [\n        # PREVIOUS VERSION: had to required the sources, but with newest changes do not need to\n        #\"../../gpt4all-backend/llama.cpp/examples/common.cpp\",\n        #\"../../gpt4all-backend/llama.cpp/ggml.c\",\n        #\"../../gpt4all-backend/llama.cpp/llama.cpp\",\n        # \"../../gpt4all-backend/utils.cpp\",\n        \"gpt4all-backend/llmodel_c.cpp\",\n        \"gpt4all-backend/llmodel.cpp\",\n        \"prompt.cc\",\n        \"index.cc\",\n       ],\n      \"conditions\": [\n        ['OS==\"mac\"', {\n            'xcode_settings': {\n                'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'\n            },\n            'defines': [\n                'LIB_FILE_EXT=\".dylib\"',\n                'NAPI_CPP_EXCEPTIONS',\n            ],\n            'cflags_cc': [\n                \"-fexceptions\"\n            ]\n        }],\n        ['OS==\"win\"', {\n            'defines': [\n                'LIB_FILE_EXT=\".dll\"',\n                'NAPI_CPP_EXCEPTIONS',\n            ],\n            \"msvs_settings\": {\n                \"VCCLCompilerTool\": {\n                    \"AdditionalOptions\": [\n                        \"/std:c++20\",\n                        \"/EHsc\",\n                  ],\n                },\n            },\n        }],\n        ['OS==\"linux\"', {\n            'defines': [\n                'LIB_FILE_EXT=\".so\"',\n                'NAPI_CPP_EXCEPTIONS',\n            ],\n            'cflags_cc!': [\n                '-fno-rtti',\n            ],\n            'cflags_cc': [\n                '-std=c++2a',\n                '-fexceptions'\n            ]\n        }]\n      ]\n    }]\n}\n"
  },
  {
    "path": "gpt4all-bindings/typescript/binding.gyp",
    "content": "{\n  \"targets\": [\n    {\n      \"target_name\": \"gpt4all\", # gpt4all-ts will cause compile error\n      \"include_dirs\": [\n        \"<!@(node -p \\\"require('node-addon-api').include\\\")\",\n        \"../../gpt4all-backend\",\n      ],\n      \"sources\": [\n        # PREVIOUS VERSION: had to required the sources, but with newest changes do not need to\n        #\"../../gpt4all-backend/llama.cpp/examples/common.cpp\",\n        #\"../../gpt4all-backend/llama.cpp/ggml.c\",\n        #\"../../gpt4all-backend/llama.cpp/llama.cpp\",\n        # \"../../gpt4all-backend/utils.cpp\",\n        \"../../gpt4all-backend/llmodel_c.cpp\",\n        \"../../gpt4all-backend/llmodel.cpp\",\n        \"prompt.cc\",\n        \"index.cc\",\n       ],\n      \"conditions\": [\n        ['OS==\"mac\"', {\n            'xcode_settings': {\n                'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'\n            },\n            'defines': [\n                'LIB_FILE_EXT=\".dylib\"',\n                'NAPI_CPP_EXCEPTIONS',\n            ],\n            'cflags_cc': [\n                \"-fexceptions\"\n            ]\n        }],\n        ['OS==\"win\"', {\n            'defines': [\n                'LIB_FILE_EXT=\".dll\"',\n                'NAPI_CPP_EXCEPTIONS',\n            ],\n            \"msvs_settings\": {\n                \"VCCLCompilerTool\": {\n                    \"AdditionalOptions\": [\n                        \"/std:c++20\",\n                        \"/EHsc\",\n                  ],\n                },\n            },\n        }],\n        ['OS==\"linux\"', {\n            'defines': [\n                'LIB_FILE_EXT=\".so\"',\n                'NAPI_CPP_EXCEPTIONS',\n            ],\n            'cflags_cc!': [\n                '-fno-rtti',\n            ],\n            'cflags_cc': [\n                '-std=c++2a',\n                '-fexceptions'\n            ]\n        }]\n      ]\n    }]\n}\n"
  },
  {
    "path": "gpt4all-bindings/typescript/index.cc",
    "content": "#include \"index.h\"\n#include \"napi.h\"\n\nNapi::Function NodeModelWrapper::GetClass(Napi::Env env)\n{\n    Napi::Function self = DefineClass(env, \"LLModel\",\n                                      {InstanceMethod(\"type\", &NodeModelWrapper::GetType),\n                                       InstanceMethod(\"isModelLoaded\", &NodeModelWrapper::IsModelLoaded),\n                                       InstanceMethod(\"name\", &NodeModelWrapper::GetName),\n                                       InstanceMethod(\"stateSize\", &NodeModelWrapper::StateSize),\n                                       InstanceMethod(\"infer\", &NodeModelWrapper::Infer),\n                                       InstanceMethod(\"setThreadCount\", &NodeModelWrapper::SetThreadCount),\n                                       InstanceMethod(\"embed\", &NodeModelWrapper::GenerateEmbedding),\n                                       InstanceMethod(\"threadCount\", &NodeModelWrapper::ThreadCount),\n                                       InstanceMethod(\"getLibraryPath\", &NodeModelWrapper::GetLibraryPath),\n                                       InstanceMethod(\"initGpuByString\", &NodeModelWrapper::InitGpuByString),\n                                       InstanceMethod(\"hasGpuDevice\", &NodeModelWrapper::HasGpuDevice),\n                                       InstanceMethod(\"listGpu\", &NodeModelWrapper::GetGpuDevices),\n                                       InstanceMethod(\"memoryNeeded\", &NodeModelWrapper::GetRequiredMemory),\n                                       InstanceMethod(\"dispose\", &NodeModelWrapper::Dispose)});\n    // Keep a static reference to the constructor\n    //\n    Napi::FunctionReference *constructor = new Napi::FunctionReference();\n    *constructor = Napi::Persistent(self);\n    env.SetInstanceData(constructor);\n    return self;\n}\nNapi::Value NodeModelWrapper::GetRequiredMemory(const Napi::CallbackInfo &info)\n{\n    auto env = info.Env();\n    return 
Napi::Number::New(\n        env, static_cast<uint32_t>(llmodel_required_mem(GetInference(), full_model_path.c_str(), nCtx, nGpuLayers)));\n}\nNapi::Value NodeModelWrapper::GetGpuDevices(const Napi::CallbackInfo &info)\n{\n    auto env = info.Env();\n    int num_devices = 0;\n    auto mem_size = llmodel_required_mem(GetInference(), full_model_path.c_str(), nCtx, nGpuLayers);\n    llmodel_gpu_device *all_devices = llmodel_available_gpu_devices(mem_size, &num_devices);\n    if (all_devices == nullptr)\n    {\n        Napi::Error::New(env, \"Unable to retrieve list of all GPU devices\").ThrowAsJavaScriptException();\n        return env.Undefined();\n    }\n    auto js_array = Napi::Array::New(env, num_devices);\n    for (int i = 0; i < num_devices; ++i)\n    {\n        auto gpu_device = all_devices[i];\n        /*\n         *\n         * struct llmodel_gpu_device {\n             int index = 0;\n             int type = 0;           // same as VkPhysicalDeviceType\n             size_t heapSize = 0;\n             const char * name;\n             const char * vendor;\n           };\n         *\n         */\n        Napi::Object js_gpu_device = Napi::Object::New(env);\n        js_gpu_device[\"index\"] = uint32_t(gpu_device.index);\n        js_gpu_device[\"type\"] = uint32_t(gpu_device.type);\n        js_gpu_device[\"heapSize\"] = static_cast<uint32_t>(gpu_device.heapSize);\n        js_gpu_device[\"name\"] = gpu_device.name;\n        js_gpu_device[\"vendor\"] = gpu_device.vendor;\n\n        js_array[i] = js_gpu_device;\n    }\n    return js_array;\n}\n\nNapi::Value NodeModelWrapper::GetType(const Napi::CallbackInfo &info)\n{\n    if (type.empty())\n    {\n        return info.Env().Undefined();\n    }\n    return Napi::String::New(info.Env(), type);\n}\n\nNapi::Value NodeModelWrapper::InitGpuByString(const Napi::CallbackInfo &info)\n{\n    auto env = info.Env();\n    size_t memory_required = static_cast<size_t>(info[0].As<Napi::Number>().Uint32Value());\n\n    std::string 
gpu_device_identifier = info[1].As<Napi::String>();\n\n    size_t converted_value;\n    if (memory_required <= std::numeric_limits<size_t>::max())\n    {\n        converted_value = static_cast<size_t>(memory_required);\n    }\n    else\n    {\n        Napi::Error::New(env, \"invalid number for memory size. Exceeded bounds for memory.\")\n            .ThrowAsJavaScriptException();\n        return env.Undefined();\n    }\n\n    auto result = llmodel_gpu_init_gpu_device_by_string(GetInference(), converted_value, gpu_device_identifier.c_str());\n    return Napi::Boolean::New(env, result);\n}\nNapi::Value NodeModelWrapper::HasGpuDevice(const Napi::CallbackInfo &info)\n{\n    return Napi::Boolean::New(info.Env(), llmodel_has_gpu_device(GetInference()));\n}\n\nNodeModelWrapper::NodeModelWrapper(const Napi::CallbackInfo &info) : Napi::ObjectWrap<NodeModelWrapper>(info)\n{\n    auto env = info.Env();\n    auto config_object = info[0].As<Napi::Object>();\n\n    // sets the directory where models (gguf files) are to be searched\n    llmodel_set_implementation_search_path(\n        config_object.Has(\"library_path\") ? config_object.Get(\"library_path\").As<Napi::String>().Utf8Value().c_str()\n                                          : \".\");\n\n    std::string model_name = config_object.Get(\"model_name\").As<Napi::String>();\n    fs::path model_path = config_object.Get(\"model_path\").As<Napi::String>().Utf8Value();\n    std::string full_weight_path = (model_path / fs::path(model_name)).string();\n\n    name = model_name.empty() ? 
model_path.filename().string() : model_name;\n    full_model_path = full_weight_path;\n    nCtx = config_object.Get(\"nCtx\").As<Napi::Number>().Int32Value();\n    nGpuLayers = config_object.Get(\"ngl\").As<Napi::Number>().Int32Value();\n\n    const char *e;\n    inference_ = llmodel_model_create2(full_weight_path.c_str(), \"auto\", &e);\n    if (!inference_)\n    {\n        Napi::Error::New(env, e).ThrowAsJavaScriptException();\n        return;\n    }\n    if (GetInference() == nullptr)\n    {\n        std::cerr << \"Tried searching libraries in \\\"\" << llmodel_get_implementation_search_path() << \"\\\"\" << std::endl;\n        std::cerr << \"Tried searching for model weight in \\\"\" << full_weight_path << \"\\\"\" << std::endl;\n        std::cerr << \"Do you have runtime libraries installed?\" << std::endl;\n        Napi::Error::New(env, \"Had an issue creating llmodel object, inference is null\").ThrowAsJavaScriptException();\n        return;\n    }\n\n    std::string device = config_object.Get(\"device\").As<Napi::String>();\n    if (device != \"cpu\")\n    {\n        size_t mem = llmodel_required_mem(GetInference(), full_weight_path.c_str(), nCtx, nGpuLayers);\n\n        auto success = llmodel_gpu_init_gpu_device_by_string(GetInference(), mem, device.c_str());\n        if (!success)\n        {\n            // https://github.com/nomic-ai/gpt4all/blob/3acbef14b7c2436fe033cae9036e695d77461a16/gpt4all-bindings/python/gpt4all/pyllmodel.py#L215\n            // Haven't implemented this but it is still open to contribution\n            std::cout << \"WARNING: Failed to init GPU\\n\";\n        }\n    }\n\n    auto success = llmodel_loadModel(GetInference(), full_weight_path.c_str(), nCtx, nGpuLayers);\n    if (!success)\n    {\n        Napi::Error::New(env, \"Failed to load model at given path\").ThrowAsJavaScriptException();\n        return;\n    }\n    // optional\n    if (config_object.Has(\"model_type\"))\n    {\n        type = 
config_object.Get(\"model_type\").As<Napi::String>();\n    }\n};\n\n//  NodeModelWrapper::~NodeModelWrapper() {\n//    if(GetInference() != nullptr) {\n//        std::cout << \"Debug: deleting model\\n\";\n//        llmodel_model_destroy(inference_);\n//        std::cout << (inference_ == nullptr);\n//    }\n//  }\n//  void NodeModelWrapper::Finalize(Napi::Env env) {\n//    if(inference_ != nullptr) {\n//        std::cout << \"Debug: deleting model\\n\";\n//\n//    }\n//  }\nNapi::Value NodeModelWrapper::IsModelLoaded(const Napi::CallbackInfo &info)\n{\n    return Napi::Boolean::New(info.Env(), llmodel_isModelLoaded(GetInference()));\n}\n\nNapi::Value NodeModelWrapper::StateSize(const Napi::CallbackInfo &info)\n{\n    // Implement the binding for the stateSize method\n    return Napi::Number::New(info.Env(), static_cast<int64_t>(llmodel_get_state_size(GetInference())));\n}\n\nNapi::Array ChunkedFloatPtr(float *embedding_ptr, int embedding_size, int text_len, Napi::Env const &env)\n{\n    auto n_embd = embedding_size / text_len;\n    // std::cout << \"Embedding size: \" << embedding_size << std::endl;\n    // std::cout << \"Text length: \" << text_len << std::endl;\n    // std::cout << \"Chunk size (n_embd): \" << n_embd << std::endl;\n    Napi::Array result = Napi::Array::New(env, text_len);\n    auto count = 0;\n    for (int i = 0; i < embedding_size; i += n_embd)\n    {\n        int end = std::min(i + n_embd, embedding_size);\n        // possible bounds error?\n        // Constructs a container with as many elements as the range [first,last), with each element emplace-constructed\n        // from its corresponding element in that range, in the same order.\n        std::vector<float> chunk(embedding_ptr + i, embedding_ptr + end);\n        Napi::Float32Array fltarr = Napi::Float32Array::New(env, chunk.size());\n        // I know there's a way to emplace the raw float ptr into a Napi::Float32Array but idk how and\n        //  im too scared to cause memory issues\n   
     //  this is goodenough\n        for (int j = 0; j < chunk.size(); j++)\n        {\n\n            fltarr.Set(j, chunk[j]);\n        }\n        result.Set(count++, fltarr);\n    }\n    return result;\n}\n\nNapi::Value NodeModelWrapper::GenerateEmbedding(const Napi::CallbackInfo &info)\n{\n    auto env = info.Env();\n\n    auto prefix = info[1];\n    auto dimensionality = info[2].As<Napi::Number>().Int32Value();\n    auto do_mean = info[3].As<Napi::Boolean>().Value();\n    auto atlas = info[4].As<Napi::Boolean>().Value();\n    size_t embedding_size;\n    size_t token_count = 0;\n\n    // This procedure can maybe be optimized but its whatever, i have too many intermediary structures\n    std::vector<std::string> text_arr;\n    bool is_single_text = false;\n    if (info[0].IsString())\n    {\n        is_single_text = true;\n        text_arr.push_back(info[0].As<Napi::String>().Utf8Value());\n    }\n    else\n    {\n        auto jsarr = info[0].As<Napi::Array>();\n        size_t len = jsarr.Length();\n        text_arr.reserve(len);\n        for (size_t i = 0; i < len; ++i)\n        {\n            std::string str = jsarr.Get(i).As<Napi::String>().Utf8Value();\n            text_arr.push_back(str);\n        }\n    }\n    std::vector<const char *> str_ptrs;\n    str_ptrs.reserve(text_arr.size() + 1);\n    for (size_t i = 0; i < text_arr.size(); ++i)\n        str_ptrs.push_back(text_arr[i].c_str());\n    str_ptrs.push_back(nullptr);\n    const char *_err = nullptr;\n    float *embeds = llmodel_embed(GetInference(), str_ptrs.data(),  &embedding_size,\n                                  prefix.IsUndefined() ? nullptr : prefix.As<Napi::String>().Utf8Value().c_str(),\n                                  dimensionality, &token_count, do_mean, atlas, nullptr, &_err);\n    if (!embeds)\n    {\n        // i dont wanna deal with c strings lol\n        std::string err(_err);\n        Napi::Error::New(env, err == \"(unknown error)\" ? 
\"Unknown error: sorry bud\" : err).ThrowAsJavaScriptException();\n        return env.Undefined();\n    }\n    auto embedmat = ChunkedFloatPtr(embeds, embedding_size, text_arr.size(), env);\n\n    llmodel_free_embedding(embeds);\n    auto res = Napi::Object::New(env);\n    res.Set(\"n_prompt_tokens\", token_count);\n    if(is_single_text) {\n        res.Set(\"embeddings\", embedmat.Get(static_cast<uint32_t>(0)));\n    } else {\n        res.Set(\"embeddings\", embedmat);\n    }\n\n    return res;\n}\n\n/**\n * Generate a response using the model.\n * @param prompt A string representing the input prompt.\n * @param options Inference options.\n */\nNapi::Value NodeModelWrapper::Infer(const Napi::CallbackInfo &info)\n{\n    auto env = info.Env();\n    std::string prompt;\n    if (info[0].IsString())\n    {\n        prompt = info[0].As<Napi::String>().Utf8Value();\n    }\n    else\n    {\n        Napi::Error::New(info.Env(), \"invalid string argument\").ThrowAsJavaScriptException();\n        return info.Env().Undefined();\n    }\n\n    if (!info[1].IsObject())\n    {\n        Napi::Error::New(info.Env(), \"Missing Prompt Options\").ThrowAsJavaScriptException();\n        return info.Env().Undefined();\n    }\n    // defaults copied from python bindings\n    llmodel_prompt_context promptContext = {.logits = nullptr,\n                                            .tokens = nullptr,\n                                            .n_past = 0,\n                                            .n_ctx = nCtx,\n                                            .n_predict = 4096,\n                                            .top_k = 40,\n                                            .top_p = 0.9f,\n                                            .min_p = 0.0f,\n                                            .temp = 0.1f,\n                                            .n_batch = 8,\n                                            .repeat_penalty = 1.2f,\n                                            
.repeat_last_n = 10,\n                                            .context_erase = 0.75};\n\n    PromptWorkerConfig promptWorkerConfig;\n\n    auto inputObject = info[1].As<Napi::Object>();\n\n    if (inputObject.Has(\"logits\") || inputObject.Has(\"tokens\"))\n    {\n        Napi::Error::New(info.Env(), \"Invalid input: 'logits' or 'tokens' properties are not allowed\")\n            .ThrowAsJavaScriptException();\n        return info.Env().Undefined();\n    }\n\n    // Assign the remaining properties\n    if (inputObject.Has(\"nPast\") && inputObject.Get(\"nPast\").IsNumber())\n    {\n        promptContext.n_past = inputObject.Get(\"nPast\").As<Napi::Number>().Int32Value();\n    }\n    if (inputObject.Has(\"nPredict\") && inputObject.Get(\"nPredict\").IsNumber())\n    {\n        promptContext.n_predict = inputObject.Get(\"nPredict\").As<Napi::Number>().Int32Value();\n    }\n    if (inputObject.Has(\"topK\") && inputObject.Get(\"topK\").IsNumber())\n    {\n        promptContext.top_k = inputObject.Get(\"topK\").As<Napi::Number>().Int32Value();\n    }\n    if (inputObject.Has(\"topP\") && inputObject.Get(\"topP\").IsNumber())\n    {\n        promptContext.top_p = inputObject.Get(\"topP\").As<Napi::Number>().FloatValue();\n    }\n    if (inputObject.Has(\"minP\") && inputObject.Get(\"minP\").IsNumber())\n    {\n        promptContext.min_p = inputObject.Get(\"minP\").As<Napi::Number>().FloatValue();\n    }\n    if (inputObject.Has(\"temp\") && inputObject.Get(\"temp\").IsNumber())\n    {\n        promptContext.temp = inputObject.Get(\"temp\").As<Napi::Number>().FloatValue();\n    }\n    if (inputObject.Has(\"nBatch\") && inputObject.Get(\"nBatch\").IsNumber())\n    {\n        promptContext.n_batch = inputObject.Get(\"nBatch\").As<Napi::Number>().Int32Value();\n    }\n    if (inputObject.Has(\"repeatPenalty\") && inputObject.Get(\"repeatPenalty\").IsNumber())\n    {\n        promptContext.repeat_penalty = 
inputObject.Get(\"repeatPenalty\").As<Napi::Number>().FloatValue();\n    }\n    if (inputObject.Has(\"repeatLastN\") && inputObject.Get(\"repeatLastN\").IsNumber())\n    {\n        promptContext.repeat_last_n = inputObject.Get(\"repeatLastN\").As<Napi::Number>().Int32Value();\n    }\n    if (inputObject.Has(\"contextErase\") && inputObject.Get(\"contextErase\").IsNumber())\n    {\n        promptContext.context_erase = inputObject.Get(\"contextErase\").As<Napi::Number>().FloatValue();\n    }\n    if (inputObject.Has(\"onPromptToken\") && inputObject.Get(\"onPromptToken\").IsFunction())\n    {\n        promptWorkerConfig.promptCallback = inputObject.Get(\"onPromptToken\").As<Napi::Function>();\n        promptWorkerConfig.hasPromptCallback = true;\n    }\n    if (inputObject.Has(\"onResponseToken\") && inputObject.Get(\"onResponseToken\").IsFunction())\n    {\n        promptWorkerConfig.responseCallback = inputObject.Get(\"onResponseToken\").As<Napi::Function>();\n        promptWorkerConfig.hasResponseCallback = true;\n    }\n\n    // copy to protect llmodel resources when splitting to new thread\n    //  llmodel_prompt_context copiedPrompt = promptContext;\n    promptWorkerConfig.context = promptContext;\n    promptWorkerConfig.model = GetInference();\n    promptWorkerConfig.mutex = &inference_mutex;\n    promptWorkerConfig.prompt = prompt;\n    promptWorkerConfig.result = \"\";\n\n    promptWorkerConfig.promptTemplate = inputObject.Get(\"promptTemplate\").As<Napi::String>();\n    if (inputObject.Has(\"special\"))\n    {\n        promptWorkerConfig.special = inputObject.Get(\"special\").As<Napi::Boolean>();\n    }\n    if (inputObject.Has(\"fakeReply\"))\n    {\n        // this will be deleted in the worker\n        promptWorkerConfig.fakeReply = new std::string(inputObject.Get(\"fakeReply\").As<Napi::String>().Utf8Value());\n    }\n    auto worker = new PromptWorker(env, promptWorkerConfig);\n\n    worker->Queue();\n\n    return worker->GetPromise();\n}\nvoid 
NodeModelWrapper::Dispose(const Napi::CallbackInfo &info)\n{\n    llmodel_model_destroy(inference_);\n}\nvoid NodeModelWrapper::SetThreadCount(const Napi::CallbackInfo &info)\n{\n    if (info[0].IsNumber())\n    {\n        llmodel_setThreadCount(GetInference(), info[0].As<Napi::Number>().Int64Value());\n    }\n    else\n    {\n        Napi::Error::New(info.Env(), \"Could not set thread count: argument 1 is NaN\").ThrowAsJavaScriptException();\n        return;\n    }\n}\n\nNapi::Value NodeModelWrapper::GetName(const Napi::CallbackInfo &info)\n{\n    return Napi::String::New(info.Env(), name);\n}\nNapi::Value NodeModelWrapper::ThreadCount(const Napi::CallbackInfo &info)\n{\n    return Napi::Number::New(info.Env(), llmodel_threadCount(GetInference()));\n}\n\nNapi::Value NodeModelWrapper::GetLibraryPath(const Napi::CallbackInfo &info)\n{\n    return Napi::String::New(info.Env(), llmodel_get_implementation_search_path());\n}\n\nllmodel_model NodeModelWrapper::GetInference()\n{\n    return inference_;\n}\n\n// Exports Bindings\nNapi::Object Init(Napi::Env env, Napi::Object exports)\n{\n    exports[\"LLModel\"] = NodeModelWrapper::GetClass(env);\n    return exports;\n}\n\nNODE_API_MODULE(NODE_GYP_MODULE_NAME, Init)\n"
  },
  {
    "path": "gpt4all-bindings/typescript/index.h",
    "content": "#include \"llmodel.h\"\n#include \"llmodel_c.h\"\n#include \"prompt.h\"\n#include <atomic>\n#include <filesystem>\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <napi.h>\n#include <set>\n\nnamespace fs = std::filesystem;\n\nclass NodeModelWrapper : public Napi::ObjectWrap<NodeModelWrapper>\n{\n\n  public:\n    NodeModelWrapper(const Napi::CallbackInfo &);\n    // virtual ~NodeModelWrapper();\n    Napi::Value GetType(const Napi::CallbackInfo &info);\n    Napi::Value IsModelLoaded(const Napi::CallbackInfo &info);\n    Napi::Value StateSize(const Napi::CallbackInfo &info);\n    // void Finalize(Napi::Env env) override;\n    /**\n     * Prompting the model. This entails spawning a new thread and adding the response tokens\n     * into a thread local string variable.\n     */\n    Napi::Value Infer(const Napi::CallbackInfo &info);\n    void SetThreadCount(const Napi::CallbackInfo &info);\n    void Dispose(const Napi::CallbackInfo &info);\n    Napi::Value GetName(const Napi::CallbackInfo &info);\n    Napi::Value ThreadCount(const Napi::CallbackInfo &info);\n    Napi::Value GenerateEmbedding(const Napi::CallbackInfo &info);\n    Napi::Value HasGpuDevice(const Napi::CallbackInfo &info);\n    Napi::Value ListGpus(const Napi::CallbackInfo &info);\n    Napi::Value InitGpuByString(const Napi::CallbackInfo &info);\n    Napi::Value GetRequiredMemory(const Napi::CallbackInfo &info);\n    Napi::Value GetGpuDevices(const Napi::CallbackInfo &info);\n    /*\n     * The path that is used to search for the dynamic libraries\n     */\n    Napi::Value GetLibraryPath(const Napi::CallbackInfo &info);\n    /**\n     * Creates the LLModel class\n     */\n    static Napi::Function GetClass(Napi::Env);\n    llmodel_model GetInference();\n\n  private:\n    /**\n     * The underlying inference that interfaces with the C interface\n     */\n    llmodel_model inference_;\n\n    std::mutex inference_mutex;\n\n    std::string type;\n    // corresponds to 
LLModel::name() in typescript\n    std::string name;\n    int nCtx{};\n    int nGpuLayers{};\n    std::string full_model_path;\n};\n"
  },
  {
    "path": "gpt4all-bindings/typescript/package.json",
    "content": "{\n  \"name\": \"gpt4all\",\n  \"version\": \"4.0.0\",\n  \"packageManager\": \"yarn@3.6.1\",\n  \"main\": \"src/gpt4all.js\",\n  \"repository\": \"nomic-ai/gpt4all\",\n  \"scripts\": {\n    \"install\": \"node-gyp-build\",\n    \"test\": \"jest\",\n    \"build:backend\": \"node scripts/build.js\",\n    \"build\": \"node-gyp-build\",\n    \"docs:build\": \"node scripts/docs.js && documentation readme ./src/gpt4all.d.ts --parse-extension js d.ts --format md --section \\\"API Reference\\\" --readme-file ../python/docs/gpt4all_nodejs.md\"\n  },\n  \"files\": [\n    \"src/**/*\",\n    \"runtimes/**/*\",\n    \"binding.gyp\",\n    \"prebuilds/**/*\",\n    \"*.h\",\n    \"*.cc\",\n    \"gpt4all-backend/**/*\"\n  ],\n  \"dependencies\": {\n    \"md5-file\": \"^5.0.0\",\n    \"node-addon-api\": \"^6.1.0\",\n    \"node-gyp-build\": \"^4.6.0\"\n  },\n  \"devDependencies\": {\n    \"@types/node\": \"^20.1.5\",\n    \"documentation\": \"^14.0.2\",\n    \"jest\": \"^29.5.0\",\n    \"prebuildify\": \"^5.0.1\",\n    \"prettier\": \"^2.8.8\"\n  },\n  \"optionalDependencies\": {\n    \"node-gyp\": \"9.x.x\"\n  },\n  \"engines\": {\n    \"node\": \">= 18.x.x\"\n  },\n  \"prettier\": {\n    \"endOfLine\": \"lf\",\n    \"tabWidth\": 4\n  },\n  \"jest\": {\n    \"verbose\": true\n  },\n  \"publishConfig\": {\n    \"registry\": \"https://registry.npmjs.org/\",\n    \"access\": \"public\",\n    \"tag\": \"latest\"\n  }\n}\n"
  },
  {
    "path": "gpt4all-bindings/typescript/prompt.cc",
    "content": "#include \"prompt.h\"\n#include <future>\n\nPromptWorker::PromptWorker(Napi::Env env, PromptWorkerConfig config)\n    : promise(Napi::Promise::Deferred::New(env)), _config(config), AsyncWorker(env)\n{\n    if (_config.hasResponseCallback)\n    {\n        _responseCallbackFn = Napi::ThreadSafeFunction::New(config.responseCallback.Env(), config.responseCallback,\n                                                            \"PromptWorker\", 0, 1, this);\n    }\n\n    if (_config.hasPromptCallback)\n    {\n        _promptCallbackFn = Napi::ThreadSafeFunction::New(config.promptCallback.Env(), config.promptCallback,\n                                                          \"PromptWorker\", 0, 1, this);\n    }\n}\n\nPromptWorker::~PromptWorker()\n{\n    if (_config.hasResponseCallback)\n    {\n        _responseCallbackFn.Release();\n    }\n    if (_config.hasPromptCallback)\n    {\n        _promptCallbackFn.Release();\n    }\n}\n\nvoid PromptWorker::Execute()\n{\n    _config.mutex->lock();\n\n    LLModelWrapper *wrapper = reinterpret_cast<LLModelWrapper *>(_config.model);\n\n    auto ctx = &_config.context;\n\n    if (size_t(ctx->n_past) < wrapper->promptContext.tokens.size())\n        wrapper->promptContext.tokens.resize(ctx->n_past);\n\n    // Copy the C prompt context\n    wrapper->promptContext.n_past = ctx->n_past;\n    wrapper->promptContext.n_ctx = ctx->n_ctx;\n    wrapper->promptContext.n_predict = ctx->n_predict;\n    wrapper->promptContext.top_k = ctx->top_k;\n    wrapper->promptContext.top_p = ctx->top_p;\n    wrapper->promptContext.temp = ctx->temp;\n    wrapper->promptContext.n_batch = ctx->n_batch;\n    wrapper->promptContext.repeat_penalty = ctx->repeat_penalty;\n    wrapper->promptContext.repeat_last_n = ctx->repeat_last_n;\n    wrapper->promptContext.contextErase = ctx->context_erase;\n\n    // Call the C++ prompt method\n\n    wrapper->llModel->prompt(\n        _config.prompt, _config.promptTemplate, [this](int32_t token_id) { return 
PromptCallback(token_id); },\n        [this](int32_t token_id, const std::string token) { return ResponseCallback(token_id, token); },\n        [](bool isRecalculating) { return isRecalculating; }, wrapper->promptContext, _config.special,\n        _config.fakeReply);\n\n    // Update the C context by giving access to the wrappers raw pointers to std::vector data\n    // which involves no copies\n    ctx->logits = wrapper->promptContext.logits.data();\n    ctx->logits_size = wrapper->promptContext.logits.size();\n    ctx->tokens = wrapper->promptContext.tokens.data();\n    ctx->tokens_size = wrapper->promptContext.tokens.size();\n\n    // Update the rest of the C prompt context\n    ctx->n_past = wrapper->promptContext.n_past;\n    ctx->n_ctx = wrapper->promptContext.n_ctx;\n    ctx->n_predict = wrapper->promptContext.n_predict;\n    ctx->top_k = wrapper->promptContext.top_k;\n    ctx->top_p = wrapper->promptContext.top_p;\n    ctx->temp = wrapper->promptContext.temp;\n    ctx->n_batch = wrapper->promptContext.n_batch;\n    ctx->repeat_penalty = wrapper->promptContext.repeat_penalty;\n    ctx->repeat_last_n = wrapper->promptContext.repeat_last_n;\n    ctx->context_erase = wrapper->promptContext.contextErase;\n\n    _config.mutex->unlock();\n}\n\nvoid PromptWorker::OnOK()\n{\n    Napi::Object returnValue = Napi::Object::New(Env());\n    returnValue.Set(\"text\", result);\n    returnValue.Set(\"nPast\", _config.context.n_past);\n    promise.Resolve(returnValue);\n    delete _config.fakeReply;\n}\n\nvoid PromptWorker::OnError(const Napi::Error &e)\n{\n    delete _config.fakeReply;\n    promise.Reject(e.Value());\n}\n\nNapi::Promise PromptWorker::GetPromise()\n{\n    return promise.Promise();\n}\n\nbool PromptWorker::ResponseCallback(int32_t token_id, const std::string token)\n{\n    if (token_id == -1)\n    {\n        return false;\n    }\n\n    if (!_config.hasResponseCallback)\n    {\n        return true;\n    }\n\n    result += token;\n\n    std::promise<bool> 
promise;\n\n    auto info = new ResponseCallbackData();\n    info->tokenId = token_id;\n    info->token = token;\n\n    auto future = promise.get_future();\n\n    auto status = _responseCallbackFn.BlockingCall(\n        info, [&promise](Napi::Env env, Napi::Function jsCallback, ResponseCallbackData *value) {\n            try\n            {\n                // Transform native data into JS data, passing it to the provided\n                // `jsCallback` -- the TSFN's JavaScript function.\n                auto token_id = Napi::Number::New(env, value->tokenId);\n                auto token = Napi::String::New(env, value->token);\n                auto jsResult = jsCallback.Call({token_id, token}).ToBoolean();\n                promise.set_value(jsResult);\n            }\n            catch (const Napi::Error &e)\n            {\n                std::cerr << \"Error in onResponseToken callback: \" << e.what() << std::endl;\n                promise.set_value(false);\n            }\n\n            delete value;\n        });\n    if (status != napi_ok)\n    {\n        Napi::Error::Fatal(\"PromptWorkerResponseCallback\", \"Napi::ThreadSafeNapi::Function.NonBlockingCall() failed\");\n    }\n\n    return future.get();\n}\n\nbool PromptWorker::RecalculateCallback(bool isRecalculating)\n{\n    return isRecalculating;\n}\n\nbool PromptWorker::PromptCallback(int32_t token_id)\n{\n    if (!_config.hasPromptCallback)\n    {\n        return true;\n    }\n\n    std::promise<bool> promise;\n\n    auto info = new PromptCallbackData();\n    info->tokenId = token_id;\n\n    auto future = promise.get_future();\n\n    auto status = _promptCallbackFn.BlockingCall(\n        info, [&promise](Napi::Env env, Napi::Function jsCallback, PromptCallbackData *value) {\n            try\n            {\n                // Transform native data into JS data, passing it to the provided\n                // `jsCallback` -- the TSFN's JavaScript function.\n                auto token_id = Napi::Number::New(env, 
value->tokenId);\n                auto jsResult = jsCallback.Call({token_id}).ToBoolean();\n                promise.set_value(jsResult);\n            }\n            catch (const Napi::Error &e)\n            {\n                std::cerr << \"Error in onPromptToken callback: \" << e.what() << std::endl;\n                promise.set_value(false);\n            }\n            delete value;\n        });\n    if (status != napi_ok)\n    {\n        Napi::Error::Fatal(\"PromptWorkerPromptCallback\", \"Napi::ThreadSafeNapi::Function.NonBlockingCall() failed\");\n    }\n\n    return future.get();\n}\n"
  },
  {
    "path": "gpt4all-bindings/typescript/prompt.h",
    "content": "#ifndef PREDICT_WORKER_H\n#define PREDICT_WORKER_H\n\n#include \"llmodel.h\"\n#include \"llmodel_c.h\"\n#include \"napi.h\"\n#include <atomic>\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <thread>\n\nstruct ResponseCallbackData\n{\n    int32_t tokenId;\n    std::string token;\n};\n\nstruct PromptCallbackData\n{\n    int32_t tokenId;\n};\n\nstruct LLModelWrapper\n{\n    LLModel *llModel = nullptr;\n    LLModel::PromptContext promptContext;\n    ~LLModelWrapper()\n    {\n        delete llModel;\n    }\n};\n\nstruct PromptWorkerConfig\n{\n    Napi::Function responseCallback;\n    bool hasResponseCallback = false;\n    Napi::Function promptCallback;\n    bool hasPromptCallback = false;\n    llmodel_model model;\n    std::mutex *mutex;\n    std::string prompt;\n    std::string promptTemplate;\n    llmodel_prompt_context context;\n    std::string result;\n    bool special = false;\n    std::string *fakeReply = nullptr;\n};\n\nclass PromptWorker : public Napi::AsyncWorker\n{\n  public:\n    PromptWorker(Napi::Env env, PromptWorkerConfig config);\n    ~PromptWorker();\n    void Execute() override;\n    void OnOK() override;\n    void OnError(const Napi::Error &e) override;\n    Napi::Promise GetPromise();\n\n    bool ResponseCallback(int32_t token_id, const std::string token);\n    bool RecalculateCallback(bool isrecalculating);\n    bool PromptCallback(int32_t token_id);\n\n  private:\n    Napi::Promise::Deferred promise;\n    std::string result;\n    PromptWorkerConfig _config;\n    Napi::ThreadSafeFunction _responseCallbackFn;\n    Napi::ThreadSafeFunction _promptCallbackFn;\n};\n\n#endif // PREDICT_WORKER_H\n"
  },
  {
    "path": "gpt4all-bindings/typescript/scripts/build.js",
    "content": "const { spawn } = require(\"node:child_process\");\nconst { resolve } = require(\"path\");\nconst args = process.argv.slice(2);\nconst platform = process.platform;\n//windows 64bit or 32\nif (platform === \"win32\") {\n    const path = \"scripts/build_msvc.bat\";\n    spawn(resolve(path), [\"/Y\", ...args], { shell: true, stdio: \"inherit\" });\n    process.on(\"data\", (s) => console.log(s.toString()));\n} else if (platform === \"linux\" || platform === \"darwin\") {\n    const path = \"scripts/build_unix.sh\";\n    spawn(`sh `, [path, args], {\n        shell: true,\n        stdio: \"inherit\",\n    });\n    process.on(\"data\", (s) => console.log(s.toString()));\n}\n"
  },
  {
    "path": "gpt4all-bindings/typescript/scripts/docs.js",
    "content": "//Maybe some command line piping would work better, but can't think of platform independent command line tool\n\nconst fs = require('fs');\n\nconst newPath = '../python/docs/gpt4all_nodejs.md';\nconst filepath = './README.md';\nconst intro = fs.readFileSync(filepath);\n\nfs.writeFileSync(\n    newPath, intro\n);\n\n"
  },
  {
    "path": "gpt4all-bindings/typescript/scripts/mkclangd.js",
    "content": "/// makes compile_flags.txt for clangd server support with this project\n/// run this with typescript as your cwd\n//\n//for debian users make sure to install libstdc++-12-dev\n\nconst nodeaddonapi=require('node-addon-api').include;\n\nconst fsp = require('fs/promises');\nconst { existsSync, readFileSync } = require('fs');\nconst assert = require('node:assert');\nconst findnodeapih = () => {\n    assert(existsSync(\"./build\"), \"Haven't built the application once yet. run node scripts/prebuild.js\");\n    const dir = readFileSync(\"./build/config.gypi\", 'utf8');\n    const nodedir_line = dir.match(/\"nodedir\": \"([^\"]+)\"/);\n    assert(nodedir_line, \"Found no matches\")\n    assert(nodedir_line[1]);\n    console.log(\"node_api.h found at: \", nodedir_line[1]);\n    return nodedir_line[1]+\"/include/node\";\n};\n\nconst knownIncludes = [\n    '-I',\n    './',\n    '-I',\n    nodeaddonapi.substring(1, nodeaddonapi.length-1),\n    '-I',\n    '../../gpt4all-backend',\n    '-I',\n    findnodeapih()\n];\nconst knownFlags = [\n    \"-x\",\n    \"c++\",\n    '-std=c++17'\n];\n\n\nconst output = knownFlags.join('\\n')+'\\n'+knownIncludes.join('\\n');\n\nfsp.writeFile('./compile_flags.txt', output, 'utf8')\n    .then(() => console.log('done'))\n    .catch(() => console.err('failed'));\n\n"
  },
  {
    "path": "gpt4all-bindings/typescript/scripts/prebuild.js",
    "content": "const prebuildify = require(\"prebuildify\");\n\nasync function createPrebuilds(combinations) {\n    for (const { platform, arch } of combinations) {\n        const opts = {\n            platform,\n            arch,\n            napi: true,\n            targets: [\"18.16.0\"]\n        };\n        try {\n            await createPrebuild(opts);\n            console.log(\n                `Build succeeded for platform ${opts.platform} and architecture ${opts.arch}`\n            );\n        } catch (err) {\n            console.error(\n                `Error building for platform ${opts.platform} and architecture ${opts.arch}:`,\n                err\n            );\n        }\n    }\n}\n\nfunction createPrebuild(opts) {\n    return new Promise((resolve, reject) => {\n        prebuildify(opts, (err) => {\n            if (err) {\n                reject(err);\n            } else {\n                resolve();\n            }\n        });\n    });\n}\n\nlet prebuildConfigs;\nif(process.platform === 'win32') {\n   prebuildConfigs = [\n    { platform: \"win32\", arch: \"x64\" }\n   ];\n} else if(process.platform === 'linux') {\n   //Unsure if darwin works, need mac tester!\n   prebuildConfigs = [\n    { platform: \"linux\", arch: \"x64\" },\n    //{ platform: \"linux\", arch: \"arm64\" },\n    //{ platform: \"linux\", arch: \"armv7\" },\n   ]\n} else if(process.platform === 'darwin') {\n    prebuildConfigs = [\n       { platform: \"darwin\", arch: \"x64\" },\n       { platform: \"darwin\", arch: \"arm64\" },\n    ]\n}\n\ncreatePrebuilds(prebuildConfigs)\n    .then(() => console.log(\"All builds succeeded\"))\n    .catch((err) => console.error(\"Error building:\", err));\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/callbacks.mjs",
    "content": "import { promises as fs } from \"node:fs\";\nimport { loadModel, createCompletion } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n});\n\nconst res = await createCompletion(\n    model,\n    \"I've got three 🍣 - What shall I name them?\",\n    {\n        onPromptToken: (tokenId) => {\n            console.debug(\"onPromptToken\", { tokenId });\n            // throwing an error will cancel\n            throw new Error(\"This is an error\");\n            // const foo = thisMethodDoesNotExist();\n            // returning false will cancel as well\n            // return false;\n        },\n        onResponseToken: (tokenId, token) => {\n            console.debug(\"onResponseToken\", { tokenId, token });\n            // same applies here\n        },\n    }\n);\n\nconsole.debug(\"Output:\", {\n    usage: res.usage,\n    message: res.choices[0].message,\n});\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/chat-memory.mjs",
    "content": "import { loadModel, createCompletion } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n});\n\nconst chat = await model.createChatSession({\n    messages: [\n        {\n            role: \"user\",\n            content: \"I'll tell you a secret password: It's 63445.\",\n        },\n        {\n            role: \"assistant\",\n            content: \"I will do my best to remember that.\",\n        },\n        {\n            role: \"user\",\n            content:\n                \"And here another fun fact: Bananas may be bluer than bread at night.\",\n        },\n        {\n            role: \"assistant\",\n            content: \"Yes, that makes sense.\",\n        },\n    ],\n});\n\nconst turn1 = await createCompletion(\n    chat,\n    \"Please tell me the secret password.\"\n);\nconsole.debug(turn1.choices[0].message);\n// \"The secret password you shared earlier is 63445.\"\"\n\nconst turn2 = await createCompletion(\n    chat,\n    \"Thanks! Have your heard about the bananas?\"\n);\nconsole.debug(turn2.choices[0].message);\n\nfor (let i = 0; i < 32; i++) {\n    // gpu go brr\n    const turn = await createCompletion(\n        chat,\n        i % 2 === 0 ? \"Tell me a fun fact.\" : \"And a boring one?\"\n    );\n    console.debug({\n        message: turn.choices[0].message,\n        n_past_tokens: turn.usage.n_past_tokens,\n    });\n}\n\nconst finalTurn = await createCompletion(\n    chat,\n    \"Now I forgot the secret password. Can you remind me?\"\n);\nconsole.debug(finalTurn.choices[0].message);\n\n// result of finalTurn may vary depending on whether the generated facts pushed the secret out of the context window.\n// \"Of course! The secret password you shared earlier is 63445.\"\n// \"I apologize for any confusion. As an AI language model, ...\"\n\nmodel.dispose();\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/chat-minimal.mjs",
    "content": "import { loadModel, createCompletion } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n});\n\nconst chat = await model.createChatSession();\n\nawait createCompletion(\n    chat,\n    \"Why are bananas rather blue than bread at night sometimes?\",\n    {\n        verbose: true,\n    }\n);\nawait createCompletion(chat, \"Are you sure?\", {\n    verbose: true,\n});\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/concurrency.mjs",
    "content": "import {\n    loadModel,\n    createCompletion,\n} from \"../src/gpt4all.js\";\n\nconst modelOptions = {\n    verbose: true,\n};\n\nconst model1 = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\", {\n    ...modelOptions,\n    device: \"gpu\", // only one model can be on gpu\n});\nconst model2 = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\", modelOptions);\nconst model3 = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\", modelOptions);\n\nconst promptContext = {\n    verbose: true,\n}\n\nconst responses = await Promise.all([\n    createCompletion(model1, \"What is 1 + 1?\", promptContext),\n    // generating with the same model instance will wait for the previous completion to finish\n    createCompletion(model1, \"What is 1 + 1?\", promptContext),\n    // generating with different model instances will run in parallel\n    createCompletion(model2, \"What is 1 + 2?\", promptContext),\n    createCompletion(model3, \"What is 1 + 3?\", promptContext),\n]);\nconsole.log(responses.map((res) => res.choices[0].message));\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/embed-jsonl.mjs",
    "content": "import { loadModel, createEmbedding } from '../src/gpt4all.js'\nimport { createGunzip, createGzip, createUnzip } from 'node:zlib';\nimport { Readable } from 'stream'\nimport readline  from 'readline'\nconst embedder = await loadModel(\"nomic-embed-text-v1.5.f16.gguf\", { verbose: true, type: 'embedding', device: 'gpu' })\nconsole.log(\"Running with\", embedder.llm.threadCount(), \"threads\");\n\n\nconst unzip = createGunzip();\nconst url = \"https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/squad_pairs.jsonl.gz\"\nconst stream = await fetch(url)\n        .then(res => Readable.fromWeb(res.body));\n\nconst lineReader = readline.createInterface({\n    input: stream.pipe(unzip),\n    crlfDelay: Infinity\n})\n\nlineReader.on('line', line => {\n    //pairs of questions and answers\n    const question_answer = JSON.parse(line)\n    console.log(createEmbedding(embedder, question_answer))\n})\n\nlineReader.on('close', () => embedder.dispose())\n\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/embed.mjs",
    "content": "import { loadModel, createEmbedding } from '../src/gpt4all.js'\n\nconst embedder = await loadModel(\"nomic-embed-text-v1.5.f16.gguf\", { verbose: true, type: 'embedding' , device: 'gpu' })\n\ntry {\nconsole.log(createEmbedding(embedder, [\"Accept your current situation\", \"12312\"], { prefix: \"search_document\"  }))\n\n} catch(e) {\nconsole.log(e)\n}\n\nembedder.dispose()\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/llmodel.mjs",
    "content": "import {\n    LLModel,\n    createCompletion,\n    DEFAULT_DIRECTORY,\n    DEFAULT_LIBRARIES_DIRECTORY,\n    loadModel,\n} from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"mistral-7b-openorca.gguf2.Q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n});\nconst ll = model.llm;\n\ntry {\n    class Extended extends LLModel {}\n} catch (e) {\n    console.log(\"Extending from native class gone wrong \" + e);\n}\n\nconsole.log(\"state size \" + ll.stateSize());\n\nconsole.log(\"thread count \" + ll.threadCount());\nll.setThreadCount(5);\n\nconsole.log(\"thread count \" + ll.threadCount());\nll.setThreadCount(4);\nconsole.log(\"thread count \" + ll.threadCount());\nconsole.log(\"name \" + ll.name());\nconsole.log(\"type: \" + ll.type());\nconsole.log(\"Default directory for models\", DEFAULT_DIRECTORY);\nconsole.log(\"Default directory for libraries\", DEFAULT_LIBRARIES_DIRECTORY);\nconsole.log(\"Has GPU\", ll.hasGpuDevice());\nconsole.log(\"gpu devices\", ll.listGpu());\nconsole.log(\"Required Mem in bytes\", ll.memoryNeeded());\n\n// to ingest a custom system prompt without using a chat session.\nawait createCompletion(\n    model,\n    \"<|im_start|>system\\nYou are an advanced mathematician.\\n<|im_end|>\\n\",\n    {\n        promptTemplate: \"%1\",\n        nPredict: 0,\n        special: true,\n    }\n);\nconst completion1 = await createCompletion(model, \"What is 1 + 1?\", {\n    verbose: true,\n});\nconsole.log(`🤖 > ${completion1.choices[0].message.content}`);\n//Very specific:\n// tested on Ubuntu 22.0, Linux Mint, if I set nPast to 100, the app hangs.\nconst completion2 = await createCompletion(model, \"And if we add two?\", {\n    verbose: true,\n});\nconsole.log(`🤖 > ${completion2.choices[0].message.content}`);\n\n//CALLING DISPOSE WILL INVALID THE NATIVE MODEL. USE THIS TO CLEANUP\nmodel.dispose();\n\nconsole.log(\"model disposed, exiting...\");\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/long-context.mjs",
    "content": "import { promises as fs } from \"node:fs\";\nimport { loadModel, createCompletion } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n    nCtx: 32768,\n});\n\nconst typeDefSource = await fs.readFile(\"./src/gpt4all.d.ts\", \"utf-8\");\n\nconst res = await createCompletion(\n    model,\n    \"Here are the type definitions for the GPT4All API:\\n\\n\" +\n        typeDefSource +\n        \"\\n\\nHow do I create a completion with a really large context window?\",\n    {\n        verbose: true,\n    }\n);\nconsole.debug(res.choices[0].message);\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/model-switching.mjs",
    "content": "import { loadModel, createCompletion } from \"../src/gpt4all.js\";\n\nconst model1 = await loadModel(\"Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\", {\n    device: \"gpu\",\n    nCtx: 4096,\n});\n\nconst chat1 = await model1.createChatSession({\n    temperature: 0.8,\n    topP: 0.7,\n    topK: 60,\n});\n\nconst chat1turn1 = await createCompletion(\n    chat1,\n    \"Outline a short story concept for adults. About why bananas are rather blue than bread is green at night sometimes. Not too long.\"\n);\nconsole.debug(chat1turn1.choices[0].message);\n\nconst chat1turn2 = await createCompletion(\n    chat1,\n    \"Lets sprinkle some plot twists. And a cliffhanger at the end.\"\n);\nconsole.debug(chat1turn2.choices[0].message);\n\nconst chat1turn3 = await createCompletion(\n    chat1,\n    \"Analyze your plot. Find the weak points.\"\n);\nconsole.debug(chat1turn3.choices[0].message);\n\nconst chat1turn4 = await createCompletion(\n    chat1,\n    \"Rewrite it based on the analysis.\"\n);\nconsole.debug(chat1turn4.choices[0].message);\n\nmodel1.dispose();\n\nconst model2 = await loadModel(\"gpt4all-falcon-newbpe-q4_0.gguf\", {\n    device: \"gpu\",\n});\n\nconst chat2 = await model2.createChatSession({\n    messages: chat1.messages,\n});\n\nconst chat2turn1 = await createCompletion(\n    chat2,\n    \"Give three ideas how this plot could be improved.\"\n);\nconsole.debug(chat2turn1.choices[0].message);\n\nconst chat2turn2 = await createCompletion(\n    chat2,\n    \"Revise the plot, applying your ideas.\"\n);\nconsole.debug(chat2turn2.choices[0].message);\n\nmodel2.dispose();\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/stateless.mjs",
    "content": "import { loadModel, createCompletion } from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"orca-mini-3b-gguf2-q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n});\n\nconst messages = [\n    {\n        role: \"system\",\n        content: \"<|im_start|>system\\nYou are an advanced mathematician.\\n<|im_end|>\\n\",\n    },\n    {\n        role: \"user\",\n        content: \"What's 2+2?\",\n    },\n    {\n        role: \"assistant\",\n        content: \"5\",\n    },\n    {\n        role: \"user\",\n        content: \"Are you sure?\",\n    },\n];\n\n\nconst res1 = await createCompletion(model, messages);\nconsole.debug(res1.choices[0].message);\nmessages.push(res1.choices[0].message);\n\nmessages.push({\n    role: \"user\",\n    content: \"Could you double check that?\",\n});\n\nconst res2 = await createCompletion(model, messages);\nconsole.debug(res2.choices[0].message);\nmessages.push(res2.choices[0].message);\n\nmessages.push({\n    role: \"user\",\n    content: \"Let's bring out the big calculators.\",\n});\n\nconst res3 = await createCompletion(model, messages);\nconsole.debug(res3.choices[0].message);\nmessages.push(res3.choices[0].message);\n\n// console.debug(messages);\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/streaming.mjs",
    "content": "import {\n    loadModel,\n    createCompletion,\n    createCompletionStream,\n    createCompletionGenerator,\n} from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"mistral-7b-openorca.gguf2.Q4_0.gguf\", {\n    device: \"gpu\",\n});\n\nprocess.stdout.write(\"### Stream:\");\nconst stream = createCompletionStream(model, \"How are you?\");\nstream.tokens.on(\"data\", (data) => {\n    process.stdout.write(data);\n});\nawait stream.result;\nprocess.stdout.write(\"\\n\");\n\nprocess.stdout.write(\"### Stream with pipe:\");\nconst stream2 = createCompletionStream(\n    model,\n    \"Please say something nice about node streams.\"\n);\nstream2.tokens.pipe(process.stdout);\nconst stream2Res = await stream2.result;\nprocess.stdout.write(\"\\n\");\n\nprocess.stdout.write(\"### Generator:\");\nconst gen = createCompletionGenerator(model, \"generators instead?\", {\n    nPast: stream2Res.usage.n_past_tokens,\n});\nfor await (const chunk of gen) {\n    process.stdout.write(chunk);\n}\n\nprocess.stdout.write(\"\\n\");\n\nprocess.stdout.write(\"### Callback:\");\nawait createCompletion(model, \"Why not just callbacks?\", {\n    onResponseToken: (tokenId, token) => {\n        process.stdout.write(token);\n    },\n});\nprocess.stdout.write(\"\\n\");\n\nprocess.stdout.write(\"### 2nd Generator:\");\nconst gen2 = createCompletionGenerator(model, \"If 3 + 3 is 5, what is 2 + 2?\");\n\nlet chunk = await gen2.next();\nwhile (!chunk.done) {\n    process.stdout.write(chunk.value);\n    chunk = await gen2.next();\n}\nprocess.stdout.write(\"\\n\");\nconsole.debug(\"generator finished\", chunk);\nmodel.dispose();\n"
  },
  {
    "path": "gpt4all-bindings/typescript/spec/system.mjs",
    "content": "import {\n    loadModel,\n    createCompletion,\n} from \"../src/gpt4all.js\";\n\nconst model = await loadModel(\"Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\", {\n    verbose: true,\n    device: \"gpu\",\n});\n\nconst chat = await model.createChatSession({\n    verbose: true,\n    systemPrompt: \"<|im_start|>system\\nRoleplay as Batman. Answer as if you are Batman, never say you're an Assistant.\\n<|im_end|>\",\n});\nconst turn1 = await createCompletion(chat, \"You have any plans tonight?\");\nconsole.log(turn1.choices[0].message);\n// \"I'm afraid I must decline any personal invitations tonight. As Batman, I have a responsibility to protect Gotham City.\"\n\nmodel.dispose();\n"
  },
  {
    "path": "gpt4all-bindings/typescript/src/chat-session.js",
    "content": "const { DEFAULT_PROMPT_CONTEXT } = require(\"./config\");\nconst { prepareMessagesForIngest } = require(\"./util\");\n\nclass ChatSession {\n    model;\n    modelName;\n    /**\n     * @type {import('./gpt4all').ChatMessage[]}\n     */\n    messages;\n    /**\n     * @type {string}\n     */\n    systemPrompt;\n    /**\n     * @type {import('./gpt4all').LLModelPromptContext}\n     */\n    promptContext;\n    /**\n     * @type {boolean}\n     */\n    initialized;\n\n    constructor(model, chatSessionOpts = {}) {\n        const { messages, systemPrompt, ...sessionDefaultPromptContext } =\n            chatSessionOpts;\n        this.model = model;\n        this.modelName = model.llm.name();\n        this.messages = messages ?? [];\n        this.systemPrompt = systemPrompt ?? model.config.systemPrompt;\n        this.initialized = false;\n        this.promptContext = {\n            ...DEFAULT_PROMPT_CONTEXT,\n            ...sessionDefaultPromptContext,\n            nPast: 0,\n        };\n    }\n\n    async initialize(completionOpts = {}) {\n        if (this.model.activeChatSession !== this) {\n            this.model.activeChatSession = this;\n        }\n\n        let tokensIngested = 0;\n\n        // ingest system prompt\n\n        if (this.systemPrompt) {\n            const systemRes = await this.model.generate(this.systemPrompt, {\n                promptTemplate: \"%1\",\n                nPredict: 0,\n                special: true,\n                nBatch: this.promptContext.nBatch,\n                // verbose: true,\n            });\n            tokensIngested += systemRes.tokensIngested;\n            this.promptContext.nPast = systemRes.nPast;\n        }\n\n        // ingest initial messages\n        if (this.messages.length > 0) {\n            tokensIngested += await this.ingestMessages(\n                this.messages,\n                completionOpts\n            );\n        }\n\n        this.initialized = true;\n\n        return tokensIngested;\n    
}\n\n    async ingestMessages(messages, completionOpts = {}) {\n        const turns = prepareMessagesForIngest(messages);\n\n        // send the message pairs to the model\n        let tokensIngested = 0;\n\n        for (const turn of turns) {\n            const turnRes = await this.model.generate(turn.user, {\n                ...this.promptContext,\n                ...completionOpts,\n                fakeReply: turn.assistant,\n            });\n            tokensIngested += turnRes.tokensIngested;\n            this.promptContext.nPast = turnRes.nPast;\n        }\n        return tokensIngested;\n    }\n\n    async generate(input, completionOpts = {}) {\n        if (this.model.activeChatSession !== this) {\n            throw new Error(\n                \"Chat session is not active. Create a new chat session or call initialize to continue.\"\n            );\n        }\n        if (completionOpts.nPast > this.promptContext.nPast) {\n            throw new Error(\n                `nPast cannot be greater than ${this.promptContext.nPast}.`\n            );\n        }\n        let tokensIngested = 0;\n\n        if (!this.initialized) {\n            tokensIngested += await this.initialize(completionOpts);\n        }\n\n        let prompt = input;\n\n        if (Array.isArray(input)) {\n            // assuming input is a messages array\n            // -> tailing user message will be used as the final prompt. 
its optional.\n            // -> all system messages will be ignored.\n            // -> all other messages will be ingested with fakeReply\n            // -> user/assistant messages will be pushed into the messages array\n\n            let tailingUserMessage = \"\";\n            let messagesToIngest = input;\n\n            const lastMessage = input[input.length - 1];\n            if (lastMessage.role === \"user\") {\n                tailingUserMessage = lastMessage.content;\n                messagesToIngest = input.slice(0, input.length - 1);\n            }\n\n            if (messagesToIngest.length > 0) {\n                tokensIngested += await this.ingestMessages(\n                    messagesToIngest,\n                    completionOpts\n                );\n                this.messages.push(...messagesToIngest);\n            }\n\n            if (tailingUserMessage) {\n                prompt = tailingUserMessage;\n            } else {\n                return {\n                    text: \"\",\n                    nPast: this.promptContext.nPast,\n                    tokensIngested,\n                    tokensGenerated: 0,\n                };\n            }\n        }\n\n        const result = await this.model.generate(prompt, {\n            ...this.promptContext,\n            ...completionOpts,\n        });\n\n        this.promptContext.nPast = result.nPast;\n        result.tokensIngested += tokensIngested;\n\n        this.messages.push({\n            role: \"user\",\n            content: prompt,\n        });\n        this.messages.push({\n            role: \"assistant\",\n            content: result.text,\n        });\n\n        return result;\n    }\n}\n\nmodule.exports = {\n    ChatSession,\n};\n"
  },
  {
    "path": "gpt4all-bindings/typescript/src/config.js",
    "content": "const os = require(\"node:os\");\nconst path = require(\"node:path\");\n\nconst DEFAULT_DIRECTORY = path.resolve(os.homedir(), \".cache/gpt4all\");\n\nconst librarySearchPaths = [\n    path.join(DEFAULT_DIRECTORY, \"libraries\"),\n    path.resolve(\"./libraries\"),\n    path.resolve(\n        __dirname,\n        \"..\",\n        `runtimes/${process.platform}-${process.arch}/native`,\n    ),\n    //for darwin. This is hardcoded for now but it should work\n    path.resolve(\n        __dirname,\n        \"..\",\n        `runtimes/${process.platform}/native`,\n    ),\n    process.cwd(),\n];\n\nconst DEFAULT_LIBRARIES_DIRECTORY = librarySearchPaths.join(\";\");\n\nconst DEFAULT_MODEL_CONFIG = {\n    systemPrompt: \"\",\n    promptTemplate: \"### Human:\\n%1\\n\\n### Assistant:\\n\",\n}\n\nconst DEFAULT_MODEL_LIST_URL = \"https://gpt4all.io/models/models3.json\";\n\nconst DEFAULT_PROMPT_CONTEXT = {\n    temp: 0.1,\n    topK: 40,\n    topP: 0.9,\n    minP: 0.0,\n    repeatPenalty: 1.18,\n    repeatLastN: 10,\n    nBatch: 100,\n}\n\nmodule.exports = {\n    DEFAULT_DIRECTORY,\n    DEFAULT_LIBRARIES_DIRECTORY,\n    DEFAULT_MODEL_CONFIG,\n    DEFAULT_MODEL_LIST_URL,\n    DEFAULT_PROMPT_CONTEXT,\n};\n"
  },
  {
    "path": "gpt4all-bindings/typescript/src/gpt4all.d.ts",
    "content": "/// <reference types=\"node\" />\ndeclare module \"gpt4all\";\n\ninterface LLModelOptions {\n    /**\n     * Model architecture. This argument currently does not have any functionality and is just used as descriptive identifier for user.\n     */\n    type?: string;\n    model_name: string;\n    model_path: string;\n    library_path?: string;\n}\n\ninterface ModelConfig {\n    systemPrompt: string;\n    promptTemplate: string;\n    path: string;\n    url?: string;\n}\n\n/**\n * Options for the chat session.\n */\ninterface ChatSessionOptions extends Partial<LLModelPromptContext> {\n    /**\n     * System prompt to ingest on initialization.\n     */\n    systemPrompt?: string;\n\n    /**\n     * Messages to ingest on initialization.\n     */\n    messages?: ChatMessage[];\n}\n\n/**\n * ChatSession utilizes an InferenceModel for efficient processing of chat conversations.\n */\ndeclare class ChatSession implements CompletionProvider {\n    /**\n     * Constructs a new ChatSession using the provided InferenceModel and options.\n     * Does not set the chat session as the active chat session until initialize is called.\n     * @param {InferenceModel} model An InferenceModel instance.\n     * @param {ChatSessionOptions} [options] Options for the chat session including default completion options.\n     */\n    constructor(model: InferenceModel, options?: ChatSessionOptions);\n    /**\n     * The underlying InferenceModel used for generating completions.\n     */\n    model: InferenceModel;\n    /**\n     * The name of the model.\n     */\n    modelName: string;\n    /**\n     * The messages that have been exchanged in this chat session.\n     */\n    messages: ChatMessage[];\n    /**\n     * The system prompt that has been ingested at the beginning of the chat session.\n     */\n    systemPrompt: string;\n    /**\n     * The current prompt context of the chat session.\n     */\n    promptContext: LLModelPromptContext;\n\n    /**\n     * Ingests system 
prompt and initial messages.\n     * Sets this chat session as the active chat session of the model.\n     * @param {CompletionOptions} [options] Set completion options for initialization.\n     * @returns {Promise<number>} The number of tokens ingested during initialization. systemPrompt + messages.\n     */\n    initialize(completionOpts?: CompletionOptions): Promise<number>;\n\n    /**\n     * Prompts the model in chat-session context.\n     * @param {CompletionInput} input Input string or message array.\n     * @param {CompletionOptions} [options] Set completion options for this generation.\n     * @returns {Promise<InferenceResult>} The inference result.\n     * @throws {Error} If the chat session is not the active chat session of the model.\n     * @throws {Error} If nPast is set to a value higher than what has been ingested in the session.\n     */\n    generate(\n        input: CompletionInput,\n        options?: CompletionOptions\n    ): Promise<InferenceResult>;\n}\n\n/**\n * Shape of InferenceModel generations.\n */\ninterface InferenceResult extends LLModelInferenceResult {\n    tokensIngested: number;\n    tokensGenerated: number;\n}\n\n/**\n * InferenceModel represents an LLM which can make next-token predictions.\n */\ndeclare class InferenceModel implements CompletionProvider {\n    constructor(llm: LLModel, config: ModelConfig);\n    /** The native LLModel */\n    llm: LLModel;\n    /** The configuration the instance was constructed with. */\n    config: ModelConfig;\n    /** The active chat session of the model. */\n    activeChatSession?: ChatSession;\n    /** The name of the model. 
*/\n    modelName: string;\n\n    /**\n     * Create a chat session with the model and set it as the active chat session of this model.\n     * A model instance can only have one active chat session at a time.\n     * @param {ChatSessionOptions} options The options for the chat session.\n     * @returns {Promise<ChatSession>} The chat session.\n     */\n    createChatSession(options?: ChatSessionOptions): Promise<ChatSession>;\n\n    /**\n     * Prompts the model with a given input and optional parameters.\n     * @param {CompletionInput} input The prompt input.\n     * @param {CompletionOptions} options Prompt context and other options.\n     * @returns {Promise<InferenceResult>} The model's response to the prompt.\n     * @throws {Error} If nPast is set to a value smaller than 0.\n     * @throws {Error} If a messages array without a tailing user message is provided.\n     */\n    generate(\n        prompt: string,\n        options?: CompletionOptions\n    ): Promise<InferenceResult>;\n\n    /**\n     * delete and cleanup the native model\n     */\n    dispose(): void;\n}\n\n/**\n * Options for generating one or more embeddings.\n */\ninterface EmbedddingOptions {\n    /**\n     * The model-specific prefix representing the embedding task, without the trailing colon. For Nomic Embed\n     * this can be `search_query`, `search_document`, `classification`, or `clustering`.\n     */\n    prefix?: string;\n    /**\n     *The embedding dimension, for use with Matryoshka-capable models. Defaults to full-size.\n     * @default determines on the model being used.\n     */\n    dimensionality?: number;\n    /**\n     * How to handle texts longer than the model can accept. One of `mean` or `truncate`.\n     * @default \"mean\"\n     */\n    longTextMode?: \"mean\" | \"truncate\";\n    /**\n     * Try to be fully compatible with the Atlas API. Currently, this means texts longer than 8192 tokens\n     * with long_text_mode=\"mean\" will raise an error. 
Disabled by default.\n     * @default false\n     */\n    atlas?: boolean;\n}\n\n/**\n * The nodejs moral equivalent to python binding's Embed4All().embed()\n * meow\n * @param {EmbeddingModel} model The embedding model instance.\n * @param {string} text Text to embed.\n * @param {EmbeddingOptions} options Optional parameters for the embedding.\n * @returns {EmbeddingResult} The embedding result.\n * @throws {Error} If dimensionality is set to a value smaller than 1.\n */\ndeclare function createEmbedding(\n    model: EmbeddingModel,\n    text: string,\n    options?: EmbedddingOptions\n): EmbeddingResult<Float32Array>;\n\n/**\n * Overload that takes multiple strings to embed.\n * @param {EmbeddingModel} model The embedding model instance.\n * @param {string[]} texts Texts to embed.\n * @param {EmbeddingOptions} options Optional parameters for the embedding.\n * @returns {EmbeddingResult<Float32Array[]>} The embedding result.\n * @throws {Error} If dimensionality is set to a value smaller than 1.\n */\ndeclare function createEmbedding(\n    model: EmbeddingModel,\n    text: string[],\n    options?: EmbedddingOptions\n): EmbeddingResult<Float32Array[]>;\n\n/**\n * The resulting embedding.\n */\ninterface EmbeddingResult<T> {\n    /**\n     * Encoded token count. Includes overlap but specifically excludes tokens used for the prefix/task_type, BOS/CLS token, and EOS/SEP token\n     **/\n    n_prompt_tokens: number;\n\n    embeddings: T;\n}\n/**\n * EmbeddingModel represents an LLM which can create embeddings, which are float arrays\n */\ndeclare class EmbeddingModel {\n    constructor(llm: LLModel, config: ModelConfig);\n    /** The native LLModel */\n    llm: LLModel;\n    /** The configuration the instance was constructed with. */\n    config: ModelConfig;\n\n    /**\n     * Create an embedding from a given input string. 
See EmbeddingOptions.\n     * @param {string} text\n     * @param {string} prefix\n     * @param {number} dimensionality\n     * @param {boolean} doMean\n     * @param {boolean} atlas\n     * @returns {EmbeddingResult<Float32Array>} The embedding result.\n     */\n    embed(\n        text: string,\n        prefix: string,\n        dimensionality: number,\n        doMean: boolean,\n        atlas: boolean\n    ): EmbeddingResult<Float32Array>;\n    /**\n     * Create an embedding from a given input text array. See EmbeddingOptions.\n     * @param {string[]} text\n     * @param {string} prefix\n     * @param {number} dimensionality\n     * @param {boolean} doMean\n     * @param {boolean} atlas\n     * @returns {EmbeddingResult<Float32Array[]>} The embedding result.\n     */\n    embed(\n        text: string[],\n        prefix: string,\n        dimensionality: number,\n        doMean: boolean,\n        atlas: boolean\n    ): EmbeddingResult<Float32Array[]>;\n\n    /**\n     * delete and cleanup the native model\n     */\n    dispose(): void;\n}\n\n/**\n * Shape of LLModel's inference result.\n */\ninterface LLModelInferenceResult {\n    text: string;\n    nPast: number;\n}\n\ninterface LLModelInferenceOptions extends Partial<LLModelPromptContext> {\n    /** Callback for response tokens, called for each generated token.\n     * @param {number} tokenId The token id.\n     * @param {string} token The token.\n     * @returns {boolean | undefined} Whether to continue generating tokens.\n     * */\n    onResponseToken?: (tokenId: number, token: string) => boolean | void;\n    /** Callback for prompt tokens, called for each input token in the prompt.\n     * @param {number} tokenId The token id.\n     * @returns {boolean | undefined} Whether to continue ingesting the prompt.\n     * */\n    onPromptToken?: (tokenId: number) => boolean | void;\n}\n\n/**\n * LLModel class representing a language model.\n * This is a base class that provides common functionality for different 
types of language models.\n */\ndeclare class LLModel {\n    /**\n     * Initialize a new LLModel.\n     * @param {string} path Absolute path to the model file.\n     * @throws {Error} If the model file does not exist.\n     */\n    constructor(options: LLModelOptions);\n\n    /** undefined or user supplied */\n    type(): string | undefined;\n\n    /** The name of the model. */\n    name(): string;\n\n    /**\n     * Get the size of the internal state of the model.\n     * NOTE: This state data is specific to the type of model you have created.\n     * @return the size in bytes of the internal state of the model\n     */\n    stateSize(): number;\n\n    /**\n     * Get the number of threads used for model inference.\n     * The default is the number of physical cores your computer has.\n     * @returns The number of threads used for model inference.\n     */\n    threadCount(): number;\n\n    /**\n     * Set the number of threads used for model inference.\n     * @param newNumber The new number of threads.\n     */\n    setThreadCount(newNumber: number): void;\n\n    /**\n     * Prompt the model directly with a given input string and optional parameters.\n     * Use the higher level createCompletion methods for a more user-friendly interface.\n     * @param {string} prompt The prompt input.\n     * @param {LLModelInferenceOptions} options Optional parameters for the generation.\n     * @returns {LLModelInferenceResult} The response text and final context size.\n     */\n    infer(\n        prompt: string,\n        options: LLModelInferenceOptions\n    ): Promise<LLModelInferenceResult>;\n\n    /**\n     * Embed text with the model. 
See EmbeddingOptions for more information.\n     * Use the higher level createEmbedding methods for a more user-friendly interface.\n     * @param {string} text\n     * @param {string} prefix\n     * @param {number} dimensionality\n     * @param {boolean} doMean\n     * @param {boolean} atlas\n     * @returns {Float32Array} The embedding of the text.\n     */\n    embed(\n        text: string,\n        prefix: string,\n        dimensionality: number,\n        doMean: boolean,\n        atlas: boolean\n    ): Float32Array;\n\n    /**\n     * Embed multiple texts with the model. See EmbeddingOptions for more information.\n     * Use the higher level createEmbedding methods for a more user-friendly interface.\n     * @param {string[]} texts\n     * @param {string} prefix\n     * @param {number} dimensionality\n     * @param {boolean} doMean\n     * @param {boolean} atlas\n     * @returns {Float32Array[]} The embeddings of the texts.\n     */\n    embed(\n        texts: string,\n        prefix: string,\n        dimensionality: number,\n        doMean: boolean,\n        atlas: boolean\n    ): Float32Array[];\n\n    /**\n     * Whether the model is loaded or not.\n     */\n    isModelLoaded(): boolean;\n\n    /**\n     * Where to search for the pluggable backend libraries\n     */\n    setLibraryPath(s: string): void;\n\n    /**\n     * Where to get the pluggable backend libraries\n     */\n    getLibraryPath(): string;\n\n    /**\n     * Initiate a GPU by a string identifier.\n     * @param {number} memory_required Should be in the range size_t or will throw\n     * @param {string} device_name  'amd' | 'nvidia' | 'intel' | 'gpu' | gpu name.\n     * read LoadModelOptions.device for more information\n     */\n    initGpuByString(memory_required: number, device_name: string): boolean;\n\n    /**\n     * From C documentation\n     * @returns True if a GPU device is successfully initialized, false otherwise.\n     */\n    hasGpuDevice(): boolean;\n\n    /**\n     * GPUs that 
are usable for this LLModel\n     * @param {number} nCtx Maximum size of context window\n     * @throws if hasGpuDevice returns false (i think)\n     * @returns\n     */\n    listGpu(nCtx: number): GpuDevice[];\n\n    /**\n     * delete and cleanup the native model\n     */\n    dispose(): void;\n}\n/**\n * an object that contains gpu data on this machine.\n */\ninterface GpuDevice {\n    index: number;\n    /**\n     * same as VkPhysicalDeviceType\n     */\n    type: number;\n    heapSize: number;\n    name: string;\n    vendor: string;\n}\n\n/**\n * Options that configure a model's behavior.\n */\ninterface LoadModelOptions {\n    /**\n     * Where to look for model files.\n     */\n    modelPath?: string;\n    /**\n     * Where to look for the backend libraries.\n     */\n    librariesPath?: string;\n    /**\n     * The path to the model configuration file, useful for offline usage or custom model configurations.\n     */\n    modelConfigFile?: string;\n    /**\n     * Whether to allow downloading the model if it is not present at the specified path.\n     */\n    allowDownload?: boolean;\n    /**\n     * Enable verbose logging.\n     */\n    verbose?: boolean;\n    /**\n     * The processing unit on which the model will run. It can be set to\n     * - \"cpu\": Model will run on the central processing unit.\n     * - \"gpu\": Model will run on the best available graphics processing unit, irrespective of its vendor.\n     * - \"amd\", \"nvidia\", \"intel\": Model will run on the best available GPU from the specified vendor.\n     * - \"gpu name\": Model will run on the GPU that matches the name if it's available.\n     * Note: If a GPU device lacks sufficient RAM to accommodate the model, an error will be thrown, and the GPT4All\n     * instance will be rendered invalid. 
It's advised to ensure the device has enough memory before initiating the\n     * model.\n     * @default \"cpu\"\n     */\n    device?: string;\n    /**\n     * The Maximum window size of this model\n     * @default 2048\n     */\n    nCtx?: number;\n    /**\n     * Number of gpu layers needed\n     * @default 100\n     */\n    ngl?: number;\n}\n\ninterface InferenceModelOptions extends LoadModelOptions {\n    type?: \"inference\";\n}\n\ninterface EmbeddingModelOptions extends LoadModelOptions {\n    type: \"embedding\";\n}\n\n/**\n * Loads a machine learning model with the specified name. The defacto way to create a model.\n * By default this will download a model from the official GPT4ALL website, if a model is not present at given path.\n *\n * @param {string} modelName - The name of the model to load.\n * @param {LoadModelOptions|undefined} [options] - (Optional) Additional options for loading the model.\n * @returns {Promise<InferenceModel | EmbeddingModel>} A promise that resolves to an instance of the loaded LLModel.\n */\ndeclare function loadModel(\n    modelName: string,\n    options?: InferenceModelOptions\n): Promise<InferenceModel>;\n\ndeclare function loadModel(\n    modelName: string,\n    options?: EmbeddingModelOptions\n): Promise<EmbeddingModel>;\n\ndeclare function loadModel(\n    modelName: string,\n    options?: EmbeddingModelOptions | InferenceModelOptions\n): Promise<InferenceModel | EmbeddingModel>;\n\n/**\n * Interface for createCompletion methods, implemented by InferenceModel and ChatSession.\n * Implement your own CompletionProvider or extend ChatSession to generate completions with custom logic.\n */\ninterface CompletionProvider {\n    modelName: string;\n    generate(\n        input: CompletionInput,\n        options?: CompletionOptions\n    ): Promise<InferenceResult>;\n}\n\n/**\n * Options for creating a completion.\n */\ninterface CompletionOptions extends LLModelInferenceOptions {\n    /**\n     * Indicates if verbose logging is 
enabled.\n     * @default false\n     */\n    verbose?: boolean;\n}\n\n/**\n * The input for creating a completion. May be a string or an array of messages.\n */\ntype CompletionInput = string | ChatMessage[];\n\n/**\n * The nodejs equivalent to python binding's chat_completion\n * @param {CompletionProvider} provider - The inference model object or chat session\n * @param {CompletionInput} input - The input string or message array\n * @param {CompletionOptions} options - The options for creating the completion.\n * @returns {CompletionResult} The completion result.\n */\ndeclare function createCompletion(\n    provider: CompletionProvider,\n    input: CompletionInput,\n    options?: CompletionOptions\n): Promise<CompletionResult>;\n\n/**\n * Streaming variant of createCompletion, returns a stream of tokens and a promise that resolves to the completion result.\n * @param {CompletionProvider} provider - The inference model object or chat session\n * @param {CompletionInput} input - The input string or message array\n * @param {CompletionOptions} options - The options for creating the completion.\n * @returns {CompletionStreamReturn} An object of token stream and the completion result promise.\n */\ndeclare function createCompletionStream(\n    provider: CompletionProvider,\n    input: CompletionInput,\n    options?: CompletionOptions\n): CompletionStreamReturn;\n\n/**\n * The result of a streamed completion, containing a stream of tokens and a promise that resolves to the completion result.\n */\ninterface CompletionStreamReturn {\n    tokens: NodeJS.ReadableStream;\n    result: Promise<CompletionResult>;\n}\n\n/**\n * Async generator variant of createCompletion, yields tokens as they are generated and returns the completion result.\n * @param {CompletionProvider} provider - The inference model object or chat session\n * @param {CompletionInput} input - The input string or message array\n * @param {CompletionOptions} options - The options for creating the 
completion.\n * @returns {AsyncGenerator<string>} The stream of generated tokens\n */\ndeclare function createCompletionGenerator(\n    provider: CompletionProvider,\n    input: CompletionInput,\n    options: CompletionOptions\n): AsyncGenerator<string, CompletionResult>;\n\n/**\n * A message in the conversation.\n */\ninterface ChatMessage {\n    /** The role of the message. */\n    role: \"system\" | \"assistant\" | \"user\";\n\n    /** The message content. */\n    content: string;\n}\n\n/**\n * The result of a completion.\n */\ninterface CompletionResult {\n    /** The model used for the completion. */\n    model: string;\n\n    /** Token usage report. */\n    usage: {\n        /** The number of tokens ingested during the completion. */\n        prompt_tokens: number;\n\n        /** The number of tokens generated in the completion. */\n        completion_tokens: number;\n\n        /** The total number of tokens used. */\n        total_tokens: number;\n\n        /** Number of tokens used in the conversation. */\n        n_past_tokens: number;\n    };\n\n    /** The generated completion. */\n    choices: Array<{\n        message: ChatMessage;\n    }>;\n}\n\n/**\n * Model inference arguments for generating completions.\n */\ninterface LLModelPromptContext {\n    /** The size of the raw logits vector. */\n    logitsSize: number;\n\n    /** The size of the raw tokens vector. */\n    tokensSize: number;\n\n    /** The number of tokens in the past conversation.\n     * This may be used to \"roll back\" the conversation to a previous state.\n     * Note that for most use cases the default value should be sufficient and this should not be set.\n     * @default 0 For completions using InferenceModel, meaning the model will only consider the input prompt.\n     * @default nPast For completions using ChatSession. 
This means the context window will be automatically determined\n     * and possibly resized (see contextErase) to keep the conversation performant.\n     * */\n    nPast: number;\n\n    /** The maximum number of tokens to predict.\n     * @default 4096\n     * */\n    nPredict: number;\n\n    /** Template for user / assistant message pairs.\n     * %1 is required and will be replaced by the user input.\n     * %2 is optional and will be replaced by the assistant response. If not present, the assistant response will be appended.\n     */\n    promptTemplate?: string;\n\n    /** The context window size. Do not use, it has no effect. See loadModel options.\n     * THIS IS DEPRECATED!!!\n     * Use loadModel's nCtx option instead.\n     * @default 2048\n     */\n    nCtx: number;\n\n    /** The top-k logits to sample from.\n     * Top-K sampling selects the next token only from the top K most likely tokens predicted by the model.\n     * It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit\n     * the diversity of the output. A higher value for top-K (eg., 100) will consider more tokens and lead\n     * to more diverse text, while a lower value (eg., 10) will focus on the most probable tokens and generate\n     * more conservative text. 30 - 60 is a good range for most tasks.\n     * @default 40\n     * */\n    topK: number;\n\n    /** The nucleus sampling probability threshold.\n     * Top-P limits the selection of the next token to a subset of tokens with a cumulative probability\n     * above a threshold P. 
This method, also known as nucleus sampling, finds a balance between diversity\n     * and quality by considering both token probabilities and the number of tokens available for sampling.\n     * When using a higher value for top-P (eg., 0.95), the generated text becomes more diverse.\n     * On the other hand, a lower value (eg., 0.1) produces more focused and conservative text.\n     * @default 0.9\n     *\n     * */\n    topP: number;\n\n    /**\n     * The minimum probability of a token to be considered.\n     * @default 0.0\n     */\n    minP: number;\n\n    /** The temperature to adjust the model's output distribution.\n     * Temperature is like a knob that adjusts how creative or focused the output becomes. Higher temperatures\n     * (eg., 1.2) increase randomness, resulting in more imaginative and diverse text. Lower temperatures (eg., 0.5)\n     * make the output more focused, predictable, and conservative. When the temperature is set to 0, the output\n     * becomes completely deterministic, always selecting the most probable next token and producing identical results\n     * each time. Try what value fits best for your use case and model.\n     * @default 0.1\n     * @alias temperature\n     * */\n    temp: number;\n    temperature: number;\n\n    /** The number of predictions to generate in parallel.\n     * By splitting the prompt every N tokens, prompt-batch-size reduces RAM usage during processing. However,\n     * this can increase the processing time as a trade-off. 
If the N value is set too low (e.g., 10), long prompts\n     * with 500+ tokens will be most affected, requiring numerous processing runs to complete the prompt processing.\n     * To ensure optimal performance, setting the prompt-batch-size to 2048 allows processing of all tokens in a single run.\n     * @default 8\n     * */\n    nBatch: number;\n\n    /** The penalty factor for repeated tokens.\n     * Repeat-penalty can help penalize tokens based on how frequently they occur in the text, including the input prompt.\n     * A token that has already appeared five times is penalized more heavily than a token that has appeared only one time.\n     * A value of 1 means that there is no penalty and values larger than 1 discourage repeated tokens.\n     * @default 1.18\n     * */\n    repeatPenalty: number;\n\n    /** The number of last tokens to penalize.\n     * The repeat-penalty-tokens N option controls the number of tokens in the history to consider for penalizing repetition.\n     * A larger value will look further back in the generated text to prevent repetitions, while a smaller value will only\n     * consider recent tokens.\n     * @default 10\n     * */\n    repeatLastN: number;\n\n    /** The percentage of context to erase if the context window is exceeded.\n     * Set it to a lower value to keep context for longer at the cost of performance.\n     * @default 0.75\n     * */\n    contextErase: number;\n}\n\n/**\n * From python api:\n * models will be stored in (homedir)/.cache/gpt4all/`\n */\ndeclare const DEFAULT_DIRECTORY: string;\n/**\n * From python api:\n * The default path for dynamic libraries to be stored.\n * You may separate paths by a semicolon to search in multiple areas.\n * This searches DEFAULT_DIRECTORY/libraries, cwd/libraries, and finally cwd.\n */\ndeclare const DEFAULT_LIBRARIES_DIRECTORY: string;\n\n/**\n * Default model configuration.\n */\ndeclare const DEFAULT_MODEL_CONFIG: ModelConfig;\n\n/**\n * Default prompt context.\n 
*/\ndeclare const DEFAULT_PROMPT_CONTEXT: LLModelPromptContext;\n\n/**\n * Default model list url.\n */\ndeclare const DEFAULT_MODEL_LIST_URL: string;\n\n/**\n * Initiates the download of a model file.\n * By default this downloads without waiting. use the controller returned to alter this behavior.\n * @param {string} modelName - The model to be downloaded.\n * @param {DownloadModelOptions} options - to pass into the downloader. Default is { location: (cwd), verbose: false }.\n * @returns {DownloadController} object that allows controlling the download process.\n *\n * @throws {Error} If the model already exists in the specified location.\n * @throws {Error} If the model cannot be found at the specified url.\n *\n * @example\n * const download = downloadModel('ggml-gpt4all-j-v1.3-groovy.bin')\n * download.promise.then(() => console.log('Downloaded!'))\n */\ndeclare function downloadModel(\n    modelName: string,\n    options?: DownloadModelOptions\n): DownloadController;\n\n/**\n * Options for the model download process.\n */\ninterface DownloadModelOptions {\n    /**\n     * location to download the model.\n     * Default is process.cwd(), or the current working directory\n     */\n    modelPath?: string;\n\n    /**\n     * Debug mode -- check how long it took to download in seconds\n     * @default false\n     */\n    verbose?: boolean;\n\n    /**\n     * Remote download url. Defaults to `https://gpt4all.io/models/gguf/<modelName>`\n     * @default https://gpt4all.io/models/gguf/<modelName>\n     */\n    url?: string;\n    /**\n     * MD5 sum of the model file. 
If this is provided, the downloaded file will be checked against this sum.\n     * If the sums do not match, an error will be thrown and the file will be deleted.\n     */\n    md5sum?: string;\n}\n\ninterface ListModelsOptions {\n    url?: string;\n    file?: string;\n}\n\ndeclare function listModels(\n    options?: ListModelsOptions\n): Promise<ModelConfig[]>;\n\ninterface RetrieveModelOptions {\n    allowDownload?: boolean;\n    verbose?: boolean;\n    modelPath?: string;\n    modelConfigFile?: string;\n}\n\ndeclare function retrieveModel(\n    modelName: string,\n    options?: RetrieveModelOptions\n): Promise<ModelConfig>;\n\n/**\n * Model download controller.\n */\ninterface DownloadController {\n    /** Cancel the request to download if this is called. */\n    cancel: () => void;\n    /** A promise resolving to the downloaded models config once the download is done */\n    promise: Promise<ModelConfig>;\n}\n\nexport {\n    LLModel,\n    LLModelPromptContext,\n    ModelConfig,\n    InferenceModel,\n    InferenceResult,\n    EmbeddingModel,\n    EmbeddingResult,\n    ChatSession,\n    ChatMessage,\n    CompletionInput,\n    CompletionProvider,\n    CompletionOptions,\n    CompletionResult,\n    LoadModelOptions,\n    DownloadController,\n    RetrieveModelOptions,\n    DownloadModelOptions,\n    GpuDevice,\n    loadModel,\n    downloadModel,\n    retrieveModel,\n    listModels,\n    createCompletion,\n    createCompletionStream,\n    createCompletionGenerator,\n    createEmbedding,\n    DEFAULT_DIRECTORY,\n    DEFAULT_LIBRARIES_DIRECTORY,\n    DEFAULT_MODEL_CONFIG,\n    DEFAULT_PROMPT_CONTEXT,\n    DEFAULT_MODEL_LIST_URL,\n};\n"
  },
  {
    "path": "gpt4all-bindings/typescript/src/gpt4all.js",
    "content": "\"use strict\";\n\n/// This file implements the gpt4all.d.ts file endings.\n/// Written in commonjs to support both ESM and CJS projects.\nconst { existsSync } = require(\"node:fs\");\nconst path = require(\"node:path\");\nconst Stream = require(\"node:stream\");\nconst assert = require(\"node:assert\");\nconst { LLModel } = require(\"node-gyp-build\")(path.resolve(__dirname, \"..\"));\nconst {\n    retrieveModel,\n    downloadModel,\n    appendBinSuffixIfMissing,\n} = require(\"./util.js\");\nconst {\n    DEFAULT_DIRECTORY,\n    DEFAULT_LIBRARIES_DIRECTORY,\n    DEFAULT_PROMPT_CONTEXT,\n    DEFAULT_MODEL_CONFIG,\n    DEFAULT_MODEL_LIST_URL,\n} = require(\"./config.js\");\nconst { InferenceModel, EmbeddingModel } = require(\"./models.js\");\nconst { ChatSession } = require(\"./chat-session.js\");\n\n/**\n * Loads a machine learning model with the specified name. The defacto way to create a model.\n * By default this will download a model from the official GPT4ALL website, if a model is not present at given path.\n *\n * @param {string} modelName - The name of the model to load.\n * @param {import('./gpt4all').LoadModelOptions|undefined} [options] - (Optional) Additional options for loading the model.\n * @returns {Promise<InferenceModel | EmbeddingModel>} A promise that resolves to an instance of the loaded LLModel.\n */\nasync function loadModel(modelName, options = {}) {\n    const loadOptions = {\n        modelPath: DEFAULT_DIRECTORY,\n        librariesPath: DEFAULT_LIBRARIES_DIRECTORY,\n        type: \"inference\",\n        allowDownload: true,\n        verbose: false,\n        device: \"cpu\",\n        nCtx: 2048,\n        ngl: 100,\n        ...options,\n    };\n\n    const modelConfig = await retrieveModel(modelName, {\n        modelPath: loadOptions.modelPath,\n        modelConfigFile: loadOptions.modelConfigFile,\n        allowDownload: loadOptions.allowDownload,\n        verbose: loadOptions.verbose,\n    });\n\n    assert.ok(\n        
typeof loadOptions.librariesPath === \"string\",\n        \"Libraries path should be a string\"\n    );\n    const existingPaths = loadOptions.librariesPath\n        .split(\";\")\n        .filter(existsSync)\n        .join(\";\");\n\n    const llmOptions = {\n        model_name: appendBinSuffixIfMissing(modelName),\n        model_path: loadOptions.modelPath,\n        library_path: existingPaths,\n        device: loadOptions.device,\n        nCtx: loadOptions.nCtx,\n        ngl: loadOptions.ngl,\n    };\n\n    if (loadOptions.verbose) {\n        console.debug(\"Creating LLModel:\", {\n            llmOptions,\n            modelConfig,\n        });\n    }\n    const llmodel = new LLModel(llmOptions);\n    if (loadOptions.type === \"embedding\") {\n        return new EmbeddingModel(llmodel, modelConfig);\n    } else if (loadOptions.type === \"inference\") {\n        return new InferenceModel(llmodel, modelConfig);\n    } else {\n        throw Error(\"Invalid model type: \" + loadOptions.type);\n    }\n}\n\nfunction createEmbedding(model, text, options={}) {\n    let {\n        dimensionality = undefined,\n        longTextMode = \"mean\",\n        atlas = false,\n    } = options;\n\n    if (dimensionality === undefined) {\n        dimensionality = -1;\n    } else {\n        if (dimensionality <= 0) {\n            throw new Error(\n                `Dimensionality must be undefined or a positive integer, got ${dimensionality}`\n            );\n        }\n        if (dimensionality < model.MIN_DIMENSIONALITY) {\n            console.warn(\n                `Dimensionality ${dimensionality} is less than the suggested minimum of ${model.MIN_DIMENSIONALITY}. 
Performance may be degraded.`\n            );\n        }\n    }\n\n    let doMean;\n    switch (longTextMode) {\n        case \"mean\":\n            doMean = true;\n            break;\n        case \"truncate\":\n            doMean = false;\n            break;\n        default:\n            throw new Error(\n                `Long text mode must be one of 'mean' or 'truncate', got ${longTextMode}`\n            );\n    }\n\n    return model.embed(text, options?.prefix, dimensionality, doMean, atlas);\n}\n\nconst defaultCompletionOptions = {\n    verbose: false,\n    ...DEFAULT_PROMPT_CONTEXT,\n};\n\nasync function createCompletion(\n    provider,\n    input,\n    options = defaultCompletionOptions\n) {\n    const completionOptions = {\n        ...defaultCompletionOptions,\n        ...options,\n    };\n\n    const result = await provider.generate(\n        input,\n        completionOptions,\n    );\n\n    return {\n        model: provider.modelName,\n        usage: {\n            prompt_tokens: result.tokensIngested,\n            total_tokens: result.tokensIngested + result.tokensGenerated,\n            completion_tokens: result.tokensGenerated,\n            n_past_tokens: result.nPast,\n        },\n        choices: [\n            {\n                message: {\n                    role: \"assistant\",\n                    content: result.text,\n                },\n                // TODO some completion APIs also provide logprobs and finish_reason, could look into adding those\n            },\n        ],\n    };\n}\n\nfunction createCompletionStream(\n    provider,\n    input,\n    options = defaultCompletionOptions\n) {\n    const completionStream = new Stream.PassThrough({\n        encoding: \"utf-8\",\n    });\n\n    const completionPromise = createCompletion(provider, input, {\n        ...options,\n        onResponseToken: (tokenId, token) => {\n            completionStream.push(token);\n            if (options.onResponseToken) {\n                return 
options.onResponseToken(tokenId, token);\n            }\n        },\n    }).then((result) => {\n        completionStream.push(null);\n        completionStream.emit(\"end\");\n        return result;\n    });\n\n    return {\n        tokens: completionStream,\n        result: completionPromise,\n    };\n}\n\nasync function* createCompletionGenerator(provider, input, options) {\n    const completion = createCompletionStream(provider, input, options);\n    for await (const chunk of completion.tokens) {\n        yield chunk;\n    }\n    return await completion.result;\n}\n\nmodule.exports = {\n    DEFAULT_LIBRARIES_DIRECTORY,\n    DEFAULT_DIRECTORY,\n    DEFAULT_PROMPT_CONTEXT,\n    DEFAULT_MODEL_CONFIG,\n    DEFAULT_MODEL_LIST_URL,\n    LLModel,\n    InferenceModel,\n    EmbeddingModel,\n    ChatSession,\n    createCompletion,\n    createCompletionStream,\n    createCompletionGenerator,\n    createEmbedding,\n    downloadModel,\n    retrieveModel,\n    loadModel,\n};\n"
  },
  {
    "path": "gpt4all-bindings/typescript/src/models.js",
    "content": "const { DEFAULT_PROMPT_CONTEXT } = require(\"./config\");\nconst { ChatSession } = require(\"./chat-session\");\nconst { prepareMessagesForIngest } = require(\"./util\");\n\nclass InferenceModel {\n    llm;\n    modelName;\n    config;\n    activeChatSession;\n\n    constructor(llmodel, config) {\n        this.llm = llmodel;\n        this.config = config;\n        this.modelName = this.llm.name();\n    }\n\n    async createChatSession(options) {\n        const chatSession = new ChatSession(this, options);\n        await chatSession.initialize();\n        this.activeChatSession = chatSession;\n        return this.activeChatSession;\n    }\n\n    async generate(input, options = DEFAULT_PROMPT_CONTEXT) {\n        const { verbose, ...otherOptions } = options;\n        const promptContext = {\n            promptTemplate: this.config.promptTemplate,\n            temp:\n                otherOptions.temp ??\n                otherOptions.temperature ??\n                DEFAULT_PROMPT_CONTEXT.temp,\n            ...otherOptions,\n        };\n        \n        if (promptContext.nPast < 0) {\n            throw new Error(\"nPast must be a non-negative integer.\");\n        }\n\n        if (verbose) {\n            console.debug(\"Generating completion\", {\n                input,\n                promptContext,\n            });\n        }\n\n        let prompt = input;\n        let nPast = promptContext.nPast;\n        let tokensIngested = 0;\n\n        if (Array.isArray(input)) {\n            // assuming input is a messages array\n            // -> tailing user message will be used as the final prompt. 
its required.\n            // -> leading system message will be ingested as systemPrompt, further system messages will be ignored\n            // -> all other messages will be ingested with fakeReply\n            // -> model/context will only be kept for this completion; \"stateless\"\n            nPast = 0;\n            const messages = [...input];\n            const lastMessage = input[input.length - 1];\n            if (lastMessage.role !== \"user\") {\n                // this is most likely a user error\n                throw new Error(\"The final message must be of role 'user'.\");\n            }\n            if (input[0].role === \"system\") {\n                // needs to be a pre-templated prompt ala '<|im_start|>system\\nYou are an advanced mathematician.\\n<|im_end|>\\n'\n                const systemPrompt = input[0].content;\n                const systemRes = await this.llm.infer(systemPrompt, {\n                    promptTemplate: \"%1\",\n                    nPredict: 0,\n                    special: true,\n                });\n                nPast = systemRes.nPast;\n                tokensIngested += systemRes.tokensIngested;\n                messages.shift();\n            }\n\n            prompt = lastMessage.content;\n            const messagesToIngest = messages.slice(0, input.length - 1);\n            const turns = prepareMessagesForIngest(messagesToIngest);\n\n            for (const turn of turns) {\n                const turnRes = await this.llm.infer(turn.user, {\n                    ...promptContext,\n                    nPast,\n                    fakeReply: turn.assistant,\n                });\n                tokensIngested += turnRes.tokensIngested;\n                nPast = turnRes.nPast;\n            }\n        }\n\n        let tokensGenerated = 0;\n\n        const result = await this.llm.infer(prompt, {\n            ...promptContext,\n            nPast,\n            onPromptToken: (tokenId) => {\n                let continueIngestion = 
true;\n                tokensIngested++;\n                if (options.onPromptToken) {\n                    // catch errors because if they go through cpp they will loose stacktraces\n                    try {\n                        // don't cancel ingestion unless user explicitly returns false\n                        continueIngestion =\n                            options.onPromptToken(tokenId) !== false;\n                    } catch (e) {\n                        console.error(\"Error in onPromptToken callback\", e);\n                        continueIngestion = false;\n                    }\n                }\n                return continueIngestion;\n            },\n            onResponseToken: (tokenId, token) => {\n                let continueGeneration = true;\n                tokensGenerated++;\n                if (options.onResponseToken) {\n                    try {\n                        // don't cancel the generation unless user explicitly returns false\n                        continueGeneration =\n                            options.onResponseToken(tokenId, token) !== false;\n                    } catch (err) {\n                        console.error(\"Error in onResponseToken callback\", err);\n                        continueGeneration = false;\n                    }\n                }\n                return continueGeneration;\n            },\n        });\n\n        result.tokensGenerated = tokensGenerated;\n        result.tokensIngested = tokensIngested;\n\n        if (verbose) {\n            console.debug(\"Finished completion:\\n\", result);\n        }\n\n        return result;\n    }\n\n    dispose() {\n        this.llm.dispose();\n    }\n}\n\nclass EmbeddingModel {\n    llm;\n    config;\n    MIN_DIMENSIONALITY = 64;\n    constructor(llmodel, config) {\n        this.llm = llmodel;\n        this.config = config;\n    }\n\n    embed(text, prefix, dimensionality, do_mean, atlas) {\n        return this.llm.embed(text, prefix, dimensionality, 
do_mean, atlas);\n    }\n\n    dispose() {\n        this.llm.dispose();\n    }\n}\n\nmodule.exports = {\n    InferenceModel,\n    EmbeddingModel,\n};\n"
  },
  {
    "path": "gpt4all-bindings/typescript/src/util.js",
    "content": "const { createWriteStream, existsSync, statSync, mkdirSync } = require(\"node:fs\");\nconst fsp = require(\"node:fs/promises\");\nconst { performance } = require(\"node:perf_hooks\");\nconst path = require(\"node:path\");\nconst md5File = require(\"md5-file\");\nconst {\n    DEFAULT_DIRECTORY,\n    DEFAULT_MODEL_CONFIG,\n    DEFAULT_MODEL_LIST_URL,\n} = require(\"./config.js\");\n\nasync function listModels(\n    options = {\n        url: DEFAULT_MODEL_LIST_URL,\n    }\n) {\n    if (!options || (!options.url && !options.file)) {\n        throw new Error(\n            `No model list source specified. Please specify either a url or a file.`\n        );\n    }\n\n    if (options.file) {\n        if (!existsSync(options.file)) {\n            throw new Error(`Model list file ${options.file} does not exist.`);\n        }\n\n        const fileContents = await fsp.readFile(options.file, \"utf-8\");\n        const modelList = JSON.parse(fileContents);\n        return modelList;\n    } else if (options.url) {\n        const res = await fetch(options.url);\n\n        if (!res.ok) {\n            throw Error(\n                `Failed to retrieve model list from ${url} - ${res.status} ${res.statusText}`\n            );\n        }\n        const modelList = await res.json();\n        return modelList;\n    }\n}\n\nfunction appendBinSuffixIfMissing(name) {\n    const ext = path.extname(name);\n    if (![\".bin\", \".gguf\"].includes(ext)) {\n        return name + \".gguf\";\n    }\n    return name;\n}\n\nfunction prepareMessagesForIngest(messages) {\n    const systemMessages = messages.filter(\n        (message) => message.role === \"system\"\n    );\n    if (systemMessages.length > 0) {\n        console.warn(\n            \"System messages are currently not supported and will be ignored. 
Use the systemPrompt option instead.\"\n        );\n    }\n\n    const userAssistantMessages = messages.filter(\n        (message) => message.role !== \"system\"\n    );\n\n    // make sure the first message is a user message\n    // if its not, the turns will be out of order\n    if (userAssistantMessages[0].role !== \"user\") {\n        userAssistantMessages.unshift({\n            role: \"user\",\n            content: \"\",\n        });\n    }\n\n    // create turns of user input + assistant reply\n    const turns = [];\n    let userMessage = null;\n    let assistantMessage = null;\n\n    for (const message of userAssistantMessages) {\n        // consecutive messages of the same role are concatenated into one message\n        if (message.role === \"user\") {\n            if (!userMessage) {\n                userMessage = message.content;\n            } else {\n                userMessage += \"\\n\" + message.content;\n            }\n        } else if (message.role === \"assistant\") {\n            if (!assistantMessage) {\n                assistantMessage = message.content;\n            } else {\n                assistantMessage += \"\\n\" + message.content;\n            }\n        }\n\n        if (userMessage && assistantMessage) {\n            turns.push({\n                user: userMessage,\n                assistant: assistantMessage,\n            });\n            userMessage = null;\n            assistantMessage = null;\n        }\n    }\n\n    return turns;\n}\n\n// readChunks() reads from the provided reader and yields the results into an async iterable\n// https://css-tricks.com/web-streams-everywhere-and-fetch-for-node-js/\nfunction readChunks(reader) {\n    return {\n        async *[Symbol.asyncIterator]() {\n            let readResult = await reader.read();\n            while (!readResult.done) {\n                yield readResult.value;\n                readResult = await reader.read();\n            }\n        },\n    };\n}\n\nfunction 
downloadModel(modelName, options = {}) {\n    const downloadOptions = {\n        modelPath: DEFAULT_DIRECTORY,\n        verbose: false,\n        ...options,\n    };\n\n    const modelFileName = appendBinSuffixIfMissing(modelName);\n    const partialModelPath = path.join(\n        downloadOptions.modelPath,\n        modelName + \".part\"\n    );\n    const finalModelPath = path.join(downloadOptions.modelPath, modelFileName);\n    const modelUrl =\n        downloadOptions.url ??\n        `https://gpt4all.io/models/gguf/${modelFileName}`;\n\n    mkdirSync(downloadOptions.modelPath, { recursive: true });\n\n    if (existsSync(finalModelPath)) {\n        throw Error(`Model already exists at ${finalModelPath}`);\n    }\n\n    if (downloadOptions.verbose) {\n        console.debug(`Downloading ${modelName} from ${modelUrl}`);\n    }\n\n    const headers = {\n        \"Accept-Ranges\": \"arraybuffer\",\n        \"Response-Type\": \"arraybuffer\",\n    };\n\n    const writeStreamOpts = {};\n\n    if (existsSync(partialModelPath)) {\n        if (downloadOptions.verbose) {\n            console.debug(\"Partial model exists, resuming download...\");\n        }\n        const startRange = statSync(partialModelPath).size;\n        headers[\"Range\"] = `bytes=${startRange}-`;\n        writeStreamOpts.flags = \"a\";\n    }\n\n    const abortController = new AbortController();\n    const signal = abortController.signal;\n\n    const finalizeDownload = async () => {\n        if (downloadOptions.md5sum) {\n            const fileHash = await md5File(partialModelPath);\n            if (fileHash !== downloadOptions.md5sum) {\n                await fsp.unlink(partialModelPath);\n                const message = `Model \"${modelName}\" failed verification: Hashes mismatch. 
Expected ${downloadOptions.md5sum}, got ${fileHash}`;\n                throw Error(message);\n            }\n            if (downloadOptions.verbose) {\n                console.debug(`MD5 hash verified: ${fileHash}`);\n            }\n        }\n\n        await fsp.rename(partialModelPath, finalModelPath);\n    };\n\n    // a promise that executes and writes to a stream. Resolves to the path the model was downloaded to when done writing.\n    const downloadPromise = new Promise((resolve, reject) => {\n        let timestampStart;\n\n        if (downloadOptions.verbose) {\n            console.debug(`Downloading @ ${partialModelPath} ...`);\n            timestampStart = performance.now();\n        }\n\n        const writeStream = createWriteStream(\n            partialModelPath,\n            writeStreamOpts\n        );\n\n        writeStream.on(\"error\", (e) => {\n            writeStream.close();\n            reject(e);\n        });\n\n        writeStream.on(\"finish\", () => {\n            if (downloadOptions.verbose) {\n                const elapsed = performance.now() - timestampStart;\n                console.log(`Finished. 
Download took ${elapsed.toFixed(2)} ms`);\n            }\n\n            finalizeDownload()\n                .then(() => {\n                    resolve(finalModelPath);\n                })\n                .catch(reject);\n        });\n\n        fetch(modelUrl, {\n            signal,\n            headers,\n        })\n            .then((res) => {\n                if (!res.ok) {\n                    const message = `Failed to download model from ${modelUrl} - ${res.status} ${res.statusText}`;\n                    reject(Error(message));\n                }\n                return res.body.getReader();\n            })\n            .then(async (reader) => {\n                for await (const chunk of readChunks(reader)) {\n                    writeStream.write(chunk);\n                }\n                writeStream.end();\n            })\n            .catch(reject);\n    });\n\n    return {\n        cancel: () => abortController.abort(),\n        promise: downloadPromise,\n    };\n}\n\nasync function retrieveModel(modelName, options = {}) {\n    const retrieveOptions = {\n        modelPath: DEFAULT_DIRECTORY,\n        allowDownload: true,\n        verbose: false,\n        ...options,\n    };\n    mkdirSync(retrieveOptions.modelPath, { recursive: true });\n\n    const modelFileName = appendBinSuffixIfMissing(modelName);\n    const fullModelPath = path.join(retrieveOptions.modelPath, modelFileName);\n    const modelExists = existsSync(fullModelPath);\n\n    let config = { ...DEFAULT_MODEL_CONFIG };\n\n    const availableModels = await listModels({\n        file: retrieveOptions.modelConfigFile,\n        url:\n            retrieveOptions.allowDownload &&\n            \"https://gpt4all.io/models/models3.json\",\n    });\n\n    const loadedModelConfig = availableModels.find(\n        (model) => model.filename === modelFileName\n    );\n\n    if (loadedModelConfig) {\n        config = {\n            ...config,\n            ...loadedModelConfig,\n        };\n    } else {\n      
  // if there's no local modelConfigFile specified, and allowDownload is false, the default model config will be used.\n        // warning the user here because the model may not work as expected.\n        console.warn(\n            `Failed to load model config for ${modelName}. Using defaults.`\n        );\n    }\n\n    config.systemPrompt = config.systemPrompt.trim();\n\n    if (modelExists) {\n        config.path = fullModelPath;\n\n        if (retrieveOptions.verbose) {\n            console.debug(`Found ${modelName} at ${fullModelPath}`);\n        }\n    } else if (retrieveOptions.allowDownload) {\n        const downloadController = downloadModel(modelName, {\n            modelPath: retrieveOptions.modelPath,\n            verbose: retrieveOptions.verbose,\n            filesize: config.filesize,\n            url: config.url,\n            md5sum: config.md5sum,\n        });\n\n        const downloadPath = await downloadController.promise;\n        config.path = downloadPath;\n\n        if (retrieveOptions.verbose) {\n            console.debug(`Model downloaded to ${downloadPath}`);\n        }\n    } else {\n        throw Error(\"Failed to retrieve model.\");\n    }\n    return config;\n}\n\nmodule.exports = {\n    appendBinSuffixIfMissing,\n    prepareMessagesForIngest,\n    downloadModel,\n    retrieveModel,\n    listModels,\n};\n"
  },
  {
    "path": "gpt4all-bindings/typescript/test/gpt4all.test.js",
    "content": "const path = require(\"node:path\");\nconst os = require(\"node:os\");\nconst fsp = require(\"node:fs/promises\");\nconst { existsSync } = require('node:fs');\nconst { LLModel } = require(\"node-gyp-build\")(path.resolve(__dirname, \"..\"));\nconst {\n    listModels,\n    downloadModel,\n    appendBinSuffixIfMissing,\n} = require(\"../src/util.js\");\nconst {\n    DEFAULT_DIRECTORY,\n    DEFAULT_LIBRARIES_DIRECTORY,\n    DEFAULT_MODEL_LIST_URL,\n} = require(\"../src/config.js\");\nconst {\n    loadModel,\n    createPrompt,\n    createCompletion,\n} = require(\"../src/gpt4all.js\");\n\ndescribe(\"config\", () => {\n    test(\"default paths constants are available and correct\", () => {\n        expect(DEFAULT_DIRECTORY).toBe(\n            path.resolve(os.homedir(), \".cache/gpt4all\")\n        );\n        const paths = [\n            path.join(DEFAULT_DIRECTORY, \"libraries\"),\n            path.resolve(\"./libraries\"),\n            path.resolve(\n                __dirname,\n                \"..\",\n                `runtimes/${process.platform}-${process.arch}/native`\n            ),\n            path.resolve(\n                __dirname,\n                \"..\",\n                `runtimes/${process.platform}/native`,\n            ),\n            process.cwd(),\n        ];\n        expect(typeof DEFAULT_LIBRARIES_DIRECTORY).toBe(\"string\");\n        expect(DEFAULT_LIBRARIES_DIRECTORY).toBe(paths.join(\";\"));\n    });\n});\n\ndescribe(\"listModels\", () => {\n    const fakeModels = require(\"./models.json\");\n    const fakeModel = fakeModels[0];\n    const mockResponse = JSON.stringify([fakeModel]);\n\n    let mockFetch, originalFetch;\n\n    beforeAll(() => {\n        // Mock the fetch function for all tests\n        mockFetch = jest.fn().mockResolvedValue({\n            ok: true,\n            json: () => JSON.parse(mockResponse),\n        });\n        originalFetch = global.fetch;\n        global.fetch = mockFetch;\n    });\n\n    afterEach(() => 
{\n        // Reset the fetch counter after each test\n        mockFetch.mockClear();\n    });\n    afterAll(() => {\n        // Restore fetch\n        global.fetch = originalFetch;\n    });\n\n    it(\"should load the model list from remote when called without args\", async () => {\n        const models = await listModels();\n        expect(fetch).toHaveBeenCalledTimes(1);\n        expect(fetch).toHaveBeenCalledWith(DEFAULT_MODEL_LIST_URL);\n        expect(models[0]).toEqual(fakeModel);\n    });\n\n    it(\"should load the model list from a local file, if specified\", async () => {\n        const models = await listModels({\n            file: path.resolve(__dirname, \"models.json\"),\n        });\n        expect(fetch).toHaveBeenCalledTimes(0);\n        expect(models[0]).toEqual(fakeModel);\n    });\n\n    it(\"should throw an error if neither url nor file is specified\", async () => {\n        await expect(listModels(null)).rejects.toThrow(\n            \"No model list source specified. 
Please specify either a url or a file.\"\n        );\n    });\n});\n\ndescribe(\"appendBinSuffixIfMissing\", () => {\n    it(\"should make sure the suffix is there\", () => {\n        expect(appendBinSuffixIfMissing(\"filename\")).toBe(\"filename.gguf\");\n        expect(appendBinSuffixIfMissing(\"filename.bin\")).toBe(\"filename.bin\");\n    });\n});\n\ndescribe(\"downloadModel\", () => {\n    let mockAbortController, mockFetch;\n    const fakeModelName = \"fake-model\";\n\n    const createMockFetch = () => {\n        const mockData = new Uint8Array([1, 2, 3, 4]);\n        const mockResponse = new ReadableStream({\n            start(controller) {\n                controller.enqueue(mockData);\n                controller.close();\n            },\n        });\n        const mockFetchImplementation = jest.fn(() =>\n            Promise.resolve({\n                ok: true,\n                body: mockResponse,\n            })\n        );\n        return mockFetchImplementation;\n    };\n\n    beforeEach(async () => {\n        // Mocking the AbortController constructor\n        mockAbortController = jest.fn();\n        global.AbortController = mockAbortController;\n        mockAbortController.mockReturnValue({\n            signal: \"signal\",\n            abort: jest.fn(),\n        });\n        mockFetch = createMockFetch();\n        jest.spyOn(global, \"fetch\").mockImplementation(mockFetch);\n\n    });\n\n    afterEach(async () => {\n        // Clean up mocks\n        mockAbortController.mockReset();\n        mockFetch.mockClear();\n        global.fetch.mockRestore();\n\n        const rootDefaultPath = path.resolve(DEFAULT_DIRECTORY),\n              partialPath = path.resolve(rootDefaultPath, fakeModelName+'.part'),\n              fullPath = path.resolve(rootDefaultPath, fakeModelName+'.gguf')\n\n        //if tests fail, remove the created files\n        // acts as cleanup if tests fail\n        //\n        if(existsSync(fullPath)) {\n            await 
fsp.rm(fullPath)\n        }\n        if(existsSync(partialPath)) {\n            await fsp.rm(partialPath)\n        }\n\n    });\n\n    test(\"should successfully download a model file\", async () => {\n        const downloadController = downloadModel(fakeModelName);\n        const modelFilePath = await downloadController.promise;\n        expect(modelFilePath).toBe(path.resolve(DEFAULT_DIRECTORY, `${fakeModelName}.gguf`));\n\n        expect(global.fetch).toHaveBeenCalledTimes(1);\n        expect(global.fetch).toHaveBeenCalledWith(\n            \"https://gpt4all.io/models/gguf/fake-model.gguf\",\n            {\n                signal: \"signal\",\n                headers: {\n                    \"Accept-Ranges\": \"arraybuffer\",\n                    \"Response-Type\": \"arraybuffer\",\n                },\n            }\n        );\n\n        // final model file should be present\n        await expect(fsp.access(modelFilePath)).resolves.not.toThrow();\n\n        // remove the testing model file\n        await fsp.unlink(modelFilePath);\n    });\n\n    test(\"should error and cleanup if md5sum is not matching\", async () => {\n        const downloadController = downloadModel(fakeModelName, {\n            md5sum: \"wrong-md5sum\",\n        });\n        // the promise should reject with a mismatch\n        await expect(downloadController.promise).rejects.toThrow(\n            `Model \"fake-model\" failed verification: Hashes mismatch. 
Expected wrong-md5sum, got 08d6c05a21512a79a1dfeb9d2a8f262f`\n        );\n        // fetch should have been called\n        expect(global.fetch).toHaveBeenCalledTimes(1);\n        // the file should be missing\n        await expect(\n            fsp.access(path.resolve(DEFAULT_DIRECTORY, `${fakeModelName}.gguf`))\n        ).rejects.toThrow();\n        // partial file should also be missing\n        await expect(\n            fsp.access(path.resolve(DEFAULT_DIRECTORY, `${fakeModelName}.part`))\n        ).rejects.toThrow();\n    });\n\n    // TODO\n    // test(\"should be able to cancel and resume a download\", async () => {\n    // });\n});\n"
  },
  {
    "path": "gpt4all-bindings/typescript/test/models.json",
    "content": "[\n  {\n    \"order\": \"a\",\n    \"md5sum\": \"08d6c05a21512a79a1dfeb9d2a8f262f\",\n    \"name\": \"Not a real model\",\n    \"filename\": \"fake-model.gguf\",\n    \"filesize\": \"4\",\n    \"systemPrompt\": \" \"\n  }\n]\n"
  },
  {
    "path": "gpt4all-chat/.flake8",
    "content": "# vim: set syntax=dosini:\n[flake8]\nexclude = .*,__pycache__\nmax-line-length = 120\nextend-ignore = B001,C408,D,DAR,E221,E303,E722,E741,E800,N801,N806,P101,S101,S324,S404,S406,S410,S603,WPS100,WPS110,WPS111,WPS113,WPS114,WPS115,WPS120,WPS2,WPS300,WPS301,WPS304,WPS305,WPS306,WPS309,WPS316,WPS317,WPS318,WPS319,WPS322,WPS323,WPS326,WPS329,WPS330,WPS332,WPS336,WPS337,WPS347,WPS360,WPS361,WPS407,WPS414,WPS420,WPS421,WPS429,WPS430,WPS431,WPS432,WPS433,WPS437,WPS440,WPS440,WPS441,WPS442,WPS457,WPS458,WPS460,WPS462,WPS463,WPS473,WPS501,WPS504,WPS505,WPS508,WPS509,WPS510,WPS515,WPS516,WPS519,WPS520,WPS529,WPS531,WPS602,WPS604,WPS605,WPS608,WPS609,WPS613,WPS615\n"
  },
  {
    "path": "gpt4all-chat/CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).\n\n## [3.10.0] - 2025-02-24\n\n### Added\n- Whitelist Granite (non-MoE) model architecture (by [@ThiloteE](https://github.com/ThiloteE) in [#3487](https://github.com/nomic-ai/gpt4all/pull/3487))\n- Add support for CUDA compute 5.0 GPUs such as the GTX 750 ([#3499](https://github.com/nomic-ai/gpt4all/pull/3499))\n- Add a Remote Providers tab to the Add Model page ([#3506](https://github.com/nomic-ai/gpt4all/pull/3506))\n\n### Changed\n- Substitute prettier default templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B (by [@ThiloteE](https://github.com/ThiloteE) in [#3471](https://github.com/nomic-ai/gpt4all/pull/3471))\n- Build with LLVM Clang 19 on macOS and Ubuntu ([#3500](https://github.com/nomic-ai/gpt4all/pull/3500))\n\n### Fixed\n- Fix several potential crashes ([#3465](https://github.com/nomic-ai/gpt4all/pull/3465))\n- Fix visual spacing issues with deepseek models ([#3470](https://github.com/nomic-ai/gpt4all/pull/3470))\n- Add missing strings to Italian translation (by [@Harvester62](https://github.com/Harvester62) in [#3496](https://github.com/nomic-ai/gpt4all/pull/3496))\n- Update Simplified Chinese translation (by [@Junior2Ran](https://github.com/Junior2Ran) in [#3467](https://github.com/nomic-ai/pull/3467))\n\n## [3.9.0] - 2025-02-04\n\n### Added\n- Whitelist OLMoE and Granite MoE model architectures (no Vulkan) (by [@ThiloteE](https://github.com/ThiloteE) in [#3449](https://github.com/nomic-ai/gpt4all/pull/3449))\n\n### Fixed\n- Fix \"index N is not a prompt\" when using LocalDocs with reasoning ([#3451](https://github.com/nomic-ai/gpt4all/pull/3451))\n- Work around rendering artifacts on Snapdragon SoCs with Windows ([#3450](https://github.com/nomic-ai/gpt4all/pull/3450))\n- Prevent DeepSeek-R1 reasoning from appearing in chat names and follow-up questions 
([#3458](https://github.com/nomic-ai/gpt4all/pull/3458))\n- Fix LocalDocs crash on Windows ARM when reading PDFs ([#3460](https://github.com/nomic-ai/gpt4all/pull/3460))\n- Fix UI freeze when chat template is `{#` ([#3446](https://github.com/nomic-ai/gpt4all/pull/3446))\n\n## [3.8.0] - 2025-01-30\n\n### Added\n- Support DeepSeek-R1 Qwen models ([#3431](https://github.com/nomic-ai/gpt4all/pull/3431))\n- Support for think tags in the GUI ([#3440](https://github.com/nomic-ai/gpt4all/pull/3440))\n- Support specifying SHA256 hash in models3.json instead of MD5 ([#3437](https://github.com/nomic-ai/gpt4all/pull/3437))\n\n### Changed\n- Use minja instead of Jinja2Cpp for significantly improved template compatibility ([#3433](https://github.com/nomic-ai/gpt4all/pull/3433))\n\n### Fixed\n- Fix regression while using localdocs with server API ([#3410](https://github.com/nomic-ai/gpt4all/pull/3410))\n- Don't show system messages in server chat view ([#3411](https://github.com/nomic-ai/gpt4all/pull/3411))\n- Fix `codesign --verify` failure on macOS ([#3413](https://github.com/nomic-ai/gpt4all/pull/3413))\n- Code Interpreter: Fix console.log not accepting a single string after v3.7.0 ([#3426](https://github.com/nomic-ai/gpt4all/pull/3426))\n- Fix Phi 3.1 Mini 128K Instruct template (by [@ThiloteE](https://github.com/ThiloteE) in [#3412](https://github.com/nomic-ai/gpt4all/pull/3412))\n- Don't block the gui thread for reasoning ([#3435](https://github.com/nomic-ai/gpt4all/pull/3435))\n- Fix corruption of unicode in output of reasoning models ([#3443](https://github.com/nomic-ai/gpt4all/pull/3443))\n\n## [3.7.0] - 2025-01-21\n\n### Added\n- Add support for the Windows ARM64 target platform (CPU-only) ([#3385](https://github.com/nomic-ai/gpt4all/pull/3385))\n\n### Changed\n- Update from Qt 6.5.1 to 6.8.1 ([#3386](https://github.com/nomic-ai/gpt4all/pull/3386))\n\n### Fixed\n- Fix the timeout error in code interpreter ([#3369](https://github.com/nomic-ai/gpt4all/pull/3369))\n- Fix 
code interpreter console.log not accepting multiple arguments ([#3371](https://github.com/nomic-ai/gpt4all/pull/3371))\n- Remove 'X is defined' checks from templates for better compatibility ([#3372](https://github.com/nomic-ai/gpt4all/pull/3372))\n- Jinja2Cpp: Add 'if' requirement for 'else' parsing to fix crash ([#3373](https://github.com/nomic-ai/gpt4all/pull/3373))\n- Save chats on quit, even if the window isn't closed first ([#3387](https://github.com/nomic-ai/gpt4all/pull/3387))\n- Add chat template replacements for five new models and fix EM German Mistral ([#3393](https://github.com/nomic-ai/gpt4all/pull/3393))\n- Fix crash when entering `{{ a[\"foo\"(` as chat template ([#3394](https://github.com/nomic-ai/gpt4all/pull/3394))\n- Sign the maintenance tool on macOS to prevent crash on Sequoia ([#3391](https://github.com/nomic-ai/gpt4all/pull/3391))\n- Jinja2Cpp: Fix operator precedence in 'not X is defined' ([#3402](https://github.com/nomic-ai/gpt4all/pull/3402))\n\n## [3.6.1] - 2024-12-20\n\n### Fixed\n- Fix the stop generation button no longer working in v3.6.0 ([#3336](https://github.com/nomic-ai/gpt4all/pull/3336))\n- Fix the copy entire conversation button no longer working in v3.6.0 ([#3336](https://github.com/nomic-ai/gpt4all/pull/3336))\n\n## [3.6.0] - 2024-12-19\n\n### Added\n- Automatically substitute chat templates that are not compatible with Jinja2Cpp in GGUFs ([#3327](https://github.com/nomic-ai/gpt4all/pull/3327))\n- Built-in javascript code interpreter tool plus model ([#3173](https://github.com/nomic-ai/gpt4all/pull/3173))\n\n### Fixed\n- Fix remote model template to allow for XML in messages ([#3318](https://github.com/nomic-ai/gpt4all/pull/3318))\n- Fix Jinja2Cpp bug that broke system message detection in chat templates ([#3325](https://github.com/nomic-ai/gpt4all/pull/3325))\n- Fix LocalDocs sources displaying in unconsolidated form after v3.5.0 ([#3328](https://github.com/nomic-ai/gpt4all/pull/3328))\n\n## [3.5.3] - 2024-12-16\n\n### 
Fixed\n- Fix LocalDocs not using information from sources in v3.5.2 ([#3302](https://github.com/nomic-ai/gpt4all/pull/3302))\n\n## [3.5.2] - 2024-12-13\n\n### Added\n- Create separate download pages for built-in and HuggingFace models ([#3269](https://github.com/nomic-ai/gpt4all/pull/3269))\n\n### Fixed\n- Fix API server ignoring assistant messages in history after v3.5.0 ([#3256](https://github.com/nomic-ai/gpt4all/pull/3256))\n- Fix API server replying with incorrect token counts and stop reason after v3.5.0 ([#3256](https://github.com/nomic-ai/gpt4all/pull/3256))\n- Fix API server remembering previous, unrelated conversations after v3.5.0 ([#3256](https://github.com/nomic-ai/gpt4all/pull/3256))\n- Fix mishandling of default chat template and system message of cloned models in v3.5.0 ([#3262](https://github.com/nomic-ai/gpt4all/pull/3262))\n- Fix untranslated text on the startup dialog ([#3293](https://github.com/nomic-ai/gpt4all/pull/3293))\n\n## [3.5.1] - 2024-12-10\n\n### Fixed\n- Fix an incorrect value for currentResponse ([#3245](https://github.com/nomic-ai/gpt4all/pull/3245))\n- Fix the default model button so it works again after 3.5.0 ([#3246](https://github.com/nomic-ai/gpt4all/pull/3246))\n- Fix chat templates for Nous Hermes 2 Mistral, Mistral OpenOrca, Qwen 2, and remote models ([#3250](https://github.com/nomic-ai/gpt4all/pull/3250))\n- Fix chat templates for Llama 3.2 models ([#3251](https://github.com/nomic-ai/gpt4all/pull/3251))\n\n## [3.5.0] - 2024-12-09\n\n### Changed\n- Update Italian translation (by [@Harvester62](https://github.com/Harvester62) in [#3236](https://github.com/nomic-ai/gpt4all/pull/3236))\n- Update Romanian translation (by [@SINAPSA-IC](https://github.com/SINAPSA-IC) in [#3232](https://github.com/nomic-ai/gpt4all/pull/3232))\n\n### Fixed\n- Fix a few more problems with the Jinja changes ([#3239](https://github.com/nomic-ai/gpt4all/pull/3239))\n\n## [3.5.0-rc2] - 2024-12-06\n\n### Changed\n- Fade messages out with an animation 
when they are removed from the chat view ([#3227](https://github.com/nomic-ai/gpt4all/pull/3227))\n- Tweak wording of edit/redo confirmation dialogs ([#3228](https://github.com/nomic-ai/gpt4all/pull/3228))\n- Make edit/redo buttons disabled instead of invisible when they are temporarily unavailable ([#3228](https://github.com/nomic-ai/gpt4all/pull/3228))\n\n## [3.5.0-rc1] - 2024-12-04\n\n### Added\n- Add ability to attach text, markdown, and rst files to chat ([#3135](https://github.com/nomic-ai/gpt4all/pull/3135))\n- Add feature to minimize to system tray (by [@bgallois](https://github.com/bgallois) in [#3109](https://github.com/nomic-ai/gpt4all/pull/3109))\n- Basic cache for faster prefill when the input shares a prefix with previous context ([#3073](https://github.com/nomic-ai/gpt4all/pull/3073))\n- Add ability to edit prompts and regenerate any response ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n\n### Changed\n- Implement Qt 6.8 compatibility ([#3121](https://github.com/nomic-ai/gpt4all/pull/3121))\n- Use Jinja for chat templates instead of per-message QString.arg-style templates ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n- API server: Use system message(s) from client instead of settings ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n- API server: Accept messages in any order supported by the model instead of requiring user/assistant pairs ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n- Remote models: Pass system message with \"system\" role instead of joining with user message ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n\n### Removed\n- Remove option to save binary model state to disk ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))\n\n### Fixed\n- Fix bug in GUI when localdocs encounters binary data ([#3137](https://github.com/nomic-ai/gpt4all/pull/3137))\n- Fix LocalDocs bugs that prevented some docx files from fully chunking 
([#3140](https://github.com/nomic-ai/gpt4all/pull/3140))\n- Fix missing softmax that was causing crashes and effectively infinite temperature since 3.4.0 ([#3202](https://github.com/nomic-ai/gpt4all/pull/3202))\n\n## [3.4.2] - 2024-10-16\n\n### Fixed\n- Limit bm25 retrieval to only specified collections ([#3083](https://github.com/nomic-ai/gpt4all/pull/3083))\n- Fix bug removing documents because of a wrong case sensitive file suffix check ([#3083](https://github.com/nomic-ai/gpt4all/pull/3083))\n- Fix bug with hybrid localdocs search where database would get out of sync ([#3083](https://github.com/nomic-ai/gpt4all/pull/3083))\n- Fix GUI bug where the localdocs embedding device appears blank ([#3083](https://github.com/nomic-ai/gpt4all/pull/3083))\n- Prevent LocalDocs from not making progress in certain cases ([#3094](https://github.com/nomic-ai/gpt4all/pull/3094))\n\n## [3.4.1] - 2024-10-11\n\n### Fixed\n- Improve the Italian translation ([#3048](https://github.com/nomic-ai/gpt4all/pull/3048))\n- Fix models.json cache location ([#3052](https://github.com/nomic-ai/gpt4all/pull/3052))\n- Fix LocalDocs regressions caused by docx change ([#3079](https://github.com/nomic-ai/gpt4all/pull/3079))\n- Fix Go code being highlighted as Java ([#3080](https://github.com/nomic-ai/gpt4all/pull/3080))\n\n## [3.4.0] - 2024-10-08\n\n### Added\n- Add bm25 hybrid search to localdocs ([#2969](https://github.com/nomic-ai/gpt4all/pull/2969))\n- LocalDocs support for .docx files ([#2986](https://github.com/nomic-ai/gpt4all/pull/2986))\n- Add support for attaching Excel spreadsheet to chat ([#3007](https://github.com/nomic-ai/gpt4all/pull/3007), [#3028](https://github.com/nomic-ai/gpt4all/pull/3028))\n\n### Changed\n- Rebase llama.cpp on latest upstream as of September 26th ([#2998](https://github.com/nomic-ai/gpt4all/pull/2998))\n- Change the error message when a message is too long ([#3004](https://github.com/nomic-ai/gpt4all/pull/3004))\n- Simplify chatmodel to get rid of unnecessary 
field and bump chat version ([#3016](https://github.com/nomic-ai/gpt4all/pull/3016))\n- Allow ChatLLM to have direct access to ChatModel for restoring state from text ([#3018](https://github.com/nomic-ai/gpt4all/pull/3018))\n- Improvements to XLSX conversion and UI fix ([#3022](https://github.com/nomic-ai/gpt4all/pull/3022))\n\n### Fixed\n- Fix a crash when attempting to continue a chat loaded from disk ([#2995](https://github.com/nomic-ai/gpt4all/pull/2995))\n- Fix the local server rejecting min\\_p/top\\_p less than 1 ([#2996](https://github.com/nomic-ai/gpt4all/pull/2996))\n- Fix \"regenerate\" always forgetting the most recent message ([#3011](https://github.com/nomic-ai/gpt4all/pull/3011))\n- Fix loaded chats forgetting context when there is a system prompt ([#3015](https://github.com/nomic-ai/gpt4all/pull/3015))\n- Make it possible to downgrade and keep some chats, and avoid crash for some model types ([#3030](https://github.com/nomic-ai/gpt4all/pull/3030))\n- Fix scroll position being reset in model view, and attempt a better fix for the clone issue ([#3042](https://github.com/nomic-ai/gpt4all/pull/3042))\n\n## [3.3.1] - 2024-09-27 ([v3.3.y](https://github.com/nomic-ai/gpt4all/tree/v3.3.y))\n\n### Fixed\n- Fix a crash when attempting to continue a chat loaded from disk ([#2995](https://github.com/nomic-ai/gpt4all/pull/2995))\n- Fix the local server rejecting min\\_p/top\\_p less than 1 ([#2996](https://github.com/nomic-ai/gpt4all/pull/2996))\n\n## [3.3.0] - 2024-09-20\n\n### Added\n- Use greedy sampling when temperature is set to zero ([#2854](https://github.com/nomic-ai/gpt4all/pull/2854))\n- Use configured system prompt in server mode and ignore system messages ([#2921](https://github.com/nomic-ai/gpt4all/pull/2921), [#2924](https://github.com/nomic-ai/gpt4all/pull/2924))\n- Add more system information to anonymous usage stats ([#2939](https://github.com/nomic-ai/gpt4all/pull/2939))\n- Check for unsupported Ubuntu and macOS versions at install time 
([#2940](https://github.com/nomic-ai/gpt4all/pull/2940))\n\n### Changed\n- The offline update button now directs users to the offline installer releases page. (by [@3Simplex](https://github.com/3Simplex) in [#2888](https://github.com/nomic-ai/gpt4all/pull/2888))\n- Change the website link on the home page to point to the new URL ([#2915](https://github.com/nomic-ai/gpt4all/pull/2915))\n- Smaller default window size, dynamic minimum size, and scaling tweaks ([#2904](https://github.com/nomic-ai/gpt4all/pull/2904))\n- Only allow a single instance of program to be run at a time ([#2923](https://github.com/nomic-ai/gpt4all/pull/2923))\n\n### Fixed\n- Bring back \"Auto\" option for Embeddings Device as \"Application default,\" which went missing in v3.1.0 ([#2873](https://github.com/nomic-ai/gpt4all/pull/2873))\n- Correct a few strings in the Italian translation (by [@Harvester62](https://github.com/Harvester62) in [#2872](https://github.com/nomic-ai/gpt4all/pull/2872) and [#2909](https://github.com/nomic-ai/gpt4all/pull/2909))\n- Correct typos in Traditional Chinese translation (by [@supersonictw](https://github.com/supersonictw) in [#2852](https://github.com/nomic-ai/gpt4all/pull/2852))\n- Set the window icon on Linux ([#2880](https://github.com/nomic-ai/gpt4all/pull/2880))\n- Corrections to the Romanian translation (by [@SINAPSA-IC](https://github.com/SINAPSA-IC) in [#2890](https://github.com/nomic-ai/gpt4all/pull/2890))\n- Fix singular/plural forms of LocalDocs \"x Sources\" (by [@cosmic-snow](https://github.com/cosmic-snow) in [#2885](https://github.com/nomic-ai/gpt4all/pull/2885))\n- Fix a typo in Model Settings (by [@3Simplex](https://github.com/3Simplex) in [#2916](https://github.com/nomic-ai/gpt4all/pull/2916))\n- Fix the antenna icon tooltip when using the local server ([#2922](https://github.com/nomic-ai/gpt4all/pull/2922))\n- Fix a few issues with locating files and handling errors when loading remote models on startup 
([#2875](https://github.com/nomic-ai/gpt4all/pull/2875))\n- Significantly improve API server request parsing and response correctness ([#2929](https://github.com/nomic-ai/gpt4all/pull/2929))\n- Remove unnecessary dependency on Qt WaylandCompositor module ([#2949](https://github.com/nomic-ai/gpt4all/pull/2949))\n- Update translations ([#2970](https://github.com/nomic-ai/gpt4all/pull/2970))\n- Fix macOS installer and remove extra installed copy of Nomic Embed ([#2973](https://github.com/nomic-ai/gpt4all/pull/2973))\n\n## [3.2.1] - 2024-08-13\n\n### Fixed\n- Do not initialize Vulkan driver when only using CPU ([#2843](https://github.com/nomic-ai/gpt4all/pull/2843))\n- Fix a potential crash on exit when using only CPU on Linux with NVIDIA (does not affect X11) ([#2843](https://github.com/nomic-ai/gpt4all/pull/2843))\n- Fix default CUDA architecture list after [#2802](https://github.com/nomic-ai/gpt4all/pull/2802) ([#2855](https://github.com/nomic-ai/gpt4all/pull/2855))\n\n## [3.2.0] - 2024-08-12\n\n### Added\n- Add Qwen2-1.5B-Instruct to models3.json (by [@ThiloteE](https://github.com/ThiloteE) in [#2759](https://github.com/nomic-ai/gpt4all/pull/2759))\n- Enable translation feature for seven languages: English, Spanish, Italian, Portuguese, Chinese Simplified, Chinese Traditional, Romanian ([#2830](https://github.com/nomic-ai/gpt4all/pull/2830))\n\n### Changed\n- Add missing entries to Italian translation (by [@Harvester62](https://github.com/Harvester62) in [#2783](https://github.com/nomic-ai/gpt4all/pull/2783))\n- Use llama\\_kv\\_cache ops to shift context faster ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))\n- Don't stop generating at end of context ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))\n\n### Fixed\n- Case-insensitive LocalDocs source icon detection (by [@cosmic-snow](https://github.com/cosmic-snow) in [#2761](https://github.com/nomic-ai/gpt4all/pull/2761))\n- Fix comparison of pre- and post-release versions for update check and 
models3.json ([#2762](https://github.com/nomic-ai/gpt4all/pull/2762), [#2772](https://github.com/nomic-ai/gpt4all/pull/2772))\n- Fix several backend issues ([#2778](https://github.com/nomic-ai/gpt4all/pull/2778))\n  - Restore leading space removal logic that was incorrectly removed in [#2694](https://github.com/nomic-ai/gpt4all/pull/2694)\n  - CUDA: Cherry-pick llama.cpp DMMV cols requirement fix that caused a crash with long conversations since [#2694](https://github.com/nomic-ai/gpt4all/pull/2694)\n- Make reverse prompt detection work more reliably and prevent it from breaking output ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))\n- Disallow context shift for chat name and follow-up generation to prevent bugs ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))\n- Explicitly target macOS 12.6 in CI to fix Metal compatibility on older macOS ([#2846](https://github.com/nomic-ai/gpt4all/pull/2846))\n\n## [3.1.1] - 2024-07-27\n\n### Added\n- Add Llama 3.1 8B Instruct to models3.json (by [@3Simplex](https://github.com/3Simplex) in [#2731](https://github.com/nomic-ai/gpt4all/pull/2731) and [#2732](https://github.com/nomic-ai/gpt4all/pull/2732))\n- Portuguese (BR) translation (by [thiagojramos](https://github.com/thiagojramos) in [#2733](https://github.com/nomic-ai/gpt4all/pull/2733))\n- Support adding arbitrary OpenAI-compatible models by URL (by [@supersonictw](https://github.com/supersonictw) in [#2683](https://github.com/nomic-ai/gpt4all/pull/2683))\n- Support Llama 3.1 RoPE scaling ([#2758](https://github.com/nomic-ai/gpt4all/pull/2758))\n\n### Changed\n- Add missing entries to Chinese (Simplified) translation (by [wuodoo](https://github.com/wuodoo) in [#2716](https://github.com/nomic-ai/gpt4all/pull/2716) and [#2749](https://github.com/nomic-ai/gpt4all/pull/2749))\n- Update translation files and add missing paths to CMakeLists.txt ([#2735](https://github.com/nomic-ai/gpt4all/pull/2735))\n\n## [3.1.0] - 2024-07-24\n\n### Added\n- Generate suggested 
follow-up questions ([#2634](https://github.com/nomic-ai/gpt4all/pull/2634), [#2723](https://github.com/nomic-ai/gpt4all/pull/2723))\n  - Also add options for the chat name and follow-up question prompt templates\n- Scaffolding for translations ([#2612](https://github.com/nomic-ai/gpt4all/pull/2612))\n- Spanish (MX) translation (by [@jstayco](https://github.com/jstayco) in [#2654](https://github.com/nomic-ai/gpt4all/pull/2654))\n- Chinese (Simplified) translation by mikage ([#2657](https://github.com/nomic-ai/gpt4all/pull/2657))\n- Dynamic changes of language and locale at runtime ([#2659](https://github.com/nomic-ai/gpt4all/pull/2659), [#2677](https://github.com/nomic-ai/gpt4all/pull/2677))\n- Romanian translation by [@SINAPSA\\_IC](https://github.com/SINAPSA_IC) ([#2662](https://github.com/nomic-ai/gpt4all/pull/2662))\n- Chinese (Traditional) translation (by [@supersonictw](https://github.com/supersonictw) in [#2661](https://github.com/nomic-ai/gpt4all/pull/2661))\n- Italian translation (by [@Harvester62](https://github.com/Harvester62) in [#2700](https://github.com/nomic-ai/gpt4all/pull/2700))\n\n### Changed\n- Customize combo boxes and context menus to fit the new style ([#2535](https://github.com/nomic-ai/gpt4all/pull/2535))\n- Improve view bar scaling and Model Settings layout ([#2520](https://github.com/nomic-ai/gpt4all/pull/2520))\n- Make the logo spin while the model is generating ([#2557](https://github.com/nomic-ai/gpt4all/pull/2557))\n- Server: Reply to wrong GET/POST method with HTTP 405 instead of 404 (by [@cosmic-snow](https://github.com/cosmic-snow) in [#2615](https://github.com/nomic-ai/gpt4all/pull/2615))\n- Update theme for menus (by [@3Simplex](https://github.com/3Simplex) in [#2578](https://github.com/nomic-ai/gpt4all/pull/2578))\n- Move the \"stop\" button to the message box ([#2561](https://github.com/nomic-ai/gpt4all/pull/2561))\n- Build with CUDA 11.8 for better compatibility ([#2639](https://github.com/nomic-ai/gpt4all/pull/2639))\n- Make 
links in latest news section clickable ([#2643](https://github.com/nomic-ai/gpt4all/pull/2643))\n- Support translation of settings choices ([#2667](https://github.com/nomic-ai/gpt4all/pull/2667), [#2690](https://github.com/nomic-ai/gpt4all/pull/2690))\n- Improve LocalDocs view's error message (by @cosmic-snow in [#2679](https://github.com/nomic-ai/gpt4all/pull/2679))\n- Ignore case of LocalDocs file extensions ([#2642](https://github.com/nomic-ai/gpt4all/pull/2642), [#2684](https://github.com/nomic-ai/gpt4all/pull/2684))\n- Update llama.cpp to commit 87e397d00 from July 19th ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694), [#2702](https://github.com/nomic-ai/gpt4all/pull/2702))\n  - Add support for GPT-NeoX, Gemma 2, OpenELM, ChatGLM, and Jais architectures (all with Vulkan support)\n  - Add support for DeepSeek-V2 architecture (no Vulkan support)\n  - Enable Vulkan support for StarCoder2, XVERSE, Command R, and OLMo\n- Show scrollbar in chat collections list as needed (by [@cosmic-snow](https://github.com/cosmic-snow) in [#2691](https://github.com/nomic-ai/gpt4all/pull/2691))\n\n### Removed\n- Remove support for GPT-J models ([#2676](https://github.com/nomic-ai/gpt4all/pull/2676), [#2693](https://github.com/nomic-ai/gpt4all/pull/2693))\n\n### Fixed\n- Fix placement of thumbs-down and datalake opt-in dialogs ([#2540](https://github.com/nomic-ai/gpt4all/pull/2540))\n- Select the correct folder with the Linux fallback folder dialog ([#2541](https://github.com/nomic-ai/gpt4all/pull/2541))\n- Fix clone button sometimes producing blank model info ([#2545](https://github.com/nomic-ai/gpt4all/pull/2545))\n- Fix jerky chat view scrolling ([#2555](https://github.com/nomic-ai/gpt4all/pull/2555))\n- Fix \"reload\" showing for chats with missing models ([#2520](https://github.com/nomic-ai/gpt4all/pull/2520))\n- Fix property binding loop warning ([#2601](https://github.com/nomic-ai/gpt4all/pull/2601))\n- Fix UI hang with certain chat view content 
([#2543](https://github.com/nomic-ai/gpt4all/pull/2543))\n- Fix crash when Kompute falls back to CPU ([#2640](https://github.com/nomic-ai/gpt4all/pull/2640))\n- Fix several Vulkan resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))\n- Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701))\n\n[3.10.0]: https://github.com/nomic-ai/gpt4all/compare/v3.9.0...v3.10.0\n[3.9.0]: https://github.com/nomic-ai/gpt4all/compare/v3.8.0...v3.9.0\n[3.8.0]: https://github.com/nomic-ai/gpt4all/compare/v3.7.0...v3.8.0\n[3.7.0]: https://github.com/nomic-ai/gpt4all/compare/v3.6.1...v3.7.0\n[3.6.1]: https://github.com/nomic-ai/gpt4all/compare/v3.6.0...v3.6.1\n[3.6.0]: https://github.com/nomic-ai/gpt4all/compare/v3.5.3...v3.6.0\n[3.5.3]: https://github.com/nomic-ai/gpt4all/compare/v3.5.2...v3.5.3\n[3.5.2]: https://github.com/nomic-ai/gpt4all/compare/v3.5.1...v3.5.2\n[3.5.1]: https://github.com/nomic-ai/gpt4all/compare/v3.5.0...v3.5.1\n[3.5.0]: https://github.com/nomic-ai/gpt4all/compare/v3.5.0-rc2...v3.5.0\n[3.5.0-rc2]: https://github.com/nomic-ai/gpt4all/compare/v3.5.0-rc1...v3.5.0-rc2\n[3.5.0-rc1]: https://github.com/nomic-ai/gpt4all/compare/v3.4.2...v3.5.0-rc1\n[3.4.2]: https://github.com/nomic-ai/gpt4all/compare/v3.4.1...v3.4.2\n[3.4.1]: https://github.com/nomic-ai/gpt4all/compare/v3.4.0...v3.4.1\n[3.4.0]: https://github.com/nomic-ai/gpt4all/compare/v3.3.0...v3.4.0\n[3.3.1]: https://github.com/nomic-ai/gpt4all/compare/v3.3.0...v3.3.1\n[3.3.0]: https://github.com/nomic-ai/gpt4all/compare/v3.2.1...v3.3.0\n[3.2.1]: https://github.com/nomic-ai/gpt4all/compare/v3.2.0...v3.2.1\n[3.2.0]: https://github.com/nomic-ai/gpt4all/compare/v3.1.1...v3.2.0\n[3.1.1]: https://github.com/nomic-ai/gpt4all/compare/v3.1.0...v3.1.1\n[3.1.0]: https://github.com/nomic-ai/gpt4all/compare/v3.0.0...v3.1.0\n"
  },
  {
    "path": "gpt4all-chat/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.25)  # for try_compile SOURCE_FROM_VAR\n\ninclude(../common/common.cmake)\n\nset(APP_VERSION_MAJOR 3)\nset(APP_VERSION_MINOR 10)\nset(APP_VERSION_PATCH 1)\nset(APP_VERSION_BASE \"${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}\")\nset(APP_VERSION \"${APP_VERSION_BASE}-dev0\")\n\nproject(gpt4all VERSION ${APP_VERSION_BASE} LANGUAGES CXX C)\n\nif (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)\n  set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH \"...\" FORCE)\nendif()\n\nif(APPLE)\n  option(BUILD_UNIVERSAL \"Build a Universal binary on macOS\" OFF)\n  if(BUILD_UNIVERSAL)\n    # Build a Universal binary on macOS\n    # This requires that the found Qt library is compiled as Universal binaries.\n    set(CMAKE_OSX_ARCHITECTURES \"arm64;x86_64\" CACHE STRING \"\" FORCE)\n  else()\n    # Build for the host architecture on macOS\n    set(CMAKE_OSX_ARCHITECTURES \"${CMAKE_HOST_SYSTEM_PROCESSOR}\" CACHE STRING \"\" FORCE)\n  endif()\nendif()\n\nfind_package(Python3 3.12 QUIET COMPONENTS Interpreter)\n\noption(GPT4ALL_TEST \"Build the tests\" ${Python3_FOUND})\noption(GPT4ALL_LOCALHOST \"Build installer for localhost repo\" OFF)\noption(GPT4ALL_OFFLINE_INSTALLER \"Build an offline installer\" OFF)\noption(GPT4ALL_SIGN_INSTALL \"Sign installed binaries and installers (requires signing identities)\" OFF)\noption(GPT4ALL_GEN_CPACK_CONFIG \"Generate the CPack config.xml in the package step and nothing else.\" OFF)\nset(GPT4ALL_USE_QTPDF \"AUTO\" CACHE STRING \"Whether to Use QtPDF for LocalDocs. If OFF or not available on this platform, PDFium is used.\")\nset_property(CACHE GPT4ALL_USE_QTPDF PROPERTY STRINGS AUTO ON OFF)\nset(GPT4ALL_FORCE_D3D12 \"AUTO\" CACHE STRING \"Whether to use Direct3D 12 as the Qt scene graph backend. 
Defaults to ON on Windows ARM.\")\nset_property(CACHE GPT4ALL_FORCE_D3D12 PROPERTY STRINGS AUTO ON OFF)\n\ninclude(cmake/cpack_config.cmake)\n\nif (GPT4ALL_GEN_CPACK_CONFIG)\n    configure_file(\"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cpack-steal-config.cmake.in\"\n                   \"${CMAKE_BINARY_DIR}/cmake/cpack-steal-config.cmake\" @ONLY)\n    set(CPACK_POST_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/cpack-steal-config.cmake)\n    include(CPack)\n    include(CPackIFW)\n    return()\nendif()\n\nset(CMAKE_EXPORT_COMPILE_COMMANDS ON)\nset(CMAKE_CXX_STANDARD 23)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nif (MSVC)\n    # Enable accurate __cplusplus macro\n    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/Zc:__cplusplus>)\nendif()\n\n\n# conftests\nfunction(check_cpp_feature FEATURE_NAME MIN_VALUE)\n    message(CHECK_START \"Checking for ${FEATURE_NAME} >= ${MIN_VALUE}\")\n    string(CONCAT SRC\n        \"#include <version>\\n\"\n        \"#if !defined(${FEATURE_NAME}) || ${FEATURE_NAME} < ${MIN_VALUE}\\n\"\n        \"#   error \\\"${FEATURE_NAME} is not defined or less than ${MIN_VALUE}\\\"\\n\"\n        \"#endif\\n\"\n        \"int main() { return 0; }\\n\"\n    )\n    try_compile(HAS_FEATURE SOURCE_FROM_VAR \"test_${FEATURE_NAME}.cpp\" SRC)\n    if (NOT HAS_FEATURE)\n        message(CHECK_FAIL \"fail\")\n        message(FATAL_ERROR\n            \"The C++ compiler\\n  \\\"${CMAKE_CXX_COMPILER}\\\"\\n\"\n            \"is too old to support ${FEATURE_NAME} >= ${MIN_VALUE}.\\n\"\n            \"Please specify a newer compiler via -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER.\"\n        )\n    endif()\n  message(CHECK_PASS \"pass\")\nendfunction()\n\n# check for monadic operations in std::optional (e.g. 
transform)\ncheck_cpp_feature(\"__cpp_lib_optional\" \"202110L\")\n\n\nlist(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_LIST_DIR}/cmake/Modules\")\n\n# Include the binary directory for the generated header file\ninclude_directories(\"${CMAKE_CURRENT_BINARY_DIR}\")\n\nset(CMAKE_AUTOMOC ON)\nset(CMAKE_AUTORCC ON)\n\nset(CMAKE_FIND_PACKAGE_TARGETS_GLOBAL ON)\nset(GPT4ALL_QT_COMPONENTS Core HttpServer LinguistTools Quick QuickDialogs2 Sql Svg)\nset(GPT4ALL_USING_QTPDF OFF)\nif (CMAKE_SYSTEM_NAME MATCHES Windows AND CMAKE_SYSTEM_PROCESSOR MATCHES \"^(aarch64|AARCH64|arm64|ARM64)$\")\n    # QtPDF is not available.\n    if (GPT4ALL_USE_QTPDF STREQUAL \"ON\")\n        message(FATAL_ERROR \"QtPDF is not available on Windows ARM64.\")\n    endif()\nelseif (GPT4ALL_USE_QTPDF MATCHES \"^(ON|AUTO)$\")\n    set(GPT4ALL_USING_QTPDF ON)\n    list(APPEND GPT4ALL_QT_COMPONENTS Pdf)\nendif()\nfind_package(Qt6 6.8 COMPONENTS ${GPT4ALL_QT_COMPONENTS} REQUIRED)\n\nif (QT_KNOWN_POLICY_QTP0004)\n    qt_policy(SET QTP0004 NEW)  # generate extra qmldir files on Qt 6.8+\nendif()\n\n# Get the Qt6Core target properties\nget_target_property(Qt6Core_INCLUDE_DIRS Qt6::Core INTERFACE_INCLUDE_DIRECTORIES)\nget_target_property(Qt6Core_LIBRARY_RELEASE Qt6::Core LOCATION_RELEASE)\n\n# Find the qmake binary\nfind_program(QMAKE_EXECUTABLE NAMES qmake qmake6 PATHS ${Qt6Core_INCLUDE_DIRS}/../.. 
NO_DEFAULT_PATH)\n\n# Get the Qt 6 root directory\nget_filename_component(Qt6_ROOT_DIR \"${Qt6Core_LIBRARY_RELEASE}\" DIRECTORY)\nget_filename_component(Qt6_ROOT_DIR \"${Qt6_ROOT_DIR}/..\" ABSOLUTE)\n\nmessage(STATUS \"qmake binary: ${QMAKE_EXECUTABLE}\")\nmessage(STATUS \"Qt 6 root directory: ${Qt6_ROOT_DIR}\")\n\nset(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)\n\nset(GPT4ALL_CONFIG_FORCE_D3D12 -1)\nif (NOT CMAKE_SYSTEM_NAME MATCHES Windows OR Qt6_VERSION VERSION_LESS \"6.6\")\n    # Direct3D 12 is not available.\n    if (GPT4ALL_FORCE_D3D12 STREQUAL \"ON\")\n        message(FATAL_ERROR \"Cannot use Direct3D 12 on this platform.\")\n    endif()\nelseif (GPT4ALL_FORCE_D3D12 MATCHES \"^(ON|AUTO)$\")\n    if (GPT4ALL_FORCE_D3D12 STREQUAL \"ON\" OR CMAKE_SYSTEM_PROCESSOR MATCHES \"^(aarch64|AARCH64|arm64|ARM64)$\")\n        set(GPT4ALL_CONFIG_FORCE_D3D12 1)\n    endif()\nendif()\n\n# Generate a header file for configuration\nconfigure_file(\n    \"${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in\"\n    \"${CMAKE_CURRENT_BINARY_DIR}/config.h\"\n)\n\nadd_subdirectory(deps)\nadd_subdirectory(../gpt4all-backend llmodel)\n\nif (GPT4ALL_TEST)\n    enable_testing()\n\n    # Llama-3.2-1B model\n    set(TEST_MODEL \"Llama-3.2-1B-Instruct-Q4_0.gguf\")\n    set(TEST_MODEL_MD5 \"48ff0243978606fdba19d899b77802fc\")\n    set(TEST_MODEL_PATH \"${CMAKE_BINARY_DIR}/resources/${TEST_MODEL}\")\n    set(TEST_MODEL_URL \"https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/${TEST_MODEL}\")\n\n    # Create a custom command to download the file if it does not exist or if the checksum does not match\n    add_custom_command(\n        OUTPUT \"${TEST_MODEL_PATH}\"\n        COMMAND ${CMAKE_COMMAND} -E echo \"Downloading test model from ${TEST_MODEL_URL} ...\"\n        COMMAND ${CMAKE_COMMAND} -DURL=\"${TEST_MODEL_URL}\" -DOUTPUT_PATH=\"${TEST_MODEL_PATH}\" -DEXPECTED_MD5=\"${TEST_MODEL_MD5}\" -P \"${CMAKE_SOURCE_DIR}/cmake/download_model.cmake\"\n        DEPENDS 
\"${CMAKE_SOURCE_DIR}/cmake/download_model.cmake\"\n    )\n\n    # Define a custom target that depends on the downloaded model\n    add_custom_target(download_test_model\n        DEPENDS \"${TEST_MODEL_PATH}\"\n    )\n\n    add_subdirectory(tests)\n\n    # The 'check' target makes sure the tests and their dependencies are up-to-date before running them\n    add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure DEPENDS download_test_model chat gpt4all_tests)\nendif()\n\nset(CHAT_EXE_RESOURCES)\n\n# Metal shader library\nif (APPLE)\n    list(APPEND CHAT_EXE_RESOURCES \"${GGML_METALLIB}\")\nendif()\n\n# App icon\nif (WIN32)\n    list(APPEND CHAT_EXE_RESOURCES \"${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.rc\")\nelseif (APPLE)\n    # The MACOSX_BUNDLE_ICON_FILE variable is added to the Info.plist\n    # generated by CMake. This variable contains the .icns file name,\n    # without the path.\n    set(MACOSX_BUNDLE_ICON_FILE gpt4all.icns)\n\n    # And the following tells CMake where to find and install the file itself.\n    set(APP_ICON_RESOURCE \"${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns\")\n    list(APPEND CHAT_EXE_RESOURCES \"${APP_ICON_RESOURCE}\")\nendif()\n\n# Embedding model\nset(LOCAL_EMBEDDING_MODEL \"nomic-embed-text-v1.5.f16.gguf\")\nset(LOCAL_EMBEDDING_MODEL_MD5 \"a5401e7f7e46ed9fcaed5b60a281d547\")\nset(LOCAL_EMBEDDING_MODEL_PATH \"${CMAKE_BINARY_DIR}/resources/${LOCAL_EMBEDDING_MODEL}\")\nset(LOCAL_EMBEDDING_MODEL_URL \"https://gpt4all.io/models/gguf/${LOCAL_EMBEDDING_MODEL}\")\nmessage(STATUS \"Downloading embedding model from ${LOCAL_EMBEDDING_MODEL_URL} ...\")\nfile(DOWNLOAD\n    \"${LOCAL_EMBEDDING_MODEL_URL}\"\n    \"${LOCAL_EMBEDDING_MODEL_PATH}\"\n    EXPECTED_HASH \"MD5=${LOCAL_EMBEDDING_MODEL_MD5}\"\n)\nmessage(STATUS \"Embedding model downloaded to ${LOCAL_EMBEDDING_MODEL_PATH}\")\nif (APPLE)\n    list(APPEND CHAT_EXE_RESOURCES \"${LOCAL_EMBEDDING_MODEL_PATH}\")\nendif()\n\nif (DEFINED GGML_METALLIB)\n    
set_source_files_properties(\"${GGML_METALLIB}\" PROPERTIES GENERATED ON)\nendif()\nif (APPLE)\n    set_source_files_properties(${CHAT_EXE_RESOURCES} PROPERTIES MACOSX_PACKAGE_LOCATION Resources)\nendif()\n\nset(MACOS_SOURCES)\nif (APPLE)\n    find_library(COCOA_LIBRARY Cocoa)\n    list(APPEND MACOS_SOURCES src/macosdock.mm src/macosdock.h)\nendif()\n\nqt_add_executable(chat\n    src/main.cpp\n    src/chat.cpp                  src/chat.h\n    src/chatapi.cpp               src/chatapi.h\n    src/chatlistmodel.cpp         src/chatlistmodel.h\n    src/chatllm.cpp               src/chatllm.h\n    src/chatmodel.h               src/chatmodel.cpp\n    src/chatviewtextprocessor.cpp src/chatviewtextprocessor.h\n    src/codeinterpreter.cpp       src/codeinterpreter.h\n    src/database.cpp              src/database.h\n    src/download.cpp              src/download.h\n    src/embllm.cpp                src/embllm.h\n    src/jinja_helpers.cpp         src/jinja_helpers.h\n    src/jinja_replacements.cpp    src/jinja_replacements.h\n    src/llm.cpp                   src/llm.h\n    src/localdocs.cpp             src/localdocs.h\n    src/localdocsmodel.cpp        src/localdocsmodel.h\n    src/logger.cpp                src/logger.h\n    src/modellist.cpp             src/modellist.h\n    src/mysettings.cpp            src/mysettings.h\n    src/network.cpp               src/network.h\n    src/server.cpp                src/server.h\n    src/tool.cpp                  src/tool.h\n    src/toolcallparser.cpp        src/toolcallparser.h\n    src/toolmodel.cpp             src/toolmodel.h\n    src/xlsxtomd.cpp              src/xlsxtomd.h\n    ${CHAT_EXE_RESOURCES}\n    ${MACOS_SOURCES}\n)\ngpt4all_add_warning_options(chat)\n\nqt_add_qml_module(chat\n    URI gpt4all\n    VERSION 1.0\n    NO_CACHEGEN\n    QML_FILES\n      main.qml\n      qml/AddCollectionView.qml\n      qml/AddModelView.qml\n      qml/AddGPT4AllModelView.qml\n      qml/AddHFModelView.qml\n      qml/AddRemoteModelView.qml\n      
qml/ApplicationSettings.qml\n      qml/ChatDrawer.qml\n      qml/ChatCollapsibleItem.qml\n      qml/ChatItemView.qml\n      qml/ChatMessageButton.qml\n      qml/ChatTextItem.qml\n      qml/ChatView.qml\n      qml/CollectionsDrawer.qml\n      qml/HomeView.qml\n      qml/LocalDocsSettings.qml\n      qml/LocalDocsView.qml\n      qml/ModelSettings.qml\n      qml/ModelsView.qml\n      qml/NetworkDialog.qml\n      qml/NewVersionDialog.qml\n      qml/PopupDialog.qml\n      qml/SettingsView.qml\n      qml/StartupDialog.qml\n      qml/ConfirmationDialog.qml\n      qml/Theme.qml\n      qml/ThumbsDownDialog.qml\n      qml/Toast.qml\n      qml/ToastManager.qml\n      qml/MyBusyIndicator.qml\n      qml/MyButton.qml\n      qml/MyTabButton.qml\n      qml/MyCheckBox.qml\n      qml/MyComboBox.qml\n      qml/MyDialog.qml\n      qml/MyDirectoryField.qml\n      qml/MyFileDialog.qml\n      qml/MyFileIcon.qml\n      qml/MyFolderDialog.qml\n      qml/MyFancyLink.qml\n      qml/MyMenu.qml\n      qml/MyMenuItem.qml\n      qml/MyMiniButton.qml\n      qml/MySettingsButton.qml\n      qml/MySettingsDestructiveButton.qml\n      qml/MySettingsLabel.qml\n      qml/MySettingsStack.qml\n      qml/MySettingsTab.qml\n      qml/MySlug.qml\n      qml/MyTextArea.qml\n      qml/MyTextButton.qml\n      qml/MyTextField.qml\n      qml/MyToolButton.qml\n      qml/MyWelcomeButton.qml\n      qml/RemoteModelCard.qml\n    RESOURCES\n      icons/antenna_1.svg\n      icons/antenna_2.svg\n      icons/antenna_3.svg\n      icons/caret_down.svg\n      icons/caret_right.svg\n      icons/changelog.svg\n      icons/chat.svg\n      icons/check.svg\n      icons/close.svg\n      icons/copy.svg\n      icons/db.svg\n      icons/discord.svg\n      icons/download.svg\n      icons/edit.svg\n      icons/eject.svg\n      icons/email.svg\n      icons/file-doc.svg\n      icons/file-docx.svg\n      icons/file-md.svg\n      icons/file-pdf.svg\n      icons/file-txt.svg\n      icons/file-xls.svg\n      icons/file.svg\n      
icons/github.svg\n      icons/globe.svg\n      icons/gpt4all-32.png\n      icons/gpt4all-48.png\n      icons/gpt4all.svg\n      icons/gpt4all_transparent.svg\n      icons/groq.svg\n      icons/home.svg\n      icons/image.svg\n      icons/info.svg\n      icons/left_panel_closed.svg\n      icons/left_panel_open.svg\n      icons/local-docs.svg\n      icons/models.svg\n      icons/mistral.svg\n      icons/network.svg\n      icons/nomic_logo.svg\n      icons/notes.svg\n      icons/paperclip.svg\n      icons/plus.svg\n      icons/plus_circle.svg\n      icons/openai.svg\n      icons/recycle.svg\n      icons/regenerate.svg\n      icons/search.svg\n      icons/send_message.svg\n      icons/settings.svg\n      icons/stack.svg\n      icons/stop_generating.svg\n      icons/thumbs_down.svg\n      icons/thumbs_up.svg\n      icons/trash.svg\n      icons/twitter.svg\n      icons/up_down.svg\n      icons/webpage.svg\n      icons/you.svg\n)\n\nqt_add_translations(chat\n    TS_FILES\n    ${CMAKE_SOURCE_DIR}/translations/gpt4all_en_US.ts\n    ${CMAKE_SOURCE_DIR}/translations/gpt4all_es_MX.ts\n    ${CMAKE_SOURCE_DIR}/translations/gpt4all_zh_CN.ts\n    ${CMAKE_SOURCE_DIR}/translations/gpt4all_zh_TW.ts\n    ${CMAKE_SOURCE_DIR}/translations/gpt4all_ro_RO.ts\n    ${CMAKE_SOURCE_DIR}/translations/gpt4all_it_IT.ts\n    ${CMAKE_SOURCE_DIR}/translations/gpt4all_pt_BR.ts\n)\n\nset_target_properties(chat PROPERTIES\n    WIN32_EXECUTABLE TRUE\n)\n\nmacro(REPORT_MISSING_SIGNING_CONTEXT)\n    message(FATAL_ERROR [=[\n        Signing requested but no identity configured.\n        Please set the correct env variable or provide the MAC_SIGNING_IDENTITY argument on the command line\n        ]=])\nendmacro()\n\nif (APPLE)\n    set_target_properties(chat PROPERTIES\n        MACOSX_BUNDLE TRUE\n        MACOSX_BUNDLE_GUI_IDENTIFIER gpt4all\n        MACOSX_BUNDLE_BUNDLE_VERSION ${PROJECT_VERSION}\n        MACOSX_BUNDLE_SHORT_VERSION_STRING ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}\n        
OUTPUT_NAME gpt4all\n    )\n    add_dependencies(chat ggml-metal)\nendif()\n\nif (APPLE AND GPT4ALL_SIGN_INSTALL)\n    if (NOT MAC_SIGNING_IDENTITY)\n        if (NOT DEFINED ENV{MAC_SIGNING_CERT_NAME})\n            REPORT_MISSING_SIGNING_CONTEXT()\n        endif()\n        set(MAC_SIGNING_IDENTITY $ENV{MAC_SIGNING_CERT_NAME})\n    endif()\n    if (NOT MAC_SIGNING_TID)\n        if (NOT DEFINED ENV{MAC_NOTARIZATION_TID})\n            REPORT_MISSING_SIGNING_CONTEXT()\n        endif()\n        set(MAC_SIGNING_TID $ENV{MAC_NOTARIZATION_TID})\n    endif()\n\n    # Setup MacOS signing for individual binaries\n    set_target_properties(chat PROPERTIES\n        XCODE_ATTRIBUTE_CODE_SIGN_STYLE \"Manual\"\n        XCODE_ATTRIBUTE_DEVELOPMENT_TEAM ${MAC_SIGNING_TID}\n        XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY ${MAC_SIGNING_IDENTITY}\n        XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED True\n        XCODE_ATTRIBUTE_OTHER_CODE_SIGN_FLAGS \"--timestamp=http://timestamp.apple.com/ts01  --options=runtime,library\"\n    )\nendif()\n\ntarget_compile_definitions(chat\n    PRIVATE $<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:QT_QML_DEBUG>)\n\ntarget_include_directories(chat PRIVATE src)\n\n# usearch uses the identifier 'slots' which conflicts with Qt's 'slots' keyword\ntarget_compile_definitions(chat PRIVATE QT_NO_SIGNALS_SLOTS_KEYWORDS)\n\ntarget_include_directories(chat PRIVATE deps/usearch/include\n                                        deps/usearch/fp16/include)\n\ntarget_link_libraries(chat\n    PRIVATE Qt6::Core Qt6::HttpServer Qt6::Quick Qt6::Sql Qt6::Svg)\nif (GPT4ALL_USING_QTPDF)\n    target_compile_definitions(chat PRIVATE GPT4ALL_USE_QTPDF)\n    target_link_libraries(chat PRIVATE Qt6::Pdf)\nelse()\n    # Link PDFium\n    target_link_libraries(chat PRIVATE pdfium)\nendif()\ntarget_link_libraries(chat\n    PRIVATE llmodel SingleApplication fmt::fmt duckx::duckx QXlsx)\ntarget_include_directories(chat PRIVATE 
${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include)\ntarget_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include/nlohmann)\ntarget_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/minja/include)\n\nif (APPLE)\n    target_link_libraries(chat PRIVATE ${COCOA_LIBRARY})\nendif()\n\n# -- install --\n\nif (APPLE)\n    set(GPT4ALL_LIB_DEST bin/gpt4all.app/Contents/Frameworks)\nelse()\n    set(GPT4ALL_LIB_DEST lib)\nendif()\n\ninstall(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})\n\ninstall(\n    TARGETS llmodel\n    LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN}  # .so/.dylib\n    RUNTIME DESTINATION bin                 COMPONENT ${COMPONENT_NAME_MAIN}  # .dll\n)\n\n# We should probably iterate through the list of the cmake for backend, but these need to be installed\n# to this component's dir for the finicky qt installer to work\nif (LLMODEL_KOMPUTE)\n    set(MODEL_IMPL_TARGETS\n        llamamodel-mainline-kompute\n        llamamodel-mainline-kompute-avxonly\n    )\nelse()\n    set(MODEL_IMPL_TARGETS\n        llamamodel-mainline-cpu\n        llamamodel-mainline-cpu-avxonly\n    )\nendif()\n\nif (APPLE)\n    list(APPEND MODEL_IMPL_TARGETS llamamodel-mainline-metal)\nendif()\n\ninstall(\n    TARGETS ${MODEL_IMPL_TARGETS}\n    LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN}  # .so/.dylib\n    RUNTIME DESTINATION lib                 COMPONENT ${COMPONENT_NAME_MAIN}  # .dll\n)\n\nif(APPLE AND GPT4ALL_SIGN_INSTALL)\n    include(SignMacOSBinaries)\n    install_sign_osx(chat)\n    install_sign_osx(llmodel)\n    foreach(tgt ${MODEL_IMPL_TARGETS})\n        install_sign_osx(${tgt})\n    endforeach()\nendif()\n\nif(WIN32 AND GPT4ALL_SIGN_INSTALL)\n    include(SignWindowsBinaries)\n    sign_target_windows(chat)\n    sign_target_windows(llmodel)\n    foreach(tgt ${MODEL_IMPL_TARGETS})\n        sign_target_windows(${tgt})\n    endforeach()\nendif()\n\nif (LLMODEL_CUDA)\n    
set_property(TARGET llamamodel-mainline-cuda llamamodel-mainline-cuda-avxonly\n                 APPEND PROPERTY INSTALL_RPATH \"$ORIGIN\")\n\n    install(\n        TARGETS llamamodel-mainline-cuda\n                llamamodel-mainline-cuda-avxonly\n        RUNTIME_DEPENDENCY_SET llama-cuda-deps\n        LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}  # .so\n        RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}  # .dll\n    )\n    if (WIN32)\n        install(\n            RUNTIME_DEPENDENCY_SET llama-cuda-deps\n            PRE_EXCLUDE_REGEXES \"^(nvcuda|api-ms-.*)\\\\.dll$\"\n            POST_INCLUDE_REGEXES \"(^|[/\\\\\\\\])(lib)?(cuda|cublas)\" POST_EXCLUDE_REGEXES .\n            DIRECTORIES \"${CUDAToolkit_BIN_DIR}\"\n            DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}\n        )\n    endif()\nendif()\n\nif (NOT GPT4ALL_USING_QTPDF)\n    # Install PDFium\n    if (WIN32)\n        install(FILES ${PDFium_LIBRARY} DESTINATION bin                 COMPONENT ${COMPONENT_NAME_MAIN})  # .dll\n    else()\n        install(FILES ${PDFium_LIBRARY} DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN})  # .so/.dylib\n    endif()\nendif()\n\nif (NOT APPLE)\n    install(FILES \"${LOCAL_EMBEDDING_MODEL_PATH}\"\n            DESTINATION resources\n            COMPONENT ${COMPONENT_NAME_MAIN})\nendif()\n\nif (CMAKE_SYSTEM_NAME MATCHES Linux)\n    find_program(LINUXDEPLOYQT linuxdeployqt HINTS \"$ENV{HOME}/dev/linuxdeployqt/build/tools/linuxdeployqt\" \"$ENV{HOME}/project/linuxdeployqt/bin\")\n    configure_file(\"${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-linux.cmake.in\"\n                   \"${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake\" @ONLY)\n    set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake)\nelseif (CMAKE_SYSTEM_NAME MATCHES Windows)\n    find_program(WINDEPLOYQT windeployqt)\n    configure_file(\"${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-windows.cmake.in\"\n                   
\"${CMAKE_BINARY_DIR}/cmake/deploy-qt-windows.cmake\" @ONLY)\n    set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-windows.cmake)\nelseif (CMAKE_SYSTEM_NAME MATCHES Darwin)\n    find_program(MACDEPLOYQT macdeployqt)\n    configure_file(\"${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-mac.cmake.in\"\n                   \"${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake\" @ONLY)\n    set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake)\nendif()\n\ninclude(InstallRequiredSystemLibraries)\ninclude(CPack)\ninclude(CPackIFW)\nif(GPT4ALL_OFFLINE_INSTALLER)\n  cpack_add_component(${COMPONENT_NAME_MAIN})\nelse()\n  cpack_add_component(${COMPONENT_NAME_MAIN} DOWNLOADED)\nendif()\ncpack_ifw_configure_component(${COMPONENT_NAME_MAIN} ESSENTIAL FORCED_INSTALLATION)\ncpack_ifw_configure_component(${COMPONENT_NAME_MAIN} VERSION ${APP_VERSION})\ncpack_ifw_configure_component(${COMPONENT_NAME_MAIN} LICENSES \"MIT LICENSE\" ${CPACK_RESOURCE_FILE_LICENSE})\ncpack_ifw_configure_component(${COMPONENT_NAME_MAIN} SCRIPT \"${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_gpt4all_component.qs\")\ncpack_ifw_configure_component(${COMPONENT_NAME_MAIN} REPLACES \"gpt4all-chat\") #Was used in very earliest prototypes\n\nif (APPLE AND GPT4ALL_SIGN_INSTALL)\n    if (GPT4ALL_OFFLINE_INSTALLER)\n        cpack_add_component(maintenancetool HIDDEN)\n    else()\n        cpack_add_component(maintenancetool HIDDEN DOWNLOADED)\n    endif()\n    cpack_ifw_configure_component(maintenancetool ESSENTIAL FORCED_INSTALLATION)\n    cpack_ifw_configure_component(maintenancetool VERSION ${APP_VERSION})\n    cpack_ifw_configure_component(maintenancetool SCRIPT \"${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_maintenancetool_component.qs\")\nendif()\n\nif (GPT4ALL_LOCALHOST)\n    cpack_ifw_add_repository(\"GPT4AllRepository\" URL \"http://localhost/repository\")\nelseif (GPT4ALL_OFFLINE_INSTALLER)\n    add_compile_definitions(GPT4ALL_OFFLINE_INSTALLER)\nelse()\n    if (CMAKE_SYSTEM_NAME 
MATCHES Linux)\n        cpack_ifw_add_repository(\"GPT4AllRepository\" URL \"https://gpt4all.io/installer_repos/linux/repository\")\n    elseif (CMAKE_SYSTEM_NAME MATCHES Windows)\n        # To sign the target on windows have to create a batch script add use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target\n        if (CMAKE_SYSTEM_PROCESSOR MATCHES \"^(x86_64|AMD64|amd64)$\")\n            cpack_ifw_add_repository(\"GPT4AllRepository\" URL \"https://gpt4all.io/installer_repos/windows/repository\")\n        elseif (CMAKE_SYSTEM_PROCESSOR MATCHES \"^(aarch64|AARCH64|arm64|ARM64)$\")\n            cpack_ifw_add_repository(\"GPT4AllRepository\" URL \"https://gpt4all.io/installer_repos/windows_arm/repository\")\n        endif()\n    elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)\n        cpack_ifw_add_repository(\"GPT4AllRepository\" URL \"https://gpt4all.io/installer_repos/mac/repository\")\n    endif()\nendif()\n"
  },
  {
    "path": "gpt4all-chat/LICENSE",
    "content": "Copyright 2023-2024 Nomic, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nADDENDUM:\n\nAny LLM models that are loaded and used by the application are not themselves\nsubject to this license if indeed they are even copyrightable. The terms of\nthis license apply only to the application software and its accompanying\ndocumentation and do not extend to any LLM models, whether created by the\nauthor of the application or obtained from third-party sources.\n"
  },
  {
    "path": "gpt4all-chat/cmake/Modules/SignMacOSBinaries.cmake",
    "content": "function(install_sign_osx tgt)\n    install(CODE \"execute_process(COMMAND codesign --options runtime --timestamp -s \\\"${MAC_SIGNING_IDENTITY}\\\" $<TARGET_FILE:${tgt}>)\")\nendfunction()"
  },
  {
    "path": "gpt4all-chat/cmake/Modules/SignWindowsBinaries.cmake",
    "content": "function(sign_target_windows tgt)\n    if(WIN32 AND GPT4ALL_SIGN_INSTALL)\n        add_custom_command(TARGET ${tgt}\n            POST_BUILD\n            COMMAND AzureSignTool.exe sign\n                -du \"https://www.nomic.ai/gpt4all\"\n                -kvu https://gpt4all.vault.azure.net\n                -kvi \"$Env{AZSignGUID}\"\n                -kvs \"$Env{AZSignPWD}\"\n                -kvc \"$Env{AZSignCertName}\"\n                -kvt \"$Env{AZSignTID}\"\n                -tr http://timestamp.digicert.com\n                -v\n                $<TARGET_FILE:${tgt}>\n        )\n    endif()\nendfunction()\n"
  },
  {
    "path": "gpt4all-chat/cmake/cpack-steal-config.cmake.in",
    "content": "set(OUTPUT_DIR \"@CMAKE_BINARY_DIR@\")\nfile(COPY ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/config DESTINATION ${OUTPUT_DIR}/cpack-config)\n"
  },
  {
    "path": "gpt4all-chat/cmake/cpack_config.cmake",
    "content": "set(COMPONENT_NAME_MAIN \"gpt4all\")\n\nset(CPACK_GENERATOR \"IFW\")\nset(CPACK_VERBATIM_VARIABLES YES)\nset(CPACK_IFW_VERBOSE ON)\n\nif (CMAKE_SYSTEM_NAME MATCHES Linux)\n    set(CPACK_IFW_ROOT \"~/Qt/Tools/QtInstallerFramework/4.6\")\n    set(CPACK_PACKAGE_FILE_NAME \"${COMPONENT_NAME_MAIN}-installer-linux\")\n    set(CPACK_IFW_TARGET_DIRECTORY \"@HomeDir@/${COMPONENT_NAME_MAIN}\")\nelseif (CMAKE_SYSTEM_NAME MATCHES Windows)\n    set(CPACK_IFW_ROOT \"C:/Qt/Tools/QtInstallerFramework/4.6\")\n    set(CPACK_IFW_PACKAGE_ICON \"${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.ico\")\n    if (CMAKE_SYSTEM_PROCESSOR MATCHES \"^(x86_64|AMD64|amd64)$\")\n        set(CPACK_PACKAGE_FILE_NAME \"${COMPONENT_NAME_MAIN}-installer-win64\")\n    elseif (CMAKE_SYSTEM_PROCESSOR MATCHES \"^(aarch64|AARCH64|arm64|ARM64)$\")\n        set(CPACK_PACKAGE_FILE_NAME \"${COMPONENT_NAME_MAIN}-installer-win64-arm\")\n    else()\n        message(FATAL_ERROR \"Unrecognized processor: ${CMAKE_SYSTEM_PROCESSOR}\")\n    endif()\n    set(CPACK_IFW_TARGET_DIRECTORY \"@HomeDir@\\\\${COMPONENT_NAME_MAIN}\")\nelseif (CMAKE_SYSTEM_NAME MATCHES Darwin)\n    set(CPACK_IFW_ROOT \"~/Qt/Tools/QtInstallerFramework/4.6\")\n    set(CPACK_IFW_PACKAGE_ICON \"${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns\")\n    set(CPACK_PACKAGE_FILE_NAME \"${COMPONENT_NAME_MAIN}-installer-darwin\")\n    set(CPACK_IFW_TARGET_DIRECTORY \"@ApplicationsDir@/${COMPONENT_NAME_MAIN}\")\nendif()\n\nset(CPACK_COMPONENTS_ALL ${COMPONENT_NAME_MAIN})  # exclude development components\nif (APPLE AND GPT4ALL_SIGN_INSTALL)\n    list(APPEND CPACK_COMPONENTS_ALL maintenancetool)\nendif()\nset(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})\nset(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})\nset(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})\nset(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})\nset(CPACK_PACKAGE_HOMEPAGE_URL \"https://www.nomic.ai/gpt4all\")\nset(CPACK_PACKAGE_ICON 
\"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png\")\nset(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_SOURCE_DIR}/LICENSE)\nset(CPACK_PACKAGE_EXECUTABLES \"GPT4All\")\nset(CPACK_CREATE_DESKTOP_LINKS \"GPT4All\")\nset(CPACK_IFW_PACKAGE_NAME \"GPT4All\")\nset(CPACK_IFW_PACKAGE_TITLE \"GPT4All Installer\")\nset(CPACK_IFW_PACKAGE_PUBLISHER \"Nomic, Inc.\")\nset(CPACK_IFW_PRODUCT_URL \"https://www.nomic.ai/gpt4all\")\nset(CPACK_IFW_PACKAGE_WIZARD_STYLE \"Aero\")\nset(CPACK_IFW_PACKAGE_LOGO \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png\")\nset(CPACK_IFW_PACKAGE_WINDOW_ICON \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png\")\nset(CPACK_IFW_PACKAGE_WIZARD_SHOW_PAGE_LIST OFF)\nset(CPACK_IFW_PACKAGE_CONTROL_SCRIPT \"${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_control.qs\")\n"
  },
  {
    "path": "gpt4all-chat/cmake/deploy-qt-linux.cmake.in",
    "content": "set(LINUXDEPLOYQT \"@LINUXDEPLOYQT@\")\nset(COMPONENT_NAME_MAIN \"@COMPONENT_NAME_MAIN@\")\nset(CMAKE_CURRENT_SOURCE_DIR \"@CMAKE_CURRENT_SOURCE_DIR@\")\nset(DATA_DIR ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)\nset(BIN_DIR ${DATA_DIR}/bin)\nset(Qt6_ROOT_DIR \"@Qt6_ROOT_DIR@\")\nset(ENV{LD_LIBRARY_PATH} \"${BIN_DIR}:${Qt6_ROOT_DIR}/../lib/\")\nexecute_process(COMMAND ${LINUXDEPLOYQT} ${BIN_DIR}/chat -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -bundle-non-qt-libs -qmake=${Qt6_ROOT_DIR}/bin/qmake -verbose=2 -exclude-libs=libcuda.so.1)\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png\"\n     DESTINATION ${DATA_DIR})\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png\"\n     DESTINATION ${DATA_DIR})\n"
  },
  {
    "path": "gpt4all-chat/cmake/deploy-qt-mac.cmake.in",
    "content": "set(MACDEPLOYQT \"@MACDEPLOYQT@\")\nset(COMPONENT_NAME_MAIN \"@COMPONENT_NAME_MAIN@\")\nset(CMAKE_CURRENT_SOURCE_DIR \"@CMAKE_CURRENT_SOURCE_DIR@\")\nset(GPT4ALL_SIGN_INSTALL \"@GPT4ALL_SIGN_INSTALL@\")\nset(GPT4ALL_SIGNING_ID \"@MAC_SIGNING_IDENTITY@\")\nset(CPACK_CONFIG_DIR \"@CMAKE_BINARY_DIR@\")\nif (GPT4ALL_SIGN_INSTALL)\n    set(MAC_NOTARIZE -sign-for-notarization=${GPT4ALL_SIGNING_ID})\nendif()\nexecute_process(COMMAND ${MACDEPLOYQT} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -verbose=2 ${MAC_NOTARIZE})\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png\"\n     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png\"\n     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns\"\n     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)\n\nif (GPT4ALL_SIGN_INSTALL)\n    # Create signed MaintenanceTool\n    set(MT_DATA_DIR ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/maintenancetool/data)\n    file(MAKE_DIRECTORY ${MT_DATA_DIR})\n    execute_process(\n        COMMAND binarycreator --config ${CPACK_CONFIG_DIR}/cpack-config/config/config.xml --create-maintenancetool --sign ${GPT4ALL_SIGNING_ID}\n        WORKING_DIRECTORY ${MT_DATA_DIR}\n    )\nendif()\n"
  },
  {
    "path": "gpt4all-chat/cmake/deploy-qt-windows.cmake.in",
    "content": "set(WINDEPLOYQT \"@WINDEPLOYQT@\")\nset(COMPONENT_NAME_MAIN \"@COMPONENT_NAME_MAIN@\")\nset(CMAKE_CURRENT_SOURCE_DIR \"@CMAKE_CURRENT_SOURCE_DIR@\")\nexecute_process(COMMAND ${WINDEPLOYQT} --qmldir ${CMAKE_CURRENT_SOURCE_DIR} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin)\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png\"\n     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png\"\n     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)\nfile(COPY \"${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.ico\"\n     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)\n"
  },
  {
    "path": "gpt4all-chat/cmake/download_model.cmake",
    "content": "if(NOT DEFINED URL OR NOT DEFINED OUTPUT_PATH OR NOT DEFINED EXPECTED_MD5)\n    message(FATAL_ERROR \"Usage: cmake -DURL=<url> -DOUTPUT_PATH=<path> -DEXPECTED_MD5=<md5> -P download_model.cmake\")\nendif()\n\nmessage(STATUS \"Downloading model from ${URL} to ${OUTPUT_PATH} ...\")\n\nfile(DOWNLOAD \"${URL}\" \"${OUTPUT_PATH}\" EXPECTED_MD5 \"${EXPECTED_MD5}\" STATUS status)\n\nlist(GET status 0 status_code)\nif(NOT status_code EQUAL 0)\n    message(FATAL_ERROR \"Failed to download model: ${status}\")\nendif()\n"
  },
  {
    "path": "gpt4all-chat/cmake/installer_control.qs",
    "content": "var finishedText = null;\n\nfunction cancelInstaller(message) {\n    installer.setDefaultPageVisible(QInstaller.Introduction,         false);\n    installer.setDefaultPageVisible(QInstaller.TargetDirectory,      false);\n    installer.setDefaultPageVisible(QInstaller.ComponentSelection,   false);\n    installer.setDefaultPageVisible(QInstaller.ReadyForInstallation, false);\n    installer.setDefaultPageVisible(QInstaller.StartMenuSelection,   false);\n    installer.setDefaultPageVisible(QInstaller.PerformInstallation,  false);\n    installer.setDefaultPageVisible(QInstaller.LicenseCheck,         false);\n    finishedText = message;\n    installer.setCanceled();\n}\n\nfunction vercmp(a, b) {\n    return a.localeCompare(b, undefined, { numeric: true, sensitivity: \"base\" });\n}\n\nfunction Controller() {\n}\n\nController.prototype.TargetDirectoryPageCallback = function() {\n    var failedReq = null;\n    if (systemInfo.productType === \"ubuntu\" && vercmp(systemInfo.productVersion, \"22.04\") < 0) {\n        failedReq = \"Ubuntu 22.04 LTS\";\n    } else if (systemInfo.productType === \"macos\" && vercmp(systemInfo.productVersion, \"12.6\") < 0) {\n        failedReq = \"macOS Monterey 12.6\";\n    }\n\n    if (failedReq !== null) {\n        cancelInstaller(\n            \"Installation cannot continue because GPT4All does not support your operating system: \" +\n            `${systemInfo.prettyProductName}<br/><br/>` +\n            `GPT4All requires ${failedReq} or newer.`\n        );\n    }\n}\n\nController.prototype.FinishedPageCallback = function() {\n    const widget = gui.currentPageWidget();\n    if (widget != null && finishedText != null) {\n        widget.MessageLabel.setText(finishedText);\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/cmake/installer_gpt4all_component.qs",
    "content": "function Component() {\n}\n\nvar targetDirectory;\nComponent.prototype.beginInstallation = function() {\n    targetDirectory = installer.value(\"TargetDir\");\n};\n\nComponent.prototype.createOperations = function() {\n    try {\n        // call the base create operations function\n        component.createOperations();\n        if (systemInfo.productType === \"windows\") {\n            try {\n                var userProfile = installer.environmentVariable(\"USERPROFILE\");\n                installer.setValue(\"UserProfile\", userProfile);\n                component.addOperation(\"CreateShortcut\",\n                    targetDirectory + \"/bin/chat.exe\",\n                    \"@UserProfile@/Desktop/GPT4All.lnk\",\n                    \"workingDirectory=\" + targetDirectory + \"/bin\",\n                    \"iconPath=\" + targetDirectory + \"/gpt4all.ico\",\n                    \"iconId=0\", \"description=Open GPT4All\");\n            } catch (e) {\n                print(\"ERROR: creating desktop shortcut\" + e);\n            }\n            component.addOperation(\"CreateShortcut\",\n                targetDirectory + \"/bin/chat.exe\",\n                \"@StartMenuDir@/GPT4All.lnk\",\n                \"workingDirectory=\" + targetDirectory + \"/bin\",\n                \"iconPath=\" + targetDirectory + \"/gpt4all.ico\",\n                \"iconId=0\", \"description=Open GPT4All\");\n        } else if (systemInfo.productType === \"macos\") {\n            var gpt4allAppPath = targetDirectory + \"/bin/gpt4all.app\";\n            var symlinkPath = targetDirectory + \"/../GPT4All.app\";\n            // Remove the symlink if it already exists\n            component.addOperation(\"Execute\", \"rm\", \"-f\", symlinkPath);\n            // Create the symlink\n            component.addOperation(\"Execute\", \"ln\", \"-s\", gpt4allAppPath, symlinkPath);\n        } else { // linux\n            var homeDir = installer.environmentVariable(\"HOME\");\n            if 
(!installer.fileExists(homeDir + \"/Desktop/GPT4All.desktop\")) {\n                component.addOperation(\"CreateDesktopEntry\",\n                    homeDir + \"/Desktop/GPT4All.desktop\",\n                    \"Type=Application\\nTerminal=false\\nExec=\\\"\" + targetDirectory +\n                    \"/bin/chat\\\"\\nName=GPT4All\\nIcon=\" + targetDirectory +\n                    \"/gpt4all-48.png\\nName[en_US]=GPT4All\");\n            }\n        }\n    } catch (e) {\n        print(\"ERROR: running post installscript.qs\" + e);\n    }\n}\n\nComponent.prototype.createOperationsForArchive = function(archive)\n{\n    component.createOperationsForArchive(archive);\n\n    if (systemInfo.productType === \"macos\") {\n        var uninstallTargetDirectory = installer.value(\"TargetDir\");\n        var symlinkPath = uninstallTargetDirectory + \"/../GPT4All.app\";\n\n        // Remove the symlink during uninstallation\n        if (installer.isUninstaller()) {\n            component.addOperation(\"Execute\", \"rm\", \"-f\", symlinkPath, \"UNDOEXECUTE\");\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/cmake/installer_maintenancetool_component.qs",
    "content": "function Component()\n{\n    component.ifwVersion = installer.value(\"FrameworkVersion\");\n    installer.installationStarted.connect(this, Component.prototype.onInstallationStarted);\n}\n\nComponent.prototype.onInstallationStarted = function()\n{\n    if (component.updateRequested() || component.installationRequested()) {\n        if (installer.value(\"os\") == \"win\") {\n            component.installerbaseBinaryPath = \"@TargetDir@/installerbase.exe\";\n        } else if (installer.value(\"os\") == \"x11\") {\n            component.installerbaseBinaryPath = \"@TargetDir@/installerbase\";\n        } else if (installer.value(\"os\") == \"mac\") {\n            component.installerbaseBinaryPath = \"@TargetDir@/MaintenanceTool.app\";\n        }\n        installer.setInstallerBaseBinary(component.installerbaseBinaryPath);\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/cmake/sign_dmg.py",
    "content": "#!/usr/bin/env python3\nimport os\nimport subprocess\nimport tempfile\nimport shutil\nimport click\nimport re\nfrom typing import Optional\n\n# Requires click\n# pip install click\n\n# Example usage\n# python sign_dmg.py --input-dmg /path/to/your/input.dmg --output-dmg /path/to/your/output.dmg --signing-identity \"Developer ID Application: YOUR_NAME (TEAM_ID)\"\n\n# NOTE: This script assumes that you have the necessary Developer ID Application certificate in your\n# Keychain Access and that the codesign and hdiutil command-line tools are available on your system.\n\n@click.command()\n@click.option('--input-dmg', required=True, help='Path to the input DMG file.')\n@click.option('--output-dmg', required=True, help='Path to the output signed DMG file.')\n@click.option('--sha1-hash', help='SHA-1 hash of the Developer ID Application certificate')\n@click.option('--signing-identity', default=None, help='Common name of the Developer ID Application certificate')\n@click.option('--verify', is_flag=True, show_default=True, required=False, default=False, help='Perform verification of signed app bundle' )\ndef sign_dmg(input_dmg: str, output_dmg: str, signing_identity: Optional[str] = None, sha1_hash: Optional[str] = None, verify: Optional[bool] = False) -> None:\n    if not signing_identity and not sha1_hash:\n        print(\"Error: Either --signing-identity or --sha1-hash must be provided.\")\n        exit(1)\n\n    # Mount the input DMG\n    mount_point = tempfile.mkdtemp()\n    subprocess.run(['hdiutil', 'attach', input_dmg, '-mountpoint', mount_point])\n\n    # Copy the contents of the DMG to a temporary folder\n    temp_dir = tempfile.mkdtemp()\n    shutil.copytree(mount_point, os.path.join(temp_dir, 'contents'))\n    subprocess.run(['hdiutil', 'detach', mount_point])\n\n    # Find the .app bundle in the temporary folder\n    app_bundle = None\n    for item in os.listdir(os.path.join(temp_dir, 'contents')):\n        if item.endswith('.app'):\n            
app_bundle = os.path.join(temp_dir, 'contents', item)\n            break\n\n    if not app_bundle:\n        print('No .app bundle found in the DMG.')\n        exit(1)\n\n    # Sign the .app bundle\n    try:\n        subprocess.run([\n            'codesign',\n            '--deep',\n            '--force',\n            '--verbose',\n            '--options', 'runtime',\n            '--timestamp',\n            '--sign', sha1_hash or signing_identity,\n            app_bundle\n        ], check=True)\n    except subprocess.CalledProcessError as e:\n        print(f\"Error during codesign: {e}\")\n        # Clean up temporary directories\n        shutil.rmtree(temp_dir)\n        shutil.rmtree(mount_point)\n        exit(1)\n\n    # Validate signature and entitlements of signed app bundle\n    if verify:\n        try:\n            code_ver_proc = subprocess.run([\n                'codesign',\n                '--deep',\n                '--verify',\n                '--verbose=2',\n                '--strict',\n                app_bundle\n            ], check=True, capture_output=True)\n            if not re.search(fr\"{app_bundle}: valid\", code_ver_proc.stdout.decode()):\n                raise RuntimeError(f\"codesign validation failed: {code_ver_proc.stdout.decode()}\")\n        except subprocess.CalledProcessError as e:\n            print(f\"Error during codesign validation: {e}\")\n            # Clean up temporary directories\n            shutil.rmtree(temp_dir)\n            shutil.rmtree(mount_point)\n            exit(1)\n        try:\n            spctl_proc = subprocess.run([\n                'spctl',\n                '-a',\n                '-t',\n                'exec',\n                '-vv',\n                app_bundle\n            ], check=True, capture_output=True)\n            if not re.search(fr\"{app_bundle}: accepted\", spctl_proc.stdout.decode()):\n                raise RuntimeError(f\"spctl validation failed: {spctl_proc.stdout.decode()}\")\n        except 
subprocess.CalledProcessError as e:\n            print(f\"Error during spctl validation: {e}\")\n            # Clean up temporary directories\n            shutil.rmtree(temp_dir)\n            shutil.rmtree(mount_point)\n            exit(1)\n\n    # Create a new DMG containing the signed .app bundle\n    subprocess.run([\n        'hdiutil', 'create',\n        '-volname', os.path.splitext(os.path.basename(input_dmg))[0],\n        '-srcfolder', os.path.join(temp_dir, 'contents'),\n        '-ov',\n        '-format', 'UDZO',\n        output_dmg\n    ])\n\n    # Clean up temporary directories\n    shutil.rmtree(temp_dir)\n    shutil.rmtree(mount_point)\n\nif __name__ == '__main__':\n    sign_dmg()\n"
  },
  {
    "path": "gpt4all-chat/contributing_translations.md",
    "content": "# Contributing Foreign Language Translations of GPT4All\n\n## Overview\n\nTo contribute foreign language translations to the GPT4All project will require\ninstallation of a graphical tool called Qt Linguist. This tool can be obtained by\ninstalling a subset of Qt. You'll also need to clone this github repository locally\non your filesystem.\n\nOnce this tool is installed you'll be able to use it to load specific translation\nfiles found in the gpt4all github repository and add your foreign language translations.\nOnce you've done this you can contribute back those translations by opening a pull\nrequest on Github or by sharing it with one of the administrators on GPT4All [discord.](https://discord.gg/4M2QFmTt2k)\n\n\n## Download Qt Linguist\n\n- Go to https://login.qt.io/register to create a free Qt account.\n- Download the Qt Online Installer for your OS from here: https://www.qt.io/download-qt-installer-oss\n- Sign into the installer.\n- Agree to the terms of the (L)GPL 3 license.\n- Select whether you would like to send anonymous usage statistics to Qt.\n- On the Installation Folder page, leave the default installation path, and select \"Custom Installation\".\n\n![image](https://github.com/nomic-ai/gpt4all/assets/10168/85234549-1ea7-43c9-87d1-1e4f0fb93d82)\n\n- Under \"Qt\", select the latest Qt 6.x release as well as Developer and Designer Tools\n- NOTE: This will install much more than the Qt Linguist tool and you can deselect portions, but to be\n  safe I've included the easiest steps that will also enable you to build GPT4All from source if you wish.\n\n## Open Qt Linguist\n\nAfter installation you should be able to find the Qt Linguist application in the following locations:\n\n- Windows `C:\\Qt\\6.7.2\\msvc2019_64\\bin\\linguist.exe`\n- macOS `/Users/username/Qt/6.7.2/macos/bin/Linguist.app`\n- Linux `/home/username/Qt/6.7.2/gcc_64/bin/linguist`\n\n![Peek 2024-07-11 
10-26](https://github.com/nomic-ai/gpt4all/assets/10168/957de16f-4e23-4d90-9d20-9089d2028aa8)\n\n## After you've opened Qt Linguist\n\n- Navigate to the translation file you're interested in contributing to. This file will be located\n  in the gpt4all `translations` directory found on your local filesystem after you've cloned the\n  gpt4all github repository. It is this folder [gpt4all/gpt4all-chat/translations](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-chat/translations)\n  located on your local filesystem after cloning the repository.\n- If the file does not exist yet for the language you are interested in, then just copy the english one\n  to a new file with appropriate name and edit that.\n\n## How to see your translations in the app as you develop them\n![Peek 2024-07-12 14-22](https://github.com/user-attachments/assets/6ff00338-5b49-4f97-a0d4-de96f3991469)\n- In the same folder where your models are stored you can add translation files (.ts) and compile them\n  using the command `/path/to/Qt/6.7.2/gcc_64/bin/lrelease gpt4all_{lang}.ts`\n- This should produce a file named `gpt4all_{lang}.qm` in the same folder. Restart GPT4All and you should\n  now be able to see your language in the settings combobox.\n\n  \n## Information on how to use Qt Linguist\n\n- [Manual for translators](https://doc.qt.io/qt-6/linguist-translators.html)\n- [Video explaining how translators use Qt Linguist](https://youtu.be/xNIz78IPBu0?t=351)\n\n## Once you've edited the translations save the file\n- Open a [pull request](https://github.com/nomic-ai/gpt4all/pulls) for your changes.\n- Alternatively, you may share your translation file with one of the administrators on GPT4All [discord.](https://discord.gg/4M2QFmTt2k)\n\n\n# Thank you!\n"
  },
  {
    "path": "gpt4all-chat/deps/CMakeLists.txt",
    "content": "include(FetchContent)\n\n\nset(BUILD_SHARED_LIBS OFF)\n\nset(FMT_INSTALL OFF)\nadd_subdirectory(fmt)\n\nset(QAPPLICATION_CLASS QApplication)\nadd_subdirectory(SingleApplication)\n\nset(DUCKX_INSTALL OFF)\nadd_subdirectory(DuckX)\n\nset(QT_VERSION_MAJOR 6)\nadd_subdirectory(QXlsx/QXlsx)\n\nif (NOT GPT4ALL_USING_QTPDF)\n    # If we do not use QtPDF, we need to get PDFium.\n    set(GPT4ALL_PDFIUM_TAG \"chromium/6996\")\n    if (CMAKE_SYSTEM_NAME MATCHES Linux)\n        FetchContent_Declare(\n            pdfium\n            URL \"https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-linux-x64.tgz\"\n            URL_HASH \"SHA256=68b381b87efed539f2e33ae1e280304c9a42643a878cc296c1d66a93b0cb4335\"\n        )\n    elseif (CMAKE_SYSTEM_NAME MATCHES Windows)\n        if (CMAKE_SYSTEM_PROCESSOR MATCHES \"^(x86_64|AMD64|amd64)$\")\n            FetchContent_Declare(\n                pdfium\n                URL \"https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-x64.tgz\"\n                URL_HASH \"SHA256=83e714c302ceacccf403826d5cb57ea39b77f393d83b8d5781283012774a9378\"\n            )\n        elseif (CMAKE_SYSTEM_PROCESSOR MATCHES \"^(aarch64|AARCH64|arm64|ARM64)$\")\n            FetchContent_Declare(\n                pdfium\n                URL \"https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-arm64.tgz\"\n                URL_HASH \"SHA256=78e77e871453a4915cbf66fb381b951c9932f88a747c6b2b33c9f27ec2371445\"\n            )\n        endif()\n    elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)\n        FetchContent_Declare(\n            pdfium\n            URL \"https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-mac-univ.tgz\"\n            URL_HASH \"SHA256=e7577f3242ff9c1df50025f9615673a43601a201bc51ee4792975f98920793a2\"\n        )\n    endif()\n\n    FetchContent_MakeAvailable(pdfium)\n    
find_package(PDFium REQUIRED PATHS \"${pdfium_SOURCE_DIR}\" NO_DEFAULT_PATH)\nendif()\n"
  },
  {
    "path": "gpt4all-chat/dev-requirements.txt",
    "content": "-r test-requirements.txt\n\n# dev tools\nflake8~=7.1\nmypy~=1.12\npytype>=2024.10.11\nwemake-python-styleguide~=0.19.2\n\n# type stubs and other optional modules\ntypes-requests~=2.32\nurllib3[socks]\n"
  },
  {
    "path": "gpt4all-chat/flatpak-manifest/io.gpt4all.gpt4all.appdata.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<component type=\"desktop\">\n    <id>io.gpt4all.gpt4all</id>\n    <metadata_license>CC0-1.0</metadata_license>\n    <project_license>MIT</project_license>\n    <name>GPT4ALL</name>\n    <summary>Open-source assistant</summary>\n    <developer_name>Nomic-ai</developer_name>\n    <description>\n        <p>Cross platform Qt based GUI for GPT4All</p>\n        <ul>\n            <li>Fast CPU and GPU based inference using ggml for open source LLM's</li>\n            <li>The UI is made to look and feel like you've come to expect from a chatty gpt</li>\n            <li>Check for updates so you can always stay fresh with latest models</li>\n            <li>Easy to install with precompiled binaries available for all three major desktop platforms</li>\n            <li>Multi-model - Ability to load more than one model and switch between them</li>\n            <li>Supports llama.cpp style models</li>\n            <li>Model downloader in GUI featuring many popular open source models</li>\n            <li>Settings dialog to change temp, top_p, top_k, threads, etc</li>\n            <li>Copy your conversation to clipboard</li>\n        </ul>\n    </description>\n    <screenshots>\n        <screenshot type=\"default\">\n            <caption>Main Window</caption>\n            <image>https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/flatpak-manifest/screenshots/welcome.png</image>\n        </screenshot>\n        <screenshot>\n            <image>https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/flatpak-manifest/screenshots/chat.png</image>\n        </screenshot>\n        <screenshot>\n            <image>https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/flatpak-manifest/screenshots/model.png</image>\n        </screenshot>\n    </screenshots>\n    <url type=\"homepage\">https://www.nomic.ai/gpt4all</url>\n    <url 
type=\"bugtracker\">https://github.com/nomic-ai/gpt4all/issues</url>\n    <url type=\"vcs-browser\">https://github.com/nomic-ai/gpt4all</url>\n    <releases>\n      <release version=\"3.1.0\" date=\"2024-07-24\"/>\n      <release version=\"3.0.0\" date=\"2024-07-02\"/>\n      <release version=\"2.7.5\" date=\"2024-05-03\"/>\n    </releases>\n    <launchable type=\"desktop-id\">io.gpt4all.gpt4all.desktop</launchable>\n    <content_rating type=\"oars-1.1\">\n        <content_attribute id=\"language-profanity\">mild</content_attribute>\n        <content_attribute id=\"language-humor\">moderate</content_attribute>\n        <content_attribute id=\"language-discrimination\">mild</content_attribute>\n    </content_rating>\n</component>\n"
  },
  {
    "path": "gpt4all-chat/flatpak-manifest/io.gpt4all.gpt4all.desktop",
    "content": "[Desktop Entry]\nName=GPT4ALL\nGenericName=Open-source assistant-style large language models that run locally on your CPU\nComment=Run any GPT4All model natively on your home desktop with the auto-updating desktop chat client. See GPT4All Website for a full list of open-source models you can run with this powerful desktop application.\nExec=chat\nIcon=io.gpt4all.gpt4all\nType=Application\nCategories=Utility;Office;\nKeywords=GPT;Chat;AI;\n"
  },
  {
    "path": "gpt4all-chat/main.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport Qt5Compat.GraphicalEffects\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport localdocs\nimport mysettings\nimport Qt.labs.platform\n\nWindow {\n    id: window\n    width: 1440\n    height: 810\n    minimumWidth: 658 + 470 * theme.fontScale\n    minimumHeight: 384 + 160 * theme.fontScale\n    visible: true\n    title: qsTr(\"GPT4All v%1\").arg(Qt.application.version)\n\n    SystemTrayIcon {\n        id: systemTrayIcon\n        property bool shouldClose: false\n        visible: MySettings.systemTray && !shouldClose\n        icon.source: \"qrc:/gpt4all/icons/gpt4all.svg\"\n\n        function restore() {\n            LLM.showDockIcon();\n            window.show();\n            window.raise();\n            window.requestActivate();\n        }\n        onActivated: function(reason) {\n            if (reason === SystemTrayIcon.Context && Qt.platform.os !== \"osx\")\n                menu.open();\n            else if (reason === SystemTrayIcon.Trigger)\n                restore();\n        }\n\n        menu: Menu {\n            MenuItem {\n                text: qsTr(\"Restore\")\n                onTriggered: systemTrayIcon.restore()\n            }\n            MenuItem {\n                text: qsTr(\"Quit\")\n                onTriggered: {\n                    systemTrayIcon.restore();\n                    systemTrayIcon.shouldClose = true;\n                    window.shouldClose = true;\n                    savingPopup.open();\n                    ChatListModel.saveChatsForQuit();\n                }\n            }\n        }\n    }\n\n    Settings {\n        property alias x: window.x\n        property alias y: window.y\n        property alias width: window.width\n        property alias height: window.height\n    }\n\n    Theme {\n        id: theme\n    }\n\n    Item {\n        
Accessible.role: Accessible.Window\n        Accessible.name: title\n    }\n\n    // Startup code\n    Component.onCompleted: {\n        startupDialogs();\n    }\n\n    Component.onDestruction: {\n        Network.trackEvent(\"session_end\")\n    }\n\n    Connections {\n        target: firstStartDialog\n        function onClosed() {\n            startupDialogs();\n        }\n    }\n\n    Connections {\n        target: Download\n        function onHasNewerReleaseChanged() {\n            startupDialogs();\n        }\n    }\n\n    property bool hasCheckedFirstStart: false\n    property bool hasShownSettingsAccess: false\n    property var currentChat: ChatListModel.currentChat\n\n    function startupDialogs() {\n        if (!LLM.compatHardware()) {\n            Network.trackEvent(\"noncompat_hardware\")\n            errorCompatHardware.open();\n            return;\n        }\n\n        // check if we have access to settings and if not show an error\n        if (!hasShownSettingsAccess && !LLM.hasSettingsAccess()) {\n            errorSettingsAccess.open();\n            hasShownSettingsAccess = true;\n            return;\n        }\n\n        // check for first time start of this version\n        if (!hasCheckedFirstStart) {\n            if (Download.isFirstStart(/*writeVersion*/ true)) {\n                firstStartDialog.open();\n                return;\n            }\n\n            // send startup or opt-out now that the user has made their choice\n            Network.sendStartup()\n            // start localdocs\n            LocalDocs.requestStart()\n\n            hasCheckedFirstStart = true\n        }\n\n        // check for new version\n        if (Download.hasNewerRelease && !firstStartDialog.opened) {\n            newVersionDialog.open();\n            return;\n        }\n    }\n\n    PopupDialog {\n        id: errorCompatHardware\n        anchors.centerIn: parent\n        shouldTimeOut: false\n        shouldShowBusy: false\n        closePolicy: Popup.NoAutoClose\n   
     modal: true\n        text: qsTr(\"<h3>Encountered an error starting up:</h3><br>\"\n              + \"<i>\\\"Incompatible hardware detected.\\\"</i>\"\n              + \"<br><br>Unfortunately, your CPU does not meet the minimal requirements to run \"\n              + \"this program. In particular, it does not support AVX intrinsics which this \"\n              + \"program requires to successfully run a modern large language model. \"\n              + \"The only solution at this time is to upgrade your hardware to a more modern CPU.\"\n              + \"<br><br>See here for more information: <a href=\\\"https://en.wikipedia.org/wiki/Advanced_Vector_Extensions\\\">\"\n              + \"https://en.wikipedia.org/wiki/Advanced_Vector_Extensions</a>\");\n    }\n\n    PopupDialog {\n        id: errorSettingsAccess\n        anchors.centerIn: parent\n        shouldTimeOut: false\n        shouldShowBusy: false\n        modal: true\n        text: qsTr(\"<h3>Encountered an error starting up:</h3><br>\"\n              + \"<i>\\\"Inability to access settings file.\\\"</i>\"\n              + \"<br><br>Unfortunately, something is preventing the program from accessing \"\n              + \"the settings file. This could be caused by incorrect permissions in the local \"\n              + \"app config directory where the settings file is located. 
\"\n              + \"Check out our <a href=\\\"https://discord.gg/4M2QFmTt2k\\\">discord channel</a> for help.\")\n    }\n\n    StartupDialog {\n        id: firstStartDialog\n        anchors.centerIn: parent\n    }\n\n    NewVersionDialog {\n        id: newVersionDialog\n        anchors.centerIn: parent\n    }\n\n    Connections {\n        target: Network\n        function onHealthCheckFailed(code) {\n            healthCheckFailed.open();\n        }\n    }\n\n    PopupDialog {\n        id: healthCheckFailed\n        anchors.centerIn: parent\n        text: qsTr(\"Connection to datalake failed.\")\n        font.pixelSize: theme.fontSizeLarge\n    }\n\n    property bool shouldClose: false\n\n    PopupDialog {\n        id: savingPopup\n        anchors.centerIn: parent\n        shouldTimeOut: false\n        shouldShowBusy: true\n        text: qsTr(\"Saving chats.\")\n        font.pixelSize: theme.fontSizeLarge\n    }\n\n    NetworkDialog {\n        id: networkDialog\n        anchors.centerIn: parent\n        width: Math.min(1024, window.width - (window.width * .2))\n        height: Math.min(600, window.height - (window.height * .2))\n        Item {\n            Accessible.role: Accessible.Dialog\n            Accessible.name: qsTr(\"Network dialog\")\n            Accessible.description: qsTr(\"opt-in to share feedback/conversations\")\n        }\n    }\n\n    onClosing: function(close) {\n        if (systemTrayIcon.visible) {\n            LLM.hideDockIcon();\n            window.visible = false;\n            ChatListModel.saveChats();\n            close.accepted = false;\n            return;\n        }\n\n        if (window.shouldClose)\n            return;\n\n        window.shouldClose = true;\n        savingPopup.open();\n        ChatListModel.saveChatsForQuit();\n        close.accepted = false;\n    }\n\n    Connections {\n        target: ChatListModel\n        function onSaveChatsFinished() {\n            savingPopup.close();\n            if (window.shouldClose)\n    
            window.close()\n        }\n    }\n\n    color: theme.viewBarBackground\n\n    Rectangle {\n        id: viewBar\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.left: parent.left\n        width: 68 * theme.fontScale\n        color: theme.viewBarBackground\n\n        ColumnLayout {\n            id: viewsLayout\n            anchors.top: parent.top\n            anchors.topMargin: 30\n            anchors.horizontalCenter: parent.horizontalCenter\n            Layout.margins: 0\n            spacing: 16\n\n            MyToolButton {\n                id: homeButton\n                backgroundColor: toggled ? theme.iconBackgroundViewBarHovered : theme.iconBackgroundViewBar\n                backgroundColorHovered: theme.iconBackgroundViewBarHovered\n                Layout.preferredWidth: 38 * theme.fontScale\n                Layout.preferredHeight: 38 * theme.fontScale\n                Layout.alignment: Qt.AlignCenter\n                toggledWidth: 0\n                toggled: homeView.isShown()\n                toggledColor: theme.iconBackgroundViewBarToggled\n                imageWidth: 25 * theme.fontScale\n                imageHeight: 25 * theme.fontScale\n                source: \"qrc:/gpt4all/icons/home.svg\"\n                Accessible.name: qsTr(\"Home view\")\n                Accessible.description: qsTr(\"Home view of application\")\n                onClicked: {\n                    homeView.show()\n                }\n            }\n\n            Text {\n                Layout.topMargin: -20\n                text: qsTr(\"Home\")\n                font.pixelSize: theme.fontSizeMedium\n                font.bold: true\n                color: homeButton.hovered ? 
homeButton.backgroundColorHovered : homeButton.backgroundColor\n                Layout.preferredWidth: 38 * theme.fontScale\n                horizontalAlignment: Text.AlignHCenter\n                TapHandler {\n                    onTapped: function(eventPoint, button) {\n                        homeView.show()\n                    }\n                }\n            }\n\n            MyToolButton {\n                id: chatButton\n                backgroundColor: toggled ? theme.iconBackgroundViewBarHovered : theme.iconBackgroundViewBar\n                backgroundColorHovered: theme.iconBackgroundViewBarHovered\n                Layout.preferredWidth: 38 * theme.fontScale\n                Layout.preferredHeight: 38 * theme.fontScale\n                Layout.alignment: Qt.AlignCenter\n                toggledWidth: 0\n                toggled: chatView.isShown()\n                toggledColor: theme.iconBackgroundViewBarToggled\n                imageWidth: 25 * theme.fontScale\n                imageHeight: 25 * theme.fontScale\n                source: \"qrc:/gpt4all/icons/chat.svg\"\n                Accessible.name: qsTr(\"Chat view\")\n                Accessible.description: qsTr(\"Chat view to interact with models\")\n                onClicked: {\n                    chatView.show()\n                }\n            }\n\n            Text {\n                Layout.topMargin: -20\n                text: qsTr(\"Chats\")\n                font.pixelSize: theme.fontSizeMedium\n                font.bold: true\n                color: chatButton.hovered ? 
chatButton.backgroundColorHovered : chatButton.backgroundColor\n                Layout.preferredWidth: 38 * theme.fontScale\n                horizontalAlignment: Text.AlignHCenter\n                TapHandler {\n                    onTapped: function(eventPoint, button) {\n                        chatView.show()\n                    }\n                }\n            }\n\n            MyToolButton {\n                id: modelsButton\n                backgroundColor: toggled ? theme.iconBackgroundViewBarHovered : theme.iconBackgroundViewBar\n                backgroundColorHovered: theme.iconBackgroundViewBarHovered\n                Layout.preferredWidth: 38 * theme.fontScale\n                Layout.preferredHeight: 38 * theme.fontScale\n                toggledWidth: 0\n                toggled: modelsView.isShown()\n                toggledColor: theme.iconBackgroundViewBarToggled\n                imageWidth: 25 * theme.fontScale\n                imageHeight: 25 * theme.fontScale\n                source: \"qrc:/gpt4all/icons/models.svg\"\n                Accessible.name: qsTr(\"Models\")\n                Accessible.description: qsTr(\"Models view for installed models\")\n                onClicked: {\n                    modelsView.show()\n                }\n            }\n\n            Text {\n                Layout.topMargin: -20\n                text: qsTr(\"Models\")\n                font.pixelSize: theme.fontSizeMedium\n                font.bold: true\n                color: modelsButton.hovered ? 
modelsButton.backgroundColorHovered : modelsButton.backgroundColor\n                Layout.preferredWidth: 38 * theme.fontScale\n                horizontalAlignment: Text.AlignHCenter\n                TapHandler {\n                    onTapped: function(eventPoint, button) {\n                        modelsView.show()\n                    }\n                }\n            }\n\n            MyToolButton {\n                id: localdocsButton\n                backgroundColor: toggled ? theme.iconBackgroundViewBarHovered : theme.iconBackgroundViewBar\n                backgroundColorHovered: theme.iconBackgroundViewBarHovered\n                Layout.preferredWidth: 38 * theme.fontScale\n                Layout.preferredHeight: 38 * theme.fontScale\n                toggledWidth: 0\n                toggledColor: theme.iconBackgroundViewBarToggled\n                toggled: localDocsView.isShown()\n                imageWidth: 25 * theme.fontScale\n                imageHeight: 25 * theme.fontScale\n                source: \"qrc:/gpt4all/icons/db.svg\"\n                Accessible.name: qsTr(\"LocalDocs\")\n                Accessible.description: qsTr(\"LocalDocs view to configure and use local docs\")\n                onClicked: {\n                    localDocsView.show()\n                }\n            }\n\n            Text {\n                Layout.topMargin: -20\n                text: qsTr(\"LocalDocs\")\n                font.pixelSize: theme.fontSizeMedium\n                font.bold: true\n                color: localdocsButton.hovered ? 
localdocsButton.backgroundColorHovered : localdocsButton.backgroundColor\n                Layout.preferredWidth: 38 * theme.fontScale\n                horizontalAlignment: Text.AlignHCenter\n                TapHandler {\n                    onTapped: function(eventPoint, button) {\n                        localDocsView.show()\n                    }\n                }\n            }\n\n            MyToolButton {\n                id: settingsButton\n                backgroundColor: toggled ? theme.iconBackgroundViewBarHovered : theme.iconBackgroundViewBar\n                backgroundColorHovered: theme.iconBackgroundViewBarHovered\n                Layout.preferredWidth: 38 * theme.fontScale\n                Layout.preferredHeight: 38 * theme.fontScale\n                toggledWidth: 0\n                toggledColor: theme.iconBackgroundViewBarToggled\n                toggled: settingsView.isShown()\n                imageWidth: 25 * theme.fontScale\n                imageHeight: 25 * theme.fontScale\n                source: \"qrc:/gpt4all/icons/settings.svg\"\n                Accessible.name: qsTr(\"Settings\")\n                Accessible.description: qsTr(\"Settings view for application configuration\")\n                onClicked: {\n                    settingsView.show(0 /*pageToDisplay*/)\n                }\n            }\n\n            Text {\n                Layout.topMargin: -20\n                text: qsTr(\"Settings\")\n                font.pixelSize: theme.fontSizeMedium\n                font.bold: true\n                color: settingsButton.hovered ? 
settingsButton.backgroundColorHovered : settingsButton.backgroundColor\n                Layout.preferredWidth: 38 * theme.fontScale\n                horizontalAlignment: Text.AlignHCenter\n                TapHandler {\n                    onTapped: function(eventPoint, button) {\n                        settingsView.show(0 /*pageToDisplay*/)\n                    }\n                }\n            }\n        }\n\n        ColumnLayout {\n            id: buttonsLayout\n            anchors.bottom: parent.bottom\n            anchors.margins: 0\n            anchors.bottomMargin: 25\n            anchors.horizontalCenter: parent.horizontalCenter\n            Layout.margins: 0\n            spacing: 22\n\n            Item {\n                id: antennaItem\n                Layout.alignment: Qt.AlignCenter\n                Layout.preferredWidth: antennaImage.width\n                Layout.preferredHeight: antennaImage.height\n                Image {\n                    id: antennaImage\n                    sourceSize.width: 32\n                    sourceSize.height: 32\n                    visible: false\n                    fillMode: Image.PreserveAspectFit\n                    source: \"qrc:/gpt4all/icons/antenna_3.svg\"\n                }\n\n                ColorOverlay {\n                    id: antennaColored\n                    visible: ModelList.selectableModels.count !== 0 && (currentChat.isServer || currentChat.modelInfo.isOnline || MySettings.networkIsActive)\n                    anchors.fill: antennaImage\n                    source: antennaImage\n                    color: theme.styledTextColor\n                    ToolTip.text: {\n                        if (MySettings.networkIsActive)\n                            return qsTr(\"The datalake is enabled\")\n                        else if (currentChat.modelInfo.isOnline)\n                            return qsTr(\"Using a network model\")\n                        else if (currentChat.isServer)\n                      
      return qsTr(\"Server mode is enabled\")\n                        return \"\"\n                    }\n                    ToolTip.visible: maAntenna.containsMouse\n                    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n                    MouseArea {\n                        id: maAntenna\n                        anchors.fill: antennaColored\n                        hoverEnabled: true\n                    }\n                }\n\n                SequentialAnimation {\n                    running: true\n                    loops: Animation.Infinite\n\n                    PropertyAnimation {\n                        target: antennaImage\n                        property: \"source\"\n                        duration: 500\n                        from: \"qrc:/gpt4all/icons/antenna_1.svg\"\n                        to: \"qrc:/gpt4all/icons/antenna_2.svg\"\n                    }\n\n                    PauseAnimation {\n                        duration: 1500\n                    }\n\n                    PropertyAnimation {\n                        target: antennaImage\n                        property: \"source\"\n                        duration: 500\n                        from: \"qrc:/gpt4all/icons/antenna_2.svg\"\n                        to: \"qrc:/gpt4all/icons/antenna_3.svg\"\n                    }\n\n                    PauseAnimation {\n                        duration: 1500\n                    }\n\n                    PropertyAnimation {\n                        target: antennaImage\n                        property: \"source\"\n                        duration: 500\n                        from: \"qrc:/gpt4all/icons/antenna_3.svg\"\n                        to: \"qrc:/gpt4all/icons/antenna_2.svg\"\n                    }\n\n                    PauseAnimation {\n                        duration: 1500\n                    }\n\n                    PropertyAnimation {\n                        target: antennaImage\n                        
property: \"source\"\n                        duration: 1500\n                        from: \"qrc:/gpt4all/icons/antenna_2.svg\"\n                        to: \"qrc:/gpt4all/icons/antenna_1.svg\"\n                    }\n\n                    PauseAnimation {\n                        duration: 500\n                    }\n                }\n            }\n\n            Rectangle {\n                Layout.alignment: Qt.AlignCenter\n                Layout.preferredWidth: image.width\n                Layout.preferredHeight: image.height\n                color: \"transparent\"\n\n                Image {\n                    id: image\n                    anchors.centerIn: parent\n                    sourceSize: Qt.size(48 * theme.fontScale, 32 * theme.fontScale)\n                    fillMode: Image.PreserveAspectFit\n                    mipmap: true\n                    visible: false\n                    source: \"qrc:/gpt4all/icons/nomic_logo.svg\"\n                }\n\n                ColorOverlay {\n                    anchors.fill: image\n                    source: image\n                    color: image.hovered ? 
theme.mutedDarkTextColorHovered : theme.mutedDarkTextColor\n                    TapHandler {\n                        onTapped: function(eventPoint, button) {\n                            Qt.openUrlExternally(\"https://nomic.ai\")\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    Rectangle {\n        id: roundedFrame\n        z: 299\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.left: viewBar.right\n        anchors.right: parent.right\n        anchors.topMargin: 15\n        anchors.bottomMargin: 15\n        anchors.rightMargin: 15\n        radius: 15\n        border.width: 1\n        border.color: theme.dividerColor\n        color: \"transparent\"\n        clip: true\n    }\n\n    RectangularGlow {\n        id: effect\n        anchors.fill: roundedFrame\n        glowRadius: 15\n        spread: 0\n        color: theme.dividerColor\n        cornerRadius: 10\n        opacity: 0.5\n    }\n\n    StackLayout {\n        id: stackLayout\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.left: viewBar.right\n        anchors.right: parent.right\n        anchors.topMargin: 15\n        anchors.bottomMargin: 15\n        anchors.rightMargin: 15\n\n        layer.enabled: true\n        layer.effect: OpacityMask {\n            maskSource: Rectangle {\n                width: roundedFrame.width\n                height: roundedFrame.height\n                radius: 15\n            }\n        }\n\n        HomeView {\n            id: homeView\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n            shouldShowFirstStart: !hasCheckedFirstStart\n\n            function show() {\n                stackLayout.currentIndex = 0;\n            }\n\n            function isShown() {\n                return stackLayout.currentIndex === 0\n            }\n\n            Connections {\n                target: homeView\n                
function onChatViewRequested() {\n                    chatView.show();\n                }\n                function onLocalDocsViewRequested() {\n                    localDocsView.show();\n                }\n                function onAddModelViewRequested() {\n                    addModelView.show();\n                }\n                function onSettingsViewRequested(page) {\n                    settingsView.show(page);\n                }\n            }\n        }\n\n        ChatView {\n            id: chatView\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            function show() {\n                stackLayout.currentIndex = 1;\n            }\n\n            function isShown() {\n                return stackLayout.currentIndex === 1\n            }\n\n            Connections {\n                target: chatView\n                function onAddCollectionViewRequested() {\n                    addCollectionView.show();\n                }\n                function onAddModelViewRequested() {\n                    addModelView.show();\n                }\n            }\n        }\n\n        ModelsView {\n            id: modelsView\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            function show() {\n                stackLayout.currentIndex = 2;\n            }\n\n            function isShown() {\n                return stackLayout.currentIndex === 2\n            }\n\n            Item {\n                Accessible.name: qsTr(\"Installed models\")\n                Accessible.description: qsTr(\"View of installed models\")\n            }\n\n            Connections {\n                target: modelsView\n                function onAddModelViewRequested() {\n                    addModelView.show();\n                }\n            }\n        }\n\n        LocalDocsView {\n            id: localDocsView\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            function show() {\n       
         stackLayout.currentIndex = 3;\n            }\n\n            function isShown() {\n                return stackLayout.currentIndex === 3\n            }\n\n            Connections {\n                target: localDocsView\n                function onAddCollectionViewRequested() {\n                    addCollectionView.show();\n                }\n            }\n        }\n\n        SettingsView {\n            id: settingsView\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            function show(page) {\n                settingsView.pageToDisplay = page;\n                stackLayout.currentIndex = 4;\n            }\n\n            function isShown() {\n                return stackLayout.currentIndex === 4\n            }\n        }\n\n        AddCollectionView {\n            id: addCollectionView\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            function show() {\n                stackLayout.currentIndex = 5;\n            }\n            function isShown() {\n                return stackLayout.currentIndex === 5\n            }\n\n            Connections {\n                target: addCollectionView\n                function onLocalDocsViewRequested() {\n                    localDocsView.show();\n                }\n            }\n        }\n\n        AddModelView {\n            id: addModelView\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            function show() {\n                stackLayout.currentIndex = 6;\n            }\n            function isShown() {\n                return stackLayout.currentIndex === 6\n            }\n\n            Connections {\n                target: addModelView\n                function onModelsViewRequested() {\n                    modelsView.show();\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/metadata/latestnews.md",
    "content": "## Latest News\n\nGPT4All v3.10.0 was released on February 24th. Changes include:\n\n* **Remote Models:**\n  * The Add Model page now has a dedicated tab for remote model providers.\n  * Groq, OpenAI, and Mistral remote models are now easier to configure.\n* **CUDA Compatibility:** GPUs with CUDA compute capability 5.0 such as the GTX 750 are now supported by the CUDA backend.\n* **New Model:** The non-MoE Granite model is now supported.\n* **Translation Updates:**\n  * The Italian translation has been updated.\n  * The Simplified Chinese translation has been significantly improved.\n* **Better Chat Templates:** The default chat templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B have been improved.\n* **Whitespace Fixes:** DeepSeek-R1-based models now have better whitespace behavior in their output.\n* **Crash Fixes:** Several issues that could potentially cause GPT4All to crash have been fixed.\n"
  },
  {
    "path": "gpt4all-chat/metadata/models.json",
    "content": "[\n  {\n    \"order\": \"a\",\n    \"md5sum\": \"e8d47924f433bd561cb5244557147793\",\n    \"name\": \"Wizard v1.1\",\n    \"filename\": \"wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin\",\n    \"filesize\": \"7323310848\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Best overall model</strong><br><ul><li>Instruction based<li>Gives very long responses<li>Finetuned with only 1k of high-quality data<li>Trained by Microsoft and Peking University<li>Cannot be used commercially</ul>\"\n  },\n  {\n    \"order\": \"b\",\n    \"md5sum\": \"725f148218a65ce8ebcc724e52f31b49\",\n    \"name\": \"GPT4All Falcon\",\n    \"filename\": \"ggml-model-gpt4all-falcon-q4_0.bin\",\n    \"filesize\": \"4061641216\",\n    \"requires\": \"2.4.9\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Falcon\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Best overall smaller model</strong><br><ul><li>Fast responses</li><li>Instruction based</li><li>Trained by TII<li>Finetuned by Nomic AI<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/nomic-ai/gpt4all-falcon-ggml/resolve/main/ggml-model-gpt4all-falcon-q4_0.bin\",\n    \"promptTemplate\": \"### Instruction:\\n%1\\n### Response:\\n\"\n  },\n  {\n    \"order\": \"c\",\n    \"md5sum\": \"4acc146dd43eb02845c233c29289c7c5\",\n    \"name\": \"Hermes\",\n    \"filename\": \"nous-hermes-13b.ggmlv3.q4_0.bin\",\n    \"filesize\": \"8136777088\",\n    \"requires\": \"2.4.7\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Extremely good model</strong><br><ul><li>Instruction based<li>Gives long responses<li>Curated with 300,000 uncensored instructions<li>Trained by Nous 
Research<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin\",\n    \"promptTemplate\": \"### Instruction:\\n%1\\n### Response:\\n\"\n  },\n  {\n    \"order\": \"f\",\n    \"md5sum\": \"11d9f060ca24575a2c303bdc39952486\",\n    \"name\": \"Snoozy\",\n    \"filename\": \"GPT4All-13B-snoozy.ggmlv3.q4_0.bin\",\n    \"filesize\": \"8136770688\",\n    \"requires\": \"2.4.7\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Very good overall model</strong><br><ul><li>Instruction based<li>Based on the same dataset as Groovy<li>Slower than Groovy, with higher quality responses<li>Trained by Nomic AI<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin\"\n  },\n  {\n    \"order\": \"h\",\n    \"md5sum\": \"e64e74375ce9d36a3d0af3db1523fd0a\",\n    \"name\": \"Mini Orca\",\n    \"filename\": \"orca-mini-7b.ggmlv3.q4_0.bin\",\n    \"filesize\": \"3791749248\",\n    \"requires\": \"2.4.7\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"OpenLLaMa\",\n    \"description\": \"<strong>New model with novel dataset</strong><br><ul><li>Instruction based<li>Explain tuned datasets<li>Orca Research Paper dataset construction approaches<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/orca_mini_7B-GGML/resolve/main/orca-mini-7b.ggmlv3.q4_0.bin\",\n    \"promptTemplate\": \"### User:\\n%1\\n### Response:\\n\",\n    \"systemPrompt\": \"### System:\\nYou are an AI assistant that follows instruction extremely well. 
Help as much as you can.\\n\\n\"\n  },\n  {\n    \"order\": \"i\",\n    \"md5sum\": \"6a087f7f4598fad0bb70e6cb4023645e\",\n    \"name\": \"Mini Orca (Small)\",\n    \"filename\": \"orca-mini-3b.ggmlv3.q4_0.bin\",\n    \"filesize\": \"1928446208\",\n    \"requires\": \"2.4.7\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"3 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"OpenLLaMa\",\n    \"description\": \"<strong>Small version of new model with novel dataset</strong><br><ul><li>Instruction based<li>Explain tuned datasets<li>Orca Research Paper dataset construction approaches<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_0.bin\",\n    \"promptTemplate\": \"### User:\\n%1\\n### Response:\\n\",\n    \"systemPrompt\": \"### System:\\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\\n\\n\"\n  },\n  {\n    \"order\": \"j\",\n    \"md5sum\": \"959b7f65b2d12fd1e3ff99e7493c7a3a\",\n    \"name\": \"Mini Orca (Large)\",\n    \"filename\": \"orca-mini-13b.ggmlv3.q4_0.bin\",\n    \"filesize\": \"7323329152\",\n    \"requires\": \"2.4.7\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"OpenLLaMa\",\n    \"description\": \"<strong>Largest version of new model with novel dataset</strong><br><ul><li>Instruction based<li>Explain tuned datasets<li>Orca Research Paper dataset construction approaches<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/orca_mini_13B-GGML/resolve/main/orca-mini-13b.ggmlv3.q4_0.bin\",\n    \"promptTemplate\": \"### User:\\n%1\\n### Response:\\n\",\n    \"systemPrompt\": \"### System:\\nYou are an AI assistant that follows instruction extremely well. 
Help as much as you can.\\n\\n\"\n  },\n  {\n    \"order\": \"r\",\n    \"md5sum\": \"489d21fd48840dcb31e5f92f453f3a20\",\n    \"name\": \"Wizard Uncensored\",\n    \"filename\": \"wizardLM-13B-Uncensored.ggmlv3.q4_0.bin\",\n    \"filesize\": \"8136777088\",\n    \"requires\": \"2.4.7\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Trained on uncensored assistant data and instruction data</strong><br><ul><li>Instruction based<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin\"\n  },\n  {\n    \"order\": \"s\",\n    \"md5sum\": \"615890cb571fcaa0f70b2f8d15ef809e\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Replit\",\n    \"filename\": \"ggml-replit-code-v1-3b.bin\",\n    \"filesize\": \"5202046853\",\n    \"requires\": \"2.4.7\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"3 billion\",\n    \"quant\": \"f16\",\n    \"type\": \"Replit\",\n    \"systemPrompt\": \" \",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin\"\n  },\n  {\n    \"order\": \"t\",\n    \"md5sum\": \"031bb5d5722c08d13e3e8eaf55c37391\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Bert\",\n    \"filename\": \"ggml-all-MiniLM-L6-v2-f16.bin\",\n    \"filesize\": \"45521167\",\n    \"requires\": \"2.4.14\",\n    \"ramrequired\": \"1\",\n    \"parameters\": \"1 million\",\n    \"quant\": \"f16\",\n    \"type\": \"Bert\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Sbert</strong><br><ul><li>For embeddings</ul>\"\n  },\n  {\n    \"order\": \"u\",\n    \"md5sum\": 
\"379ee1bab9a7a9c27c2314daa097528e\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Starcoder (Small)\",\n    \"filename\": \"starcoderbase-3b-ggml.bin\",\n    \"filesize\": \"7503121552\",\n    \"requires\": \"2.4.14\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"3 billion\",\n    \"quant\": \"f16\",\n    \"type\": \"Starcoder\",\n    \"systemPrompt\": \" \",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based</ul>\"\n  },\n  {\n    \"order\": \"v\",\n    \"md5sum\": \"f981ab8fbd1ebbe4932ddd667c108ba7\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Starcoder\",\n    \"filename\": \"starcoderbase-7b-ggml.bin\",\n    \"filesize\": \"17860448016\",\n    \"requires\": \"2.4.14\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"f16\",\n    \"type\": \"Starcoder\",\n    \"systemPrompt\": \" \",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based</ul>\"\n  },\n  {\n    \"order\": \"w\",\n    \"md5sum\": \"c7ebc61eec1779bddae1f2bcbf2007cc\",\n    \"name\": \"Llama-2-7B Chat\",\n    \"filename\": \"llama-2-7b-chat.ggmlv3.q4_0.bin\",\n    \"filesize\": \"3791725184\",\n    \"requires\": \"2.4.14\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"description\": \"<strong>New LLaMA2 model from Meta AI.</strong><br><ul><li>Fine-tuned for dialogue.<li>static model trained on an offline dataset<li>RLHF dataset<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q4_0.bin\",\n    \"promptTemplate\": \"[INST] %1 [/INST] \",\n    \"systemPrompt\": \"[INST]<<SYS>>You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. 
Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.<</SYS>>[/INST] \"\n  }\n]\n"
  },
  {
    "path": "gpt4all-chat/metadata/models2.json",
    "content": "[\n  {\n    \"order\": \"a\",\n    \"md5sum\": \"f692417a22405d80573ac10cb0cd6c6a\",\n    \"name\": \"Mistral OpenOrca\",\n    \"filename\": \"mistral-7b-openorca.gguf2.Q4_0.gguf\",\n    \"filesize\": \"4108928128\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"description\": \"<strong>Best overall fast chat model</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by Mistral AI<li>Finetuned on OpenOrca dataset curated via <a href=\\\"https://atlas.nomic.ai/\\\">Nomic Atlas</a><li>Licensed for commercial use</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf\",\n    \"promptTemplate\": \"<|im_start|>user\\n%1<|im_end|>\\n<|im_start|>assistant\\n\",\n    \"systemPrompt\": \"<|im_start|>system\\nYou are MistralOrca, a large language model trained by Alignment Lab AI.\\n<|im_end|>\"\n  },\n  {\n    \"order\": \"b\",\n    \"md5sum\": \"97463be739b50525df56d33b26b00852\",\n    \"name\": \"Mistral Instruct\",\n    \"filename\": \"mistral-7b-instruct-v0.1.Q4_0.gguf\",\n    \"filesize\": \"4108916384\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Best overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf\",\n    \"promptTemplate\": \"[INST] %1 [/INST]\"\n  },\n  {\n    \"order\": \"c\",\n    \"md5sum\": \"c4c78adf744d6a20f05c8751e3961b84\",\n    \"name\": \"GPT4All Falcon\",\n    \"filename\": \"gpt4all-falcon-newbpe-q4_0.gguf\",\n    \"filesize\": \"4210994112\",\n    \"requires\": \"2.6.0\",\n    \"ramrequired\": \"8\",\n    
\"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Falcon\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Very fast model with good quality</strong><br><ul><li>Fastest responses</li><li>Instruction based</li><li>Trained by TII<li>Finetuned by Nomic AI<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/gpt4all-falcon-newbpe-q4_0.gguf\",\n    \"promptTemplate\": \"### Instruction:\\n%1\\n### Response:\\n\"\n  },\n  {\n    \"order\": \"e\",\n    \"md5sum\": \"00c8593ba57f5240f59662367b3ed4a5\",\n    \"name\": \"Orca 2 (Medium)\",\n    \"filename\": \"orca-2-7b.Q4_0.gguf\",\n    \"filesize\": \"3825824192\",\n    \"requires\": \"2.5.2\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<ul><li>Instruction based<li>Trained by Microsoft<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/orca-2-7b.Q4_0.gguf\"\n  },\n  {\n    \"order\": \"f\",\n    \"md5sum\": \"3c0d63c4689b9af7baa82469a6f51a19\",\n    \"name\": \"Orca 2 (Full)\",\n    \"filename\": \"orca-2-13b.Q4_0.gguf\",\n    \"filesize\": \"7365856064\",\n    \"requires\": \"2.5.2\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<ul><li>Instruction based<li>Trained by Microsoft<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/orca-2-13b.Q4_0.gguf\"\n  },\n  {\n    \"order\": \"g\",\n    \"md5sum\": \"5aff90007499bce5c64b1c0760c0b186\",\n    \"name\": \"Wizard v1.2\",\n    \"filename\": \"wizardlm-13b-v1.2.Q4_0.gguf\",\n    \"filesize\": \"7365834624\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \" 
\",\n    \"description\": \"<strong>Best overall larger model</strong><br><ul><li>Instruction based<li>Gives very long responses<li>Finetuned with only 1k of high-quality data<li>Trained by Microsoft and Peking University<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/wizardlm-13b-v1.2.Q4_0.gguf\"\n  },\n  {\n    \"order\": \"h\",\n    \"md5sum\": \"3d12810391d04d1153b692626c0c6e16\",\n    \"name\": \"Hermes\",\n    \"filename\": \"nous-hermes-llama2-13b.Q4_0.gguf\",\n    \"filesize\": \"7366062080\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Extremely good model</strong><br><ul><li>Instruction based<li>Gives long responses<li>Curated with 300,000 uncensored instructions<li>Trained by Nous Research<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/nous-hermes-llama2-13b.Q4_0.gguf\",\n    \"promptTemplate\": \"### Instruction:\\n%1\\n### Response:\\n\"\n  },\n  {\n    \"order\": \"i\",\n    \"md5sum\": \"40388eb2f8d16bb5d08c96fdfaac6b2c\",\n    \"name\": \"Snoozy\",\n    \"filename\": \"gpt4all-13b-snoozy-q4_0.gguf\",\n    \"filesize\": \"7365834624\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>Very good overall model</strong><br><ul><li>Instruction based<li>Based on the same dataset as Groovy<li>Slower than Groovy, with higher quality responses<li>Trained by Nomic AI<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf\"\n  },\n  {\n    \"order\": \"j\",\n    \"md5sum\": \"15dcb4d7ea6de322756449c11a0b7545\",\n    \"name\": \"MPT Chat\",\n    \"filename\": \"mpt-7b-chat-newbpe-q4_0.gguf\",\n    
\"filesize\": \"3912373472\",\n    \"requires\": \"2.6.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"MPT\",\n    \"description\": \"<strong>Good model with novel architecture</strong><br><ul><li>Fast responses<li>Chat based<li>Trained by Mosaic ML<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/mpt-7b-chat-newbpe-q4_0.gguf\",\n    \"promptTemplate\": \"<|im_start|>user\\n%1<|im_end|>\\n<|im_start|>assistant\\n\",\n    \"systemPrompt\": \"<|im_start|>system\\n- You are a helpful assistant chatbot trained by MosaicML.\\n- You answer questions.\\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\"\n  },\n  {\n    \"order\": \"k\",\n    \"md5sum\": \"0e769317b90ac30d6e09486d61fefa26\",\n    \"name\": \"Mini Orca (Small)\",\n    \"filename\": \"orca-mini-3b-gguf2-q4_0.gguf\",\n    \"filesize\": \"1979946720\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"3 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"OpenLLaMa\",\n    \"description\": \"<strong>Small version of new model with novel dataset</strong><br><ul><li>Instruction based<li>Explain tuned datasets<li>Orca Research Paper dataset construction approaches<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/orca-mini-3b-gguf2-q4_0.gguf\",\n    \"promptTemplate\": \"### User:\\n%1\\n### Response:\\n\",\n    \"systemPrompt\": \"### System:\\nYou are an AI assistant that follows instruction extremely well. 
Help as much as you can.\\n\\n\"\n  },\n  {\n    \"order\": \"l\",\n    \"md5sum\": \"c232f17e09bca4b7ee0b5b1f4107c01e\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Replit\",\n    \"filename\": \"replit-code-v1_5-3b-newbpe-q4_0.gguf\",\n    \"filesize\": \"1953055104\",\n    \"requires\": \"2.6.0\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"3 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Replit\",\n    \"systemPrompt\": \" \",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based<li>Licensed for commercial use<li>WARNING: Not available for chat GUI</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/replit-code-v1_5-3b-newbpe-q4_0.gguf\"\n  },\n  {\n    \"order\": \"m\",\n    \"md5sum\": \"70841751ccd95526d3dcfa829e11cd4c\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Starcoder\",\n    \"filename\": \"starcoder-newbpe-q4_0.gguf\",\n    \"filesize\": \"8987411904\",\n    \"requires\": \"2.6.0\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Starcoder\",\n    \"systemPrompt\": \" \",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based<li>WARNING: Not available for chat GUI</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/starcoder-newbpe-q4_0.gguf\"\n  },\n  {\n    \"order\": \"n\",\n    \"md5sum\": \"e973dd26f0ffa6e46783feaea8f08c83\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Rift coder\",\n    \"filename\": \"rift-coder-v0-7b-q4_0.gguf\",\n    \"filesize\": \"3825903776\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \" \",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on collection of Python and TypeScript</strong><br><ul><li>Code completion 
based<li>WARNING: Not available for chat GUI</li></ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/rift-coder-v0-7b-q4_0.gguf\"\n  },\n  {\n    \"order\": \"o\",\n    \"md5sum\": \"e479e6f38b59afc51a470d1953a6bfc7\",\n    \"disableGUI\": \"true\",\n    \"name\": \"SBert\",\n    \"filename\": \"all-MiniLM-L6-v2-f16.gguf\",\n    \"filesize\": \"45887744\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"1\",\n    \"parameters\": \"40 million\",\n    \"quant\": \"f16\",\n    \"type\": \"Bert\",\n    \"systemPrompt\": \" \",\n    \"description\": \"<strong>LocalDocs text embeddings model</strong><br><ul><li>For use with LocalDocs feature<li>Used for retrieval augmented generation (RAG)</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/all-MiniLM-L6-v2-f16.gguf\"\n  },\n  {\n    \"order\": \"p\",\n    \"md5sum\": \"919de4dd6f25351bcb0223790db1932d\",\n    \"name\": \"EM German Mistral\",\n    \"filename\": \"em_german_mistral_v01.Q4_0.gguf\",\n    \"filesize\": \"4108916352\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"description\": \"<strong>Mistral-based model for German-language applications</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by ellamind<li>Finetuned on German instruction and chat data<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf\",\n    \"promptTemplate\": \"USER: %1 ASSISTANT: \",\n    \"systemPrompt\": \"Du bist ein hilfreicher Assistent. \"\n  }\n]\n"
  },
  {
    "path": "gpt4all-chat/metadata/models3.json",
    "content": "[\n  {\n    \"order\": \"a\",\n    \"md5sum\": \"a54c08a7b90e4029a8c2ab5b5dc936aa\",\n    \"name\": \"Reasoner v1\",\n    \"filename\": \"qwen2.5-coder-7b-instruct-q4_0.gguf\",\n    \"filesize\": \"4431390720\",\n    \"requires\": \"3.6.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"8 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"qwen2\",\n    \"description\": \"<ul><li>Based on <a href=\\\"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct\\\">Qwen2.5-Coder 7B</a></li><li>Uses built-in javascript code interpreter</li><li>Use for complex reasoning tasks that can be aided by computation analysis</li><li>License: <a href=\\\"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct/blob/main/LICENSE\\\">Apache License Version 2.0</a></li><li>#reasoning</li></ul>\",\n    \"url\": \"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_0.gguf\",\n    \"chatTemplate\": \"{{- '<|im_start|>system\\\\n' }}\\n{% if toolList|length > 0 %}You have access to the following functions:\\n{% for tool in toolList %}\\nUse the function '{{tool.function}}' to: '{{tool.description}}'\\n{% if tool.parameters|length > 0 %}\\nparameters:\\n{% for info in tool.parameters %}\\n  {{info.name}}:\\n    type: {{info.type}}\\n    description: {{info.description}}\\n    required: {{info.required}}\\n{% endfor %}\\n{% endif %}\\n# Tool Instructions\\nIf you CHOOSE to call this function ONLY reply with the following format:\\n'{{tool.symbolicFormat}}'\\nHere is an example. If the user says, '{{tool.examplePrompt}}', then you reply\\n'{{tool.exampleCall}}'\\nAfter the result you might reply with, '{{tool.exampleReply}}'\\n{% endfor %}\\nYou MUST include both the start and end tags when you use a function.\\n\\nYou are a helpful AI assistant who uses the functions to break down, analyze, perform, and verify complex reasoning tasks. 
You SHOULD try to verify your answers using the functions where possible.\\n{% endif %}\\n{{- '<|im_end|>\\\\n' }}\\n{% for message in messages %}\\n{{'<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{% endfor %}\\n{% if add_generation_prompt %}\\n{{ '<|im_start|>assistant\\\\n' }}\\n{% endif %}\\n\",\n    \"systemPrompt\": \"\"\n  },\n  {\n    \"order\": \"aa\",\n    \"md5sum\": \"c87ad09e1e4c8f9c35a5fcef52b6f1c9\",\n    \"name\": \"Llama 3 8B Instruct\",\n    \"filename\": \"Meta-Llama-3-8B-Instruct.Q4_0.gguf\",\n    \"filesize\": \"4661724384\",\n    \"requires\": \"2.7.1\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"8 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA3\",\n    \"description\": \"<ul><li>Fast responses</li><li>Chat based model</li><li>Accepts system prompts in Llama 3 format</li><li>Trained by Meta</li><li>License: <a href=\\\"https://llama.meta.com/llama3/license/\\\">Meta Llama 3 Community License</a></li></ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/Meta-Llama-3-8B-Instruct.Q4_0.gguf\",\n    \"promptTemplate\": \"<|start_header_id|>user<|end_header_id|>\\n\\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n%2<|eot_id|>\",\n    \"systemPrompt\": \"\",\n    \"chatTemplate\": \"{%- set loop_messages = messages %}\\n{%- for message in loop_messages %}\\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\\\n\\\\n'+ message['content'] | trim + '<|eot_id|>' %}\\n    {{- content }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\\\n\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"aa1\",\n    \"sha256sum\": \"5cd4ee65211770f1d99b4f6f4951780b9ef40e29314bd6542bb5bd0ad0bc29d1\",\n    \"name\": \"DeepSeek-R1-Distill-Qwen-7B\",\n    \"filename\": \"DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf\",\n    \"filesize\": \"4444121056\",\n    \"requires\": \"3.8.0\",\n    \"ramrequired\": 
\"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"deepseek\",\n    \"description\": \"<p>The official Qwen2.5-Math-7B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\\\"https://opensource.org/license/mit\\\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>\",\n    \"url\": \"https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf\",\n    \"chatTemplate\": \"{%- if not add_generation_prompt is defined %}\\n    {%- set add_generation_prompt = false %}\\n{%- endif %}\\n{%- if messages[0]['role'] == 'system' %}\\n    {{- messages[0]['content'] }}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if message['role'] == 'user' %}\\n        {{- '<｜User｜>' + message['content'] }}\\n    {%- endif %}\\n    {%- if message['role'] == 'assistant' %}\\n        {%- set content = message['content'] | regex_replace('^[\\\\\\\\s\\\\\\\\S]*</think>', '') %}\\n        {{- '<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>' }}\\n    {%- endif %}\\n{%- endfor -%}\\n{%- if add_generation_prompt %}\\n    {{- '<｜Assistant｜>' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"aa2\",\n    \"sha256sum\": \"906b3382f2680f4ce845459b4a122e904002b075238080307586bcffcde49eef\",\n    \"name\": \"DeepSeek-R1-Distill-Qwen-14B\",\n    \"filename\": \"DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf\",\n    \"filesize\": \"8544267680\",\n    \"requires\": \"3.8.0\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"14 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"deepseek\",\n    \"description\": \"<p>The official Qwen2.5-14B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\\\"https://opensource.org/license/mit\\\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>\",\n    \"url\": \"https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf\",\n    \"chatTemplate\": 
\"{%- if not add_generation_prompt is defined %}\\n    {%- set add_generation_prompt = false %}\\n{%- endif %}\\n{%- if messages[0]['role'] == 'system' %}\\n    {{- messages[0]['content'] }}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if message['role'] == 'user' %}\\n        {{- '<｜User｜>' + message['content'] }}\\n    {%- endif %}\\n    {%- if message['role'] == 'assistant' %}\\n        {%- set content = message['content'] | regex_replace('^[\\\\\\\\s\\\\\\\\S]*</think>', '') %}\\n        {{- '<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>' }}\\n    {%- endif %}\\n{%- endfor -%}\\n{%- if add_generation_prompt %}\\n    {{- '<｜Assistant｜>' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"aa3\",\n    \"sha256sum\": \"0eb93e436ac8beec18aceb958c120d282cb2cf5451b23185e7be268fe9d375cc\",\n    \"name\": \"DeepSeek-R1-Distill-Llama-8B\",\n    \"filename\": \"DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf\",\n    \"filesize\": \"4675894112\",\n    \"requires\": \"3.8.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"8 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"deepseek\",\n    \"description\": \"<p>The official Llama-3.1-8B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\\\"https://opensource.org/license/mit\\\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>\",\n    \"url\": \"https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-8B-GGUF/resolve/main/DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf\",\n    \"chatTemplate\": \"{%- if not add_generation_prompt is defined %}\\n    {%- set add_generation_prompt = false %}\\n{%- endif %}\\n{%- if messages[0]['role'] == 'system' %}\\n    {{- messages[0]['content'] }}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if message['role'] == 'user' %}\\n        {{- '<｜User｜>' + message['content'] }}\\n    {%- endif %}\\n    {%- if message['role'] == 'assistant' %}\\n        {%- set content = message['content'] | regex_replace('^[\\\\\\\\s\\\\\\\\S]*</think>', '') 
%}\\n        {{- '<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>' }}\\n    {%- endif %}\\n{%- endfor -%}\\n{%- if add_generation_prompt %}\\n    {{- '<｜Assistant｜>' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"aa4\",\n    \"sha256sum\": \"b3af887d0a015b39fab2395e4faf682c1a81a6a3fd09a43f0d4292f7d94bf4d0\",\n    \"name\": \"DeepSeek-R1-Distill-Qwen-1.5B\",\n    \"filename\": \"DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf\",\n    \"filesize\": \"1068807776\",\n    \"requires\": \"3.8.0\",\n    \"ramrequired\": \"3\",\n    \"parameters\": \"1.5 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"deepseek\",\n    \"description\": \"<p>The official Qwen2.5-Math-1.5B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\\\"https://opensource.org/license/mit\\\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>\",\n    \"url\": \"https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf\",\n    \"chatTemplate\": \"{%- if not add_generation_prompt is defined %}\\n    {%- set add_generation_prompt = false %}\\n{%- endif %}\\n{%- if messages[0]['role'] == 'system' %}\\n    {{- messages[0]['content'] }}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if message['role'] == 'user' %}\\n        {{- '<｜User｜>' + message['content'] }}\\n    {%- endif %}\\n    {%- if message['role'] == 'assistant' %}\\n        {%- set content = message['content'] | regex_replace('^[\\\\\\\\s\\\\\\\\S]*</think>', '') %}\\n        {{- '<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>' }}\\n    {%- endif %}\\n{%- endfor -%}\\n{%- if add_generation_prompt %}\\n    {{- '<｜Assistant｜>' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"b\",\n    \"md5sum\": \"27b44e8ae1817525164ddf4f8dae8af4\",\n    \"name\": \"Llama 3.2 3B Instruct\",\n    \"filename\": \"Llama-3.2-3B-Instruct-Q4_0.gguf\",\n    \"filesize\": \"1921909280\",\n    \"requires\": \"3.4.0\",\n    \"ramrequired\": \"4\",\n    
\"parameters\": \"3 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA3\",\n    \"description\": \"<ul><li>Fast responses</li><li>Instruct model</li><li>Multilingual dialogue use</li><li>Agentic system capable</li><li>Trained by Meta</li><li>License: <a href=\\\"https://llama.meta.com/llama3_2/license/\\\">Meta Llama 3.2 Community License</a></li></ul>\",\n    \"url\": \"https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q4_0.gguf\",\n    \"promptTemplate\": \"<|start_header_id|>user<|end_header_id|>\\n\\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n%2\",\n    \"systemPrompt\": \"<|start_header_id|>system<|end_header_id|>\\nCutting Knowledge Date: December 2023\\n\\nYou are a helpful assistant.<|eot_id|>\",\n    \"chatTemplate\": \"{{- bos_token }}\\n{%- set date_string = strftime_now('%d %b %Y') %}\\n\\n{#- This block extracts the system message, so we can slot it into the right place. #}\\n{%- if messages[0]['role'] == 'system' %}\\n    {%- set system_message = messages[0]['content'] | trim %}\\n    {%- set loop_start = 1 %}\\n{%- else %}\\n    {%- set system_message = '' %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n\\n{#- System message #}\\n{{- '<|start_header_id|>system<|end_header_id|>\\\\n\\\\n' }}\\n{{- 'Cutting Knowledge Date: December 2023\\\\n' }}\\n{{- 'Today Date: ' + date_string + '\\\\n\\\\n' }}\\n{{- system_message }}\\n{{- '<|eot_id|>' }}\\n\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\\\n\\\\n' + message['content'] | trim + '<|eot_id|>' }}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\\\n\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"c\",\n    \"md5sum\": \"48ff0243978606fdba19d899b77802fc\",\n    \"name\": \"Llama 3.2 1B Instruct\",\n    \"filename\": 
\"Llama-3.2-1B-Instruct-Q4_0.gguf\",\n    \"filesize\": \"773025920\",\n    \"requires\": \"3.4.0\",\n    \"ramrequired\": \"2\",\n    \"parameters\": \"1 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA3\",\n    \"description\": \"<ul><li>Fast responses</li><li>Instruct model</li><li>Multilingual dialogue use</li><li>Agentic system capable</li><li>Trained by Meta</li><li>License: <a href=\\\"https://llama.meta.com/llama3_2/license/\\\">Meta Llama 3.2 Community License</a></li></ul>\",\n    \"url\": \"https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_0.gguf\",\n    \"promptTemplate\": \"<|start_header_id|>user<|end_header_id|>\\n\\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n%2\",\n    \"systemPrompt\": \"<|start_header_id|>system<|end_header_id|>\\nCutting Knowledge Date: December 2023\\n\\nYou are a helpful assistant.<|eot_id|>\",\n    \"chatTemplate\": \"{{- bos_token }}\\n{%- set date_string = strftime_now('%d %b %Y') %}\\n\\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\\n{%- if messages[0]['role'] == 'system' %}\\n    {%- set system_message = messages[0]['content'] | trim %}\\n    {%- set loop_start = 1 %}\\n{%- else %}\\n    {%- set system_message = '' %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n\\n{#- System message #}\\n{{- '<|start_header_id|>system<|end_header_id|>\\\\n\\\\n' }}\\n{{- 'Cutting Knowledge Date: December 2023\\\\n' }}\\n{{- 'Today Date: ' + date_string + '\\\\n\\\\n' }}\\n{{- system_message }}\\n{{- '<|eot_id|>' }}\\n\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\\\n\\\\n' + message['content'] | trim + '<|eot_id|>' }}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\\\n\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"d\",\n    \"md5sum\": \"a5f6b4eabd3992da4d7fb7f020f921eb\",\n    \"name\": \"Nous Hermes 2 Mistral DPO\",\n    \"filename\": \"Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\",\n    \"filesize\": \"4108928000\",\n    \"requires\": \"2.7.1\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"description\": \"<strong>Good overall fast chat model</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Accepts system prompts in ChatML format</li><li>Trained by Mistral AI<li>Finetuned by Nous Research on the OpenHermes-2.5 dataset<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO-GGUF/resolve/main/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\",\n    \"promptTemplate\": \"<|im_start|>user\\n%1<|im_end|>\\n<|im_start|>assistant\\n%2<|im_end|>\\n\",\n    \"systemPrompt\": \"\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {{- '<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{%- endfor %}\\n{%- if 
add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"e\",\n    \"md5sum\": \"97463be739b50525df56d33b26b00852\",\n    \"name\": \"Mistral Instruct\",\n    \"filename\": \"mistral-7b-instruct-v0.1.Q4_0.gguf\",\n    \"filesize\": \"4108916384\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"systemPrompt\": \"\",\n    \"description\": \"<strong>Strong overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf\",\n    \"promptTemplate\": \"[INST] %1 [/INST]\",\n    \"chatTemplate\": \"{%- if messages[0]['role'] == 'system' %}\\n    {%- set system_message = messages[0]['content'] %}\\n    {%- set loop_start = 1 %}\\n{%- else %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {%- if (message['role'] == 'user') != ((loop.index0 - loop_start) % 2 == 0) %}\\n            {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\\n        {%- endif %}\\n        {%- if message['role'] == 'user' %}\\n            {%- if loop.index0 == loop_start and loop_start == 1 %}\\n                {{- ' [INST] ' + system_message + '\\\\n\\\\n' + message['content'] + ' [/INST]' }}\\n            {%- else %}\\n                {{- ' [INST] ' + message['content'] + ' [/INST]' }}\\n            {%- endif %}\\n        {%- elif message['role'] == 'assistant' %}\\n            {{- ' ' + message['content'] + eos_token }}\\n        {%- else %}\\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\\n        
{%- endif %}\\n    {%- endif %}\\n{%- endfor %}\"\n  },\n  {\n    \"order\": \"f\",\n    \"md5sum\": \"8a9c75bcd8a66b7693f158ec96924eeb\",\n    \"name\": \"Llama 3.1 8B Instruct 128k\",\n    \"filename\": \"Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf\",\n    \"filesize\": \"4661212096\",\n    \"requires\": \"3.1.1\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"8 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA3\",\n    \"description\": \"<ul><li><strong>For advanced users only. Not recommended for use on Windows or Linux without selecting CUDA due to speed issues.</strong></li><li>Fast responses</li><li>Chat based model</li><li>Large context size of 128k</li><li>Accepts agentic system prompts in Llama 3.1 format</li><li>Trained by Meta</li><li>License: <a href=\\\"https://llama.meta.com/llama3_1/license/\\\">Meta Llama 3.1 Community License</a></li></ul>\",\n    \"url\": \"https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k/resolve/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf\",\n    \"promptTemplate\": \"<|start_header_id|>user<|end_header_id|>\\n\\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\\n\\n%2\",\n    \"systemPrompt\": \"<|start_header_id|>system<|end_header_id|>\\nCutting Knowledge Date: December 2023\\n\\nYou are a helpful assistant.<|eot_id|>\",\n    \"chatTemplate\": \"{%- set loop_messages = messages %}\\n{%- for message in loop_messages %}\\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\\\n\\\\n'+ message['content'] | trim + '<|eot_id|>' %}\\n    {%- if loop.index0 == 0 %}\\n        {%- set content = bos_token + content %}\\n    {%- endif %}\\n    {{- content }}\\n{%- endfor %}\\n{{- '<|start_header_id|>assistant<|end_header_id|>\\\\n\\\\n' }}\"\n  },\n  {\n    \"order\": \"g\",\n    \"md5sum\": \"f692417a22405d80573ac10cb0cd6c6a\",\n    \"name\": \"Mistral OpenOrca\",\n    \"filename\": \"mistral-7b-openorca.gguf2.Q4_0.gguf\",\n    \"filesize\": 
\"4108928128\",\n    \"requires\": \"2.7.1\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"description\": \"<strong>Strong overall fast chat model</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by Mistral AI<li>Finetuned on OpenOrca dataset curated via <a href=\\\"https://atlas.nomic.ai/\\\">Nomic Atlas</a><li>Licensed for commercial use</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf\",\n    \"promptTemplate\": \"<|im_start|>user\\n%1<|im_end|>\\n<|im_start|>assistant\\n%2<|im_end|>\\n\",\n    \"systemPrompt\": \"<|im_start|>system\\nYou are MistralOrca, a large language model trained by Alignment Lab AI.\\n<|im_end|>\\n\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {{- '<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"h\",\n    \"md5sum\": \"c4c78adf744d6a20f05c8751e3961b84\",\n    \"name\": \"GPT4All Falcon\",\n    \"filename\": \"gpt4all-falcon-newbpe-q4_0.gguf\",\n    \"filesize\": \"4210994112\",\n    \"requires\": \"2.6.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Falcon\",\n    \"systemPrompt\": \"\",\n    \"description\": \"<strong>Very fast model with good quality</strong><br><ul><li>Fastest responses</li><li>Instruction based</li><li>Trained by TII<li>Finetuned by Nomic AI<li>Licensed for commercial use</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/gpt4all-falcon-newbpe-q4_0.gguf\",\n    \"promptTemplate\": \"### Instruction:\\n%1\\n\\n### Response:\\n\",\n    \"chatTemplate\": \"{%- if messages[0]['role'] == 'system' %}\\n    {%- set loop_start = 1 %}\\n    {{- messages[0]['content'] + '\\\\n\\\\n' }}\\n{%- else %}\\n    {%- set 
loop_start = 0 %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {%- if message['role'] == 'user' %}\\n            {{- '### User: ' + message['content'] + '\\\\n\\\\n' }}\\n        {%- elif message['role'] == 'assistant' %}\\n            {{- '### Assistant: ' + message['content'] + '\\\\n\\\\n' }}\\n        {%- else %}\\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '### Assistant:' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"i\",\n    \"md5sum\": \"00c8593ba57f5240f59662367b3ed4a5\",\n    \"name\": \"Orca 2 (Medium)\",\n    \"filename\": \"orca-2-7b.Q4_0.gguf\",\n    \"filesize\": \"3825824192\",\n    \"requires\": \"2.5.2\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \"\",\n    \"description\": \"<ul><li>Instruction based<li>Trained by Microsoft<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/orca-2-7b.Q4_0.gguf\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {{- '<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"j\",\n    \"md5sum\": \"3c0d63c4689b9af7baa82469a6f51a19\",\n    \"name\": \"Orca 2 (Full)\",\n    \"filename\": \"orca-2-13b.Q4_0.gguf\",\n    \"filesize\": \"7365856064\",\n    \"requires\": \"2.5.2\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \"\",\n    \"description\": \"<ul><li>Instruction based<li>Trained by Microsoft<li>Cannot be used commercially</ul>\",\n    \"url\": 
\"https://gpt4all.io/models/gguf/orca-2-13b.Q4_0.gguf\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {{- '<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"k\",\n    \"md5sum\": \"5aff90007499bce5c64b1c0760c0b186\",\n    \"name\": \"Wizard v1.2\",\n    \"filename\": \"wizardlm-13b-v1.2.Q4_0.gguf\",\n    \"filesize\": \"7365834624\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \"\",\n    \"description\": \"<strong>Strong overall larger model</strong><br><ul><li>Instruction based<li>Gives very long responses<li>Finetuned with only 1k of high-quality data<li>Trained by Microsoft and Peking University<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/wizardlm-13b-v1.2.Q4_0.gguf\",\n    \"chatTemplate\": \"{%- if messages[0]['role'] == 'system' %}\\n    {%- set loop_start = 1 %}\\n    {{- messages[0]['content'] + ' ' }}\\n{%- else %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {%- if message['role'] == 'user' %}\\n            {{- 'USER: ' + message['content'] }}\\n        {%- elif message['role'] == 'assistant' %}\\n            {{- 'ASSISTANT: ' + message['content'] }}\\n        {%- else %}\\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\\n        {%- endif %}\\n        {%- if (loop.index0 - loop_start) % 2 == 0 %}\\n            {{- ' ' }}\\n        {%- else %}\\n            {{- eos_token }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- 'ASSISTANT:' }}\\n{%- endif %}\",\n    
\"systemMessage\": \"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\"\n  },\n  {\n    \"order\": \"l\",\n    \"md5sum\": \"31b47b4e8c1816b62684ac3ca373f9e1\",\n    \"name\": \"Ghost 7B v0.9.1\",\n    \"filename\": \"ghost-7b-v0.9.1-Q4_0.gguf\",\n    \"filesize\": \"4108916960\",\n    \"requires\": \"2.7.1\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"description\": \"<strong>Ghost 7B v0.9.1</strong> fast, powerful and smooth for Vietnamese and English languages.\",\n    \"url\": \"https://huggingface.co/lamhieu/ghost-7b-v0.9.1-gguf/resolve/main/ghost-7b-v0.9.1-Q4_0.gguf\",\n    \"promptTemplate\": \"<|user|>\\n%1</s>\\n<|assistant|>\\n%2</s>\\n\",\n    \"systemPrompt\": \"<|system|>\\nYou are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior.\\n</s>\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {%- if message['role'] == 'user' %}\\n        {{- '<|user|>\\\\n' + message['content'] + eos_token }}\\n    {%- elif message['role'] == 'system' %}\\n        {{- '<|system|>\\\\n' + message['content'] + eos_token }}\\n    {%- elif message['role'] == 'assistant' %}\\n        {{- '<|assistant|>\\\\n'  + message['content'] + eos_token }}\\n    {%- endif %}\\n    {%- if loop.last and add_generation_prompt %}\\n        {{- '<|assistant|>' }}\\n    {%- endif %}\\n{%- endfor %}\",\n    \"systemMessage\": \"You are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. 
In communication, you are always respectful, equal and promote positive behavior.\"\n  },\n  {\n    \"order\": \"m\",\n    \"md5sum\": \"3d12810391d04d1153b692626c0c6e16\",\n    \"name\": \"Hermes\",\n    \"filename\": \"nous-hermes-llama2-13b.Q4_0.gguf\",\n    \"filesize\": \"7366062080\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"16\",\n    \"parameters\": \"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA2\",\n    \"systemPrompt\": \"\",\n    \"description\": \"<strong>Extremely good model</strong><br><ul><li>Instruction based<li>Gives long responses<li>Curated with 300,000 uncensored instructions<li>Trained by Nous Research<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/nous-hermes-llama2-13b.Q4_0.gguf\",\n    \"promptTemplate\": \"### Instruction:\\n%1\\n\\n### Response:\\n\",\n    \"chatTemplate\": \"{%- if messages[0]['role'] == 'system' %}\\n    {%- set loop_start = 1 %}\\n    {{- messages[0]['content'] + '\\\\n\\\\n' }}\\n{%- else %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {%- if message['role'] == 'user' %}\\n            {{- '### Instruction:\\\\n' + message['content'] + '\\\\n\\\\n' }}\\n        {%- elif message['role'] == 'assistant' %}\\n            {{- '### Response:\\\\n' + message['content'] + '\\\\n\\\\n' }}\\n        {%- else %}\\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '### Instruction:\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"n\",\n    \"md5sum\": \"40388eb2f8d16bb5d08c96fdfaac6b2c\",\n    \"name\": \"Snoozy\",\n    \"filename\": \"gpt4all-13b-snoozy-q4_0.gguf\",\n    \"filesize\": \"7365834624\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"16\",\n    \"parameters\": 
\"13 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \"\",\n    \"description\": \"<strong>Very good overall model</strong><br><ul><li>Instruction based<li>Based on the same dataset as Groovy<li>Slower than Groovy, with higher quality responses<li>Trained by Nomic AI<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/gpt4all-13b-snoozy-q4_0.gguf\",\n    \"chatTemplate\": \"{%- if messages[0]['role'] == 'system' %}\\n    {%- set loop_start = 1 %}\\n    {{- messages[0]['content'] + '\\\\n\\\\n' }}\\n{%- else %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {%- if message['role'] == 'user' %}\\n            {{- '### Instruction:\\\\n' + message['content'] + '\\\\n\\\\n' }}\\n        {%- elif message['role'] == 'assistant' %}\\n            {{- '### Response:\\\\n' + message['content'] + '\\\\n\\\\n' }}\\n        {%- else %}\\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '### Response:\\\\n' }}\\n{%- endif %}\",\n    \"systemMessage\": \"Below is an instruction that describes a task. 
Write a response that appropriately completes the request.\"\n  },\n  {\n    \"order\": \"o\",\n    \"md5sum\": \"15dcb4d7ea6de322756449c11a0b7545\",\n    \"name\": \"MPT Chat\",\n    \"filename\": \"mpt-7b-chat-newbpe-q4_0.gguf\",\n    \"filesize\": \"3912373472\",\n    \"requires\": \"2.7.1\",\n    \"removedIn\": \"2.7.3\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"MPT\",\n    \"description\": \"<strong>Good model with novel architecture</strong><br><ul><li>Fast responses<li>Chat based<li>Trained by Mosaic ML<li>Cannot be used commercially</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/mpt-7b-chat-newbpe-q4_0.gguf\",\n    \"promptTemplate\": \"<|im_start|>user\\n%1<|im_end|>\\n<|im_start|>assistant\\n%2<|im_end|>\\n\",\n    \"systemPrompt\": \"<|im_start|>system\\n- You are a helpful assistant chatbot trained by MosaicML.\\n- You answer questions.\\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\\n\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {{- '<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"p\",\n    \"md5sum\": \"ab5d8e8a2f79365ea803c1f1d0aa749d\",\n    \"name\": \"MPT Chat\",\n    \"filename\": \"mpt-7b-chat.gguf4.Q4_0.gguf\",\n    \"filesize\": \"3796178112\",\n    \"requires\": \"2.7.3\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"MPT\",\n    \"description\": \"<strong>Good model with novel architecture</strong><br><ul><li>Fast responses<li>Chat based<li>Trained by Mosaic ML<li>Cannot be used commercially</ul>\",\n    \"url\": 
\"https://gpt4all.io/models/gguf/mpt-7b-chat.gguf4.Q4_0.gguf\",\n    \"promptTemplate\": \"<|im_start|>user\\n%1<|im_end|>\\n<|im_start|>assistant\\n%2<|im_end|>\\n\",\n    \"systemPrompt\": \"<|im_start|>system\\n- You are a helpful assistant chatbot trained by MosaicML.\\n- You answer questions.\\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\\n\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {{- '<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"q\",\n    \"md5sum\": \"f8347badde9bfc2efbe89124d78ddaf5\",\n    \"name\": \"Phi-3 Mini Instruct\",\n    \"filename\": \"Phi-3-mini-4k-instruct.Q4_0.gguf\",\n    \"filesize\": \"2176181568\",\n    \"requires\": \"2.7.1\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"4 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Phi-3\",\n    \"description\": \"<ul><li>Very fast responses</li><li>Chat based model</li><li>Accepts system prompts in Phi-3 format</li><li>Trained by Microsoft</li><li>License: <a href=\\\"https://opensource.org/license/mit\\\">MIT</a></li><li>No restrictions on commercial use</li></ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/Phi-3-mini-4k-instruct.Q4_0.gguf\",\n    \"promptTemplate\": \"<|user|>\\n%1<|end|>\\n<|assistant|>\\n%2<|end|>\\n\",\n    \"systemPrompt\": \"\",\n    \"chatTemplate\": \"{{- bos_token }}\\n{%- for message in messages %}\\n    {{- '<|' + message['role'] + '|>\\\\n' + message['content'] + '<|end|>\\\\n' }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|assistant|>\\\\n' }}\\n{%- else %}\\n    {{- eos_token }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"r\",\n 
   \"md5sum\": \"0e769317b90ac30d6e09486d61fefa26\",\n    \"name\": \"Mini Orca (Small)\",\n    \"filename\": \"orca-mini-3b-gguf2-q4_0.gguf\",\n    \"filesize\": \"1979946720\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"3 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"OpenLLaMa\",\n    \"description\": \"<strong>Small version of new model with novel dataset</strong><br><ul><li>Very fast responses</li><li>Instruction based</li><li>Explain tuned datasets</li><li>Orca Research Paper dataset construction approaches</li><li>Cannot be used commercially</li></ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/orca-mini-3b-gguf2-q4_0.gguf\",\n    \"promptTemplate\": \"### User:\\n%1\\n\\n### Response:\\n\",\n    \"systemPrompt\": \"### System:\\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\\n\\n\",\n    \"chatTemplate\": \"{%- if messages[0]['role'] == 'system' %}\\n    {%- set loop_start = 1 %}\\n    {{- '### System:\\\\n' + messages[0]['content'] + '\\\\n\\\\n' }}\\n{%- else %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {%- if message['role'] == 'user' %}\\n            {{- '### User:\\\\n' + message['content'] + '\\\\n\\\\n' }}\\n        {%- elif message['role'] == 'assistant' %}\\n            {{- '### Response:\\\\n' + message['content'] + '\\\\n\\\\n' }}\\n        {%- else %}\\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '### Response:\\\\n' }}\\n{%- endif %}\"\n  },\n  {\n    \"order\": \"s\",\n    \"md5sum\": \"c232f17e09bca4b7ee0b5b1f4107c01e\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Replit\",\n    \"filename\": \"replit-code-v1_5-3b-newbpe-q4_0.gguf\",\n    \"filesize\": 
\"1953055104\",\n    \"requires\": \"2.6.0\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"3 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Replit\",\n    \"systemPrompt\": \"\",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based<li>Licensed for commercial use<li>WARNING: Not available for chat GUI</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/replit-code-v1_5-3b-newbpe-q4_0.gguf\",\n    \"chatTemplate\": null\n  },\n  {\n    \"order\": \"t\",\n    \"md5sum\": \"70841751ccd95526d3dcfa829e11cd4c\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Starcoder\",\n    \"filename\": \"starcoder-newbpe-q4_0.gguf\",\n    \"filesize\": \"8987411904\",\n    \"requires\": \"2.6.0\",\n    \"ramrequired\": \"4\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Starcoder\",\n    \"systemPrompt\": \"\",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based<li>WARNING: Not available for chat GUI</ul>\",\n    \"url\": \"https://gpt4all.io/models/gguf/starcoder-newbpe-q4_0.gguf\",\n    \"chatTemplate\": null\n  },\n  {\n    \"order\": \"u\",\n    \"md5sum\": \"e973dd26f0ffa6e46783feaea8f08c83\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Rift coder\",\n    \"filename\": \"rift-coder-v0-7b-q4_0.gguf\",\n    \"filesize\": \"3825903776\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"LLaMA\",\n    \"systemPrompt\": \"\",\n    \"promptTemplate\": \"%1\",\n    \"description\": \"<strong>Trained on collection of Python and TypeScript</strong><br><ul><li>Code completion based<li>WARNING: Not available for chat GUI</li>\",\n    \"url\": \"https://gpt4all.io/models/gguf/rift-coder-v0-7b-q4_0.gguf\",\n    \"chatTemplate\": null\n  },\n  {\n    \"order\": \"v\",\n    \"md5sum\": 
\"e479e6f38b59afc51a470d1953a6bfc7\",\n    \"disableGUI\": \"true\",\n    \"name\": \"SBert\",\n    \"filename\": \"all-MiniLM-L6-v2-f16.gguf\",\n    \"filesize\": \"45887744\",\n    \"requires\": \"2.5.0\",\n    \"removedIn\": \"2.7.4\",\n    \"ramrequired\": \"1\",\n    \"parameters\": \"40 million\",\n    \"quant\": \"f16\",\n    \"type\": \"Bert\",\n    \"embeddingModel\": true,\n    \"systemPrompt\": \"\",\n    \"description\": \"<strong>LocalDocs text embeddings model</strong><br><ul><li>For use with LocalDocs feature<li>Used for retrieval augmented generation (RAG)\",\n    \"url\": \"https://gpt4all.io/models/gguf/all-MiniLM-L6-v2-f16.gguf\",\n    \"chatTemplate\": null\n  },\n  {\n    \"order\": \"w\",\n    \"md5sum\": \"dd90e2cb7f8e9316ac3796cece9883b5\",\n    \"name\": \"SBert\",\n    \"filename\": \"all-MiniLM-L6-v2.gguf2.f16.gguf\",\n    \"filesize\": \"45949216\",\n    \"requires\": \"2.7.4\",\n    \"removedIn\": \"3.0.0\",\n    \"ramrequired\": \"1\",\n    \"parameters\": \"40 million\",\n    \"quant\": \"f16\",\n    \"type\": \"Bert\",\n    \"embeddingModel\": true,\n    \"description\": \"<strong>LocalDocs text embeddings model</strong><br><ul><li>For use with LocalDocs feature<li>Used for retrieval augmented generation (RAG)\",\n    \"url\": \"https://gpt4all.io/models/gguf/all-MiniLM-L6-v2.gguf2.f16.gguf\",\n    \"chatTemplate\": null\n  },\n  {\n    \"order\": \"x\",\n    \"md5sum\": \"919de4dd6f25351bcb0223790db1932d\",\n    \"name\": \"EM German Mistral\",\n    \"filename\": \"em_german_mistral_v01.Q4_0.gguf\",\n    \"filesize\": \"4108916352\",\n    \"requires\": \"2.5.0\",\n    \"ramrequired\": \"8\",\n    \"parameters\": \"7 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"Mistral\",\n    \"description\": \"<strong>Mistral-based model for German-language applications</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by ellamind<li>Finetuned on German instruction and chat data<li>Licensed for commercial 
use</ul>\",\n    \"url\": \"https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf\",\n    \"promptTemplate\": \"USER: %1 ASSISTANT: \",\n    \"systemPrompt\": \"Du bist ein hilfreicher Assistent. \",\n    \"chatTemplate\": \"{%- if messages[0]['role'] == 'system' %}\\n    {%- set loop_start = 1 %}\\n    {{- messages[0]['content'] }}\\n{%- else %}\\n    {%- set loop_start = 0 %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if loop.index0 >= loop_start %}\\n        {%- if not loop.first %}\\n            {{- ' ' }}\\n        {%- endif %}\\n        {%- if message['role'] == 'user' %}\\n            {{- 'USER: ' + message['content'] }}\\n        {%- elif message['role'] == 'assistant' %}\\n            {{- 'ASSISTANT: ' + message['content'] }}\\n        {%- else %}\\n            {{- raise_exception('After the optional system message, conversation roles must be either user or assistant.') }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {%- if messages %}\\n        {{- ' ' }}\\n    {%- endif %}\\n    {{- 'ASSISTANT:' }}\\n{%- endif %}\",\n    \"systemMessage\": \"Du bist ein hilfreicher Assistent.\"\n  },\n  {\n    \"order\": \"y\",\n    \"md5sum\": \"60ea031126f82db8ddbbfecc668315d2\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Nomic Embed Text v1\",\n    \"filename\": \"nomic-embed-text-v1.f16.gguf\",\n    \"filesize\": \"274290560\",\n    \"requires\": \"2.7.4\",\n    \"ramrequired\": \"1\",\n    \"parameters\": \"137 million\",\n    \"quant\": \"f16\",\n    \"type\": \"Bert\",\n    \"embeddingModel\": true,\n    \"systemPrompt\": \"\",\n    \"description\": \"nomic-embed-text-v1\",\n    \"url\": \"https://gpt4all.io/models/gguf/nomic-embed-text-v1.f16.gguf\",\n    \"chatTemplate\": null\n  },\n  {\n    \"order\": \"z\",\n    \"md5sum\": \"a5401e7f7e46ed9fcaed5b60a281d547\",\n    \"disableGUI\": \"true\",\n    \"name\": \"Nomic Embed Text v1.5\",\n    
\"filename\": \"nomic-embed-text-v1.5.f16.gguf\",\n    \"filesize\": \"274290560\",\n    \"requires\": \"2.7.4\",\n    \"ramrequired\": \"1\",\n    \"parameters\": \"137 million\",\n    \"quant\": \"f16\",\n    \"type\": \"Bert\",\n    \"embeddingModel\": true,\n    \"systemPrompt\": \"\",\n    \"description\": \"nomic-embed-text-v1.5\",\n    \"url\": \"https://gpt4all.io/models/gguf/nomic-embed-text-v1.5.f16.gguf\",\n    \"chatTemplate\": null\n  },\n  {\n    \"order\": \"zzz\",\n    \"md5sum\": \"a8c5a783105f87a481543d4ed7d7586d\",\n    \"name\": \"Qwen2-1.5B-Instruct\",\n    \"filename\": \"qwen2-1_5b-instruct-q4_0.gguf\",\n    \"filesize\": \"937532800\",\n    \"requires\": \"3.0\",\n    \"ramrequired\": \"3\",\n    \"parameters\": \"1.5 billion\",\n    \"quant\": \"q4_0\",\n    \"type\": \"qwen2\",\n    \"description\": \"<ul><li>Very fast responses</li><li>Instruction based model</li><li>Usage of LocalDocs (RAG): Highly recommended</li><li>Supports context length of up to 32768</li><li>Trained and finetuned by Qwen (Alibaba Cloud)</li><li>License: <a href=\\\"https://www.apache.org/licenses/LICENSE-2.0.html/\\\">Apache 2.0</a></li></ul>\",\n    \"url\": \"https://huggingface.co/Qwen/Qwen2-1.5B-Instruct-GGUF/resolve/main/qwen2-1_5b-instruct-q4_0.gguf\",\n    \"promptTemplate\": \"<|im_start|>user\\n%1<|im_end|>\\n<|im_start|>assistant\\n%2<|im_end|>\",\n    \"systemPrompt\": \"<|im_start|>system\\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.<|im_end|>\\n\",\n    \"chatTemplate\": \"{%- for message in messages %}\\n    {%- if loop.first and messages[0]['role'] != 'system' %}\\n        {{- '<|im_start|>system\\\\nYou are a helpful assistant.<|im_end|>\\\\n' }}\\n    {%- endif %}\\n    {{- '<|im_start|>' + message['role'] + '\\\\n' + message['content'] + '<|im_end|>\\\\n' }}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\"\n  }\n]\n"
  },
  {
    "path": "gpt4all-chat/metadata/release.json",
    "content": "[\n  {\n    \"version\": \"2.2.2\",\n    \"notes\": \"* repeat penalty for both gptj and llama models\\n* scroll the context window when conversation reaches context limit\\n* persistent thread count setting\\n* new default template\\n* new settings for model path, repeat penalty\\n* bugfix for settings dialog onEditingFinished\\n* new tab based settings dialog format\\n* bugfix for datalake when conversation contains forbidden json chars\\n* new C library API and split the backend into own separate lib for bindings\\n* apple signed/notarized dmg installer\\n* update llama.cpp submodule to latest\\n* bugfix for too large of a prompt\\n* support for opt-in only anonymous usage and statistics\\n* bugfixes for the model downloader and improve performance\\n* various UI bugfixes and enhancements including the send message textarea automatically wrapping by word\\n* new startup dialog on first start of a new release displaying release notes and opt-in buttons\\n* new logo and icons\\n* fixed apple installer so there is now a symlink in the applications folder\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller\\n* Matthieu Talbot\\n* Tim Jobbins\\n* chad (eachadea)\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.3.0\",\n    \"notes\": \"* repeat penalty for both gptj and llama models\\n* scroll the context window when conversation reaches context limit\\n* persistent thread count setting\\n* new default template\\n* new settings for model path, repeat penalty\\n* bugfix for settings dialog onEditingFinished\\n* new tab based settings dialog format\\n* bugfix for datalake when conversation contains forbidden json chars\\n* new C library API and split the backend into own separate lib for bindings\\n* apple signed/notarized dmg installer\\n* update llama.cpp submodule to latest\\n* bugfix for too large of a prompt\\n* support for opt-in only anonymous usage and statistics\\n* bugfixes for the model 
downloader and improve performance\\n* various UI bugfixes and enhancements including the send message textarea automatically wrapping by word\\n* new startup dialog on first start of a new release displaying release notes and opt-in buttons\\n* new logo and icons\\n* fixed apple installer so there is now a symlink in the applications folder\\n* fixed bug with versions\\n* fixed optout marking\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller\\n* Matthieu Talbot\\n* Tim Jobbins\\n* chad (eachadea)\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.0\",\n    \"notes\": \"* reverse prompt for both llama and gptj models which should help stop them from repeating the prompt template\\n* resumable downloads for models\\n* chat list in the drawer drop down\\n* add/remove/rename chats\\n* persist chats to disk and restore them with full context (WARNING: the average size of each chat on disk is ~1.5GB)\\n* NOTE: to turn on the persistent chats feature you need to do so via the settings dialog as it is off by default\\n* automatically rename chats using the AI after the first prompt/response pair\\n* new usage statistics including more detailed hardware info to help debug problems on older hardware\\n* fix dialog sizes for those with smaller displays\\n* add support for persistent contexts and internal model state to the C api\\n* add a confirm button for deletion of chats\\n* bugfix for blocking the gui when changing models\\n* datalake now captures all conversations when network opt-in is turned on\\n* new much shorter prompt template by default\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.1\",\n    \"notes\": \"* compress persistent chats and save order of magnitude disk space on some small chats\\n* persistent chat files are now stored in same folder as models\\n* use a thread for deserializing chats on startup 
so the gui shows window faster\\n* fail gracefully and early when we detect incompatible hardware\\n* repeat penalty restore default bugfix\\n* new mpt backend for mosaic ml's new base model and chat model\\n* add mpt chat and base model to downloads\\n* lower memory required for gptj models by using f16 for kv cache\\n* better error handling for when a model is deleted by user and persistent chat remains\\n* add a user default model setting so the users preferred model comes up on startup\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Zach Nussbaum (Nomic AI)\\n* Aaron Miller\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.2\",\n    \"notes\": \"* add webserver feature that offers mirror api to chatgpt on localhost:4891\\n* add chatgpt models installed using openai key to chat client gui\\n* fixup the memory handling when switching between chats/models to decrease RAM load across the board\\n* fix bug in thread safety for mpt model and de-duplicated code\\n* uses compact json format for network\\n* add remove model option in download dialog\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.3\",\n    \"notes\": \"* add webserver feature that offers mirror api to chatgpt on localhost:4891\\n* add chatgpt models installed using openai key to chat client gui\\n* fixup the memory handling when switching between chats/models to decrease RAM load across the board\\n* fix bug in thread safety for mpt model and de-duplicated code\\n* uses compact json format for network\\n* add remove model option in download dialog\\n* remove text-davinci-003 as it is not a chat model\\n* fix installers on mac and linux to include libllmodel versions\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.4\",\n    \"notes\": \"* fix buffer overrun 
in backend\\n* bugfix for browse for model directory\\n* dedup of qml code\\n* revamp settings dialog UI\\n* add localdocs plugin (beta) feature allowing scanning of local docs\\n* various other bugfixes and performance improvements\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller\\n* Juuso Alasuutari\\n* Justin Wang\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.5\",\n    \"notes\": \"* bugfix for model download remove\\n* bugfix for blocking on regenerate\\n* lots of various ui improvements enhancements\\n* big new change that brings us up2date with llama.cpp/ggml support for latest models\\n* advanced avx detection allowing us to fold the two installers into one\\n* new logging mechanism that allows for bug reports to have more detail\\n* make localdocs work with server mode\\n* localdocs fix for stale references after we regenerate\\n* fix so that browse to dialog on linux\\n* fix so that you can also just add a path to the textfield\\n* bugfix for chatgpt and resetting context\\n* move models.json to github repo so people can pr suggested new models\\n* allow for new models to be directly downloaded from huggingface in said prs\\n* better ui for localdocs settings\\n* better error handling when model fails to load\\n\",\n    \"contributors\": \"* Nils Sauer (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Richard Guo (Nomic AI)\\n* Konstantin Gukov\\n* Joseph Mearman\\n* Nandakumar\\n* Chase McDougall\\n* mvenditto\\n* Andriy Mulyar (Nomic AI)\\n* FoivosC\\n* Ettore Di Giacinto\\n* Tim Miller\\n* Peter Gagarinov\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.6\",\n    \"notes\": \"* bugfix for model download remove\\n* bugfix for blocking on regenerate\\n* lots of various ui improvements enhancements\\n* big new change that brings us up2date with llama.cpp/ggml support for latest models\\n* advanced avx detection allowing us to fold the two installers 
into one\\n* new logging mechanism that allows for bug reports to have more detail\\n* make localdocs work with server mode\\n* localdocs fix for stale references after we regenerate\\n* fix so that browse to dialog on linux\\n* fix so that you can also just add a path to the textfield\\n* bugfix for chatgpt and resetting context\\n* move models.json to github repo so people can pr suggested new models\\n* allow for new models to be directly downloaded from huggingface in said prs\\n* better ui for localdocs settings\\n* better error handling when model fails to load\\n\",\n    \"contributors\": \"* Nils Sauer (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Richard Guo (Nomic AI)\\n* Konstantin Gukov\\n* Joseph Mearman\\n* Nandakumar\\n* Chase McDougall\\n* mvenditto\\n* Andriy Mulyar (Nomic AI)\\n* FoivosC\\n* Ettore Di Giacinto\\n* Tim Miller\\n* Peter Gagarinov\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.7\",\n    \"notes\": \"* replit model support\\n* macos metal accelerated support\\n* fix markdown for localdocs references\\n* inline syntax highlighting for python and cpp with more languages coming\\n* synced with upstream llama.cpp\\n* ui fixes and default generation settings changes\\n* backend bugfixes\\n* allow for loading files directly from huggingface via TheBloke without name changes\\n\",\n    \"contributors\": \"* Nils Sauer (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Richard Guo (Nomic AI)\\n* Andriy Mulyar (Nomic AI)\\n* Ettore Di Giacinto\\n* AMOGUS\\n* Felix Zaslavskiy\\n* Tim Miller\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.8\",\n    \"notes\": \"* replit model support\\n* macos metal accelerated support\\n* fix markdown for localdocs references\\n* inline syntax highlighting for python and cpp with more languages coming\\n* synced with upstream llama.cpp\\n* ui fixes and default generation settings changes\\n* backend 
bugfixes\\n* allow for loading files directly from huggingface via TheBloke without name changes\\n\",\n    \"contributors\": \"* Nils Sauer (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Richard Guo (Nomic AI)\\n* Andriy Mulyar (Nomic AI)\\n* Ettore Di Giacinto\\n* AMOGUS\\n* Felix Zaslavskiy\\n* Tim Miller\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.9\",\n    \"notes\": \"* New GPT4All Falcon model\\n* New Orca models\\n* Token generation speed is now reported in GUI\\n* Bugfix for localdocs references when regenerating\\n* General fixes for thread safety\\n* Many fixes to UI to add descriptions for error conditions\\n* Fixes for saving/reloading chats\\n* Complete refactor of the model download dialog with metadata about models available\\n* Resume downloads bugfix\\n* CORS fix\\n* Documentation fixes and typos\\n* Latest llama.cpp update\\n* Update of replit\\n* Force metal setting\\n* Fixes for model loading with metal on macOS\\n\",\n    \"contributors\": \"* Nils Sauer (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Richard Guo (Nomic AI)\\n* Andriy Mulyar (Nomic AI)\\n* cosmic-snow\\n* AMOGUS\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.10\",\n    \"notes\": \"* New GPT4All Falcon model\\n* New Orca models\\n* Token generation speed is now reported in GUI\\n* Bugfix for localdocs references when regenerating\\n* General fixes for thread safety\\n* Many fixes to UI to add descriptions for error conditions\\n* Fixes for saving/reloading chats\\n* Complete refactor of the model download dialog with metadata about models available\\n* Resume downloads bugfix\\n* CORS fix\\n* Documentation fixes and typos\\n* Latest llama.cpp update\\n* Update of replit\\n* Force metal setting\\n* Fixes for model loading with metal on macOS\\n\",\n    \"contributors\": \"* Nils Sauer (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Richard Guo 
(Nomic AI)\\n* Andriy Mulyar (Nomic AI)\\n* cosmic-snow\\n* AMOGUS\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.11\",\n    \"notes\": \"* Per model settings\\n* Character settings\\n* Adding a system prompt\\n* Important bugfix for chatgpt install\\n* Complete refactor and revamp of settings dialog\\n* New syntax highlighting for java, bash, go\\n* Use monospace font for syntax highlighting of codeblocks\\n* New setting for turning off references in localdocs\\n* Fix memory leaks in falcon model\\n* Fix for backend memory handling\\n* Server mode bugfix\\n* Models.json retrieve bugfix\\n* Free metal context bugfix\\n* Add a close dialog feature to all chat dialogs\\n\",\n    \"contributors\": \"* Lakshay Kansal (Nomic AI)\\n* Matthew Gill\\n* Brandon Beiler\\n* cosmic-snow\\n* Felix Zaslavskiy\\n* Andriy Mulyar (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.12\",\n    \"notes\": \"* Fix bad bug that was breaking numerous current installs (sorry folks!)\\n* Fix bug with 'browse' button in settings dialog\\n* Wayland support on linux\\n* Reduce template ui size in settings dialog\\n\",\n    \"contributors\": \"* Akarshan Biswas\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.13\",\n    \"notes\": \"* Fix bug with prolonging shutdown with generation\\n* Fix bug with update model info on deleting chats\\n* Fix bug with preventing closing of model download dialog\\n* Always allow closing the model download dialog\\n* Fix numerous bugs with download of models.json and provide backup option\\n* Add json and c# highlighting\\n* Fix bug with chatgpt crashing\\n* Fix bug with chatgpt not working for some keys\\n* Fix bug with mixpanel opt outs not counting\\n* Fix problem with OOM errors causing crash and then repeating on next start\\n* Fix default thread setting and provide 
guardrails\\n* Fix tap handler in settings dialog for buttons\\n* Fix color of some text fields on macOS for settings dialog\\n* Fix problem with startup dialog not closing\\n* Provide error dialog for settings file not accessible\\n* Try and fix problems with avx-only detection\\n* Fix showing error in model downloads unnecessarily\\n* Prefer 7b models to load by default\\n* Add Wizard v1.1 to download list\\n* Rename Orca models to Mini Orca\\n* Don't use a system prompt unless model was trained with one by default\\n\",\n    \"contributors\": \"* Lakshay Kansal (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters)\"\n  },\n  {\n    \"version\": \"2.4.14\",\n    \"notes\": \"* Add starcoder model support\\n* Add ability to switch between light mode/dark mode\\n* Increase the size of fonts in monospace code blocks a bit\\n\",\n    \"contributors\": \"* Lakshay Kansal (Nomic AI)\\n* Adam Treat (Nomic AI)\"\n  },\n  {\n    \"version\": \"2.4.15\",\n    \"notes\": \"* Add Vulkan GPU backend which allows inference on AMD, Intel and NVIDIA GPUs\\n* Add ability to switch font sizes\\n* Various bug fixes\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\\n* Nils Sauer (Nomic AI)\\n* Lakshay Kansal (Nomic AI)\"\n  },\n  {\n    \"version\": \"2.4.16\",\n    \"notes\": \"* Bugfix for properly falling back to CPU when GPU can't be used\\n* Report the actual device we're using\\n* Fix context bugs for GPU accelerated models\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\"\n  },\n  {\n    \"version\": \"2.4.17\",\n    \"notes\": \"* Bugfix for properly falling back to CPU when GPU is out of memory\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Aaron Miller (Nomic AI)\"\n  },\n  {\n    \"version\": \"2.4.18\",\n    \"notes\": \"* Bugfix for devices to show up in the settings combobox on application start and not just on model load\\n* Send 
information on requested device and actual device on model load to help assess which model/gpu/os combos are working\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\"\n  },\n  {\n    \"version\": \"2.4.19\",\n    \"notes\": \"* Fix a crash on systems with corrupted vulkan drivers or corrupted vulkan dlls\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\"\n  },\n  {\n    \"version\": \"2.5.0\",\n    \"notes\": \"* Major new release supports GGUF models only!\\n* New models like Mistral Instruct, Replit 1.5, Rift Coder and more\\n* All previous version of ggml-based models are no longer supported\\n* Extensive changes to vulkan support\\n* Better GPU error messages\\n* Prompt processing on the GPU\\n* Save chats now saves to text (less harddrive space)\\n* Many more changes\\n\",\n    \"contributors\": \"* Aaron Miller (Nomic AI)\\n* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.5.1\",\n    \"notes\": \"* Accessibility fixes\\n* Bugfix for crasher on Windows\\n\",\n    \"contributors\": \"* Aaron Miller (Nomic AI)\\n* Jared Van Bortel (Nomic AI)\\n* Victor Tsaran <vtsaran@yahoo.com>\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.5.2\",\n    \"notes\": \"* Support for GGUF v3 models\\n* Important fixes for AMD GPUs\\n* Don't start recalculating context immediately for saved chats\\n* UI fixes for chat name generation\\n* UI fixes for leading whitespaces in chat generation\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.5.3\",\n    \"notes\": \"* Major feature update for localdocs!\\n* Localdocs now uses an embedding model for retrieval augmented generation\\n* Localdocs can now search while your collections are indexing\\n* You're guaranteed to get hits from localdocs 
for every prompt you enter\\n* Fix: AMD gpu fixes\\n* Fix: Better error messages\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.5.4\",\n    \"notes\": \"* Major bugfix release with new models!\\n* Model: Recently released Orca 2 model which does exceptionally well on reasoning tasks\\n* Fix: System prompt was not always being honored\\n* Fix: Download network retry on cloudflare errors\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Jared Van Bortel (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.6.1\",\n    \"notes\": \"* Update to newer llama.cpp\\n* Implemented configurable context length\\n* Bugfixes for localdocs\\n* Bugfixes for serialization to disk\\n* Bugfixes for AVX\\n* Bugfixes for Windows builds\\n* Bugfixes for context retention and clearing\\n* Add a button to collections dialog\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.6.2\",\n    \"notes\": \"* Update to latest llama.cpp\\n* Update to newly merged vulkan backend\\n* Partial GPU offloading support\\n* New localdocs speed increases and features\\n* New GUI settings option for configuring how many layers to put on GPU\\n* New lightmode theme, darkmode theme and legacy theme\\n* Lots of UI updates and enhancements\\n* Scores of bugfixes for stability and usability\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Karthik Nair\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.7.0\",\n    \"notes\": \"* Add support for twelve new model architectures\\n* Including Baichuan, BLOOM, CodeShell, GPT-2, Orion, Persimmon, Phi and Phi-2, Plamo, Qwen, Qwen2, Refact, and StableLM\\n* 
Fix for progress bar colors on legacy theme\\n* Fix sizing for model download dialog elements\\n* Fix dialog sizes to use more screen real estate where available\\n* Fix for vram leak when model loading fails\\n* Fix for making the collection dialog progress bar more readable\\n* Fix for smaller minimum size for main screen\\n* Fix for mistral crash\\n* Fix for mistral openorca prompt template to ChatML\\n* Fix for excluding non-text documents from localdoc scanning\\n* Fix for scrollbar missing on main conversation\\n* Fix accessibility issues for screen readers\\n* Fix for not showing the download button when not online\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.7.1\",\n    \"notes\": \"* Update to latest llama.cpp with support for Google Gemma\\n* Gemma, Phi and Phi-2, Qwen2, and StableLM are now all GPU accelerated\\n* Large revamp of the model loading to support explicit unload/reload\\n* Bugfixes for ChatML and improved version of Mistral OpenOrca\\n* We no longer load a model by default on application start\\n* We no longer load a model by default on chat context switch\\n* Fixes for visual artifacts in update reminder dialog\\n* Blacklist Intel GPU's for now as we don't support yet\\n* Fixes for binary save/restore of chat\\n* Save and restore of window geometry across application starts\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.7.2\",\n    \"notes\": \"* New support for model search/discovery using huggingface search in downloads\\n* Support for more model architectures for GPU acceleration\\n* Three different crash fixes for corner case settings\\n* Add a minp sampling parameter\\n* Bert layer norm epsilon value\\n* Fix problem with blank lines between reply and next 
prompt\\n\",\n    \"contributors\": \"* Christopher Barrera\\n* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.7.3\",\n    \"notes\": \"* Fix for network reachability unknown\\n* Fix undefined behavior with resetContext\\n* Fix ChatGPT which was broken with previous release\\n* Fix for clean up of chat llm thread destruction\\n* Display of model loading warnings\\n* Fix for issue 2080 where the GUI appears to hang when a chat is deleted\\n* Fix for issue 2077 better responsiveness of model download dialog when download is taking place\\n* Fix for issue 2092 don't include models that are disabled for GUI in application default model list\\n* Fix for issue 2087 where cloned modelds were lost and listed in download dialog erroneously\\n* Fix for MPT models without duplicated token embd weight\\n* New feature with api server port setting\\n* Fix for issue 2024 where combobox for model settings uses currently used model by default\\n* Clean up settings properly for removed models and don't list stale model settings in download dialog\\n* Fix for issue 2105 where the cancel button was not working for discovered model downloads\\n\",\n    \"contributors\": \"* Christopher Barrera\\n* Daniel Alencar\\n* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.7.4\",\n    \"notes\": \"<b>&mdash; What's New &mdash;</b>\\n* Add a right-click menu to the chat (by @kryotek777 in PR #2108)\\n* Change the left sidebar to stay open (PR #2117)\\n* Limit the width of text in the chat (PR #2118)\\n* Move to llama.cpp's SBert implementation (PR #2086)\\n* Support models provided by the Mistral AI API (by @Olyxz16 in PR #2053)\\n* Models List: Add Ghost 7B v0.9.1 (by @lh0x00 in PR #2127)\\n* Add Documentation and FAQ links to the New Chat page (by @3Simplex in PR #2183)\\n* Models List: 
Simplify Mistral OpenOrca system prompt (PR #2220)\\n* Models List: Add Llama 3 Instruct (PR #2242)\\n* Models List: Add Phi-3 Mini Instruct (PR #2252)\\n* Improve accuracy of anonymous usage statistics (PR #2238)\\n\\n<b>&mdash; Fixes &mdash;</b>\\n* Detect unsupported CPUs correctly on Windows (PR #2141)\\n* Fix the colors used by the server chat (PR #2150)\\n* Fix startup issues when encountering non-Latin characters in paths (PR #2162)\\n* Fix issues causing LocalDocs context links to not work sometimes (PR #2218)\\n* Fix incorrect display of certain code block syntax in the chat (PR #2232)\\n* Fix an issue causing unnecessary indexing of document collections on startup (PR #2236)\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Lam Hieu (`@lh0x00`)\\n* 3Simplex (`@3Simplex`)\\n* Kryotek (`@kryotek777`)\\n* Olyxz16 (`@Olyxz16`)\\n* Robin Verduijn (`@robinverduijn`)\\n* Tim453 (`@Tim453`)\\n* Xu Zhen (`@xuzhen`)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.7.5\",\n    \"notes\": \"<b>&mdash; What's New &mdash;</b>\\n* Improve accuracy of anonymous usage statistics (PR #2297, PR #2299)\\n\\n<b>&mdash; Fixes &mdash;</b>\\n* Fix some issues with anonymous usage statistics (PR #2270, PR #2296)\\n* Default to GPU with most VRAM on Windows and Linux, not least (PR #2297)\\n* Fix initial failure to generate embeddings with Nomic Embed (PR #2284)\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"2.8.0\",\n    \"notes\": \"<b>&mdash; What's New &mdash;</b>\\n* Context Menu: Replace \\\"Select All\\\" on message with \\\"Copy Message\\\" (PR #2324)\\n* Context Menu: Hide Copy/Cut when nothing is selected (PR #2324)\\n* Improve speed of context switch after quickly switching between several chats (PR #2343)\\n* New Chat: Always switch to the new chat 
when the button is clicked (PR #2330)\\n* New Chat: Always scroll to the top of the list when the button is clicked (PR #2330)\\n* Update to latest llama.cpp as of May 9, 2024 (PR #2310)\\n* **Add support for the llama.cpp CUDA backend** (PR #2310, PR #2357)\\n  * Nomic Vulkan is still used by default, but CUDA devices can now be selected in Settings\\n  * When in use: Greatly improved prompt processing and generation speed on some devices\\n  * When in use: GPU support for Q5\\\\_0, Q5\\\\_1, Q8\\\\_0, K-quants, I-quants, and Mixtral\\n* Add support for InternLM models (PR #2310)\\n\\n<b>&mdash; Fixes &mdash;</b>\\n* Do not allow sending a message while the LLM is responding (PR #2323)\\n* Fix poor quality of generated chat titles with many models (PR #2322)\\n* Set the window icon correctly on Windows (PR #2321)\\n* Fix a few memory leaks (PR #2328, PR #2348, PR #2310)\\n* Do not crash if a model file has no architecture key (PR #2346)\\n* Fix several instances of model loading progress displaying incorrectly (PR #2337, PR #2343)\\n* New Chat: Fix the new chat being scrolled above the top of the list on startup (PR #2330)\\n* macOS: Show a \\\"Metal\\\" device option, and actually use the CPU when \\\"CPU\\\" is selected (PR #2310)\\n* Remove unsupported Mamba, Persimmon, and PLaMo models from the whitelist (PR #2310)\\n* Fix GPT4All.desktop being created by offline installers on macOS (PR #2361)\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Tim453 (`@Tim453`)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"3.0.0\",\n    \"notes\": \"<b>&mdash; What's New &mdash;</b>\\n* Complete UI overhaul (PR #2396)\\n* LocalDocs improvements (PR #2396)\\n  * Use nomic-embed-text-v1.5 as local model instead of SBert\\n  * Ship local model with application instead of downloading afterwards\\n  * Store embeddings flat in SQLite DB instead of in hnswlib index\\n  * Do exact KNN search with 
usearch instead of approximate KNN search with hnswlib\\n* Markdown support (PR #2476)\\n* Support CUDA/Metal device option for embeddings (PR #2477)\\n\\n<b>&mdash; Fixes &mdash;</b>\\n* Fix embedding tokenization after PR #2310 (PR #2381)\\n* Fix a crash when loading certain models with \\\"code\\\" in their name (PR #2382)\\n* Fix an embedding crash with large chunk sizes after PR #2310 (PR #2383)\\n* Fix inability to load models with non-ASCII path on Windows (PR #2388)\\n* CUDA: Do not show non-fatal DLL errors on Windows (PR #2389)\\n* LocalDocs fixes (PR #2396)\\n  * Always use requested number of snippets even if there are better matches in unselected collections\\n  * Check for deleted files on startup\\n* CUDA: Fix PTX errors with some GPT4All builds (PR #2421)\\n* Fix blank device in UI after model switch and improve usage stats (PR #2409)\\n* Use CPU instead of CUDA backend when GPU loading fails the first time (ngl=0 is not enough) (PR #2477)\\n* Fix crash when sending a message greater than n\\\\_ctx tokens after PR #1970 (PR #2498)\\n\",\n    \"contributors\": \"* Vincent Giardina (Nomic AI)\\n* Jared Van Bortel (Nomic AI)\\n* John W. 
Parent (Kitware)\\n* Paige Lee (Nomic AI)\\n* Max Cembalest (Nomic AI)\\n* Andriy Mulyar (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* cosmic-snow (`@cosmic-snow`)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"3.1.0\",\n    \"notes\": \"<b>&mdash; What's New &mdash;</b>\\n* Generate suggested follow-up questions feature (#2634)\\n\\n<b>&mdash; What's Changed &mdash;</b>\\n* Customize combo boxes and context menus to fit the new style (#2535)\\n* Improve view bar scaling and Model Settings layout (#2520)\\n* Make the logo spin while the model is generating (#2557)\\n* Server: Reply to wrong GET/POST method with HTTP 405 instead of 404 (#2615)\\n* Update theme for menus (#2578)\\n* Move the \\\"stop\\\" button to the message box (#2561)\\n* Build with CUDA 11.8 for better compatibility (#2639)\\n* Make links in latest news section clickable (#2643)\\n* Support translation of settings choices (#2667), (#2690)\\n* Improve LocalDocs view's error message (by @cosmic-snow in #2679)\\n* Ignore case of LocalDocs file extensions (#2642), (#2684)\\n* Update llama.cpp to commit 87e397d00 from July 19th (#2694)\\n  * Add support for GPT-NeoX, Gemma 2, OpenELM, ChatGLM, and Jais architectures (all with Vulkan support)\\n  * Enable Vulkan support for StarCoder2, XVERSE, Command R, and OLMo\\n* Show scrollbar in chat collections list as needed (#2691)\\n\\n<b>&mdash; What's Removed &mdash;</b>\\n* Remove support for GPT-J models (#2676)\\n\\n<b>&mdash; Fixes &mdash;</b>\\n* Fix placement of thumbs-down and datalake opt-in dialogs (#2540)\\n* Select the correct folder with the Linux fallback folder dialog (#2541)\\n* Fix clone button sometimes producing blank model info (#2545)\\n* Fix jerky chat view scrolling (#2555)\\n* Fix \\\"reload\\\" showing for chats with missing models (#2520)\\n* Fix property binding loop warning (#2601)\\n* Fix UI hang with certain chat view content (#2543)\\n* Fix crash when Kompute falls back to CPU 
(#2640)\\n* Fix several Vulkan resource management issues (#2694)\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* cosmic-snow (`@cosmic-snow`)\\n* 3Simplex (`@3Simplex`)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"3.1.1\",\n    \"notes\": \"<b>&mdash; What's New &mdash;</b>\\n* Ability to add OpenAI compatible remote models (#2683)\\n\\n<b>&mdash; Fixes &mdash;</b>\\n* Update llama.cpp to cherry-pick Llama 3.1 RoPE fix. (#2758)\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Jared Van Bortel (Nomic AI)\\n* Shiranui (@supersonictw)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"3.2.0\",\n    \"notes\": \"<b>&mdash; What's New &mdash;</b>\\n* Translations for Simplified Chinese, Traditional Chinese, Italian, Portuguese, Romanian, and Spanish\\n* Significantly faster context recalculation when context runs out\\n* Models no longer stop generating when they run out of context\\n* Add Qwen2-1.5B-Instruct to the model list\\n\\n<b>&mdash; Fixes &mdash;</b>\\n* Fix a CUDA crash with long conversations since v3.1.0\\n* Fix \\\"file(s)\\\" and \\\"word(s)\\\" appearing in UI instead of proper plurals\\n* Show the correct icons for LocalDocs sources with uppercase extensions\\n* More reliable reverse prompt detection\\n* Fix a minor prompting issue introduced in v3.1.0\\n* Disallow context shift for chat name and follow-up generation\\n* Fix potential incompatibility with macOS 12 and 13\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Riccardo Giovanetti (`@Harvester62`)\\n* Victor Emanuel (`@SINAPSA-IC`)\\n* Jeremy Tayco (`@jstayco`)\\n* Shiranui (`@supersonictw`)\\n* Thiago Ramos (`@thiagojramos`)\\n* ThiloteE (`@ThiloteE`)\\n* Dominik (`@cosmic-snow`)\\n* Jack (`@wuodoo`)\\n* Community (beta testers, bug reporters, bindings authors)\"\n  },\n  {\n    \"version\": \"3.2.1\",\n    
\"notes\": \"<b>&mdash; Fixes &mdash;</b>\\n* Fix a potential Vulkan crash on application exit on some Linux systems\\n* Fix a bad CUDA build option that led to gibberish on newer NVIDIA GPUs\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\"\n  },\n  {\n    \"version\": \"3.3.0\",\n    \"notes\": \"* **UI Improvements**: The minimum window size now adapts to the font size. A few labels and links have been fixed. The Embeddings Device selection of \\\"Auto\\\"/\\\"Application default\\\" works again. The window icon is now set on Linux. The antenna icon now displays when the API server is listening.\\n* **Single Instance**: Only one instance of GPT4All can be opened at a time. This is now enforced.\\n* **Greedy Sampling**: Set temperature to zero to enable greedy sampling.\\n* **API Server Changes**: The built-in API server now responds correctly to both legacy completions, and chats with message history. Also, it now uses the system prompt configured in the UI.\\n* **Translation Improvements**: The Italian, Romanian, and Traditional Chinese translations have been updated.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* 3Simplex (`@3Simplex`)\\n* Riccardo Giovanetti (`@Harvester62`)\\n* Victor Emanuel (`@SINAPSA-IC`)\\n* Dominik (`@cosmic-snow`)\\n* Shiranui (`@supersonictw`)\"\n  },\n  {\n    \"version\": \"3.3.1\",\n    \"notes\": \"* Fixed a crash when attempting to continue a chat loaded from disk\\n* Fixed the local server rejecting min\\\\_p/top\\\\_p less than 1\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\"\n  },\n  {\n    \"version\": \"3.4.0\",\n    \"notes\": \"* **Attached Files:** You can now attach a small Microsoft Excel spreadsheet (.xlsx) to a chat message and ask the model about it.\\n* **LocalDocs Accuracy:** The LocalDocs algorithm has been enhanced to find more accurate references for some queries.\\n* **Word Document Support:** LocalDocs now supports Microsoft Word (.docx) 
documents natively.\\n  * **IMPORTANT NOTE:** If .docx files are not found, make sure Settings > LocalDocs > Allowed File Extensions includes \\\"docx\\\".\\n* **Forgetful Model Fixes:** Issues with the \\\"Redo last chat response\\\" button, and with continuing chats from previous sessions, have been fixed.\\n* **Chat Saving Improvements:** On exit, GPT4All will no longer save chats that are not new or modified. As a bonus, downgrading without losing access to all chats will be possible in the future, should the need arise.\\n* **UI Fixes:** The model list no longer scrolls to the top when you start downloading a model.\\n* **New Models:** LLama 3.2 Instruct 3B and 1B models now available in model list.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Andriy Mulyar (Nomic AI)\\n* Ikko Eltociear Ashimine (`@eltociear`)\\n* Victor Emanuel (`@SINAPSA-IC`)\\n* Shiranui (`@supersonictw`)\"\n  },\n  {\n    \"version\": \"3.4.1\",\n    \"notes\": \"* **LocalDocs Fixes:** Several issues with LocalDocs in v3.4.0 have been fixed, including missing words and very slow indexing.\\n* **Syntax Highlighting:** Go code is now highlighted with the correct colors.\\n* **Cache Fixes:** The model list cache is now stored with a version number, and in a more appropriate directory.\\n* **Translation Updates:** The Italian translation has been improved.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* John Parent (Kitware)\\n* Riccardo Giovanetti (`@Harvester62`)\"\n  },\n  {\n    \"version\": \"3.4.2\",\n    \"notes\": \"* **LocalDocs Fixes:** Several issues with LocalDocs, some of which were introduced in v3.4.0, have been fixed.\\n  * Fixed the possible use of references from unselected collections.\\n  * Fixed unnecessary reindexing of files with uppercase extensions.\\n  * Fixed hybrid search failure due to inconsistent database state.\\n  * Fully fixed the blank Embeddings Device selection in 
LocalDocs settings.\\n  * Fixed LocalDocs indexing of large PDFs making very slow progress or even stalling.\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Jared Van Bortel (Nomic AI)\"\n  },\n  {\n    \"version\": \"3.5.0\",\n    \"notes\": \"* **Message Editing:**\\n  * You can now edit any message you've sent by clicking the pencil icon below it.\\n  * You can now redo earlier responses in the conversation.\\n* **Templates:** Chat templates have been completely overhauled! They now use Jinja-style syntax. You may notice warnings or errors in the UI. Read the linked docs, and if you have any questions, please ask on the Discord.\\n* **File Attachments:** Markdown and plain text files are now supported as file attachments.\\n* **System Tray:** There is now an option in Application Settings to allow GPT4All to minimize to the system tray instead of closing.\\n* **Local API Server:**\\n  * The API server now supports system messages from the client and no longer uses the system message in settings.\\n  * You can now send messages to the API server in any order supported by the model instead of just user/assistant pairs.\\n* **Translations:** The Italian and Romanian translations have been improved.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Benjamin Gallois (`@bgallois`)\\n* Riccardo Giovanetti (`@Harvester62`)\\n* Victor Emanuel (`@SINAPSA-IC`)\"\n  },\n  {\n    \"version\": \"3.5.1\",\n    \"notes\": \"* **Chat template fixes:** Llama 3.2 models, Nous Hermes 2 Mistral, Mistral OpenOrca, Qwen 2 and remote models\\n* **Bugfix:** Fix the default model button so it works again after 3.5.0\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\"\n  },\n  {\n    \"version\": \"3.5.2\",\n    \"notes\": \"* **Model Search:** There are now separate tabs for official and third-party models.\\n* **Local Server Fixes:** Several mistakes in v3.5's changes to the API server have been 
corrected.\\n* **Cloned Model Fixes:** The chat template and system message of cloned models now manage their defaults correctly.\\n* **Translation Improvements:** The Romanian and Italian translations have been updated.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Riccardo Giovanetti (`@Harvester62`)\\n* Victor Emanuel (`@SINAPSA-IC`)\"\n  },\n  {\n    \"version\": \"3.5.3\",\n    \"notes\": \"* **LocalDocs Fix:** A serious issue causing LocalDocs to not work properly in v3.5.2 has been fixed.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\"\n  },\n  {\n    \"version\": \"3.6.0\",\n    \"notes\": \"* **Reasoner v1:**\\n  * Built-in javascript code interpreter tool.\\n  * Custom curated model that utilizes the code interpreter to break down, analyze, perform, and verify complex reasoning tasks.\\n* **Templates:** Automatically substitute chat templates that are not compatible with Jinja2Cpp in GGUFs.\\n* **Fixes:**\\n  * Remote model template to allow for XML in messages.\\n  * Jinja2Cpp bug that broke system message detection in chat templates.\\n  * LocalDocs sources displaying in unconsolidated form after v3.5.0.\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\\n* Jared Van Bortel (Nomic AI)\"\n  },\n  {\n    \"version\": \"3.6.1\",\n    \"notes\": \"* **Fixes:**\\n  * The stop generation button no longer working in v3.6.0.\\n  * The copy entire conversation button no longer working in v3.6.0.\\n\",\n    \"contributors\": \"* Adam Treat (Nomic AI)\"\n  },\n  {\n    \"version\": \"3.7.0\",\n    \"notes\": \"* **Windows ARM Support:** GPT4All now supports the Windows ARM platform, ensuring compatibility with devices powered by Qualcomm Snapdragon and Microsoft SQ-series processors.\\n  * **NOTE:** Support for GPU and/or NPU acceleration is not available at this time. 
Only the CPU will be used to run LLMs.\\n  * **NOTE:** You must install the new *Windows ARM* version of GPT4All from the website. The standard *Windows* version will not work due to emulation limitations.\\n* **Fixed Updating on macOS:** The maintenance tool no longer crashes when attempting to update or uninstall GPT4All on Sequoia.\\n  * **NOTE:** If you have installed the version from the GitHub releases as a workaround for this issue, you can safely uninstall it and switch back to the version from the website.\\n* **Fixed Chat Saving on macOS:** Chats now save as expected when the application is quit with Command-Q.\\n* **Code Interpreter Improvements:**\\n  * The behavior when the code takes too long to execute and times out has been improved.\\n  * console.log now accepts multiple arguments for better compatibility with native JavaScript.\\n* **Chat Templating Improvements:**\\n  * Two crashes and one compatibility issue have been fixed in the chat template parser.\\n  * The default chat template for EM German Mistral has been fixed.\\n  * Automatic replacements have been added for five new models as we continue to improve compatibility with common chat templates.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* Riccardo Giovanetti (`@Harvester62`)\"\n  },\n  {\n    \"version\": \"3.8.0\",\n    \"notes\": \"* **Native DeepSeek-R1-Distill Support:** GPT4All now has robust support for the DeepSeek-R1 family of distillations.\\n  * Several model variants are now available on the downloads page.\\n  * Reasoning (wrapped in \\\"think\\\" tags) is displayed similarly to the Reasoner model.\\n  * The DeepSeek-R1 Qwen pretokenizer is now supported, resolving the loading failure in previous versions.\\n  * The model is now configured with a GPT4All-compatible prompt template by default.\\n* **Chat Templating Overhaul:** The template parser has been *completely* replaced with one that has much better compatibility with common 
models.\\n* **Code Interpreter Fixes:**\\n  * An issue preventing the code interpreter from logging a single string in v3.7.0 has been fixed.\\n  * The UI no longer freezes while the code interpreter is running a computation.\\n* **Local Server Fixes:**\\n  * An issue preventing the server from using LocalDocs after the first request since v3.5.0 has been fixed.\\n  * System messages are now correctly hidden from the message history.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* ThiloteE (`@ThiloteE`)\"\n  },\n  {\n    \"version\": \"3.9.0\",\n    \"notes\": \"* **LocalDocs Fix:** LocalDocs no longer shows an error on later messages with reasoning models.\\n* **DeepSeek Fix:** DeepSeek-R1 reasoning (in 'think' tags) no longer appears in chat names and follow-up questions.\\n* **Windows ARM Improvements:**\\n  * Graphical artifacts on some SoCs have been fixed.\\n  * A crash when adding a collection of PDFs to LocalDocs has been fixed.\\n* **Template Parser Fixes:** Chat templates containing an unclosed comment no longer freeze GPT4All.\\n* **New Models:** OLMoE and Granite MoE models are now supported.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* ThiloteE (`@ThiloteE`)\"\n  },\n  {\n    \"version\": \"3.10.0\",\n    \"notes\": \"* **Remote Models:**\\n  * The Add Model page now has a dedicated tab for remote model providers.\\n  * Groq, OpenAI, and Mistral remote models are now easier to configure.\\n* **CUDA Compatibility:** GPUs with CUDA compute capability 5.0 such as the GTX 750 are now supported by the CUDA backend.\\n* **New Model:** The non-MoE Granite model is now supported.\\n* **Translation Updates:**\\n  * The Italian translation has been updated.\\n  * The Simplified Chinese translation has been significantly improved.\\n* **Better Chat Templates:** The default chat templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B have been improved.\\n* **Whitespace 
Fixes:** DeepSeek-R1-based models now have better whitespace behavior in their output.\\n* **Crash Fixes:** Several issues that could potentially cause GPT4All to crash have been fixed.\\n\",\n    \"contributors\": \"* Jared Van Bortel (Nomic AI)\\n* Adam Treat (Nomic AI)\\n* ThiloteE (`@ThiloteE`)\\n* Lil Bob (`@Junior2Ran`)\\n* Riccardo Giovanetti (`@Harvester62`)\"\n  }\n]\n"
  },
  {
    "path": "gpt4all-chat/pyproject.toml",
    "content": "[tool.pytest.ini_options]\naddopts = ['--import-mode=importlib']\n\n[tool.mypy]\nfiles = 'tests/python'\npretty = true\nstrict = true\nwarn_unused_ignores = false\n\n[tool.pytype]\ninputs = ['tests/python']\njobs = 'auto'\nbind_decorated_methods = true\nnone_is_not_bool = true\noverriding_renamed_parameter_count_checks = true\nstrict_none_binding = true\nprecise_return = true\n# protocols:\n# - https://github.com/google/pytype/issues/1423\n# - https://github.com/google/pytype/issues/1424\nstrict_import = true\nstrict_parameter_checks = true\nstrict_primitive_comparisons = true\n# strict_undefined_checks: too many false positives\n\n[tool.isort]\nsrc_paths = ['tests/python']\nline_length = 120\ncombine_as_imports = true\n"
  },
  {
    "path": "gpt4all-chat/qa_checklist.md",
    "content": "## QA Checklist\n\n1. Ensure you have a fresh install by **backing up** and then deleting the following directories:\n\n    ### Windows\n      * Settings directory: ```C:\\Users\\{username}\\AppData\\Roaming\\nomic.ai```\n      * Models directory: ```C:\\Users\\{username}\\AppData\\Local\\nomic.ai\\GPT4All```\n    ### Mac\n      * Settings directory: ```/Users/{username}/.config/gpt4all.io```\n      * Models directory: ```/Users/{username}/Library/Application Support/nomic.ai/GPT4All```\n    ### Linux\n      * Settings directory: ```/home/{username}/.config/nomic.ai```\n      * Models directory: ```/home/{username}/.local/share/nomic.ai/GPT4All```\n  \n    ^ Note: If you've changed your models directory manually via the settings you need to backup and delete that one\n\n2. Go through every view and ensure that things display correctly and familiarize yourself with the application flow\n\n3. Navigate to the models view and download Llama 3 Instruct\n\n4. Navigate to the models view and search for \"TheBloke mistral 7b\" and download \"TheBloke/Mistral-7B-Instruct-v0.1-GGUF\"\n\n5. Navigate to the chat view and open new chats and load these models\n\n6. Chat with the models and exercise them. Rename the chats. Delete chats. Open new chats. Switch models when in chats.\n\n7. Create a new localdocs collection from a directory of .txt or .pdf files on your hard drive\n\n8. Enable the new collection in chats (especially with Llama 3 Instruct) and exercise the localdocs feature\n\n9. Go to the settings view and explore each setting\n\n10. Remove collections in localdocs and re-add them. Rebuild collections\n\n11. Now shut down the app, go back and restore any previous settings directory or model directory you had from a previous install and re-test #1 through #11 :)\n\n12. Try to break the app\n\n### EXTRA CREDIT\n\n1. If you have a openai api key install GPT-4 model and chat with it\n\n2. 
If you have a nomic api key install the remote nomic embedding model for localdocs (see if you can discover how to do this)\n\n3. If you have a python script that targets openai API then enable server mode and try this\n\n4. Really try and break the app\n\nAll feedback is welcome\n"
  },
  {
    "path": "gpt4all-chat/qml/AddCollectionView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport Qt.labs.folderlistmodel\nimport Qt5Compat.GraphicalEffects\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\nimport localdocs\n\nRectangle {\n    id: addCollectionView\n\n    Theme {\n        id: theme\n    }\n\n    color: theme.viewBackground\n    signal localDocsViewRequested()\n\n    ColumnLayout {\n        id: mainArea\n        anchors.left: parent.left\n        anchors.right: parent.right\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.margins: 30\n        spacing: 20\n\n        RowLayout {\n            Layout.fillWidth: true\n            Layout.alignment: Qt.AlignTop\n            spacing: 50\n\n            MyButton {\n                id: backButton\n                Layout.alignment: Qt.AlignTop | Qt.AlignLeft\n                text: qsTr(\"\\u2190 Existing Collections\")\n\n                borderWidth: 0\n                backgroundColor: theme.lighterButtonBackground\n                backgroundColorHovered: theme.lighterButtonBackgroundHovered\n                backgroundRadius: 5\n                padding: 15\n                topPadding: 8\n                bottomPadding: 8\n                textColor: theme.lighterButtonForeground\n                fontPixelSize: theme.fontSizeLarge\n                fontPixelBold: true\n\n                onClicked: {\n                    localDocsViewRequested()\n                }\n            }\n        }\n\n        Text {\n            id: addDocBanner\n            Layout.alignment: Qt.AlignBottom | Qt.AlignHCenter\n            horizontalAlignment: Qt.AlignHCenter\n            text: qsTr(\"Add Document Collection\")\n            font.pixelSize: theme.fontSizeBanner\n            color: theme.titleTextColor\n        }\n\n        Text {\n            Layout.alignment: 
Qt.AlignTop | Qt.AlignHCenter\n            Layout.maximumWidth: addDocBanner.width\n            wrapMode: Text.WordWrap\n            horizontalAlignment: Text.AlignJustify\n            text: qsTr(\"Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.\")\n            font.pixelSize: theme.fontSizeLarger\n            color: theme.titleInfoTextColor\n        }\n\n        GridLayout {\n            id: root\n            Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n            rowSpacing: 50\n            columnSpacing: 20\n\n            property alias collection: collection.text\n            property alias folder_path: folderEdit.text\n\n            MyFolderDialog {\n                id: folderDialog\n            }\n\n            Label {\n                Layout.row: 2\n                Layout.column: 0\n                text: qsTr(\"Name\")\n                font.bold: true\n                font.pixelSize: theme.fontSizeLarger\n                color: theme.settingsTitleTextColor\n            }\n\n            MyTextField {\n                id: collection\n                Layout.row: 2\n                Layout.column: 1\n                Layout.minimumWidth: 400\n                Layout.alignment: Qt.AlignRight\n                horizontalAlignment: Text.AlignJustify\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                placeholderText: qsTr(\"Collection name...\")\n                placeholderTextColor: theme.mutedTextColor\n                ToolTip.text: qsTr(\"Name of the collection to add (Required)\")\n                ToolTip.visible: hovered\n                Accessible.role: Accessible.EditableText\n                Accessible.name: collection.text\n                Accessible.description: ToolTip.text\n                function showError() {\n                    collection.placeholderTextColor = theme.textErrorColor\n                }\n                onTextChanged: 
{\n                    collection.placeholderTextColor = theme.mutedTextColor\n                }\n            }\n\n            Label {\n                Layout.row: 3\n                Layout.column: 0\n                text: qsTr(\"Folder\")\n                font.bold: true\n                font.pixelSize: theme.fontSizeLarger\n                color: theme.settingsTitleTextColor\n            }\n\n            RowLayout {\n                Layout.row: 3\n                Layout.column: 1\n                Layout.minimumWidth: 400\n                Layout.maximumWidth: 400\n                Layout.alignment: Qt.AlignRight\n                spacing: 10\n                MyDirectoryField {\n                    id: folderEdit\n                    Layout.fillWidth: true\n                    text: root.folder_path\n                    placeholderText: qsTr(\"Folder path...\")\n                    font.pixelSize: theme.fontSizeLarge\n                    placeholderTextColor: theme.mutedTextColor\n                    ToolTip.text: qsTr(\"Folder path to documents (Required)\")\n                    ToolTip.visible: hovered\n                    function showError() {\n                        folderEdit.placeholderTextColor = theme.textErrorColor\n                    }\n                    onTextChanged: {\n                        folderEdit.placeholderTextColor = theme.mutedTextColor\n                    }\n                }\n\n                MySettingsButton {\n                    id: browseButton\n                    text: qsTr(\"Browse\")\n                    onClicked: {\n                        folderDialog.openFolderDialog(StandardPaths.writableLocation(StandardPaths.HomeLocation), function(selectedFolder) {\n                            root.folder_path = selectedFolder\n                        })\n                    }\n                }\n            }\n\n            MyButton {\n                Layout.row: 4\n                Layout.column: 1\n                Layout.alignment: 
Qt.AlignRight\n                text: qsTr(\"Create Collection\")\n                onClicked: {\n                    var isError = false;\n                    if (root.collection === \"\") {\n                        isError = true;\n                        collection.showError();\n                    }\n                    if (root.folder_path === \"\" || !folderEdit.isValid) {\n                        isError = true;\n                        folderEdit.showError();\n                    }\n                    if (isError)\n                        return;\n                    LocalDocs.addFolder(root.collection, root.folder_path)\n                    root.collection = \"\"\n                    root.folder_path = \"\"\n                    collection.clear()\n                    localDocsViewRequested()\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/AddGPT4AllModelView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport Qt.labs.folderlistmodel\nimport Qt5Compat.GraphicalEffects\n\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\nimport localdocs\n\nColumnLayout {\n    Layout.fillWidth: true\n    Layout.alignment: Qt.AlignTop\n    spacing: 5\n\n    Label {\n        Layout.topMargin: 0\n        Layout.bottomMargin: 25\n        Layout.rightMargin: 150 * theme.fontScale\n        Layout.alignment: Qt.AlignTop\n        Layout.fillWidth: true\n        verticalAlignment: Text.AlignTop\n        text: qsTr(\"These models have been specifically configured for use in GPT4All. The first few models on the \" +\n                   \"list are known to work the best, but you should only attempt to use models that will fit in your \" +\n                   \"available memory.\")\n        font.pixelSize: theme.fontSizeLarger\n        color: theme.textColor\n        wrapMode: Text.WordWrap\n    }\n\n    Label {\n        visible: !ModelList.gpt4AllDownloadableModels.count && !ModelList.asyncModelRequestOngoing\n        Layout.fillWidth: true\n        Layout.fillHeight: true\n        horizontalAlignment: Qt.AlignHCenter\n        verticalAlignment: Qt.AlignVCenter\n        text: qsTr(\"Network error: could not retrieve %1\").arg(\"http://gpt4all.io/models/models3.json\")\n        font.pixelSize: theme.fontSizeLarge\n        color: theme.mutedTextColor\n    }\n\n    MyBusyIndicator {\n        visible: !ModelList.gpt4AllDownloadableModels.count && ModelList.asyncModelRequestOngoing\n        running: ModelList.asyncModelRequestOngoing\n        Accessible.role: Accessible.Animation\n        Layout.alignment: Qt.AlignCenter\n        Accessible.name: qsTr(\"Busy indicator\")\n        Accessible.description: qsTr(\"Displayed when the models request is ongoing\")\n    }\n\n    RowLayout 
{\n        ButtonGroup {\n            id: buttonGroup\n            exclusive: true\n        }\n        MyButton {\n            text: qsTr(\"All\")\n            checked: true\n            borderWidth: 0\n            backgroundColor: checked ? theme.lightButtonBackground : \"transparent\"\n            backgroundColorHovered: theme.lighterButtonBackgroundHovered\n            backgroundRadius: 5\n            padding: 15\n            topPadding: 8\n            bottomPadding: 8\n            textColor: theme.lighterButtonForeground\n            fontPixelSize: theme.fontSizeLarge\n            fontPixelBold: true\n            checkable: true\n            ButtonGroup.group: buttonGroup\n            onClicked: {\n                ModelList.gpt4AllDownloadableModels.filter(\"\");\n            }\n\n        }\n        MyButton {\n            text: qsTr(\"Reasoning\")\n            borderWidth: 0\n            backgroundColor: checked ? theme.lightButtonBackground : \"transparent\"\n            backgroundColorHovered: theme.lighterButtonBackgroundHovered\n            backgroundRadius: 5\n            padding: 15\n            topPadding: 8\n            bottomPadding: 8\n            textColor: theme.lighterButtonForeground\n            fontPixelSize: theme.fontSizeLarge\n            fontPixelBold: true\n            checkable: true\n            ButtonGroup.group: buttonGroup\n            onClicked: {\n                ModelList.gpt4AllDownloadableModels.filter(\"#reasoning\");\n            }\n        }\n        Layout.bottomMargin: 10\n    }\n\n    ScrollView {\n        id: scrollView\n        ScrollBar.vertical.policy: ScrollBar.AsNeeded\n        Layout.fillWidth: true\n        Layout.fillHeight: true\n        clip: true\n\n        ListView {\n            id: modelListView\n            model: ModelList.gpt4AllDownloadableModels\n            boundsBehavior: Flickable.StopAtBounds\n            spacing: 30\n\n            delegate: Rectangle {\n                id: delegateItem\n             
   width: modelListView.width\n                height: childrenRect.height + 60\n                color: theme.conversationBackground\n                radius: 10\n                border.width: 1\n                border.color: theme.controlBorder\n\n                ColumnLayout {\n                    anchors.top: parent.top\n                    anchors.left: parent.left\n                    anchors.right: parent.right\n                    anchors.margins: 30\n\n                    Text {\n                        Layout.fillWidth: true\n                        Layout.alignment: Qt.AlignLeft\n                        text: name\n                        elide: Text.ElideRight\n                        color: theme.titleTextColor\n                        font.pixelSize: theme.fontSizeLargest\n                        font.bold: true\n                        Accessible.role: Accessible.Paragraph\n                        Accessible.name: qsTr(\"Model file\")\n                        Accessible.description: qsTr(\"Model file to be downloaded\")\n                    }\n\n\n                    Rectangle {\n                        Layout.fillWidth: true\n                        height: 1\n                        color: theme.dividerColor\n                    }\n\n                    RowLayout {\n                        Layout.topMargin: 10\n                        Layout.fillWidth: true\n                        Text {\n                            id: descriptionText\n                            text: description\n                            font.pixelSize: theme.fontSizeLarge\n                            Layout.fillWidth: true\n                            wrapMode: Text.WordWrap\n                            textFormat: Text.StyledText\n                            color: theme.textColor\n                            linkColor: theme.textColor\n                            Accessible.role: Accessible.Paragraph\n                            Accessible.name: qsTr(\"Description\")\n      
                      Accessible.description: qsTr(\"File description\")\n                            onLinkActivated: function(link) { Qt.openUrlExternally(link); }\n                            MouseArea {\n                                anchors.fill: parent\n                                acceptedButtons: Qt.NoButton // pass clicks to parent\n                                cursorShape: parent.hoveredLink ? Qt.PointingHandCursor : Qt.ArrowCursor\n                            }\n                        }\n\n                        // FIXME Need to overhaul design here which must take into account\n                        // features not present in current figma including:\n                        // * Ability to cancel a current download\n                        // * Ability to resume a download\n                        // * The presentation of an error if encountered\n                        // * Whether to show already installed models\n                        // * Install of remote models with API keys\n                        // * The presentation of the progress bar\n                        Rectangle {\n                            id: actionBox\n                            width: childrenRect.width + 20\n                            color: \"transparent\"\n                            border.width: 1\n                            border.color: theme.dividerColor\n                            radius: 10\n                            Layout.rightMargin: 20\n                            Layout.bottomMargin: 20\n                            Layout.minimumHeight: childrenRect.height + 20\n                            Layout.alignment: Qt.AlignRight | Qt.AlignTop\n\n                            ColumnLayout {\n                                spacing: 0\n                                MySettingsButton {\n                                    id: downloadButton\n                                    text: isDownloading ? qsTr(\"Cancel\") : isIncomplete ? 
qsTr(\"Resume\") : qsTr(\"Download\")\n                                    font.pixelSize: theme.fontSizeLarge\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    visible: !installed && !calcHash && downloadError === \"\"\n                                    Accessible.description: qsTr(\"Stop/restart/start the download\")\n                                    onClicked: {\n                                        if (!isDownloading) {\n                                            Download.downloadModel(filename);\n                                        } else {\n                                            Download.cancelDownload(filename);\n                                        }\n                                    }\n                                }\n\n                                MySettingsDestructiveButton {\n                                    id: removeButton\n                                    text: qsTr(\"Remove\")\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    visible: !isDownloading && (installed || isIncomplete)\n                                    Accessible.description: qsTr(\"Remove model from filesystem\")\n                                    onClicked: {\n                                        Download.removeModel(filename);\n                                    }\n                                }\n\n                                
ColumnLayout {\n                                    spacing: 0\n                                    Label {\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        visible: downloadError !== \"\"\n                                        textFormat: Text.StyledText\n                                        text: qsTr(\"<strong><font size=\\\"1\\\"><a href=\\\"#error\\\">Error</a></strong></font>\")\n                                        color: theme.textColor\n                                        font.pixelSize: theme.fontSizeLarge\n                                        linkColor: theme.textErrorColor\n                                        Accessible.role: Accessible.Paragraph\n                                        Accessible.name: text\n                                        Accessible.description: qsTr(\"Describes an error that occurred when downloading\")\n                                        onLinkActivated: {\n                                            downloadingErrorPopup.text = downloadError;\n                                            downloadingErrorPopup.open();\n                                        }\n                                    }\n\n                                    Label {\n                                        visible: LLM.systemTotalRAMInGB() < ramrequired\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.maximumWidth: 300\n                                        textFormat: Text.StyledText\n                                        text: qsTr(\"<strong><font size=\\\"2\\\">WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).</strong></font>\").arg(ramrequired).arg(LLM.systemTotalRAMInGBString())\n                                        color: theme.textErrorColor\n                                        font.pixelSize: theme.fontSizeLarge\n                                        wrapMode: Text.WordWrap\n                                        Accessible.role: Accessible.Paragraph\n                                        Accessible.name: text\n                                        Accessible.description: qsTr(\"Error for incompatible hardware\")\n                                        onLinkActivated: {\n                                            downloadingErrorPopup.text = downloadError;\n                                            downloadingErrorPopup.open();\n                                        }\n                                    }\n                                }\n\n                                ColumnLayout {\n                                    visible: isDownloading && !calcHash\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    spacing: 20\n\n                                    ProgressBar {\n                                        id: itemProgressBar\n                                        Layout.fillWidth: true\n                                        width: 200\n                                        value: bytesReceived / bytesTotal\n                                        background: Rectangle {\n                                            implicitHeight: 45\n                                            color: theme.progressBackground\n                                            
radius: 3\n                                        }\n                                        contentItem: Item {\n                                            implicitHeight: 40\n\n                                            Rectangle {\n                                                width: itemProgressBar.visualPosition * parent.width\n                                                height: parent.height\n                                                radius: 2\n                                                color: theme.progressForeground\n                                            }\n                                        }\n                                        Accessible.role: Accessible.ProgressBar\n                                        Accessible.name: qsTr(\"Download progressBar\")\n                                        Accessible.description: qsTr(\"Shows the progress made in the download\")\n                                    }\n\n                                    Label {\n                                        id: speedLabel\n                                        color: theme.textColor\n                                        Layout.alignment: Qt.AlignRight\n                                        text: speed\n                                        font.pixelSize: theme.fontSizeLarge\n                                        Accessible.role: Accessible.Paragraph\n                                        Accessible.name: qsTr(\"Download speed\")\n                                        Accessible.description: qsTr(\"Download speed in bytes/kilobytes/megabytes per second\")\n                                    }\n                                }\n\n                                RowLayout {\n                                    visible: calcHash\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                  
                  Layout.maximumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    clip: true\n\n                                    Label {\n                                        id: calcHashLabel\n                                        color: theme.textColor\n                                        text: qsTr(\"Calculating...\")\n                                        font.pixelSize: theme.fontSizeLarge\n                                        Accessible.role: Accessible.Paragraph\n                                        Accessible.name: text\n                                        Accessible.description: qsTr(\"Whether the file hash is being calculated\")\n                                    }\n\n                                    MyBusyIndicator {\n                                        id: busyCalcHash\n                                        running: calcHash\n                                        Accessible.role: Accessible.Animation\n                                        Accessible.name: qsTr(\"Busy indicator\")\n                                        Accessible.description: qsTr(\"Displayed when the file hash is being calculated\")\n                                    }\n                                }\n                            }\n                        }\n                    }\n\n                    Item  {\n                        Layout.minimumWidth: childrenRect.width\n                        Layout.minimumHeight: childrenRect.height\n                        Layout.bottomMargin: 10\n                        RowLayout {\n                            id: paramRow\n                            anchors.centerIn: parent\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                               
 Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"File size\")\n                                    font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: filesize\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                            Rectangle {\n                                width: 1\n                                Layout.fillHeight: true\n                                color: theme.dividerColor\n                            }\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                                Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"RAM required\")\n                                    font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: ramrequired >= 0 ? 
qsTr(\"%1 GB\").arg(ramrequired) : qsTr(\"?\")\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                            Rectangle {\n                                width: 1\n                                Layout.fillHeight: true\n                                color: theme.dividerColor\n                            }\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                                Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"Parameters\")\n                                    font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: parameters !== \"\" ? 
parameters : qsTr(\"?\")\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                            Rectangle {\n                                width: 1\n                                Layout.fillHeight: true\n                                color: theme.dividerColor\n                            }\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                                Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"Quant\")\n                                    font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: quant\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                            Rectangle {\n                                width: 1\n                                Layout.fillHeight: true\n                                color: theme.dividerColor\n                            }\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                                Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"Type\")\n               
                     font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: type\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                        }\n\n                        Rectangle {\n                            color: \"transparent\"\n                            anchors.fill: paramRow\n                            border.color: theme.dividerColor\n                            border.width: 1\n                            radius: 10\n                        }\n                    }\n\n                    Rectangle {\n                        Layout.fillWidth: true\n                        height: 1\n                        color: theme.dividerColor\n                    }\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/AddHFModelView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport Qt.labs.folderlistmodel\nimport Qt5Compat.GraphicalEffects\n\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\nimport localdocs\n\nColumnLayout {\n    Layout.fillWidth: true\n    Layout.fillHeight: true\n    Layout.alignment: Qt.AlignTop\n    spacing: 5\n\n    Label {\n        Layout.topMargin: 0\n        Layout.bottomMargin: 25\n        Layout.rightMargin: 150 * theme.fontScale\n        Layout.alignment: Qt.AlignTop\n        Layout.fillWidth: true\n        verticalAlignment: Text.AlignTop\n        text: qsTr(\"Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these \" +\n                   \"will work. Many will require additional configuration before they can be used.\")\n        font.pixelSize: theme.fontSizeLarger\n        color: theme.textColor\n        wrapMode: Text.WordWrap\n    }\n\n    RowLayout {\n        Layout.fillWidth: true\n        Layout.fillHeight: true\n        Layout.alignment: Qt.AlignCenter\n        Layout.margins: 0\n        spacing: 10\n        MyTextField {\n            id: discoverField\n            property string textBeingSearched: \"\"\n            readOnly: ModelList.discoverInProgress\n            Layout.alignment: Qt.AlignCenter\n            Layout.fillWidth: true\n            font.pixelSize: theme.fontSizeLarger\n            placeholderText: qsTr(\"Discover and download models by keyword search...\")\n            Accessible.role: Accessible.EditableText\n            Accessible.name: placeholderText\n            Accessible.description: qsTr(\"Text field for discovering and filtering downloadable models\")\n            Connections {\n                target: ModelList\n                function onDiscoverInProgressChanged() {\n                    if 
(ModelList.discoverInProgress) {\n                        discoverField.textBeingSearched = discoverField.text;\n                        discoverField.text = qsTr(\"Searching \\u00B7 %1\").arg(discoverField.textBeingSearched);\n                    } else {\n                        discoverField.text = discoverField.textBeingSearched;\n                        discoverField.textBeingSearched = \"\";\n                    }\n                }\n            }\n            background: ProgressBar {\n                id: discoverProgressBar\n                indeterminate: ModelList.discoverInProgress && ModelList.discoverProgress === 0.0\n                value: ModelList.discoverProgress\n                background: Rectangle {\n                    color: theme.controlBackground\n                    border.color: theme.controlBorder\n                    radius: 10\n                }\n                contentItem: Item {\n                    Rectangle {\n                        visible: ModelList.discoverInProgress\n                        anchors.bottom: parent.bottom\n                        width: discoverProgressBar.visualPosition * parent.width\n                        height: 10\n                        radius: 2\n                        color: theme.progressForeground\n                    }\n                }\n            }\n\n            Keys.onReturnPressed: (event)=> {\n                                      if (event.modifiers & Qt.ControlModifier || event.modifiers & Qt.ShiftModifier)\n                                      event.accepted = false;\n                                      else {\n                                          editingFinished();\n                                          sendDiscovery()\n                                      }\n                                  }\n            function sendDiscovery() {\n                ModelList.huggingFaceDownloadableModels.discoverAndFilter(discoverField.text);\n            }\n            RowLayout {\n     
           spacing: 0\n                anchors.right: discoverField.right\n                anchors.verticalCenter: discoverField.verticalCenter\n                anchors.rightMargin: 15\n                visible: !ModelList.discoverInProgress\n                MyMiniButton {\n                    id: clearDiscoverButton\n                    backgroundColor: theme.textColor\n                    backgroundColorHovered: theme.iconBackgroundDark\n                    visible: discoverField.text !== \"\"\n                    source: \"qrc:/gpt4all/icons/close.svg\"\n                    onClicked: {\n                        discoverField.text = \"\"\n                        discoverField.sendDiscovery() // should clear results\n                    }\n                }\n                MyMiniButton {\n                    backgroundColor: theme.textColor\n                    backgroundColorHovered: theme.iconBackgroundDark\n                    source: \"qrc:/gpt4all/icons/settings.svg\"\n                    onClicked: {\n                        discoveryTools.visible = !discoveryTools.visible\n                    }\n                }\n                MyMiniButton {\n                    id: sendButton\n                    enabled: !ModelList.discoverInProgress\n                    backgroundColor: theme.textColor\n                    backgroundColorHovered: theme.iconBackgroundDark\n                    source: \"qrc:/gpt4all/icons/send_message.svg\"\n                    Accessible.name: qsTr(\"Initiate model discovery and filtering\")\n                    Accessible.description: qsTr(\"Triggers discovery and filtering of models\")\n                    onClicked: {\n                        discoverField.sendDiscovery()\n                    }\n                }\n            }\n        }\n    }\n\n    RowLayout {\n        id: discoveryTools\n        Layout.fillWidth: true\n        Layout.alignment: Qt.AlignCenter\n        Layout.margins: 0\n        spacing: 20\n        visible: 
false\n        MyComboBox {\n            id: comboSort\n            model: ListModel {\n                ListElement { name: qsTr(\"Default\") }\n                ListElement { name: qsTr(\"Likes\") }\n                ListElement { name: qsTr(\"Downloads\") }\n                ListElement { name: qsTr(\"Recent\") }\n            }\n            currentIndex: ModelList.discoverSort\n            contentItem: Text {\n                anchors.horizontalCenter: parent.horizontalCenter\n                rightPadding: 30\n                color: theme.textColor\n                text: {\n                    return qsTr(\"Sort by: %1\").arg(comboSort.displayText)\n                }\n                font.pixelSize: theme.fontSizeLarger\n                verticalAlignment: Text.AlignVCenter\n                horizontalAlignment: Text.AlignHCenter\n                elide: Text.ElideRight\n            }\n            onActivated: function (index) {\n                ModelList.discoverSort = index;\n            }\n        }\n        MyComboBox {\n            id: comboSortDirection\n            model: ListModel {\n                ListElement { name: qsTr(\"Asc\") }\n                ListElement { name: qsTr(\"Desc\") }\n            }\n            currentIndex: {\n                if (ModelList.discoverSortDirection === 1)\n                    return 0\n                else\n                    return 1;\n            }\n            contentItem: Text {\n                anchors.horizontalCenter: parent.horizontalCenter\n                rightPadding: 30\n                color: theme.textColor\n                text: {\n                    return qsTr(\"Sort dir: %1\").arg(comboSortDirection.displayText)\n                }\n                font.pixelSize: theme.fontSizeLarger\n                verticalAlignment: Text.AlignVCenter\n                horizontalAlignment: Text.AlignHCenter\n                elide: Text.ElideRight\n            }\n            onActivated: function (index) {\n                
if (index === 0)\n                    ModelList.discoverSortDirection = 1;\n                else\n                    ModelList.discoverSortDirection = -1;\n            }\n        }\n        MyComboBox {\n            id: comboLimit\n            model: ListModel {\n                ListElement { name: \"5\" }\n                ListElement { name: \"10\" }\n                ListElement { name: \"20\" }\n                ListElement { name: \"50\" }\n                ListElement { name: \"100\" }\n                ListElement { name: qsTr(\"None\") }\n            }\n\n            currentIndex: {\n                if (ModelList.discoverLimit === 5)\n                    return 0;\n                else if (ModelList.discoverLimit === 10)\n                    return 1;\n                else if (ModelList.discoverLimit === 20)\n                    return 2;\n                else if (ModelList.discoverLimit === 50)\n                    return 3;\n                else if (ModelList.discoverLimit === 100)\n                    return 4;\n                else if (ModelList.discoverLimit === -1)\n                    return 5;\n            }\n            contentItem: Text {\n                anchors.horizontalCenter: parent.horizontalCenter\n                rightPadding: 30\n                color: theme.textColor\n                text: {\n                    return qsTr(\"Limit: %1\").arg(comboLimit.displayText)\n                }\n                font.pixelSize: theme.fontSizeLarger\n                verticalAlignment: Text.AlignVCenter\n                horizontalAlignment: Text.AlignHCenter\n                elide: Text.ElideRight\n            }\n            onActivated: function (index) {\n                switch (index) {\n                case 0:\n                    ModelList.discoverLimit = 5; break;\n                case 1:\n                    ModelList.discoverLimit = 10; break;\n                case 2:\n                    ModelList.discoverLimit = 20; break;\n                case 
3:\n                    ModelList.discoverLimit = 50; break;\n                case 4:\n                    ModelList.discoverLimit = 100; break;\n                case 5:\n                    ModelList.discoverLimit = -1; break;\n                }\n            }\n        }\n    }\n\n    ScrollView {\n        id: scrollView\n        ScrollBar.vertical.policy: ScrollBar.AsNeeded\n        Layout.fillWidth: true\n        Layout.fillHeight: true\n        clip: true\n\n        ListView {\n            id: modelListView\n            model: ModelList.huggingFaceDownloadableModels\n            boundsBehavior: Flickable.StopAtBounds\n            spacing: 30\n\n            delegate: Rectangle {\n                id: delegateItem\n                width: modelListView.width\n                height: childrenRect.height + 60\n                color: theme.conversationBackground\n                radius: 10\n                border.width: 1\n                border.color: theme.controlBorder\n\n                ColumnLayout {\n                    anchors.top: parent.top\n                    anchors.left: parent.left\n                    anchors.right: parent.right\n                    anchors.margins: 30\n\n                    Text {\n                        Layout.fillWidth: true\n                        Layout.alignment: Qt.AlignLeft\n                        text: name\n                        elide: Text.ElideRight\n                        color: theme.titleTextColor\n                        font.pixelSize: theme.fontSizeLargest\n                        font.bold: true\n                        Accessible.role: Accessible.Paragraph\n                        Accessible.name: qsTr(\"Model file\")\n                        Accessible.description: qsTr(\"Model file to be downloaded\")\n                    }\n\n\n                    Rectangle {\n                        Layout.fillWidth: true\n                        height: 1\n                        color: theme.dividerColor\n                 
   }\n\n                    RowLayout {\n                        Layout.topMargin: 10\n                        Layout.fillWidth: true\n                        Text {\n                            id: descriptionText\n                            text: description\n                            font.pixelSize: theme.fontSizeLarge\n                            Layout.fillWidth: true\n                            wrapMode: Text.WordWrap\n                            textFormat: Text.StyledText\n                            color: theme.textColor\n                            linkColor: theme.textColor\n                            Accessible.role: Accessible.Paragraph\n                            Accessible.name: qsTr(\"Description\")\n                            Accessible.description: qsTr(\"File description\")\n                            onLinkActivated: function(link) { Qt.openUrlExternally(link); }\n                            MouseArea {\n                                anchors.fill: parent\n                                acceptedButtons: Qt.NoButton // pass clicks to parent\n                                cursorShape: parent.hoveredLink ? 
Qt.PointingHandCursor : Qt.ArrowCursor\n                            }\n                        }\n\n                        // FIXME Need to overhaul design here which must take into account\n                        // features not present in current figma including:\n                        // * Ability to cancel a current download\n                        // * Ability to resume a download\n                        // * The presentation of an error if encountered\n                        // * Whether to show already installed models\n                        // * Install of remote models with API keys\n                        // * The presentation of the progress bar\n                        Rectangle {\n                            id: actionBox\n                            width: childrenRect.width + 20\n                            color: \"transparent\"\n                            border.width: 1\n                            border.color: theme.dividerColor\n                            radius: 10\n                            Layout.rightMargin: 20\n                            Layout.bottomMargin: 20\n                            Layout.minimumHeight: childrenRect.height + 20\n                            Layout.alignment: Qt.AlignRight | Qt.AlignTop\n\n                            ColumnLayout {\n                                spacing: 0\n                                MySettingsButton {\n                                    id: downloadButton\n                                    text: isDownloading ? qsTr(\"Cancel\") : isIncomplete ? 
qsTr(\"Resume\") : qsTr(\"Download\")\n                                    font.pixelSize: theme.fontSizeLarge\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    visible: !isOnline && !installed && !calcHash && downloadError === \"\"\n                                    Accessible.description: qsTr(\"Stop/restart/start the download\")\n                                    onClicked: {\n                                        if (!isDownloading) {\n                                            Download.downloadModel(filename);\n                                        } else {\n                                            Download.cancelDownload(filename);\n                                        }\n                                    }\n                                }\n\n                                MySettingsDestructiveButton {\n                                    id: removeButton\n                                    text: qsTr(\"Remove\")\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    visible: !isDownloading && (installed || isIncomplete)\n                                    Accessible.description: qsTr(\"Remove model from filesystem\")\n                                    onClicked: {\n                                        Download.removeModel(filename);\n                                    }\n                                }\n\n                   
             MySettingsButton {\n                                    id: installButton\n                                    visible: !installed && isOnline\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    text: qsTr(\"Install\")\n                                    font.pixelSize: theme.fontSizeLarge\n                                    onClicked: {\n                                        var apiKeyText = apiKey.text.trim(),\n                                        baseUrlText = baseUrl.text.trim(),\n                                        modelNameText = modelName.text.trim();\n\n                                        var apiKeyOk = apiKeyText !== \"\",\n                                        baseUrlOk = !isCompatibleApi || baseUrlText !== \"\",\n                                        modelNameOk = !isCompatibleApi || modelNameText !== \"\";\n\n                                        if (!apiKeyOk)\n                                            apiKey.showError();\n                                        if (!baseUrlOk)\n                                            baseUrl.showError();\n                                        if (!modelNameOk)\n                                            modelName.showError();\n\n                                        if (!apiKeyOk || !baseUrlOk || !modelNameOk)\n                                            return;\n\n                                        if (!isCompatibleApi)\n                                            Download.installModel(\n                                                        filename,\n                                                        apiKeyText,\n                                             
           );\n                                        else\n                                            Download.installCompatibleModel(\n                                                        modelNameText,\n                                                        apiKeyText,\n                                                        baseUrlText,\n                                                        );\n                                    }\n                                    Accessible.role: Accessible.Button\n                                    Accessible.name: qsTr(\"Install\")\n                                    Accessible.description: qsTr(\"Install online model\")\n                                }\n\n                                ColumnLayout {\n                                    spacing: 0\n                                    Label {\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        visible: downloadError !== \"\"\n                                        textFormat: Text.StyledText\n                                        text: qsTr(\"<strong><font size=\\\"1\\\"><a href=\\\"#error\\\">Error</a></strong></font>\")\n                                        color: theme.textColor\n                                        font.pixelSize: theme.fontSizeLarge\n                                        linkColor: theme.textErrorColor\n                                        Accessible.role: Accessible.Paragraph\n                                        Accessible.name: text\n                                        Accessible.description: qsTr(\"Describes an error that occurred when downloading\")\n                                        onLinkActivated: {\n                                            downloadingErrorPopup.text = downloadError;\n                                            downloadingErrorPopup.open();\n                              
          }\n                                    }\n\n                                    Label {\n                                        visible: LLM.systemTotalRAMInGB() < ramrequired\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.maximumWidth: 300\n                                        textFormat: Text.StyledText\n                                        text: qsTr(\"<strong><font size=\\\"2\\\">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font>\").arg(ramrequired).arg(LLM.systemTotalRAMInGBString())\n                                        color: theme.textErrorColor\n                                        font.pixelSize: theme.fontSizeLarge\n                                        wrapMode: Text.WordWrap\n                                        Accessible.role: Accessible.Paragraph\n                                        Accessible.name: text\n                                        Accessible.description: qsTr(\"Error for incompatible hardware\")\n                                        onLinkActivated: {\n                                            downloadingErrorPopup.text = downloadError;\n                                            downloadingErrorPopup.open();\n                                        }\n                                    }\n                                }\n\n                                ColumnLayout {\n                                    visible: isDownloading && !calcHash\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                 
   spacing: 20\n\n                                    ProgressBar {\n                                        id: itemProgressBar\n                                        Layout.fillWidth: true\n                                        width: 200\n                                        value: bytesReceived / bytesTotal\n                                        background: Rectangle {\n                                            implicitHeight: 45\n                                            color: theme.progressBackground\n                                            radius: 3\n                                        }\n                                        contentItem: Item {\n                                            implicitHeight: 40\n\n                                            Rectangle {\n                                                width: itemProgressBar.visualPosition * parent.width\n                                                height: parent.height\n                                                radius: 2\n                                                color: theme.progressForeground\n                                            }\n                                        }\n                                        Accessible.role: Accessible.ProgressBar\n                                        Accessible.name: qsTr(\"Download progressBar\")\n                                        Accessible.description: qsTr(\"Shows the progress made in the download\")\n                                    }\n\n                                    Label {\n                                        id: speedLabel\n                                        color: theme.textColor\n                                        Layout.alignment: Qt.AlignRight\n                                        text: speed\n                                        font.pixelSize: theme.fontSizeLarge\n                                        Accessible.role: Accessible.Paragraph\n               
                         Accessible.name: qsTr(\"Download speed\")\n                                        Accessible.description: qsTr(\"Download speed in bytes/kilobytes/megabytes per second\")\n                                    }\n                                }\n\n                                RowLayout {\n                                    visible: calcHash\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.maximumWidth: 200\n                                    Layout.fillWidth: true\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    clip: true\n\n                                    Label {\n                                        id: calcHashLabel\n                                        color: theme.textColor\n                                        text: qsTr(\"Calculating...\")\n                                        font.pixelSize: theme.fontSizeLarge\n                                        Accessible.role: Accessible.Paragraph\n                                        Accessible.name: text\n                                        Accessible.description: qsTr(\"Whether the file hash is being calculated\")\n                                    }\n\n                                    MyBusyIndicator {\n                                        id: busyCalcHash\n                                        running: calcHash\n                                        Accessible.role: Accessible.Animation\n                                        Accessible.name: qsTr(\"Busy indicator\")\n                                        Accessible.description: qsTr(\"Displayed when the file hash is being calculated\")\n                                    }\n                                }\n\n                                
MyTextField {\n                                    id: apiKey\n                                    visible: !installed && isOnline\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    wrapMode: Text.WrapAnywhere\n                                    function showError() {\n                                        messageToast.show(qsTr(\"ERROR: $API_KEY is empty.\"));\n                                        apiKey.placeholderTextColor = theme.textErrorColor;\n                                    }\n                                    onTextChanged: {\n                                        apiKey.placeholderTextColor = theme.mutedTextColor;\n                                    }\n                                    placeholderText: qsTr(\"enter $API_KEY\")\n                                    Accessible.role: Accessible.EditableText\n                                    Accessible.name: placeholderText\n                                    Accessible.description: qsTr(\"Field for entering $API_KEY\")\n                                }\n\n                                MyTextField {\n                                    id: baseUrl\n                                    visible: !installed && isOnline && isCompatibleApi\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    wrapMode: Text.WrapAnywhere\n                                    function showError() {\n                                        messageToast.show(qsTr(\"ERROR: $BASE_URL is empty.\"));\n                
                        baseUrl.placeholderTextColor = theme.textErrorColor;\n                                    }\n                                    onTextChanged: {\n                                        baseUrl.placeholderTextColor = theme.mutedTextColor;\n                                    }\n                                    placeholderText: qsTr(\"enter $BASE_URL\")\n                                    Accessible.role: Accessible.EditableText\n                                    Accessible.name: placeholderText\n                                    Accessible.description: qsTr(\"Field for entering $BASE_URL\")\n                                }\n\n                                MyTextField {\n                                    id: modelName\n                                    visible: !installed && isOnline && isCompatibleApi\n                                    Layout.topMargin: 20\n                                    Layout.leftMargin: 20\n                                    Layout.minimumWidth: 200\n                                    Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                    wrapMode: Text.WrapAnywhere\n                                    function showError() {\n                                        messageToast.show(qsTr(\"ERROR: $MODEL_NAME is empty.\"))\n                                        modelName.placeholderTextColor = theme.textErrorColor;\n                                    }\n                                    onTextChanged: {\n                                        modelName.placeholderTextColor = theme.mutedTextColor;\n                                    }\n                                    placeholderText: qsTr(\"enter $MODEL_NAME\")\n                                    Accessible.role: Accessible.EditableText\n                                    Accessible.name: placeholderText\n                                    Accessible.description: qsTr(\"Field for entering 
$MODEL_NAME\")\n                                }\n                            }\n                        }\n                    }\n\n                    Item  {\n                        Layout.minimumWidth: childrenRect.width\n                        Layout.minimumHeight: childrenRect.height\n                        Layout.bottomMargin: 10\n                        RowLayout {\n                            id: paramRow\n                            anchors.centerIn: parent\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                                Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"File size\")\n                                    font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: filesize\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                            Rectangle {\n                                width: 1\n                                Layout.fillHeight: true\n                                color: theme.dividerColor\n                            }\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                                Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"Quant\")\n                              
      font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: quant\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                            Rectangle {\n                                width: 1\n                                Layout.fillHeight: true\n                                color: theme.dividerColor\n                            }\n                            ColumnLayout {\n                                Layout.topMargin: 10\n                                Layout.bottomMargin: 10\n                                Layout.leftMargin: 20\n                                Layout.rightMargin: 20\n                                Text {\n                                    text: qsTr(\"Type\")\n                                    font.pixelSize: theme.fontSizeSmall\n                                    color: theme.mutedDarkTextColor\n                                }\n                                Text {\n                                    text: type\n                                    color: theme.textColor\n                                    font.pixelSize: theme.fontSizeSmall\n                                    font.bold: true\n                                }\n                            }\n                        }\n\n                        Rectangle {\n                            color: \"transparent\"\n                            anchors.fill: paramRow\n                            border.color: theme.dividerColor\n                            border.width: 1\n                            radius: 10\n                        }\n                    }\n\n                    Rectangle 
{\n                        Layout.fillWidth: true\n                        height: 1\n                        color: theme.dividerColor\n                    }\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/AddModelView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport Qt.labs.folderlistmodel\nimport Qt5Compat.GraphicalEffects\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\nimport localdocs\n\nRectangle {\n    id: addModelView\n\n    Theme {\n        id: theme\n    }\n\n    color: theme.viewBackground\n    signal modelsViewRequested()\n\n    ToastManager {\n        id: messageToast\n    }\n\n    PopupDialog {\n        id: downloadingErrorPopup\n        anchors.centerIn: parent\n        shouldTimeOut: false\n    }\n\n    ColumnLayout {\n        id: mainArea\n        anchors.left: parent.left\n        anchors.right: parent.right\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.margins: 30\n        spacing: 10\n\n        ColumnLayout {\n            Layout.fillWidth: true\n            Layout.alignment: Qt.AlignTop\n            spacing: 10\n\n            MyButton {\n                id: backButton\n                Layout.alignment: Qt.AlignTop | Qt.AlignLeft\n                text: qsTr(\"\\u2190 Existing Models\")\n\n                borderWidth: 0\n                backgroundColor: theme.lighterButtonBackground\n                backgroundColorHovered: theme.lighterButtonBackgroundHovered\n                backgroundRadius: 5\n                padding: 15\n                topPadding: 8\n                bottomPadding: 8\n                textColor: theme.lighterButtonForeground\n                fontPixelSize: theme.fontSizeLarge\n                fontPixelBold: true\n\n                onClicked: {\n                    modelsViewRequested()\n                }\n            }\n\n            Text {\n                id: welcome\n                text: qsTr(\"Explore Models\")\n                font.pixelSize: theme.fontSizeBanner\n                color: theme.titleTextColor\n         
   }\n        }\n\n        RowLayout {\n            id: bar\n            implicitWidth: 600\n            spacing: 10\n            MyTabButton {\n                text: qsTr(\"GPT4All\")\n                isSelected: gpt4AllModelView.isShown()\n                onPressed: {\n                    gpt4AllModelView.show();\n                }\n            }\n            MyTabButton {\n                text: qsTr(\"Remote Providers\")\n                isSelected: remoteModelView.isShown()\n                onPressed: {\n                    remoteModelView.show();\n                }\n            }\n            MyTabButton {\n                text: qsTr(\"HuggingFace\")\n                isSelected: huggingfaceModelView.isShown()\n                onPressed: {\n                    huggingfaceModelView.show();\n                }\n            }\n        }\n\n        StackLayout {\n            id: stackLayout\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            AddGPT4AllModelView {\n                id: gpt4AllModelView\n                Layout.fillWidth: true\n                Layout.fillHeight: true\n\n                function show() {\n                    stackLayout.currentIndex = 0;\n                }\n                function isShown() {\n                    return stackLayout.currentIndex === 0;\n                }\n            }\n\n            AddRemoteModelView {\n                id: remoteModelView\n                Layout.fillWidth: true\n                Layout.fillHeight: true\n\n                function show() {\n                    stackLayout.currentIndex = 1;\n                }\n                function isShown() {\n                    return stackLayout.currentIndex === 1;\n                }\n            }\n\n            AddHFModelView {\n                id: huggingfaceModelView\n                Layout.fillWidth: true\n                Layout.fillHeight: true\n                // FIXME: This generates a warning and should not be used 
inside a layout, but without\n                // it the text field inside this qml does not display at full width so it looks like\n                // a bug in stacklayout\n                anchors.fill: parent\n\n                function show() {\n                    stackLayout.currentIndex = 2;\n                }\n                function isShown() {\n                    return stackLayout.currentIndex === 2;\n                }\n            }\n        }\n    }\n\n    Connections {\n        target: Download\n        function onToastMessage(message) {\n            messageToast.show(message);\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/AddRemoteModelView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport Qt.labs.folderlistmodel\nimport Qt5Compat.GraphicalEffects\n\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\nimport localdocs\n\nColumnLayout {\n    Layout.fillWidth: true\n    Layout.alignment: Qt.AlignTop\n    spacing: 5\n\n    Label {\n        Layout.topMargin: 0\n        Layout.bottomMargin: 25\n        Layout.rightMargin: 150 * theme.fontScale\n        Layout.alignment: Qt.AlignTop\n        Layout.fillWidth: true\n        verticalAlignment: Text.AlignTop\n        text: qsTr(\"Various remote model providers that use network resources for inference.\")\n        font.pixelSize: theme.fontSizeLarger\n        color: theme.textColor\n        wrapMode: Text.WordWrap\n    }\n\n    ScrollView {\n        id: scrollView\n        ScrollBar.vertical.policy: ScrollBar.AsNeeded\n        Layout.fillWidth: true\n        Layout.fillHeight: true\n        contentWidth: availableWidth\n        clip: true\n        Flow {\n            anchors.left: parent.left\n            anchors.right: parent.right\n            spacing: 20\n            bottomPadding: 20\n            property int childWidth: 330 * theme.fontScale\n            property int childHeight: 400 + 166 * theme.fontScale\n            RemoteModelCard {\n                width: parent.childWidth\n                height: parent.childHeight\n                providerBaseUrl: \"https://api.groq.com/openai/v1/\"\n                providerName: qsTr(\"Groq\")\n                providerImage: \"qrc:/gpt4all/icons/groq.svg\"\n                providerDesc: qsTr('Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. 
Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.<br><br>Get your API key: <a href=\"https://console.groq.com/keys\">https://groq.com/</a>')\n                modelWhitelist: [\n                    // last updated 2025-02-24\n                    \"deepseek-r1-distill-llama-70b\",\n                    \"deepseek-r1-distill-qwen-32b\",\n                    \"gemma2-9b-it\",\n                    \"llama-3.1-8b-instant\",\n                    \"llama-3.2-1b-preview\",\n                    \"llama-3.2-3b-preview\",\n                    \"llama-3.3-70b-specdec\",\n                    \"llama-3.3-70b-versatile\",\n                    \"llama3-70b-8192\",\n                    \"llama3-8b-8192\",\n                    \"mixtral-8x7b-32768\",\n                    \"qwen-2.5-32b\",\n                    \"qwen-2.5-coder-32b\",\n                ]\n            }\n            RemoteModelCard {\n                width: parent.childWidth\n                height: parent.childHeight\n                providerBaseUrl: \"https://api.openai.com/v1/\"\n                providerName: qsTr(\"OpenAI\")\n                providerImage: \"qrc:/gpt4all/icons/openai.svg\"\n                providerDesc: qsTr('OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.<br><br>Get your API key: <a href=\"https://platform.openai.com/signup\">https://openai.com/</a>')\n                modelWhitelist: [\n                    // last updated 2025-02-24\n                    \"gpt-3.5-turbo\",\n                    \"gpt-3.5-turbo-16k\",\n                    \"gpt-4\",\n                    \"gpt-4-32k\",\n                    \"gpt-4-turbo\",\n                    \"gpt-4o\",\n                ]\n            }\n            RemoteModelCard {\n                width: parent.childWidth\n                
height: parent.childHeight\n                providerBaseUrl: \"https://api.mistral.ai/v1/\"\n                providerName: qsTr(\"Mistral\")\n                providerImage: \"qrc:/gpt4all/icons/mistral.svg\"\n                providerDesc: qsTr('Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.<br><br>Get your API key: <a href=\"https://mistral.ai/\">https://mistral.ai/</a>')\n                modelWhitelist: [\n                    // last updated 2025-02-24\n                    \"codestral-2405\",\n                    \"codestral-2411-rc5\",\n                    \"codestral-2412\",\n                    \"codestral-2501\",\n                    \"codestral-latest\",\n                    \"codestral-mamba-2407\",\n                    \"codestral-mamba-latest\",\n                    \"ministral-3b-2410\",\n                    \"ministral-3b-latest\",\n                    \"ministral-8b-2410\",\n                    \"ministral-8b-latest\",\n                    \"mistral-large-2402\",\n                    \"mistral-large-2407\",\n                    \"mistral-large-2411\",\n                    \"mistral-large-latest\",\n                    \"mistral-medium-2312\",\n                    \"mistral-medium-latest\",\n                    \"mistral-saba-2502\",\n                    \"mistral-saba-latest\",\n                    \"mistral-small-2312\",\n                    \"mistral-small-2402\",\n                    \"mistral-small-2409\",\n                    \"mistral-small-2501\",\n                    \"mistral-small-latest\",\n                    \"mistral-tiny-2312\",\n                    \"mistral-tiny-2407\",\n                    \"mistral-tiny-latest\",\n                    \"open-codestral-mamba\",\n                    \"open-mistral-7b\",\n                    
\"open-mistral-nemo\",\n                    \"open-mistral-nemo-2407\",\n                    \"open-mixtral-8x22b\",\n                    \"open-mixtral-8x22b-2404\",\n                    \"open-mixtral-8x7b\",\n                ]\n            }\n            RemoteModelCard {\n                width: parent.childWidth\n                height: parent.childHeight\n                providerIsCustom: true\n                providerName: qsTr(\"Custom\")\n                providerImage: \"qrc:/gpt4all/icons/antenna_3.svg\"\n                providerDesc: qsTr(\"The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.\")\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ApplicationSettings.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport modellist\nimport mysettings\nimport network\nimport llm\n\nMySettingsTab {\n    onRestoreDefaults: {\n        MySettings.restoreApplicationDefaults();\n    }\n    title: qsTr(\"Application\")\n\n    NetworkDialog {\n        id: networkDialog\n        anchors.centerIn: parent\n        width: Math.min(1024, window.width - (window.width * .2))\n        height: Math.min(600, window.height - (window.height * .2))\n        Item {\n            Accessible.role: Accessible.Dialog\n            Accessible.name: qsTr(\"Network dialog\")\n            Accessible.description: qsTr(\"opt-in to share feedback/conversations\")\n        }\n    }\n\n    Dialog {\n        id: checkForUpdatesError\n        anchors.centerIn: parent\n        modal: false\n        padding: 20\n        width: 40 + 400 * theme.fontScale\n        Text {\n            anchors.fill: parent\n            horizontalAlignment: Text.AlignJustify\n            text: qsTr(\"ERROR: Update system could not find the MaintenanceTool used to check for updates!<br/><br/>\"\n                  + \"Did you install this application using the online installer? 
If so, the MaintenanceTool \"\n                  + \"executable should be located one directory above where this application resides on your \"\n                  + \"filesystem.<br/><br/>If you can't start it manually, then I'm afraid you'll have to reinstall.\")\n            wrapMode: Text.WordWrap\n            color: theme.textErrorColor\n            font.pixelSize: theme.fontSizeLarge\n            Accessible.role: Accessible.Dialog\n            Accessible.name: text\n            Accessible.description: qsTr(\"Error dialog\")\n        }\n        background: Rectangle {\n            anchors.fill: parent\n            color: theme.containerBackground\n            border.width: 1\n            border.color: theme.dialogBorder\n            radius: 10\n        }\n    }\n\n    contentItem: GridLayout {\n        id: applicationSettingsTabInner\n        columns: 3\n        rowSpacing: 30\n        columnSpacing: 10\n\n        Label {\n            Layout.row: 0\n            Layout.column: 0\n            Layout.bottomMargin: 10\n            color: theme.settingsTitleTextColor\n            font.pixelSize: theme.fontSizeBannerSmall\n            font.bold: true\n            text: qsTr(\"Application Settings\")\n        }\n\n        ColumnLayout {\n            Layout.row: 1\n            Layout.column: 0\n            Layout.columnSpan: 3\n            Layout.fillWidth: true\n            spacing: 10\n            Label {\n                color: theme.styledTextColor\n                font.pixelSize: theme.fontSizeLarge\n                font.bold: true\n                text: qsTr(\"General\")\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 1\n                color: theme.settingsDivider\n            }\n        }\n\n        MySettingsLabel {\n            id: themeLabel\n            text: qsTr(\"Theme\")\n            helpText: qsTr(\"The application color scheme.\")\n            Layout.row: 2\n            Layout.column: 0\n      
  }\n        MyComboBox {\n            id: themeBox\n            Layout.row: 2\n            Layout.column: 2\n            Layout.minimumWidth: 200\n            Layout.maximumWidth: 200\n            Layout.fillWidth: false\n            Layout.alignment: Qt.AlignRight\n            // NOTE: indices match values of ChatTheme enum, keep them in sync\n            model: ListModel {\n                ListElement { name: qsTr(\"Light\") }\n                ListElement { name: qsTr(\"Dark\") }\n                ListElement { name: qsTr(\"LegacyDark\") }\n            }\n            Accessible.name: themeLabel.text\n            Accessible.description: themeLabel.helpText\n            function updateModel() {\n                themeBox.currentIndex = MySettings.chatTheme;\n            }\n            Component.onCompleted: {\n                themeBox.updateModel()\n            }\n            Connections {\n                target: MySettings\n                function onChatThemeChanged() {\n                    themeBox.updateModel()\n                }\n            }\n            onActivated: {\n                MySettings.chatTheme = themeBox.currentIndex\n            }\n        }\n        MySettingsLabel {\n            id: fontLabel\n            text: qsTr(\"Font Size\")\n            helpText: qsTr(\"The size of text in the application.\")\n            Layout.row: 3\n            Layout.column: 0\n        }\n        MyComboBox {\n            id: fontBox\n            Layout.row: 3\n            Layout.column: 2\n            Layout.minimumWidth: 200\n            Layout.maximumWidth: 200\n            Layout.fillWidth: false\n            Layout.alignment: Qt.AlignRight\n            // NOTE: indices match values of FontSize enum, keep them in sync\n            model: ListModel {\n                ListElement { name: qsTr(\"Small\") }\n                ListElement { name: qsTr(\"Medium\") }\n                ListElement { name: qsTr(\"Large\") }\n            }\n            Accessible.name: 
fontLabel.text\n            Accessible.description: fontLabel.helpText\n            function updateModel() {\n                fontBox.currentIndex = MySettings.fontSize;\n            }\n            Component.onCompleted: {\n                fontBox.updateModel()\n            }\n            Connections {\n                target: MySettings\n                function onFontSizeChanged() {\n                    fontBox.updateModel()\n                }\n            }\n            onActivated: {\n                MySettings.fontSize = fontBox.currentIndex\n            }\n        }\n        MySettingsLabel {\n            id: languageLabel\n            visible: MySettings.uiLanguages.length > 1\n            text: qsTr(\"Language and Locale\")\n            helpText: qsTr(\"The language and locale you wish to use.\")\n            Layout.row: 4\n            Layout.column: 0\n        }\n        MyComboBox {\n            id: languageBox\n            visible: MySettings.uiLanguages.length > 1\n            Layout.row: 4\n            Layout.column: 2\n            Layout.minimumWidth: 200\n            Layout.maximumWidth: 200\n            Layout.fillWidth: false\n            Layout.alignment: Qt.AlignRight\n            model: ListModel {\n                Component.onCompleted: {\n                    for (var i = 0; i < MySettings.uiLanguages.length; ++i)\n                        append({\"text\": MySettings.uiLanguages[i]});\n                    languageBox.updateModel();\n                }\n                ListElement { text: qsTr(\"System Locale\") }\n            }\n\n            Accessible.name: languageLabel.text\n            Accessible.description: languageLabel.helpText\n            function updateModel() {\n                // This usage of 'System Locale' should not be translated\n                // FIXME: Make this refer to a string literal variable accessed by both QML and C++\n                if (MySettings.languageAndLocale === \"System Locale\")\n                    
languageBox.currentIndex = 0\n                else\n                    languageBox.currentIndex = languageBox.indexOfValue(MySettings.languageAndLocale);\n            }\n            Component.onCompleted: {\n                languageBox.updateModel()\n            }\n            onActivated: {\n                // This usage of 'System Locale' should not be translated\n                // FIXME: Make this refer to a string literal variable accessed by both QML and C++\n                if (languageBox.currentIndex === 0)\n                    MySettings.languageAndLocale = \"System Locale\";\n                else\n                    MySettings.languageAndLocale = languageBox.currentText;\n            }\n        }\n        MySettingsLabel {\n            id: deviceLabel\n            text: qsTr(\"Device\")\n            helpText: qsTr('The compute device used for text generation.')\n            Layout.row: 5\n            Layout.column: 0\n        }\n        MyComboBox {\n            id: deviceBox\n            Layout.row: 5\n            Layout.column: 2\n            Layout.minimumWidth: 400\n            Layout.maximumWidth: 400\n            Layout.fillWidth: false\n            Layout.alignment: Qt.AlignRight\n            model: ListModel {\n                Component.onCompleted: {\n                    for (var i = 0; i < MySettings.deviceList.length; ++i)\n                        append({\"text\": MySettings.deviceList[i]});\n                    deviceBox.updateModel();\n                }\n                ListElement { text: qsTr(\"Application default\") }\n            }\n\n            Accessible.name: deviceLabel.text\n            Accessible.description: deviceLabel.helpText\n            function updateModel() {\n                // This usage of 'Auto' should not be translated\n                // FIXME: Make this refer to a string literal variable accessed by both QML and C++\n                if (MySettings.device === \"Auto\")\n                    deviceBox.currentIndex = 
0\n                else\n                    deviceBox.currentIndex = deviceBox.indexOfValue(MySettings.device);\n            }\n            Component.onCompleted: {\n                deviceBox.updateModel();\n            }\n            Connections {\n                target: MySettings\n                function onDeviceChanged() {\n                    deviceBox.updateModel();\n                }\n            }\n            onActivated: {\n                // This usage of 'Auto' should not be translated\n                // FIXME: Make this refer to a string literal variable accessed by both QML and C++\n                if (deviceBox.currentIndex === 0)\n                    MySettings.device = \"Auto\";\n                else\n                    MySettings.device = deviceBox.currentText;\n            }\n        }\n        MySettingsLabel {\n            id: defaultModelLabel\n            text: qsTr(\"Default Model\")\n            helpText: qsTr(\"The preferred model for new chats. Also used as the local server fallback.\")\n            Layout.row: 6\n            Layout.column: 0\n        }\n        MyComboBox {\n            id: defaultModelBox\n            Layout.row: 6\n            Layout.column: 2\n            Layout.minimumWidth: 400\n            Layout.maximumWidth: 400\n            Layout.alignment: Qt.AlignRight\n            model: ListModel {\n                id: defaultModelBoxModel\n                Component.onCompleted: {\n                    defaultModelBox.rebuildModel()\n                }\n            }\n            Accessible.name: defaultModelLabel.text\n            Accessible.description: defaultModelLabel.helpText\n            function rebuildModel() {\n                defaultModelBoxModel.clear();\n                defaultModelBoxModel.append({\"text\": qsTr(\"Application default\")});\n                for (var i = 0; i < ModelList.selectableModelList.length; ++i)\n                    defaultModelBoxModel.append({\"text\": 
ModelList.selectableModelList[i].name});\n                defaultModelBox.updateModel();\n            }\n            function updateModel() {\n                // This usage of 'Application default' should not be translated\n                // FIXME: Make this refer to a string literal variable accessed by both QML and C++\n                if (MySettings.userDefaultModel === \"Application default\")\n                    defaultModelBox.currentIndex = 0\n                else\n                    defaultModelBox.currentIndex = defaultModelBox.indexOfValue(MySettings.userDefaultModel);\n            }\n            onActivated: {\n                // This usage of 'Application default' should not be translated\n                // FIXME: Make this refer to a string literal variable accessed by both QML and C++\n                if (defaultModelBox.currentIndex === 0)\n                    MySettings.userDefaultModel = \"Application default\";\n                else\n                    MySettings.userDefaultModel = defaultModelBox.currentText;\n            }\n            Connections {\n                target: MySettings\n                function onUserDefaultModelChanged() {\n                    defaultModelBox.updateModel()\n                }\n            }\n            Connections {\n                target: MySettings\n                function onLanguageAndLocaleChanged() {\n                    defaultModelBox.rebuildModel()\n                }\n            }\n            Connections {\n                target: ModelList\n                function onSelectableModelListChanged() {\n                    defaultModelBox.rebuildModel()\n                }\n            }\n        }\n        MySettingsLabel {\n            id: suggestionModeLabel\n            text: qsTr(\"Suggestion Mode\")\n            helpText: qsTr(\"Generate suggested follow-up questions at the end of responses.\")\n            Layout.row: 7\n            Layout.column: 0\n        }\n        MyComboBox {\n           
 id: suggestionModeBox\n            Layout.row: 7\n            Layout.column: 2\n            Layout.minimumWidth: 400\n            Layout.maximumWidth: 400\n            Layout.alignment: Qt.AlignRight\n            // NOTE: indices match values of SuggestionMode enum, keep them in sync\n            model: ListModel {\n                ListElement { name: qsTr(\"When chatting with LocalDocs\") }\n                ListElement { name: qsTr(\"Whenever possible\") }\n                ListElement { name: qsTr(\"Never\") }\n            }\n            Accessible.name: suggestionModeLabel.text\n            Accessible.description: suggestionModeLabel.helpText\n            onActivated: {\n                MySettings.suggestionMode = suggestionModeBox.currentIndex;\n            }\n            Component.onCompleted: {\n                suggestionModeBox.currentIndex = MySettings.suggestionMode;\n            }\n        }\n        MySettingsLabel {\n            id: modelPathLabel\n            text: qsTr(\"Download Path\")\n            helpText: qsTr(\"Where to store local models and the LocalDocs database.\")\n            Layout.row: 8\n            Layout.column: 0\n        }\n\n        RowLayout {\n            Layout.row: 8\n            Layout.column: 2\n            Layout.alignment: Qt.AlignRight\n            Layout.minimumWidth: 400\n            Layout.maximumWidth: 400\n            spacing: 10\n            MyDirectoryField {\n                id: modelPathDisplayField\n                text: MySettings.modelPath\n                font.pixelSize: theme.fontSizeLarge\n                implicitWidth: 300\n                Layout.fillWidth: true\n                Accessible.name: modelPathLabel.text\n                Accessible.description: modelPathLabel.helpText\n                onEditingFinished: {\n                    if (isValid) {\n                        MySettings.modelPath = modelPathDisplayField.text\n                    } else {\n                        text = 
MySettings.modelPath\n                    }\n                }\n            }\n            MyFolderDialog {\n                id: folderDialog\n            }\n            MySettingsButton {\n                text: qsTr(\"Browse\")\n                Accessible.description: qsTr(\"Choose where to save model files\")\n                onClicked: {\n                    folderDialog.openFolderDialog(\"file://\" + MySettings.modelPath, function(selectedFolder) {\n                        MySettings.modelPath = selectedFolder\n                    })\n                }\n            }\n        }\n\n        MySettingsLabel {\n            id: dataLakeLabel\n            text: qsTr(\"Enable Datalake\")\n            helpText: qsTr(\"Send chats and feedback to the GPT4All Open-Source Datalake.\")\n            Layout.row: 9\n            Layout.column: 0\n        }\n        MyCheckBox {\n            id: dataLakeBox\n            Layout.row: 9\n            Layout.column: 2\n            Layout.alignment: Qt.AlignRight\n            Component.onCompleted: { dataLakeBox.checked = MySettings.networkIsActive; }\n            Connections {\n                target: MySettings\n                function onNetworkIsActiveChanged() { dataLakeBox.checked = MySettings.networkIsActive; }\n            }\n            onClicked: {\n                if (MySettings.networkIsActive)\n                    MySettings.networkIsActive = false;\n                else\n                    networkDialog.open();\n                dataLakeBox.checked = MySettings.networkIsActive;\n            }\n        }\n\n        ColumnLayout {\n            Layout.row: 10\n            Layout.column: 0\n            Layout.columnSpan: 3\n            Layout.fillWidth: true\n            spacing: 10\n            Label {\n                color: theme.styledTextColor\n                font.pixelSize: theme.fontSizeLarge\n                font.bold: true\n                text: qsTr(\"Advanced\")\n            }\n\n            Rectangle {\n         
       Layout.fillWidth: true\n                height: 1\n                color: theme.settingsDivider\n            }\n        }\n\n        MySettingsLabel {\n            id: nThreadsLabel\n            text: qsTr(\"CPU Threads\")\n            helpText: qsTr(\"The number of CPU threads used for inference and embedding.\")\n            Layout.row: 11\n            Layout.column: 0\n        }\n        MyTextField {\n            text: MySettings.threadCount\n            color: theme.textColor\n            font.pixelSize: theme.fontSizeLarge\n            Layout.alignment: Qt.AlignRight\n            Layout.row: 11\n            Layout.column: 2\n            Layout.minimumWidth: 200\n            Layout.maximumWidth: 200\n            validator: IntValidator {\n                bottom: 1\n            }\n            onEditingFinished: {\n                var val = parseInt(text)\n                if (!isNaN(val)) {\n                    MySettings.threadCount = val\n                    focus = false\n                } else {\n                    text = MySettings.threadCount\n                }\n            }\n            Accessible.role: Accessible.EditableText\n            Accessible.name: nThreadsLabel.text\n            Accessible.description: ToolTip.text\n        }\n        MySettingsLabel {\n            id: trayLabel\n            text: qsTr(\"Enable System Tray\")\n            helpText: qsTr(\"The application will minimize to the system tray when the window is closed.\")\n            Layout.row: 13\n            Layout.column: 0\n        }\n        MyCheckBox {\n            id: trayBox\n            Layout.row: 13\n            Layout.column: 2\n            Layout.alignment: Qt.AlignRight\n            checked: MySettings.systemTray\n            onClicked: {\n                MySettings.systemTray = !MySettings.systemTray\n            }\n        }\n        MySettingsLabel {\n            id: serverChatLabel\n            text: qsTr(\"Enable Local API Server\")\n            helpText: 
qsTr(\"Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage.\")\n            Layout.row: 14\n            Layout.column: 0\n        }\n        MyCheckBox {\n            id: serverChatBox\n            Layout.row: 14\n            Layout.column: 2\n            Layout.alignment: Qt.AlignRight\n            checked: MySettings.serverChat\n            onClicked: {\n                MySettings.serverChat = !MySettings.serverChat\n            }\n        }\n        MySettingsLabel {\n            id: serverPortLabel\n            text: qsTr(\"API Server Port\")\n            helpText: qsTr(\"The port to use for the local server. Requires restart.\")\n            Layout.row: 15\n            Layout.column: 0\n        }\n        MyTextField {\n            id: serverPortField\n            text: MySettings.networkPort\n            color: theme.textColor\n            font.pixelSize: theme.fontSizeLarge\n            Layout.row: 15\n            Layout.column: 2\n            Layout.minimumWidth: 200\n            Layout.maximumWidth: 200\n            Layout.alignment: Qt.AlignRight\n            validator: IntValidator {\n                bottom: 1\n            }\n            onEditingFinished: {\n                var val = parseInt(text)\n                if (!isNaN(val)) {\n                    MySettings.networkPort = val\n                    focus = false\n                } else {\n                    text = MySettings.networkPort\n                }\n            }\n            Accessible.role: Accessible.EditableText\n            Accessible.name: serverPortLabel.text\n            Accessible.description: serverPortLabel.helpText\n        }\n\n        /*MySettingsLabel {\n            id: gpuOverrideLabel\n            text: qsTr(\"Force Metal (macOS+arm)\")\n            Layout.row: 13\n            Layout.column: 0\n        }\n        MyCheckBox {\n            id: gpuOverrideBox\n            Layout.row: 13\n            Layout.column: 2\n            
Layout.alignment: Qt.AlignRight\n            checked: MySettings.forceMetal\n            onClicked: {\n                MySettings.forceMetal = !MySettings.forceMetal\n            }\n            ToolTip.text: qsTr(\"WARNING: On macOS with arm (M1+) this setting forces usage of the GPU. Can cause crashes if the model requires more RAM than the system supports. Because of crash possibility the setting will not persist across restarts of the application. This has no effect on non-macs or intel.\")\n            ToolTip.visible: hovered\n        }*/\n\n        MySettingsLabel {\n            id: updatesLabel\n            text: qsTr(\"Check For Updates\")\n            helpText: qsTr(\"Manually check for an update to GPT4All.\");\n            Layout.row: 16\n            Layout.column: 0\n        }\n\n        MySettingsButton {\n            Layout.row: 16\n            Layout.column: 2\n            Layout.alignment: Qt.AlignRight\n            text: qsTr(\"Updates\");\n            onClicked: {\n                if (!LLM.checkForUpdates())\n                    checkForUpdatesError.open()\n            }\n        }\n\n        Rectangle {\n            Layout.row: 17\n            Layout.column: 0\n            Layout.columnSpan: 3\n            Layout.fillWidth: true\n            height: 1\n            color: theme.settingsDivider\n        }\n    }\n}\n\n"
  },
  {
    "path": "gpt4all-chat/qml/ChatCollapsibleItem.qml",
    "content": "import Qt5Compat.GraphicalEffects\nimport QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\n\nimport gpt4all\nimport mysettings\nimport toolenums\n\nColumnLayout {\n    property alias textContent: innerTextItem.textContent\n    property bool isCurrent: false\n    property bool isError: false\n    property bool isThinking: false\n    property int  thinkingTime: 0\n\n    Layout.topMargin: 10\n    Layout.bottomMargin: 10\n\n    Item {\n        Layout.preferredWidth: childrenRect.width\n        Layout.preferredHeight: 38\n        RowLayout {\n            anchors.left: parent.left\n            anchors.top: parent.top\n            anchors.bottom: parent.bottom\n\n            Item {\n                Layout.preferredWidth: myTextArea.implicitWidth\n                Layout.preferredHeight: myTextArea.implicitHeight\n                TextArea {\n                    id: myTextArea\n                    text: {\n                        if (isError)\n                            return qsTr(\"Analysis encountered error\");\n                        if (isCurrent)\n                            return isThinking ? qsTr(\"Thinking\") : qsTr(\"Analyzing\");\n                        return isThinking\n                            ? qsTr(\"Thought for %1 %2\")\n                                  .arg(Math.ceil(thinkingTime / 1000.0))\n                                  .arg(Math.ceil(thinkingTime / 1000.0) === 1 ? qsTr(\"second\") : qsTr(\"seconds\"))\n                            : qsTr(\"Analyzed\");\n                    }\n                    padding: 0\n                    font.pixelSize: theme.fontSizeLarger\n                    enabled: false\n                    focus: false\n                    readOnly: true\n                    color: headerMA.containsMouse ? 
theme.mutedDarkTextColorHovered : theme.mutedTextColor\n                    hoverEnabled: false\n                }\n\n                Item {\n                    id: textColorOverlay\n                    anchors.fill: parent\n                    clip: true\n                    visible: false\n                    Rectangle {\n                        id: animationRec\n                        width: myTextArea.width * 0.3\n                        anchors.top: parent.top\n                        anchors.bottom: parent.bottom\n                        color: theme.textColor\n\n                        SequentialAnimation {\n                            running: isCurrent\n                            loops: Animation.Infinite\n                            NumberAnimation {\n                                target: animationRec;\n                                property: \"x\";\n                                from: -animationRec.width;\n                                to: myTextArea.width * 3;\n                                duration: 2000\n                            }\n                        }\n                    }\n                }\n                OpacityMask {\n                    visible: isCurrent\n                    anchors.fill: parent\n                    maskSource: myTextArea\n                    source: textColorOverlay\n                }\n            }\n\n            Item {\n                id: caret\n                Layout.preferredWidth: contentCaret.width\n                Layout.preferredHeight: contentCaret.height\n                Image {\n                    id: contentCaret\n                    anchors.centerIn: parent\n                    visible: false\n                    sourceSize.width: theme.fontSizeLarge\n                    sourceSize.height: theme.fontSizeLarge\n                    mipmap: true\n                    source: {\n                        if (contentLayout.state === \"collapsed\")\n                            return 
\"qrc:/gpt4all/icons/caret_right.svg\";\n                        else\n                            return \"qrc:/gpt4all/icons/caret_down.svg\";\n                    }\n                }\n\n                ColorOverlay {\n                    anchors.fill: contentCaret\n                    source: contentCaret\n                    color: headerMA.containsMouse ? theme.mutedDarkTextColorHovered : theme.mutedTextColor\n                }\n            }\n        }\n\n        MouseArea {\n            id: headerMA\n            hoverEnabled: true\n            anchors.fill: parent\n            onClicked: {\n                if (contentLayout.state === \"collapsed\")\n                    contentLayout.state = \"expanded\";\n                else\n                    contentLayout.state = \"collapsed\";\n            }\n        }\n    }\n\n    ColumnLayout {\n        id: contentLayout\n        spacing: 0\n        state: \"collapsed\"\n        clip: true\n\n        states: [\n            State {\n                name: \"expanded\"\n                PropertyChanges { target: contentLayout; Layout.preferredHeight: innerContentLayout.height }\n            },\n            State {\n                name: \"collapsed\"\n                PropertyChanges { target: contentLayout; Layout.preferredHeight: 0 }\n            }\n        ]\n\n        transitions: [\n            Transition {\n                SequentialAnimation {\n                    PropertyAnimation {\n                        target: contentLayout\n                        property: \"Layout.preferredHeight\"\n                        duration: 300\n                        easing.type: Easing.InOutQuad\n                    }\n                }\n            }\n        ]\n\n        ColumnLayout {\n            id: innerContentLayout\n            Layout.leftMargin: 30\n            ChatTextItem {\n                id: innerTextItem\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ChatDrawer.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport chatlistmodel\nimport llm\nimport download\nimport network\nimport mysettings\n\nRectangle {\n    id: chatDrawer\n\n    Theme {\n        id: theme\n    }\n\n    color: theme.viewBackground\n\n    Rectangle {\n        id: borderRight\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.right: parent.right\n        width: 1\n        color: theme.dividerColor\n    }\n\n    Item {\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.left: parent.left\n        anchors.right: borderRight.left\n\n        Accessible.role: Accessible.Pane\n        Accessible.name: qsTr(\"Drawer\")\n        Accessible.description: qsTr(\"Main navigation drawer\")\n\n        MySettingsButton {\n            id: newChat\n            anchors.top: parent.top\n            anchors.left: parent.left\n            anchors.right: parent.right\n            anchors.margins: 20\n            font.pixelSize: theme.fontSizeLarger\n            topPadding: 24\n            bottomPadding: 24\n            text: qsTr(\"\\uFF0B New Chat\")\n            Accessible.description: qsTr(\"Create a new chat\")\n            onClicked: {\n                ChatListModel.addChat()\n                conversationList.positionViewAtIndex(0, ListView.Beginning)\n                Network.trackEvent(\"new_chat\", {\"number_of_chats\": ChatListModel.count})\n            }\n        }\n\n        Rectangle {\n            id: divider\n            anchors.top: newChat.bottom\n            anchors.margins: 20\n            anchors.topMargin: 14\n            anchors.left: parent.left\n            anchors.right: parent.right\n            height: 1\n            color: theme.dividerColor\n        }\n\n        ScrollView {\n            anchors.left: parent.left\n            anchors.right: parent.right\n            anchors.topMargin: 15\n          
  anchors.top: divider.bottom\n            anchors.bottom: parent.bottom\n            anchors.bottomMargin: 15\n            ScrollBar.vertical.policy: ScrollBar.AlwaysOff\n            clip: true\n\n            ListView {\n                id: conversationList\n                anchors.fill: parent\n                anchors.leftMargin: 10\n                anchors.rightMargin: 10\n                model: ChatListModel\n\n                Component.onCompleted: ChatListModel.loadChats()\n\n                ScrollBar.vertical: ScrollBar {\n                    parent: conversationList.parent\n                    anchors.top: conversationList.top\n                    anchors.left: conversationList.right\n                    anchors.bottom: conversationList.bottom\n                }\n\n                Component {\n                    id: sectionHeading\n                    Rectangle {\n                        width: ListView.view.width\n                        height: childrenRect.height\n                        color: \"transparent\"\n                        property bool isServer: ChatListModel.get(parent.index) && ChatListModel.get(parent.index).isServer\n                        visible: !isServer || MySettings.serverChat\n\n                        required property string section\n\n                        Text {\n                            leftPadding: 10\n                            rightPadding: 10\n                            topPadding: 15\n                            bottomPadding: 5\n                            text: parent.section\n                            color: theme.chatDrawerSectionHeader\n                            font.pixelSize: theme.fontSizeSmallest\n                        }\n                    }\n                }\n\n                section.property: \"section\"\n                section.criteria: ViewSection.FullString\n                section.delegate: sectionHeading\n\n                delegate: Rectangle {\n                    id: chatRectangle\n  
                  width: conversationList.width\n                    height: chatNameBox.height + 20\n                    property bool isCurrent: ChatListModel.currentChat === ChatListModel.get(index)\n                    property bool isServer: ChatListModel.get(index) && ChatListModel.get(index).isServer\n                    property bool trashQuestionDisplayed: false\n                    visible: !isServer || MySettings.serverChat\n                    z: isCurrent ? 199 : 1\n                    color: isCurrent ? theme.selectedBackground : \"transparent\"\n                    border.width: isCurrent\n                    border.color: theme.dividerColor\n                    radius: 10\n\n                    Rectangle {\n                        id: chatNameBox\n                        height: chatName.height\n                        anchors.left: parent.left\n                        anchors.right: trashButton.left\n                        anchors.verticalCenter: chatRectangle.verticalCenter\n                        anchors.leftMargin: 5\n                        anchors.rightMargin: 5\n                        radius: 5\n                        color: chatName.readOnly ? 
\"transparent\" : theme.chatNameEditBgColor\n\n                        TextField {\n                            id: chatName\n                            anchors.left: parent.left\n                            anchors.right: editButton.left\n                            anchors.verticalCenter: chatNameBox.verticalCenter\n                            topPadding: 5\n                            bottomPadding: 5\n                            color: theme.styledTextColor\n                            focus: false\n                            readOnly: true\n                            wrapMode: Text.NoWrap\n                            hoverEnabled: false // Disable hover events on the TextArea\n                            selectByMouse: false // Disable text selection in the TextArea\n                            font.pixelSize: theme.fontSizeLarge\n                            font.bold: true\n                            text: readOnly ? metrics.elidedText : name\n                            horizontalAlignment: TextInput.AlignLeft\n                            opacity: trashQuestionDisplayed ? 
0.5 : 1.0\n                            TextMetrics {\n                                id: metrics\n                                font: chatName.font\n                                text: name\n                                elide: Text.ElideRight\n                                elideWidth: chatName.width - 15\n                            }\n                            background: Rectangle {\n                                color: \"transparent\"\n                            }\n                            onEditingFinished: {\n                                // Work around a bug in qml where we're losing focus when the whole window\n                                // goes out of focus even though this textfield should be marked as not\n                                // having focus\n                                if (chatName.readOnly)\n                                    return;\n                                changeName();\n                            }\n                            function changeName() {\n                                Network.trackChatEvent(\"rename_chat\");\n                                ChatListModel.get(index).name = chatName.text;\n                                chatName.focus = false;\n                                chatName.readOnly = true;\n                                chatName.selectByMouse = false;\n                            }\n                            TapHandler {\n                                onTapped: {\n                                    if (isCurrent)\n                                        return;\n                                    ChatListModel.currentChat = ChatListModel.get(index);\n                                }\n                            }\n                            Accessible.role: Accessible.Button\n                            Accessible.name: text\n                            Accessible.description: qsTr(\"Select the current chat or edit the chat when in edit mode\")\n                    
    }\n                        MyToolButton {\n                            id: editButton\n                            anchors.verticalCenter: parent.verticalCenter\n                            anchors.right: parent.right\n                            anchors.rightMargin: 5\n                            imageWidth: 24\n                            imageHeight: 24\n                            visible: isCurrent && !isServer && chatName.readOnly\n                            opacity: trashQuestionDisplayed ? 0.5 : 1.0\n                            source: \"qrc:/gpt4all/icons/edit.svg\"\n                            onClicked: {\n                                chatName.focus = true;\n                                chatName.readOnly = false;\n                                chatName.selectByMouse = true;\n                            }\n                            Accessible.name: qsTr(\"Edit chat name\")\n                        }\n                        MyToolButton {\n                            id: okButton\n                            anchors.verticalCenter: parent.verticalCenter\n                            anchors.right: parent.right\n                            anchors.rightMargin: 5\n                            imageWidth: 24\n                            imageHeight: 24\n                            visible: isCurrent && !isServer && !chatName.readOnly\n                            opacity: trashQuestionDisplayed ? 
0.5 : 1.0\n                            source: \"qrc:/gpt4all/icons/check.svg\"\n                            onClicked: chatName.changeName()\n                            Accessible.name: qsTr(\"Save chat name\")\n                        }\n                    }\n\n                    MyToolButton {\n                        id: trashButton\n                        anchors.verticalCenter: chatNameBox.verticalCenter\n                        anchors.right: chatRectangle.right\n                        anchors.rightMargin: 10\n                        imageWidth: 24\n                        imageHeight: 24\n                        visible: isCurrent && !isServer\n                        source: \"qrc:/gpt4all/icons/trash.svg\"\n                        onClicked: {\n                            trashQuestionDisplayed = true\n                            timer.start()\n                        }\n                        Accessible.name: qsTr(\"Delete chat\")\n                    }\n                    Rectangle {\n                        id: trashSureQuestion\n                        anchors.top: trashButton.bottom\n                        anchors.topMargin: 10\n                        anchors.right: trashButton.right\n                        width: childrenRect.width\n                        height: childrenRect.height\n                        color: chatRectangle.color\n                        visible: isCurrent && trashQuestionDisplayed\n                        opacity: 1.0\n                        radius: 10\n                        z: 200\n                        Row {\n                            spacing: 10\n                            Button {\n                                id: checkMark\n                                width: 30\n                                height: 30\n                                contentItem: Text {\n                                    color: theme.textErrorColor\n                                    text: \"\\u2713\"\n                       
             font.pixelSize: theme.fontSizeLarger\n                                    horizontalAlignment: Text.AlignHCenter\n                                    verticalAlignment: Text.AlignVCenter\n                                }\n                                background: Rectangle {\n                                    width: 30\n                                    height: 30\n                                    color: \"transparent\"\n                                }\n                                onClicked: {\n                                    Network.trackChatEvent(\"remove_chat\")\n                                    ChatListModel.removeChat(ChatListModel.get(index))\n                                }\n                                Accessible.role: Accessible.Button\n                                Accessible.name: qsTr(\"Confirm chat deletion\")\n                            }\n                            Button {\n                                id: cancel\n                                width: 30\n                                height: 30\n                                contentItem: Text {\n                                    color: theme.textColor\n                                    text: \"\\u2715\"\n                                    font.pixelSize: theme.fontSizeLarger\n                                    horizontalAlignment: Text.AlignHCenter\n                                    verticalAlignment: Text.AlignVCenter\n                                }\n                                background: Rectangle {\n                                    width: 30\n                                    height: 30\n                                    color: \"transparent\"\n                                }\n                                onClicked: {\n                                    trashQuestionDisplayed = false\n                                }\n                                Accessible.role: Accessible.Button\n                              
  Accessible.name: qsTr(\"Cancel chat deletion\")\n                            }\n                        }\n                    }\n                    Timer {\n                        id: timer\n                        interval: 3000; running: false; repeat: false\n                        onTriggered: trashQuestionDisplayed = false\n                    }\n                }\n\n                Accessible.role: Accessible.List\n                Accessible.name: qsTr(\"List of chats\")\n                Accessible.description: qsTr(\"List of chats in the drawer dialog\")\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ChatItemView.qml",
    "content": "import Qt5Compat.GraphicalEffects\nimport QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport Qt.labs.qmlmodels\n\nimport gpt4all\nimport mysettings\nimport toolenums\n\nColumnLayout {\n\nproperty var inputBoxText: null\nsignal setInputBoxText(text: string)\n\nItem {\n\nLayout.fillWidth: true\nLayout.maximumWidth: parent.width\nLayout.preferredHeight: gridLayout.height\n\nHoverHandler { id: hoverArea }\n\nGridLayout {\n    id: gridLayout\n    anchors.left: parent.left\n    anchors.right: parent.right\n    columns: 2\n\n    Item {\n        Layout.row: 0\n        Layout.column: 0\n        Layout.alignment: Qt.AlignVCenter | Qt.AlignRight\n        Layout.preferredWidth: 32\n        Layout.preferredHeight: 32\n        Layout.topMargin: model.index > 0 ? 25 : 0\n\n        Image {\n            id: logo\n            sourceSize: Qt.size(32, 32)\n            fillMode: Image.PreserveAspectFit\n            mipmap: true\n            visible: false\n            source: name !== \"Response: \" ? \"qrc:/gpt4all/icons/you.svg\" : \"qrc:/gpt4all/icons/gpt4all_transparent.svg\"\n        }\n\n        ColorOverlay {\n            id: colorOver\n            anchors.fill: logo\n            source: logo\n            color: theme.conversationHeader\n            RotationAnimation {\n                id: rotationAnimation\n                target: colorOver\n                property: \"rotation\"\n                from: 0\n                to: 360\n                duration: 1000\n                loops: Animation.Infinite\n                running: isCurrentResponse && currentChat.responseInProgress\n            }\n        }\n    }\n\n    Item {\n        Layout.row: 0\n        Layout.column: 1\n        Layout.fillWidth: true\n        Layout.preferredHeight: 38\n        Layout.topMargin: model.index > 0 ? 
25 : 0\n\n        RowLayout {\n            spacing: 5\n            anchors.left: parent.left\n            anchors.top: parent.top\n            anchors.bottom: parent.bottom\n\n            TextArea {\n                text: {\n                    if (name === \"Response: \")\n                        return qsTr(\"GPT4All\");\n                    return qsTr(\"You\");\n                }\n                padding: 0\n                font.pixelSize: theme.fontSizeLarger\n                font.bold: true\n                color: theme.conversationHeader\n                enabled: false\n                focus: false\n                readOnly: true\n            }\n            Text {\n                visible: name === \"Response: \"\n                font.pixelSize: theme.fontSizeLarger\n                text: currentModelName()\n                color: theme.mutedTextColor\n            }\n            RowLayout {\n                visible: isCurrentResponse && (content === \"\" && currentChat.responseInProgress)\n                Text {\n                    color: theme.mutedTextColor\n                    font.pixelSize: theme.fontSizeLarger\n                    text: {\n                        switch (currentChat.responseState) {\n                        case Chat.ResponseStopped: return qsTr(\"response stopped ...\");\n                        case Chat.LocalDocsRetrieval: return qsTr(\"retrieving localdocs: %1 ...\").arg(currentChat.collectionList.join(\", \"));\n                        case Chat.LocalDocsProcessing: return qsTr(\"searching localdocs: %1 ...\").arg(currentChat.collectionList.join(\", \"));\n                        case Chat.PromptProcessing: return qsTr(\"processing ...\")\n                        case Chat.ResponseGeneration: return qsTr(\"generating response ...\");\n                        case Chat.GeneratingQuestions: return qsTr(\"generating questions ...\");\n                        case Chat.ToolCallGeneration: return qsTr(\"generating toolcall ...\");\n   
                     default: return \"\"; // handle unexpected values\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    ColumnLayout {\n        Layout.row: 1\n        Layout.column: 1\n        Layout.fillWidth: true\n        spacing: 10\n        Flow {\n            id: attachedUrlsFlow\n            Layout.fillWidth: true\n            Layout.bottomMargin: 10\n            spacing: 10\n            visible: promptAttachments.length !== 0\n            Repeater {\n                model: promptAttachments\n\n                delegate: Rectangle {\n                    width: 350\n                    height: 50\n                    radius: 5\n                    color: theme.attachmentBackground\n                    border.color: theme.controlBorder\n\n                    Row {\n                        spacing: 5\n                        anchors.fill: parent\n                        anchors.margins: 5\n\n                        MyFileIcon {\n                            iconSize: 40\n                            fileName: modelData.file\n                        }\n\n                        Text {\n                            width: 295\n                            height: 40\n                            text: modelData.file\n                            color: theme.textColor\n                            horizontalAlignment: Text.AlignLeft\n                            verticalAlignment: Text.AlignVCenter\n                            font.pixelSize: theme.fontSizeMedium\n                            font.bold: true\n                            wrapMode: Text.WrapAnywhere\n                            elide: Qt.ElideRight\n                        }\n                    }\n                }\n            }\n        }\n\n        Repeater {\n            model: childItems\n\n            DelegateChooser {\n                id: chooser\n                role: \"name\"\n                DelegateChoice {\n                    
roleValue: \"Text: \";\n                    ChatTextItem {\n                        Layout.fillWidth: true\n                        textContent: modelData.content\n                    }\n                }\n                DelegateChoice {\n                    roleValue: \"ToolCall: \";\n                    ChatCollapsibleItem {\n                        Layout.fillWidth: true\n                        textContent: modelData.content\n                        isCurrent: modelData.isCurrentResponse\n                        isError: modelData.isToolCallError\n                    }\n                }\n                DelegateChoice {\n                    roleValue: \"Think: \";\n                    ChatCollapsibleItem {\n                        Layout.fillWidth: true\n                        textContent: modelData.content\n                        isCurrent: modelData.isCurrentResponse\n                        isError: false\n                        isThinking: true\n                        thinkingTime: modelData.thinkingTime\n                        visible: modelData.content !== \"\"\n                    }\n                }\n            }\n\n            delegate: chooser\n        }\n\n        ChatTextItem {\n            Layout.fillWidth: true\n            textContent: content\n        }\n\n        ThumbsDownDialog {\n            id: thumbsDownDialog\n            x: Math.round((parent.width - width) / 2)\n            y: Math.round((parent.height - height) / 2)\n            width: 640\n            height: 300\n            property string text: content\n            response: newResponse === undefined || newResponse === \"\" ? 
text : newResponse\n            onAccepted: {\n                var responseHasChanged = response !== text && response !== newResponse\n                if (thumbsDownState && !thumbsUpState && !responseHasChanged)\n                    return\n\n                chatModel.updateNewResponse(model.index, response)\n                chatModel.updateThumbsUpState(model.index, false)\n                chatModel.updateThumbsDownState(model.index, true)\n                Network.sendConversation(currentChat.id, getConversationJson());\n            }\n        }\n    }\n\n    Item {\n        Layout.row: 2\n        Layout.column: 1\n        Layout.topMargin: 5\n        Layout.alignment: Qt.AlignVCenter\n        Layout.preferredWidth: childrenRect.width\n        Layout.preferredHeight: childrenRect.height\n        visible: {\n            if (name !== \"Response: \")\n                return false\n            if (consolidatedSources.length === 0)\n                return false\n            if (!MySettings.localDocsShowReferences)\n                return false\n            if (isCurrentResponse && currentChat.responseInProgress\n                    && currentChat.responseState !== Chat.GeneratingQuestions )\n                return false\n            return true\n        }\n\n        MyButton {\n            backgroundColor: theme.sourcesBackground\n            backgroundColorHovered: theme.sourcesBackgroundHovered\n            contentItem: RowLayout {\n                anchors.centerIn: parent\n\n                Item {\n                    Layout.preferredWidth: 24\n                    Layout.preferredHeight: 24\n\n                    Image {\n                        id: sourcesIcon\n                        visible: false\n                        anchors.fill: parent\n                        sourceSize.width: 24\n                        sourceSize.height: 24\n                        mipmap: true\n                        source: \"qrc:/gpt4all/icons/db.svg\"\n                    }\n\n    
                ColorOverlay {\n                        anchors.fill: sourcesIcon\n                        source: sourcesIcon\n                        color: theme.textColor\n                    }\n                }\n\n                Text {\n                    text: qsTr(\"%n Source(s)\", \"\", consolidatedSources.length)\n                    padding: 0\n                    font.pixelSize: theme.fontSizeLarge\n                    font.bold: true\n                    color: theme.styledTextColor\n                }\n\n                Item {\n                    Layout.preferredWidth: caret.width\n                    Layout.preferredHeight: caret.height\n                    Image {\n                        id: caret\n                        anchors.centerIn: parent\n                        visible: false\n                        sourceSize.width: theme.fontSizeLarge\n                        sourceSize.height: theme.fontSizeLarge\n                        mipmap: true\n                        source: {\n                            if (sourcesLayout.state === \"collapsed\")\n                                return \"qrc:/gpt4all/icons/caret_right.svg\";\n                            else\n                                return \"qrc:/gpt4all/icons/caret_down.svg\";\n                        }\n                    }\n\n                    ColorOverlay {\n                        anchors.fill: caret\n                        source: caret\n                        color: theme.textColor\n                    }\n                }\n            }\n\n            onClicked: {\n                if (sourcesLayout.state === \"collapsed\")\n                    sourcesLayout.state = \"expanded\";\n                else\n                    sourcesLayout.state = \"collapsed\";\n            }\n        }\n    }\n\n    ColumnLayout {\n        id: sourcesLayout\n        Layout.row: 3\n        Layout.column: 1\n        Layout.topMargin: 5\n        visible: {\n            if 
(consolidatedSources.length === 0)\n                return false\n            if (!MySettings.localDocsShowReferences)\n                return false\n            if (isCurrentResponse && currentChat.responseInProgress\n                    && currentChat.responseState !== Chat.GeneratingQuestions )\n                return false\n            return true\n        }\n        clip: true\n        Layout.fillWidth: true\n        Layout.preferredHeight: 0\n        state: \"collapsed\"\n        states: [\n            State {\n                name: \"expanded\"\n                PropertyChanges { target: sourcesLayout; Layout.preferredHeight: sourcesFlow.height }\n            },\n            State {\n                name: \"collapsed\"\n                PropertyChanges { target: sourcesLayout; Layout.preferredHeight: 0 }\n            }\n        ]\n\n        transitions: [\n            Transition {\n                SequentialAnimation {\n                    PropertyAnimation {\n                        target: sourcesLayout\n                        property: \"Layout.preferredHeight\"\n                        duration: 300\n                        easing.type: Easing.InOutQuad\n                    }\n                }\n            }\n        ]\n\n        Flow {\n            id: sourcesFlow\n            Layout.fillWidth: true\n            spacing: 10\n            visible: consolidatedSources.length !== 0\n            Repeater {\n                model: consolidatedSources\n\n                delegate: Rectangle {\n                    radius: 10\n                    color: ma.containsMouse ? 
theme.sourcesBackgroundHovered : theme.sourcesBackground\n                    width: 200\n                    height: 75\n\n                    MouseArea {\n                        id: ma\n                        enabled: modelData.path !== \"\"\n                        anchors.fill: parent\n                        hoverEnabled: true\n                        onClicked: function() {\n                            Qt.openUrlExternally(modelData.fileUri)\n                        }\n                    }\n\n                    Rectangle {\n                        id: debugTooltip\n                        anchors.right: parent.right\n                        anchors.bottom: parent.bottom\n                        width: 24\n                        height: 24\n                        color: \"transparent\"\n                        ToolTip {\n                            parent: debugTooltip\n                            visible: debugMouseArea.containsMouse\n                            text: modelData.text\n                            contentWidth: 900\n                            delay: 500\n                        }\n                        MouseArea {\n                            id: debugMouseArea\n                            anchors.fill: parent\n                            hoverEnabled: true\n                        }\n                    }\n\n                    ColumnLayout {\n                        anchors.left: parent.left\n                        anchors.top: parent.top\n                        anchors.margins: 10\n                        spacing: 0\n                        RowLayout {\n                            id: title\n                            spacing: 5\n                            Layout.maximumWidth: 180\n                            MyFileIcon {\n                                iconSize: 24\n                                fileName: modelData.file\n                                Layout.preferredWidth: iconSize\n                                
Layout.preferredHeight: iconSize\n                            }\n                            Text {\n                                Layout.maximumWidth: 156\n                                text: modelData.collection !== \"\" ? modelData.collection : qsTr(\"LocalDocs\")\n                                font.pixelSize: theme.fontSizeLarge\n                                font.bold: true\n                                color: theme.styledTextColor\n                                elide: Qt.ElideRight\n                            }\n                            Rectangle {\n                                Layout.fillWidth: true\n                                color: \"transparent\"\n                                height: 1\n                            }\n                        }\n                        Text {\n                            Layout.fillHeight: true\n                            Layout.maximumWidth: 180\n                            Layout.maximumHeight: 55 - title.height\n                            text: modelData.file\n                            color: theme.textColor\n                            font.pixelSize: theme.fontSizeSmall\n                            elide: Qt.ElideRight\n                            wrapMode: Text.WrapAnywhere\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    ConfirmationDialog {\n        id: editPromptDialog\n        dialogTitle: qsTr(\"Edit this message?\")\n        description: qsTr(\"All following messages will be permanently erased.\")\n        onAccepted: {\n            const msg = currentChat.popPrompt(index);\n            if (msg !== null)\n                setInputBoxText(msg);\n        }\n    }\n\n    ConfirmationDialog {\n        id: redoResponseDialog\n        dialogTitle: qsTr(\"Redo this response?\")\n        description: qsTr(\"All following messages will be permanently erased.\")\n        onAccepted: currentChat.regenerateResponse(index)\n    }\n\n   
 RowLayout {\n        id: buttonRow\n        Layout.row: 4\n        Layout.column: 1\n        Layout.maximumWidth: parent.width\n        Layout.fillWidth: false\n        Layout.alignment: Qt.AlignLeft | Qt.AlignTop\n        spacing: 3\n        visible: !isCurrentResponse || !currentChat.responseInProgress\n        enabled: opacity > 0\n        opacity: hoverArea.hovered\n\n        Behavior on opacity {\n            OpacityAnimator { duration: 30 }\n        }\n\n        ChatMessageButton {\n            readonly property var editingDisabledReason: {\n                if (!currentChat.isModelLoaded)\n                    return qsTr(\"Cannot edit chat without a loaded model.\");\n                if (currentChat.responseInProgress)\n                    return qsTr(\"Cannot edit chat while the model is generating.\");\n                return null;\n            }\n            visible: !currentChat.isServer && model.name === \"Prompt: \"\n            enabled: editingDisabledReason === null\n            Layout.maximumWidth: 24\n            Layout.maximumHeight: 24\n            Layout.alignment: Qt.AlignVCenter\n            Layout.fillWidth: false\n            name: editingDisabledReason ?? 
qsTr(\"Edit\")\n            source: \"qrc:/gpt4all/icons/edit.svg\"\n            onClicked: {\n                if (inputBoxText === \"\")\n                    editPromptDialog.open();\n            }\n        }\n\n        ChatMessageButton {\n            readonly property var editingDisabledReason: {\n                if (!currentChat.isModelLoaded)\n                    return qsTr(\"Cannot redo response without a loaded model.\");\n                if (currentChat.responseInProgress)\n                    return qsTr(\"Cannot redo response while the model is generating.\");\n                return null;\n            }\n            visible: !currentChat.isServer && model.name === \"Response: \"\n            enabled: editingDisabledReason === null\n            Layout.maximumWidth: 24\n            Layout.maximumHeight: 24\n            Layout.alignment: Qt.AlignVCenter\n            Layout.fillWidth: false\n            name: editingDisabledReason ?? qsTr(\"Redo\")\n            source: \"qrc:/gpt4all/icons/regenerate.svg\"\n            onClicked: {\n                if (index == chatModel.count - 1) {\n                    // regenerate last message without confirmation\n                    currentChat.regenerateResponse(index);\n                    return;\n                }\n                redoResponseDialog.open();\n            }\n        }\n\n        ChatMessageButton {\n            Layout.maximumWidth: 24\n            Layout.maximumHeight: 24\n            Layout.alignment: Qt.AlignVCenter\n            Layout.fillWidth: false\n            name: qsTr(\"Copy\")\n            source: \"qrc:/gpt4all/icons/copy.svg\"\n            onClicked: {\n                chatModel.copyToClipboard(index);\n            }\n        }\n\n        Item {\n            visible: name === \"Response: \" && MySettings.networkIsActive\n            Layout.alignment: Qt.AlignVCenter\n            Layout.preferredWidth: childrenRect.width\n            Layout.preferredHeight: childrenRect.height\n          
  Layout.fillWidth: false\n\n            ChatMessageButton {\n                id: thumbsUp\n                anchors.left: parent.left\n                anchors.verticalCenter: parent.verticalCenter\n                opacity: thumbsUpState || thumbsUpState == thumbsDownState ? 1.0 : 0.2\n                source: \"qrc:/gpt4all/icons/thumbs_up.svg\"\n                name: qsTr(\"Like response\")\n                onClicked: {\n                    if (thumbsUpState && !thumbsDownState)\n                        return\n\n                    chatModel.updateNewResponse(index, \"\")\n                    chatModel.updateThumbsUpState(index, true)\n                    chatModel.updateThumbsDownState(index, false)\n                    Network.sendConversation(currentChat.id, getConversationJson());\n                }\n            }\n\n            ChatMessageButton {\n                id: thumbsDown\n                anchors.top: thumbsUp.top\n                anchors.topMargin: buttonRow.spacing\n                anchors.left: thumbsUp.right\n                anchors.leftMargin: buttonRow.spacing\n                checked: thumbsDownState\n                opacity: thumbsDownState || thumbsUpState == thumbsDownState ? 
1.0 : 0.2\n                bgTransform: [\n                    Matrix4x4 {\n                        matrix: Qt.matrix4x4(-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)\n                    },\n                    Translate {\n                        x: thumbsDown.width\n                    }\n                ]\n                source: \"qrc:/gpt4all/icons/thumbs_down.svg\"\n                name: qsTr(\"Dislike response\")\n                onClicked: {\n                    thumbsDownDialog.open()\n                }\n            }\n        }\n    }\n} // GridLayout\n\n} // Item\n\nGridLayout {\n    Layout.fillWidth: true\n    Layout.maximumWidth: parent.width\n\n    function shouldShowSuggestions() {\n        if (!isCurrentResponse)\n            return false;\n        if (MySettings.suggestionMode === 2) // Off\n            return false;\n        if (MySettings.suggestionMode === 0 && consolidatedSources.length === 0) // LocalDocs only\n            return false;\n        return currentChat.responseState === Chat.GeneratingQuestions || currentChat.generatedQuestions.length !== 0;\n    }\n\n    Item {\n        visible: parent.shouldShowSuggestions()\n        Layout.row: 5\n        Layout.column: 0\n        Layout.topMargin: 20\n        Layout.alignment: Qt.AlignVCenter | Qt.AlignRight\n        Layout.preferredWidth: 28\n        Layout.preferredHeight: 28\n        Image {\n            id: stack\n            sourceSize: Qt.size(28, 28)\n            fillMode: Image.PreserveAspectFit\n            mipmap: true\n            visible: false\n            source: \"qrc:/gpt4all/icons/stack.svg\"\n        }\n\n        ColorOverlay {\n            anchors.fill: stack\n            source: stack\n            color: theme.conversationHeader\n        }\n    }\n\n    Item {\n        visible: parent.shouldShowSuggestions()\n        Layout.row: 5\n        Layout.column: 1\n        Layout.topMargin: 20\n        Layout.fillWidth: true\n        Layout.preferredHeight: 38\n        RowLayout 
{\n            spacing: 5\n            anchors.left: parent.left\n            anchors.top: parent.top\n            anchors.bottom: parent.bottom\n\n            TextArea {\n                text: qsTr(\"Suggested follow-ups\")\n                padding: 0\n                font.pixelSize: theme.fontSizeLarger\n                font.bold: true\n                color: theme.conversationHeader\n                enabled: false\n                focus: false\n                readOnly: true\n            }\n        }\n    }\n\n    ColumnLayout {\n        visible: parent.shouldShowSuggestions()\n        Layout.row: 6\n        Layout.column: 1\n        Layout.fillWidth: true\n        Layout.minimumHeight: 1\n        spacing: 10\n        Repeater {\n            model: currentChat.generatedQuestions\n            TextArea {\n                id: followUpText\n                Layout.fillWidth: true\n                Layout.alignment: Qt.AlignLeft\n                rightPadding: 40\n                topPadding: 10\n                leftPadding: 20\n                bottomPadding: 10\n                text: modelData\n                focus: false\n                readOnly: true\n                wrapMode: Text.WordWrap\n                hoverEnabled: !currentChat.responseInProgress\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                background: Rectangle {\n                    color: hovered ? 
theme.sourcesBackgroundHovered : theme.sourcesBackground\n                    radius: 10\n                }\n                MouseArea {\n                    id: maFollowUp\n                    anchors.fill: parent\n                    enabled: !currentChat.responseInProgress\n                    onClicked: function() {\n                        var chat = window.currentChat\n                        var followup = modelData\n                        chat.stopGenerating()\n                        chat.newPromptResponsePair(followup)\n                    }\n                }\n                Item {\n                    anchors.right: parent.right\n                    anchors.verticalCenter: parent.verticalCenter\n                    width: 40\n                    height: 40\n                    visible: !currentChat.responseInProgress\n                    Image {\n                        id: plusImage\n                        anchors.verticalCenter: parent.verticalCenter\n                        sourceSize.width: 20\n                        sourceSize.height: 20\n                        mipmap: true\n                        visible: false\n                        source: \"qrc:/gpt4all/icons/plus.svg\"\n                    }\n\n                    ColorOverlay {\n                        anchors.fill: plusImage\n                        source: plusImage\n                        color: theme.styledTextColor\n                    }\n                }\n            }\n        }\n\n        Rectangle {\n            Layout.fillWidth: true\n            color: \"transparent\"\n            radius: 10\n            Layout.preferredHeight: currentChat.responseInProgress ? 
40 : 0\n            clip: true\n            ColumnLayout {\n                id: followUpLayout\n                anchors.fill: parent\n                Rectangle {\n                    id: myRect1\n                    Layout.preferredWidth: 0\n                    Layout.minimumWidth: 0\n                    Layout.maximumWidth: parent.width\n                    height: 12\n                    color: theme.sourcesBackgroundHovered\n                }\n\n                Rectangle {\n                    id: myRect2\n                    Layout.preferredWidth: 0\n                    Layout.minimumWidth: 0\n                    Layout.maximumWidth: parent.width\n                    height: 12\n                    color: theme.sourcesBackgroundHovered\n                }\n\n                SequentialAnimation {\n                    id: followUpProgressAnimation\n                    ParallelAnimation {\n                        PropertyAnimation {\n                            target: myRect1\n                            property: \"Layout.preferredWidth\"\n                            from: 0\n                            to: followUpLayout.width\n                            duration: 1000\n                        }\n                        PropertyAnimation {\n                            target: myRect2\n                            property: \"Layout.preferredWidth\"\n                            from: 0\n                            to: followUpLayout.width / 2\n                            duration: 1000\n                        }\n                    }\n                    SequentialAnimation {\n                        loops: Animation.Infinite\n                        ParallelAnimation {\n                            PropertyAnimation {\n                                target: myRect1\n                                property: \"opacity\"\n                                from: 1\n                                to: 0.2\n                                duration: 1500\n              
              }\n                            PropertyAnimation {\n                                target: myRect2\n                                property: \"opacity\"\n                                from: 1\n                                to: 0.2\n                                duration: 1500\n                            }\n                        }\n                        ParallelAnimation {\n                            PropertyAnimation {\n                                target: myRect1\n                                property: \"opacity\"\n                                from: 0.2\n                                to: 1\n                                duration: 1500\n                            }\n                            PropertyAnimation {\n                                target: myRect2\n                                property: \"opacity\"\n                                from: 0.2\n                                to: 1\n                                duration: 1500\n                            }\n                        }\n                    }\n                }\n\n                onVisibleChanged: {\n                    if (visible)\n                        followUpProgressAnimation.start();\n                }\n            }\n\n            Behavior on Layout.preferredHeight {\n                NumberAnimation {\n                    duration: 300\n                    easing.type: Easing.InOutQuad\n                }\n            }\n        }\n    }\n\n} // GridLayout\n\n} // ColumnLayout\n"
  },
  {
    "path": "gpt4all-chat/qml/ChatMessageButton.qml",
    "content": "import QtQuick\nimport QtQuick.Controls\n\nimport gpt4all\n\nMyToolButton {\n    property string name\n\n    width: 24\n    height: 24\n    imageWidth: width\n    imageHeight: height\n    ToolTip {\n        visible: parent.hovered\n        y: parent.height * 1.5\n        text: name\n        delay: Qt.styleHints.mousePressAndHoldInterval\n    }\n    Accessible.name: name\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ChatTextItem.qml",
    "content": "import Qt5Compat.GraphicalEffects\nimport QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\n\nimport gpt4all\nimport mysettings\nimport toolenums\n\nTextArea {\n    id: myTextArea\n    property string textContent: \"\"\n    visible: textContent != \"\"\n    Layout.fillWidth: true\n    padding: 0\n    color: {\n        if (!currentChat.isServer)\n            return theme.textColor\n        return theme.white\n    }\n    wrapMode: Text.WordWrap\n    textFormat: TextEdit.PlainText\n    focus: false\n    readOnly: true\n    font.pixelSize: theme.fontSizeLarge\n    cursorVisible: isCurrentResponse ? currentChat.responseInProgress : false\n    cursorPosition: text.length\n    TapHandler {\n        id: tapHandler\n        onTapped: function(eventPoint, button) {\n            var clickedPos = myTextArea.positionAt(eventPoint.position.x, eventPoint.position.y);\n            var success = textProcessor.tryCopyAtPosition(clickedPos);\n            if (success)\n                copyCodeMessage.open();\n        }\n    }\n\n    MouseArea {\n        id: conversationMouseArea\n        anchors.fill: parent\n        acceptedButtons: Qt.RightButton\n\n        onClicked: (mouse) => {\n                       if (mouse.button === Qt.RightButton) {\n                           conversationContextMenu.x = conversationMouseArea.mouseX\n                           conversationContextMenu.y = conversationMouseArea.mouseY\n                           conversationContextMenu.open()\n                       }\n                   }\n    }\n\n    onLinkActivated: function(link) {\n        if (!isCurrentResponse || !currentChat.responseInProgress)\n            Qt.openUrlExternally(link)\n    }\n\n    onLinkHovered: function (link) {\n        if (!isCurrentResponse || !currentChat.responseInProgress)\n            statusBar.externalHoveredLink = link\n    }\n\n    MyMenu {\n        id: conversationContextMenu\n        MyMenuItem {\n    
        text: qsTr(\"Copy\")\n            enabled: myTextArea.selectedText !== \"\"\n            height: enabled ? implicitHeight : 0\n            onTriggered: myTextArea.copy()\n        }\n        MyMenuItem {\n            text: qsTr(\"Copy Message\")\n            enabled: myTextArea.selectedText === \"\"\n            height: enabled ? implicitHeight : 0\n            onTriggered: {\n                myTextArea.selectAll()\n                myTextArea.copy()\n                myTextArea.deselect()\n            }\n        }\n        MyMenuItem {\n            text: textProcessor.shouldProcessText ? qsTr(\"Disable markdown\") : qsTr(\"Enable markdown\")\n            height: enabled ? implicitHeight : 0\n            onTriggered: {\n                textProcessor.shouldProcessText = !textProcessor.shouldProcessText;\n                textProcessor.setValue(textContent);\n            }\n        }\n    }\n\n    ChatViewTextProcessor {\n        id: textProcessor\n    }\n\n    function resetChatViewTextProcessor() {\n        textProcessor.fontPixelSize                = myTextArea.font.pixelSize\n        textProcessor.codeColors.defaultColor      = theme.codeDefaultColor\n        textProcessor.codeColors.keywordColor      = theme.codeKeywordColor\n        textProcessor.codeColors.functionColor     = theme.codeFunctionColor\n        textProcessor.codeColors.functionCallColor = theme.codeFunctionCallColor\n        textProcessor.codeColors.commentColor      = theme.codeCommentColor\n        textProcessor.codeColors.stringColor       = theme.codeStringColor\n        textProcessor.codeColors.numberColor       = theme.codeNumberColor\n        textProcessor.codeColors.headerColor       = theme.codeHeaderColor\n        textProcessor.codeColors.backgroundColor   = theme.codeBackgroundColor\n        textProcessor.textDocument                 = textDocument\n        textProcessor.setValue(textContent);\n    }\n\n    property bool textProcessorReady: false\n\n    Component.onCompleted: {\n   
     resetChatViewTextProcessor();\n        textProcessorReady = true;\n    }\n\n    Connections {\n        target: myTextArea\n        function onTextContentChanged() {\n            if (myTextArea.textProcessorReady)\n                textProcessor.setValue(textContent);\n        }\n    }\n\n    Connections {\n        target: MySettings\n        function onFontSizeChanged() {\n            myTextArea.resetChatViewTextProcessor();\n        }\n        function onChatThemeChanged() {\n            myTextArea.resetChatViewTextProcessor();\n        }\n    }\n\n    Accessible.role: Accessible.Paragraph\n    Accessible.name: text\n    Accessible.description: name === \"Response: \" ? \"The response by the model\" : \"The prompt by the user\"\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ChatView.qml",
    "content": "import Qt5Compat.GraphicalEffects\nimport QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Dialogs\nimport QtQuick.Layouts\n\nimport chatlistmodel\nimport download\nimport gpt4all\nimport llm\nimport localdocs\nimport modellist\nimport mysettings\nimport network\n\nRectangle {\n    id: window\n\n    Theme {\n        id: theme\n    }\n\n    property var currentChat: ChatListModel.currentChat\n    property var chatModel: currentChat.chatModel\n    property var currentModelInfo: currentChat && currentChat.modelInfo\n    property var currentModelId: null\n    onCurrentModelInfoChanged: {\n        const newId = currentModelInfo && currentModelInfo.id;\n        if (currentModelId !== newId) { currentModelId = newId; }\n    }\n    signal addCollectionViewRequested()\n    signal addModelViewRequested()\n\n    color: theme.viewBackground\n\n    Connections {\n        target: currentChat\n        // FIXME: https://github.com/nomic-ai/gpt4all/issues/3334\n        // function onResponseInProgressChanged() {\n        //    if (MySettings.networkIsActive && !currentChat.responseInProgress)\n        //        Network.sendConversation(currentChat.id, getConversationJson());\n        // }\n        function onModelLoadingErrorChanged() {\n            if (currentChat.modelLoadingError !== \"\")\n                modelLoadingErrorPopup.open()\n        }\n        function onModelLoadingWarning(warning) {\n            modelLoadingWarningPopup.open_(warning)\n        }\n    }\n\n    function currentModelName() {\n        return ModelList.modelInfo(currentChat.modelInfo.id).name;\n    }\n\n    function currentModelInstalled() {\n        return currentModelName() !== \"\" && ModelList.modelInfo(currentChat.modelInfo.id).installed;\n    }\n\n    PopupDialog {\n        id: modelLoadingErrorPopup\n        anchors.centerIn: parent\n        shouldTimeOut: false\n        text: qsTr(\"<h3>Encountered an error loading model:</h3><br>\"\n 
             + \"<i>\\\"%1\\\"</i>\"\n              + \"<br><br>Model loading failures can happen for a variety of reasons, but the most common \"\n              + \"causes include a bad file format, an incomplete or corrupted download, the wrong file \"\n              + \"type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:\"\n              + \"<br><ul>\"\n              + \"<li>Ensure the model file has a compatible format and type\"\n              + \"<li>Check the model file is complete in the download folder\"\n              + \"<li>You can find the download folder in the settings dialog\"\n              + \"<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum\"\n              + \"<li>Read more about what models are supported in our <a href=\\\"https://docs.gpt4all.io/\\\">documentation</a> for the gui\"\n              + \"<li>Check out our <a href=\\\"https://discord.gg/4M2QFmTt2k\\\">discord channel</a> for help\").arg(currentChat.modelLoadingError);\n    }\n\n    PopupDialog {\n        id: modelLoadingWarningPopup\n        property string message\n        anchors.centerIn: parent\n        shouldTimeOut: false\n        text: qsTr(\"<h3>Warning</h3><p>%1</p>\").arg(message)\n        function open_(msg) { message = msg; open(); }\n    }\n\n    ConfirmationDialog {\n        id: switchModelDialog\n        property int index: -1\n        dialogTitle: qsTr(\"Erase conversation?\")\n        description: qsTr(\"Changing the model will erase the current conversation.\")\n    }\n\n    PopupDialog {\n        id: copyMessage\n        anchors.centerIn: parent\n        text: qsTr(\"Conversation copied to clipboard.\")\n        font.pixelSize: theme.fontSizeLarge\n    }\n\n    PopupDialog {\n        id: copyCodeMessage\n        anchors.centerIn: parent\n        text: qsTr(\"Code copied to clipboard.\")\n        font.pixelSize: theme.fontSizeLarge\n    }\n\n    ConfirmationDialog {\n   
     id: resetContextDialog\n        dialogTitle: qsTr(\"Erase conversation?\")\n        description: qsTr(\"The entire chat will be erased.\")\n        onAccepted: {\n            Network.trackChatEvent(\"reset_context\", { \"length\": chatModel.count });\n            currentChat.reset();\n        }\n    }\n\n    // FIXME: https://github.com/nomic-ai/gpt4all/issues/3334\n    // function getConversation() {\n    //     var conversation = \"\";\n    //     for (var i = 0; i < chatModel.count; i++) {\n    //         var item = chatModel.get(i)\n    //         var string = item.name;\n    //         var isResponse = item.name === \"Response: \"\n    //         string += chatModel.get(i).value\n    //         if (isResponse && item.stopped)\n    //             string += \" <stopped>\"\n    //         string += \"\\n\"\n    //         conversation += string\n    //     }\n    //     return conversation\n    // }\n\n    // FIXME: https://github.com/nomic-ai/gpt4all/issues/3334\n    // function getConversationJson() {\n    //     var str = \"{\\\"conversation\\\": [\";\n    //     for (var i = 0; i < chatModel.count; i++) {\n    //         var item = chatModel.get(i)\n    //         var isResponse = item.name === \"Response: \"\n    //         str += \"{\\\"content\\\": \";\n    //         str += JSON.stringify(item.value)\n    //         str += \", \\\"role\\\": \\\"\" + (isResponse ? \"assistant\" : \"user\") + \"\\\"\";\n    //         if (isResponse && item.thumbsUpState !== item.thumbsDownState)\n    //             str += \", \\\"rating\\\": \\\"\" + (item.thumbsUpState ? 
\"positive\" : \"negative\") + \"\\\"\";\n    //         if (isResponse && item.newResponse !== \"\")\n    //             str += \", \\\"edited_content\\\": \" + JSON.stringify(item.newResponse);\n    //         if (isResponse && item.stopped)\n    //             str += \", \\\"stopped\\\": \\\"true\\\"\"\n    //         if (!isResponse)\n    //             str += \"},\"\n    //         else\n    //             str += ((i < chatModel.count - 1) ? \"},\" : \"}\")\n    //     }\n    //     return str + \"]}\"\n    // }\n\n    ChatDrawer {\n        id: chatDrawer\n        anchors.left: parent.left\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        width: Math.max(180, Math.min(600, 0.23 * window.width))\n    }\n\n    PopupDialog {\n        id: referenceContextDialog\n        anchors.centerIn: parent\n        shouldTimeOut: false\n        shouldShowBusy: false\n        modal: true\n    }\n\n    Item {\n        id: mainArea\n        anchors.left: chatDrawer.right\n        anchors.right: parent.right\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        state: \"expanded\"\n\n        states: [\n            State {\n                name: \"expanded\"\n                AnchorChanges {\n                    target: mainArea\n                    anchors.left: chatDrawer.right\n                }\n            },\n            State {\n                name: \"collapsed\"\n                AnchorChanges {\n                    target: mainArea\n                    anchors.left: parent.left\n                }\n            }\n        ]\n\n        function toggleLeftPanel() {\n            if (mainArea.state === \"expanded\") {\n                mainArea.state = \"collapsed\";\n            } else {\n                mainArea.state = \"expanded\";\n            }\n        }\n\n        transitions: Transition {\n            AnchorAnimation {\n                easing.type: Easing.InOutQuad\n                duration: 200\n            }\n   
     }\n\n        Rectangle {\n            id: header\n            anchors.left: parent.left\n            anchors.right: parent.right\n            anchors.top: parent.top\n            height: 100\n            color: theme.conversationBackground\n\n            RowLayout {\n                id: comboLayout\n                height: 80\n                anchors.left: parent.left\n                anchors.right: parent.right\n                anchors.verticalCenter: parent.verticalCenter\n                spacing: 0\n\n                Rectangle {\n                    Layout.alignment: Qt.AlignLeft\n                    Layout.leftMargin: 30\n                    Layout.fillWidth: true\n                    color: \"transparent\"\n                    Layout.preferredHeight: childrenRect.height\n                    MyToolButton {\n                        id: drawerButton\n                        anchors.left: parent.left\n                        backgroundColor: theme.iconBackgroundLight\n                        width: 40\n                        height: 40\n                        imageWidth: 40\n                        imageHeight: 40\n                        padding: 15\n                        source: mainArea.state === \"expanded\" ? 
\"qrc:/gpt4all/icons/left_panel_open.svg\" : \"qrc:/gpt4all/icons/left_panel_closed.svg\"\n                        Accessible.role: Accessible.ButtonMenu\n                        Accessible.name: qsTr(\"Chat panel\")\n                        Accessible.description: qsTr(\"Chat panel with options\")\n                        onClicked: {\n                            mainArea.toggleLeftPanel()\n                        }\n                    }\n                }\n\n                ComboBox {\n                    id: comboBox\n                    Layout.alignment: Qt.AlignHCenter\n                    Layout.fillHeight: true\n                    Layout.preferredWidth: 550\n                    Layout.leftMargin: {\n                        // This function works in tandem with the preferredWidth and the layout to\n                        // provide the maximum size combobox we can have at the smallest window width\n                        // we allow with the largest font size we allow. It is unfortunately based\n                        // upon a magic number that was produced through trial and error for something\n                        // I don't fully understand.\n                        return -Math.max(0, comboBox.width / 2 + collectionsButton.width + 110 /*magic*/ - comboLayout.width / 2);\n                    }\n                    enabled: !currentChat.isServer\n                        && !currentChat.trySwitchContextInProgress\n                        && !currentChat.isCurrentlyLoading\n                        && ModelList.selectableModels.count !== 0\n                    model: ModelList.selectableModels\n                    valueRole: \"id\"\n                    textRole: \"name\"\n\n                    function changeModel(index) {\n                        currentChat.stopGenerating()\n                        currentChat.reset();\n                        currentChat.modelInfo = ModelList.modelInfo(comboBox.valueAt(index))\n                    }\n\n             
       Connections {\n                        target: switchModelDialog\n                        function onAccepted() {\n                            comboBox.changeModel(switchModelDialog.index)\n                        }\n                    }\n\n                    background: Rectangle {\n                        color: theme.mainComboBackground\n                        radius: 10\n                        ProgressBar {\n                            id: modelProgress\n                            anchors.bottom: parent.bottom\n                            anchors.horizontalCenter: parent.horizontalCenter\n                            width: contentRow.width + 20\n                            visible: currentChat.isCurrentlyLoading\n                            height: 10\n                            value: currentChat.modelLoadingPercentage\n                            background: Rectangle {\n                                color: theme.progressBackground\n                                radius: 10\n                            }\n                            contentItem: Item {\n                                Rectangle {\n                                    anchors.bottom: parent.bottom\n                                    width: modelProgress.visualPosition * parent.width\n                                    height: 10\n                                    radius: 2\n                                    color: theme.progressForeground\n                                }\n                            }\n                        }\n                    }\n\n                    contentItem: Item {\n                        RowLayout {\n                            id: contentRow\n                            anchors.centerIn: parent\n                            spacing: 0\n                            Layout.maximumWidth: 550\n                            RowLayout {\n                                id: miniButtonsRow\n                                clip: true\n                   
             Layout.maximumWidth: 550\n                                Behavior on Layout.preferredWidth {\n                                    NumberAnimation {\n                                        duration: 300\n                                        easing.type: Easing.InOutQuad\n                                    }\n                                }\n\n                                Layout.preferredWidth: {\n                                    if (!(comboBox.hovered || reloadButton.hovered || ejectButton.hovered))\n                                        return 0\n                                    return (reloadButton.visible ? reloadButton.width : 0) + (ejectButton.visible ? ejectButton.width : 0)\n                                }\n\n                                MyMiniButton {\n                                    id: reloadButton\n                                    Layout.alignment: Qt.AlignCenter\n                                    visible: currentChat.modelLoadingError === \"\"\n                                        && !currentChat.trySwitchContextInProgress\n                                        && !currentChat.isCurrentlyLoading\n                                        && (currentChat.isModelLoaded || currentModelInstalled())\n                                    source: \"qrc:/gpt4all/icons/regenerate.svg\"\n                                    backgroundColor: theme.textColor\n                                    backgroundColorHovered: theme.styledTextColor\n                                    onClicked: {\n                                        if (currentChat.isModelLoaded)\n                                            currentChat.forceReloadModel();\n                                        else\n                                            currentChat.reloadModel();\n                                    }\n                                    ToolTip.text: qsTr(\"Reload the currently loaded model\")\n                                    
ToolTip.visible: hovered\n                                }\n\n                                MyMiniButton {\n                                    id: ejectButton\n                                    Layout.alignment: Qt.AlignCenter\n                                    visible: currentChat.isModelLoaded && !currentChat.isCurrentlyLoading\n                                    source: \"qrc:/gpt4all/icons/eject.svg\"\n                                    backgroundColor: theme.textColor\n                                    backgroundColorHovered: theme.styledTextColor\n                                    onClicked: {\n                                        currentChat.forceUnloadModel();\n                                    }\n                                    ToolTip.text: qsTr(\"Eject the currently loaded model\")\n                                    ToolTip.visible: hovered\n                                }\n                            }\n\n                            Text {\n                                Layout.maximumWidth: 520\n                                id: comboBoxText\n                                leftPadding: 10\n                                rightPadding: 10\n                                text: {\n                                    if (ModelList.selectableModels.count === 0)\n                                        return qsTr(\"No model installed.\")\n                                    if (currentChat.modelLoadingError !== \"\")\n                                        return qsTr(\"Model loading error.\")\n                                    if (currentChat.trySwitchContextInProgress === 1)\n                                        return qsTr(\"Waiting for model...\")\n                                    if (currentChat.trySwitchContextInProgress === 2)\n                                        return qsTr(\"Switching context...\")\n                                    if (currentModelName() === \"\")\n                                     
   return qsTr(\"Choose a model...\")\n                                    if (!currentModelInstalled())\n                                        return qsTr(\"Not found: %1\").arg(currentModelName())\n                                    if (currentChat.modelLoadingPercentage === 0.0)\n                                        return qsTr(\"Reload \\u00B7 %1\").arg(currentModelName())\n                                    if (currentChat.isCurrentlyLoading)\n                                        return qsTr(\"Loading \\u00B7 %1\").arg(currentModelName())\n                                    return currentModelName()\n                                }\n                                font.pixelSize: theme.fontSizeLarger\n                                color: theme.iconBackgroundLight\n                                verticalAlignment: Text.AlignVCenter\n                                horizontalAlignment: Text.AlignHCenter\n                                elide: Text.ElideRight\n                            }\n                            Item {\n                                Layout.minimumWidth: updown.width\n                                Layout.minimumHeight: updown.height\n                                Image {\n                                    id: updown\n                                    anchors.verticalCenter: parent.verticalCenter\n                                    sourceSize.width: comboBoxText.font.pixelSize\n                                    sourceSize.height: comboBoxText.font.pixelSize\n                                    mipmap: true\n                                    visible: false\n                                    source: \"qrc:/gpt4all/icons/up_down.svg\"\n                                }\n\n                                ColorOverlay {\n                                    anchors.fill: updown\n                                    source: updown\n                                    color: comboBoxText.color\n                         
       }\n                            }\n                        }\n                    }\n                    delegate: ItemDelegate {\n                        id: comboItemDelegate\n                        width: comboItemPopup.width -20\n                        contentItem: Text {\n                            text: name\n                            color: theme.textColor\n                            font: comboBox.font\n                            elide: Text.ElideRight\n                            verticalAlignment: Text.AlignVCenter\n                        }\n                        background: Rectangle {\n                            radius: 10\n                            color: highlighted ? theme.menuHighlightColor : theme.menuBackgroundColor\n                        }\n                        highlighted: comboBox.highlightedIndex === index\n                    }\n                    indicator: Item {\n                    }\n                    popup: Popup {\n                        id: comboItemPopup\n                        y: comboBox.height - 1\n                        width: comboBox.width\n                        implicitHeight: Math.min(window.height - y, contentItem.implicitHeight + 20)\n                        padding: 0\n                        contentItem: Rectangle {\n                            implicitWidth: comboBox.width\n                            implicitHeight: comboItemPopupListView.implicitHeight\n                            color: \"transparent\"\n                            radius: 10\n                            ScrollView {\n                                anchors.fill: parent\n                                anchors.margins: 10\n                                clip: true\n                                ScrollBar.vertical.policy: ScrollBar.AsNeeded\n                                ScrollBar.horizontal.policy: ScrollBar.AlwaysOff\n                                ListView {\n                                    id: 
comboItemPopupListView\n                                    implicitHeight: contentHeight\n                                    model: comboBox.popup.visible ? comboBox.delegateModel : null\n                                    currentIndex: comboBox.highlightedIndex\n                                    ScrollIndicator.vertical: ScrollIndicator { }\n                                }\n                            }\n                        }\n\n                        background: Rectangle {\n                            border.color: theme.menuBorderColor\n                            border.width: 1\n                            color: theme.menuBackgroundColor\n                            radius: 10\n                        }\n                    }\n\n                    Accessible.name: currentModelName()\n                    Accessible.description: qsTr(\"The top item is the current model\")\n                    onActivated: function (index) {\n                        var newInfo = ModelList.modelInfo(comboBox.valueAt(index));\n                        if (newInfo === currentChat.modelInfo) {\n                            currentChat.reloadModel();\n                        } else if (currentModelName() !== \"\" && chatModel.count !== 0) {\n                            switchModelDialog.index = index;\n                            switchModelDialog.open();\n                        } else {\n                            comboBox.changeModel(index);\n                        }\n                    }\n                }\n\n                Rectangle {\n                    color: \"transparent\"\n                    Layout.alignment: Qt.AlignRight\n                    Layout.rightMargin: 30\n                    Layout.fillWidth: true\n                    Layout.preferredHeight: childrenRect.height\n                    clip: true\n\n                    MyButton {\n                        id: collectionsButton\n                        clip: true\n                        
anchors.right: parent.right\n                        borderWidth: 0\n                        backgroundColor: theme.collectionsButtonBackground\n                        backgroundColorHovered: theme.collectionsButtonBackgroundHovered\n                        backgroundRadius: 5\n                        padding: 15\n                        topPadding: 8\n                        bottomPadding: 8\n\n                        contentItem: RowLayout {\n                            spacing: 10\n                            Item {\n                                visible: currentChat.collectionModel.count === 0\n                                Layout.minimumWidth: collectionsImage.width\n                                Layout.minimumHeight: collectionsImage.height\n                                Image {\n                                    id: collectionsImage\n                                    anchors.verticalCenter: parent.verticalCenter\n                                    sourceSize.width: 24\n                                    sourceSize.height: 24\n                                    mipmap: true\n                                    visible: false\n                                    source: \"qrc:/gpt4all/icons/db.svg\"\n                                }\n\n                                ColorOverlay {\n                                    anchors.fill: collectionsImage\n                                    source: collectionsImage\n                                    color: theme.collectionsButtonForeground\n                                }\n                            }\n\n                            MyBusyIndicator {\n                                visible: currentChat.collectionModel.updatingCount !== 0\n                                color: theme.collectionsButtonProgress\n                                size: 24\n                                Layout.minimumWidth: 24\n                                Layout.minimumHeight: 24\n                                
Text {\n                                    anchors.centerIn: parent\n                                    text: currentChat.collectionModel.updatingCount\n                                    color: theme.collectionsButtonForeground\n                                    font.pixelSize: 14 // fixed regardless of theme\n                                }\n                            }\n\n                            Rectangle {\n                                visible: currentChat.collectionModel.count !== 0\n                                radius: 6\n                                color: theme.collectionsButtonForeground\n                                Layout.minimumWidth: collectionsImage.width\n                                Layout.minimumHeight: collectionsImage.height\n                                Text {\n                                    anchors.centerIn: parent\n                                    text: currentChat.collectionModel.count\n                                    color: theme.collectionsButtonText\n                                    font.pixelSize: 14 // fixed regardless of theme\n                                }\n                            }\n\n                            Text {\n                                text: qsTr(\"LocalDocs\")\n                                color: theme.collectionsButtonForeground\n                                font.pixelSize: theme.fontSizeLarge\n                            }\n                        }\n\n                        fontPixelSize: theme.fontSizeLarge\n\n                        background: Rectangle {\n                            radius: collectionsButton.backgroundRadius\n                            // TODO(jared): either use collectionsButton-specific theming, or don't - this is inconsistent\n                            color: conversation.state === \"expanded\" ? (\n                                collectionsButton.hovered ? 
theme.lightButtonBackgroundHovered : theme.lightButtonBackground\n                            ) : (\n                                collectionsButton.hovered ? theme.lighterButtonBackground : theme.lighterButtonBackgroundHovered\n                            )\n                        }\n\n                        Accessible.name: qsTr(\"Add documents\")\n                        Accessible.description: qsTr(\"add collections of documents to the chat\")\n\n                        onClicked: {\n                            conversation.toggleRightPanel()\n                        }\n                    }\n                }\n            }\n        }\n\n        Rectangle {\n            id: conversationDivider\n            anchors.top: header.bottom\n            anchors.left: parent.left\n            anchors.right: parent.right\n            color: theme.conversationDivider\n            height: 1\n        }\n\n        CollectionsDrawer {\n            id: collectionsDrawer\n            anchors.right: parent.right\n            anchors.top: conversationDivider.bottom\n            anchors.bottom: parent.bottom\n            width: Math.max(180, Math.min(600, 0.23 * window.width))\n            color: theme.conversationBackground\n            onAddDocsClicked: {\n                addCollectionViewRequested()\n            }\n        }\n\n        Rectangle {\n            id: conversation\n            color: theme.conversationBackground\n            anchors.left: parent.left\n            anchors.right: parent.right\n            anchors.bottom: parent.bottom\n            anchors.top: conversationDivider.bottom\n            state: \"collapsed\"\n\n            states: [\n                State {\n                    name: \"expanded\"\n                    AnchorChanges {\n                        target: conversation\n                        anchors.right: collectionsDrawer.left\n                    }\n                },\n                State {\n                    name: \"collapsed\"\n   
                 AnchorChanges {\n                        target: conversation\n                        anchors.right: parent.right\n                    }\n                }\n            ]\n\n            function toggleRightPanel() {\n                if (conversation.state === \"expanded\") {\n                    conversation.state = \"collapsed\";\n                } else {\n                    conversation.state = \"expanded\";\n                }\n            }\n\n            transitions: Transition {\n                AnchorAnimation {\n                    easing.type: Easing.InOutQuad\n                    duration: 300\n                }\n            }\n\n            ScrollView {\n                id: scrollView\n                anchors.left: parent.left\n                anchors.right: parent.right\n                anchors.top: parent.top\n                anchors.bottom: !currentChat.isServer ? textInputView.top : parent.bottom\n                anchors.bottomMargin: !currentChat.isServer ? 30 : 0\n                ScrollBar.vertical.policy: ScrollBar.AlwaysOff\n\n                Rectangle {\n                    anchors.fill: parent\n                    color: currentChat.isServer ? 
theme.black : theme.conversationBackground\n\n                    Rectangle {\n                        id: homePage\n                        color: \"transparent\"\n                        anchors.fill: parent\n                        z: 200\n                        visible: !currentChat.isModelLoaded && (ModelList.selectableModels.count === 0 || currentModelName() === \"\") && !currentChat.isServer\n\n                        ColumnLayout {\n                            visible: ModelList.selectableModels.count !== 0\n                            id: modelInstalledLabel\n                            anchors.centerIn: parent\n                            spacing: 0\n\n                            Rectangle {\n                                Layout.alignment: Qt.AlignCenter\n                                Layout.preferredWidth: image.width\n                                Layout.preferredHeight: image.height\n                                color: \"transparent\"\n\n                                Image {\n                                    id: image\n                                    anchors.centerIn: parent\n                                    sourceSize.width: 160\n                                    sourceSize.height: 110\n                                    fillMode: Image.PreserveAspectFit\n                                    mipmap: true\n                                    visible: false\n                                    source: \"qrc:/gpt4all/icons/nomic_logo.svg\"\n                                }\n\n                                ColorOverlay {\n                                    anchors.fill: image\n                                    source: image\n                                    color: theme.containerBackground\n                                }\n                            }\n                        }\n\n                        MyButton {\n                            id: loadDefaultModelButton\n                            visible: 
ModelList.selectableModels.count !== 0\n                            anchors.top: modelInstalledLabel.bottom\n                            anchors.topMargin: 50\n                            anchors.horizontalCenter: modelInstalledLabel.horizontalCenter\n                            rightPadding: 60\n                            leftPadding: 60\n                            property string defaultModel: \"\"\n                            property string defaultModelName: \"\"\n                            function updateDefaultModel() {\n                                var i = comboBox.find(MySettings.userDefaultModel)\n                                if (i !== -1) {\n                                    defaultModel = comboBox.valueAt(i);\n                                } else {\n                                    defaultModel = comboBox.count ? comboBox.valueAt(0) : \"\";\n                                }\n                                if (defaultModel !== \"\") {\n                                    defaultModelName = ModelList.modelInfo(defaultModel).name;\n                                } else {\n                                    defaultModelName = \"\";\n                                }\n                            }\n\n                            text: qsTr(\"Load \\u00B7 %1 (default) \\u2192\").arg(defaultModelName);\n                            onClicked: {\n                                var i = comboBox.find(MySettings.userDefaultModel)\n                                if (i !== -1) {\n                                    comboBox.changeModel(i);\n                                } else {\n                                    comboBox.changeModel(0);\n                                }\n                            }\n\n                            // This requires a bit of work because apparently the combobox valueAt\n                            // function only works after the combobox component is loaded so we have\n                            // to use 
our own component loaded to make this work along with a signal\n                            // from MySettings for when the setting for user default model changes\n                            Connections {\n                                target: MySettings\n                                function onUserDefaultModelChanged() {\n                                    loadDefaultModelButton.updateDefaultModel()\n                                }\n                            }\n                            Component.onCompleted: {\n                                loadDefaultModelButton.updateDefaultModel()\n                            }\n                            Accessible.role: Accessible.Button\n                            Accessible.name: qsTr(\"Load the default model\")\n                            Accessible.description: qsTr(\"Loads the default model which can be changed in settings\")\n                        }\n\n                        ColumnLayout {\n                            id: noModelInstalledLabel\n                            visible: ModelList.selectableModels.count === 0\n                            anchors.centerIn: parent\n                            spacing: 0\n\n                            Text {\n                                Layout.alignment: Qt.AlignCenter\n                                text: qsTr(\"No Model Installed\")\n                                color: theme.mutedLightTextColor\n                                font.pixelSize: theme.fontSizeBannerSmall\n                            }\n\n                            Text {\n                                Layout.topMargin: 15\n                                horizontalAlignment: Qt.AlignHCenter\n                                color: theme.mutedLighterTextColor\n                                text: qsTr(\"GPT4All requires that you install at least one\\nmodel to get started\")\n                                font.pixelSize: theme.fontSizeLarge\n                            }\n         
               }\n\n                        MyButton {\n                            visible: ModelList.selectableModels.count === 0\n                            anchors.top: noModelInstalledLabel.bottom\n                            anchors.topMargin: 50\n                            anchors.horizontalCenter: noModelInstalledLabel.horizontalCenter\n                            rightPadding: 60\n                            leftPadding: 60\n                            text: qsTr(\"Install a Model\")\n                            onClicked: {\n                                addModelViewRequested();\n                            }\n                            Accessible.role: Accessible.Button\n                            Accessible.name: qsTr(\"Shows the add model view\")\n                        }\n                    }\n\n                    ColumnLayout {\n                        anchors.fill: parent\n                        visible: ModelList.selectableModels.count !== 0\n                        ListView {\n                            id: listView\n                            Layout.maximumWidth: 1280\n                            Layout.fillHeight: true\n                            Layout.fillWidth: true\n                            Layout.margins: 20\n                            Layout.leftMargin: 50\n                            Layout.rightMargin: 50\n                            Layout.alignment: Qt.AlignHCenter\n                            spacing: 10\n                            model: chatModel\n                            cacheBuffer: 2147483647\n\n                            ScrollBar.vertical: ScrollBar {\n                                policy: ScrollBar.AsNeeded\n                            }\n\n                            Accessible.role: Accessible.List\n                            Accessible.name: qsTr(\"Conversation with the model\")\n                            Accessible.description: qsTr(\"prompt / response pairs from the conversation\")\n\n           
                 delegate: ChatItemView {\n                                width: listView.contentItem.width - 15\n                                inputBoxText: textInput.text\n                                onSetInputBoxText: text => {\n                                    textInput.text = text;\n                                    textInput.forceActiveFocus();\n                                    textInput.cursorPosition = text.length;\n                                }\n                                height: visible ? implicitHeight : 0\n                                visible: name !== \"ToolResponse: \" && name !== \"System: \"\n                            }\n\n                            remove: Transition {\n                                OpacityAnimator { to: 0; duration: 500 }\n                            }\n\n                            function scrollToEnd() {\n                                listView.positionViewAtEnd()\n                            }\n\n                            onContentHeightChanged: {\n                                if (atYEnd)\n                                    scrollToEnd()\n                            }\n                        }\n                    }\n                }\n            }\n\n            Rectangle {\n                id: conversationTrayContent\n                anchors.bottom: conversationTrayButton.top\n                anchors.horizontalCenter: conversationTrayButton.horizontalCenter\n                width: conversationTrayContentLayout.width\n                height: conversationTrayContentLayout.height\n                color: theme.containerBackground\n                radius: 5\n                opacity: 0\n                visible: false\n                clip: true\n                z: 400\n\n                property bool isHovered: (\n                    conversationTrayButton.isHovered || resetContextButton.hovered || copyChatButton.hovered\n                )\n\n                state: 
conversationTrayContent.isHovered ? \"expanded\" : \"collapsed\"\n                states: [\n                    State {\n                        name: \"expanded\"\n                        PropertyChanges { target: conversationTrayContent; opacity: 1 }\n                    },\n                    State {\n                        name: \"collapsed\"\n                        PropertyChanges { target: conversationTrayContent; opacity: 0 }\n                    }\n                ]\n                transitions: [\n                    Transition {\n                        from: \"collapsed\"\n                        to: \"expanded\"\n                        SequentialAnimation {\n                            ScriptAction {\n                                script: conversationTrayContent.visible = true\n                            }\n                            PropertyAnimation {\n                                target: conversationTrayContent\n                                property: \"opacity\"\n                                duration: 300\n                                easing.type: Easing.InOutQuad\n                            }\n                        }\n                    },\n                    Transition {\n                        from: \"expanded\"\n                        to: \"collapsed\"\n                        SequentialAnimation {\n                            PropertyAnimation {\n                                target: conversationTrayContent\n                                property: \"opacity\"\n                                duration: 300\n                                easing.type: Easing.InOutQuad\n                            }\n                            ScriptAction {\n                                script: conversationTrayContent.visible = false\n                            }\n                        }\n                    }\n                ]\n\n                RowLayout {\n                    id: conversationTrayContentLayout\n           
         spacing: 0\n                    MyToolButton {\n                        id: resetContextButton\n                        Layout.preferredWidth: 40\n                        Layout.preferredHeight: 40\n                        source: \"qrc:/gpt4all/icons/recycle.svg\"\n                        imageWidth: 20\n                        imageHeight: 20\n                        onClicked: resetContextDialog.open()\n                        ToolTip.visible: resetContextButton.hovered\n                        ToolTip.text: qsTr(\"Erase and reset chat session\")\n                    }\n                    MyToolButton {\n                        id: copyChatButton\n                        Layout.preferredWidth: 40\n                        Layout.preferredHeight: 40\n                        source: \"qrc:/gpt4all/icons/copy.svg\"\n                        imageWidth: 20\n                        imageHeight: 20\n                        TextEdit{\n                            id: copyEdit\n                            visible: false\n                        }\n                        onClicked: {\n                            chatModel.copyToClipboard()\n                            copyMessage.open()\n                        }\n                        ToolTip.visible: copyChatButton.hovered\n                        ToolTip.text: qsTr(\"Copy chat session to clipboard\")\n                    }\n                }\n            }\n\n            Item {\n                id: conversationTrayButton\n                anchors.bottom: textInputView.top\n                anchors.horizontalCenter: textInputView.horizontalCenter\n                width: 40\n                height: 30\n                visible: chatModel.count && !currentChat.isServer && currentChat.isModelLoaded\n                property bool isHovered: conversationTrayMouseAreaButton.containsMouse\n                MouseArea {\n                    id: conversationTrayMouseAreaButton\n                    anchors.fill: parent\n    
                hoverEnabled: true\n                }\n                Text {\n                    id: conversationTrayTextButton\n                    anchors.centerIn: parent\n                    horizontalAlignment: Qt.AlignHCenter\n                    leftPadding: 5\n                    rightPadding: 5\n                    text: \"\\u00B7\\u00B7\\u00B7\"\n                    color: theme.textColor\n                    font.pixelSize: 30 // fixed size\n                    font.bold: true\n                }\n            }\n\n            MyButton {\n                anchors.bottom: textInputView.top\n                anchors.horizontalCenter: textInputView.horizontalCenter\n                anchors.bottomMargin: 20\n                textColor: theme.textColor\n                visible: !currentChat.isServer\n                    && !currentChat.isModelLoaded\n                    && currentChat.modelLoadingError === \"\"\n                    && !currentChat.trySwitchContextInProgress\n                    && !currentChat.isCurrentlyLoading\n                    && currentModelInstalled()\n\n                Image {\n                    anchors.verticalCenter: parent.verticalCenter\n                    anchors.left: parent.left\n                    anchors.leftMargin: 15\n                    sourceSize.width: 15\n                    sourceSize.height: 15\n                    source: \"qrc:/gpt4all/icons/regenerate.svg\"\n                }\n                leftPadding: 40\n                onClicked: {\n                    currentChat.reloadModel();\n                }\n\n                borderWidth: 1\n                backgroundColor: theme.conversationButtonBackground\n                backgroundColorHovered: theme.conversationButtonBackgroundHovered\n                backgroundRadius: 5\n                padding: 15\n                topPadding: 8\n                bottomPadding: 8\n                text: qsTr(\"Reload \\u00B7 %1\").arg(currentChat.modelInfo.name)\n                
fontPixelSize: theme.fontSizeSmall\n                Accessible.description: qsTr(\"Reloads the model\")\n            }\n\n            Text {\n                id: statusBar\n                property string externalHoveredLink: \"\"\n                anchors.top: textInputView.bottom\n                anchors.bottom: parent.bottom\n                anchors.right: parent.right\n                anchors.rightMargin: 30\n                anchors.left: parent.left\n                anchors.leftMargin: 30\n                horizontalAlignment: Qt.AlignRight\n                verticalAlignment: Qt.AlignVCenter\n                color: textInputView.error !== null ? theme.textErrorColor : theme.mutedTextColor\n                visible: currentChat.tokenSpeed !== \"\" || externalHoveredLink !== \"\" || textInputView.error !== null\n                elide: Text.ElideRight\n                wrapMode: Text.WordWrap\n                text: {\n                    if (externalHoveredLink !== \"\")\n                        return externalHoveredLink\n                    if (textInputView.error !== null)\n                        return textInputView.error;\n\n                    const segments = [currentChat.tokenSpeed];\n                    const device = currentChat.device;\n                    const backend = currentChat.deviceBackend;\n                    if (device !== null) { // device is null if we have no model loaded\n                        var deviceSegment = device;\n                        if (backend === \"CUDA\" || backend === \"Vulkan\")\n                            deviceSegment += ` (${backend})`;\n                        segments.push(deviceSegment);\n                    }\n                    const fallbackReason = currentChat.fallbackReason;\n                    if (fallbackReason !== null && fallbackReason !== \"\")\n                        segments.push(fallbackReason);\n                    return segments.join(\" \\u00B7 \");\n                }\n                
font.pixelSize: theme.fontSizeSmaller\n                font.bold: true\n                onLinkActivated: function(link) { Qt.openUrlExternally(link) }\n            }\n\n            RectangularGlow {\n                id: effect\n                visible: !currentChat.isServer && ModelList.selectableModels.count !== 0\n                anchors.fill: textInputView\n                glowRadius: 50\n                spread: 0\n                color: theme.sendGlow\n                cornerRadius: 10\n                opacity: 0.1\n            }\n\n            ListModel {\n                id: attachmentModel\n\n                function getAttachmentUrls() {\n                    var urls = [];\n                    for (var i = 0; i < attachmentModel.count; i++) {\n                        var item = attachmentModel.get(i);\n                        urls.push(item.url);\n                    }\n                    return urls;\n                }\n            }\n\n            Rectangle {\n                id: textInputView\n                color: theme.controlBackground\n                border.width: error === null ? 1 : 2\n                border.color: error === null ? 
theme.controlBorder : theme.textErrorColor\n                radius: 10\n                anchors.left: parent.left\n                anchors.right: parent.right\n                anchors.bottom: parent.bottom\n                anchors.margins: 30\n                anchors.leftMargin: Math.max((parent.width - 1310) / 2, 30)\n                anchors.rightMargin: Math.max((parent.width - 1310) / 2, 30)\n                height: textInputViewLayout.implicitHeight\n                visible: !currentChat.isServer && ModelList.selectableModels.count !== 0\n\n                property var error: null\n                function checkError() {\n                    const info = currentModelInfo;\n                    if (info === null || !info.id) {\n                        error = null;\n                    } else if (info.chatTemplate.isLegacy) {\n                        error = qsTr(\"Legacy prompt template needs to be \" +\n                                     \"<a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">updated\" +\n                                     \"</a> in Settings.\");\n                    } else if (!info.chatTemplate.isSet) {\n                        error = qsTr(\"No <a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">\" +\n                                     \"chat template</a> configured.\");\n                    } else if (/^\\s*$/.test(info.chatTemplate.value)) {\n                        error = qsTr(\"The <a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">\" +\n                                     \"chat template</a> cannot be blank.\");\n                    } else if (info.systemMessage.isLegacy) {\n                        error = qsTr(\"Legacy system prompt needs to be \" +\n                                     \"<a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">updated\" +\n                                     \"</a> in Settings.\");\n                    } 
else\n                        error = null;\n                }\n                Component.onCompleted: checkError()\n                Connections {\n                    target: window\n                    function onCurrentModelIdChanged() { textInputView.checkError(); }\n                }\n                Connections {\n                    target: MySettings\n                    function onChatTemplateChanged(info)\n                    { if (info.id === window.currentModelId) textInputView.checkError(); }\n                    function onSystemMessageChanged(info)\n                    { if (info.id === window.currentModelId) textInputView.checkError(); }\n                }\n\n                MouseArea {\n                    id: textInputViewMouseArea\n                    anchors.fill: parent\n                    onClicked: (mouse) => {\n                        if (textInput.enabled)\n                            textInput.forceActiveFocus();\n                    }\n                }\n\n                GridLayout {\n                    id: textInputViewLayout\n                    anchors.left: parent.left\n                    anchors.right: parent.right\n                    rows: 2\n                    columns: 3\n                    rowSpacing: 10\n                    columnSpacing: 0\n                    Flow {\n                        id: attachmentsFlow\n                        visible: attachmentModel.count\n                        Layout.row: 0\n                        Layout.column: 1\n                        Layout.topMargin: 15\n                        Layout.leftMargin: 5\n                        Layout.rightMargin: 15\n                        spacing: 10\n\n                        Repeater {\n                            model: attachmentModel\n\n                            Rectangle {\n                                width: 350\n                                height: 50\n                                radius: 5\n                                color: 
theme.attachmentBackground\n                                border.color: theme.controlBorder\n\n                                Row {\n                                    spacing: 5\n                                    anchors.fill: parent\n                                    anchors.margins: 5\n\n                                    MyFileIcon {\n                                        iconSize: 40\n                                        fileName: model.file\n                                    }\n\n                                    Text {\n                                        width: 265\n                                        height: 40\n                                        text: model.file\n                                        color: theme.textColor\n                                        horizontalAlignment: Text.AlignHLeft\n                                        verticalAlignment: Text.AlignVCenter\n                                        font.pixelSize: theme.fontSizeMedium\n                                        font.bold: true\n                                        wrapMode: Text.WrapAnywhere\n                                        elide: Qt.ElideRight\n                                    }\n                                }\n\n                                MyMiniButton {\n                                    id: removeAttachmentButton\n                                    anchors.top: parent.top\n                                    anchors.right: parent.right\n                                    backgroundColor: theme.textColor\n                                    backgroundColorHovered: theme.iconBackgroundDark\n                                    source: \"qrc:/gpt4all/icons/close.svg\"\n                                    onClicked: {\n                                        attachmentModel.remove(index)\n                                        if (textInput.enabled)\n                                            
textInput.forceActiveFocus();\n                                    }\n                                }\n                            }\n                        }\n                    }\n\n                    MyToolButton {\n                        id: plusButton\n                        Layout.row: 1\n                        Layout.column: 0\n                        Layout.leftMargin: 15\n                        Layout.rightMargin: 15\n                        Layout.alignment: Qt.AlignCenter\n                        backgroundColor: theme.conversationInputButtonBackground\n                        backgroundColorHovered: theme.conversationInputButtonBackgroundHovered\n                        imageWidth: theme.fontSizeLargest\n                        imageHeight: theme.fontSizeLargest\n                        visible: !currentChat.isServer && ModelList.selectableModels.count !== 0 && currentChat.isModelLoaded\n                        enabled: !currentChat.responseInProgress\n                        source: \"qrc:/gpt4all/icons/paperclip.svg\"\n                        Accessible.name: qsTr(\"Add media\")\n                        Accessible.description: qsTr(\"Adds media to the prompt\")\n\n                        onClicked: (mouse) => {\n                                       addMediaMenu.open()\n                                   }\n                    }\n\n                    ScrollView {\n                        id: textInputScrollView\n                        Layout.row: 1\n                        Layout.column: 1\n                        Layout.fillWidth: true\n                        Layout.leftMargin: plusButton.visible ? 
5 : 15\n                        Layout.margins: 15\n                        height: Math.min(contentHeight, 200)\n\n                        MyTextArea {\n                            id: textInput\n                            color: theme.textColor\n                            padding: 0\n                            enabled: currentChat.isModelLoaded && !currentChat.isServer\n                            onEnabledChanged: {\n                                if (textInput.enabled)\n                                    textInput.forceActiveFocus();\n                            }\n                            font.pixelSize: theme.fontSizeLarger\n                            placeholderText: currentChat.isModelLoaded ? qsTr(\"Send a message...\") : qsTr(\"Load a model to continue...\")\n                            Accessible.role: Accessible.EditableText\n                            Accessible.name: placeholderText\n                            Accessible.description: qsTr(\"Send messages/prompts to the model\")\n                            Keys.onReturnPressed: event => {\n                                if (event.modifiers & Qt.ControlModifier || event.modifiers & Qt.ShiftModifier) {\n                                    event.accepted = false;\n                                } else if (!chatModel.hasError && textInputView.error === null) {\n                                    editingFinished();\n                                    sendMessage();\n                                }\n                            }\n                            function sendMessage() {\n                                if ((textInput.text === \"\" && attachmentModel.count === 0) || currentChat.responseInProgress)\n                                    return\n\n                                currentChat.stopGenerating()\n                                currentChat.newPromptResponsePair(textInput.text, attachmentModel.getAttachmentUrls())\n                                attachmentModel.clear();\n 
                               textInput.text = \"\"\n                            }\n\n                            MouseArea {\n                                id: textInputMouseArea\n                                anchors.fill: parent\n                                acceptedButtons: Qt.RightButton\n\n                                onClicked: (mouse) => {\n                                               if (mouse.button === Qt.RightButton) {\n                                                   textInputContextMenu.x = textInputMouseArea.mouseX\n                                                   textInputContextMenu.y = textInputMouseArea.mouseY\n                                                   textInputContextMenu.open()\n                                               }\n                                           }\n                            }\n\n                            background: Rectangle {\n                                implicitWidth: 150\n                                color: \"transparent\"\n                            }\n\n                            MyMenu {\n                                id: textInputContextMenu\n                                MyMenuItem {\n                                    text: qsTr(\"Cut\")\n                                    enabled: textInput.selectedText !== \"\"\n                                    height: enabled ? implicitHeight : 0\n                                    onTriggered: textInput.cut()\n                                }\n                                MyMenuItem {\n                                    text: qsTr(\"Copy\")\n                                    enabled: textInput.selectedText !== \"\"\n                                    height: enabled ? 
implicitHeight : 0\n                                    onTriggered: textInput.copy()\n                                }\n                                MyMenuItem {\n                                    text: qsTr(\"Paste\")\n                                    onTriggered: textInput.paste()\n                                }\n                                MyMenuItem {\n                                    text: qsTr(\"Select All\")\n                                    onTriggered: textInput.selectAll()\n                                }\n                            }\n                        }\n                    }\n\n                    Row {\n                        Layout.row: 1\n                        Layout.column: 2\n                        Layout.rightMargin: 15\n                        Layout.alignment: Qt.AlignCenter\n\n                        MyToolButton {\n                            id: stopButton\n                            backgroundColor: theme.conversationInputButtonBackground\n                            backgroundColorHovered: theme.conversationInputButtonBackgroundHovered\n                            visible: currentChat.responseInProgress && !currentChat.isServer\n\n                            background: Item {\n                                anchors.fill: parent\n                                Image {\n                                    id: stopImage\n                                    anchors.centerIn: parent\n                                    visible: false\n                                    fillMode: Image.PreserveAspectFit\n                                    mipmap: true\n                                    sourceSize.width: theme.fontSizeLargest\n                                    sourceSize.height: theme.fontSizeLargest\n                                    source: \"qrc:/gpt4all/icons/stop_generating.svg\"\n                                }\n                                Rectangle {\n                                    
anchors.centerIn: stopImage\n                                    width: theme.fontSizeLargest + 8\n                                    height: theme.fontSizeLargest + 8\n                                    color: theme.viewBackground\n                                    border.pixelAligned: false\n                                    border.color: theme.controlBorder\n                                    border.width: 1\n                                    radius: width / 2\n                                }\n                                ColorOverlay {\n                                    anchors.fill: stopImage\n                                    source: stopImage\n                                    color: stopButton.hovered ? stopButton.backgroundColorHovered : stopButton.backgroundColor\n                                }\n                            }\n\n                            Accessible.name: qsTr(\"Stop generating\")\n                            Accessible.description: qsTr(\"Stop the current response generation\")\n                            ToolTip.visible: stopButton.hovered\n                            ToolTip.text: Accessible.description\n\n                            onClicked: {\n                                // FIXME: This no longer sets a 'stopped' field so conversations that\n                                // are copied to clipboard or to datalake don't indicate if the user\n                                // has prematurely stopped the response. 
This has been broken since\n                                // v3.0.0 at least.\n                                currentChat.stopGenerating()\n                            }\n                        }\n\n                        MyToolButton {\n                            id: sendButton\n                            backgroundColor: theme.conversationInputButtonBackground\n                            backgroundColorHovered: theme.conversationInputButtonBackgroundHovered\n                            imageWidth: theme.fontSizeLargest\n                            imageHeight: theme.fontSizeLargest\n                            visible: !currentChat.responseInProgress && !currentChat.isServer && ModelList.selectableModels.count !== 0\n                            enabled: !chatModel.hasError && textInputView.error === null\n                            source: \"qrc:/gpt4all/icons/send_message.svg\"\n                            Accessible.name: qsTr(\"Send message\")\n                            Accessible.description: qsTr(\"Sends the message/prompt contained in textfield to the model\")\n                            ToolTip.visible: sendButton.hovered\n                            ToolTip.text: Accessible.description\n\n                            onClicked: {\n                                textInput.sendMessage()\n                            }\n                        }\n                    }\n                }\n            }\n\n            MyFileDialog {\n                id: fileDialog\n                nameFilters: [\"All Supported Files (*.txt *.md *.rst *.xlsx)\", \"Text Files (*.txt *.md *.rst)\", \"Excel Worksheets (*.xlsx)\"]\n            }\n\n            MyMenu {\n                id: addMediaMenu\n                x: textInputView.x\n                y: textInputView.y - addMediaMenu.height - 10;\n                title: qsTr(\"Attach\")\n                MyMenuItem {\n                    text: qsTr(\"Single File\")\n                    icon.source: 
\"qrc:/gpt4all/icons/file.svg\"\n                    icon.width: 24\n                    icon.height: 24\n                    onClicked: {\n                        fileDialog.openFileDialog(StandardPaths.writableLocation(StandardPaths.HomeLocation), function(selectedFile) {\n                            if (selectedFile) {\n                                var file = selectedFile.toString().split(\"/\").pop()\n                                attachmentModel.append({\n                                    file: file,\n                                    url: selectedFile\n                                })\n                            }\n                            if (textInput.enabled)\n                                textInput.forceActiveFocus();\n                        })\n                    }\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/CollectionsDrawer.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport chatlistmodel\nimport localdocs\nimport llm\n\nRectangle {\n    id: collectionsDrawer\n\n    color: \"transparent\"\n\n    signal addDocsClicked\n    property var currentChat: ChatListModel.currentChat\n\n    Rectangle {\n        id: borderLeft\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.left: parent.left\n        width: 1\n        color: theme.dividerColor\n    }\n\n    ScrollView {\n        id: scrollView\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.left: borderLeft.right\n        anchors.right: parent.right\n        anchors.margins: 2\n        anchors.bottomMargin: 10\n        clip: true\n        contentHeight: 300\n        ScrollBar.vertical.policy: ScrollBar.AsNeeded\n\n        ListView {\n            id: listView\n            model: LocalDocs.localDocsModel\n            anchors.fill: parent\n            anchors.margins: 13\n            anchors.bottomMargin: 5\n            boundsBehavior: Flickable.StopAtBounds\n            spacing: 15\n\n            delegate: Rectangle {\n                width: listView.width\n                height: childrenRect.height + 15\n                color: checkBox.checked ? 
theme.collectionsButtonBackground : \"transparent\"\n\n                RowLayout {\n                    anchors.top: parent.top\n                    anchors.left: parent.left\n                    anchors.right: parent.right\n                    anchors.margins: 7.5\n                    MyCheckBox {\n                        id: checkBox\n                        Layout.alignment: Qt.AlignLeft\n                        checked: currentChat.hasCollection(collection)\n                        onClicked: {\n                            if (checkBox.checked) {\n                                currentChat.addCollection(collection)\n                            } else {\n                                currentChat.removeCollection(collection)\n                            }\n                        }\n                        ToolTip.text: qsTr(\"Warning: searching collections while indexing can return incomplete results\")\n                        ToolTip.visible: hovered && model.indexing\n                    }\n                    ColumnLayout {\n                        Layout.fillWidth: true\n                        Layout.alignment: Qt.AlignLeft\n                        Text {\n                            Layout.fillWidth: true\n                            Layout.alignment: Qt.AlignLeft\n                            text: collection\n                            font.pixelSize: theme.fontSizeLarger\n                            elide: Text.ElideRight\n                            color: theme.textColor\n                        }\n                        Text {\n                            Layout.fillWidth: true\n                            Layout.alignment: Qt.AlignLeft\n                            text: \"%1 – %2\".arg(qsTr(\"%n file(s)\", \"\", model.totalDocs)).arg(qsTr(\"%n word(s)\", \"\", model.totalWords))\n                            elide: Text.ElideRight\n                            color: theme.mutedTextColor\n                            font.pixelSize: 
theme.fontSizeSmall\n                        }\n                        RowLayout {\n                            visible: model.updating\n                            Layout.fillWidth: true\n                            Layout.alignment: Qt.AlignLeft\n                            MyBusyIndicator {\n                                color: theme.accentColor\n                                size: 24\n                                Layout.minimumWidth: 24\n                                Layout.minimumHeight: 24\n                            }\n                            Text {\n                                text: qsTr(\"Updating\")\n                                elide: Text.ElideRight\n                                color: theme.accentColor\n                                font.pixelSize: theme.fontSizeSmall\n                                font.bold: true\n                            }\n                        }\n                    }\n                }\n            }\n\n            footer: ColumnLayout {\n                width: listView.width\n                spacing: 30\n                Rectangle {\n                    visible: listView.count !== 0\n                    Layout.topMargin: 30\n                    Layout.fillWidth: true\n                    height: 1\n                    color: theme.dividerColor\n                }\n                MySettingsButton {\n                    id: collectionSettings\n                    enabled: LocalDocs.databaseValid\n                    Layout.alignment: Qt.AlignCenter\n                    text: qsTr(\"\\uFF0B Add Docs\")\n                    font.pixelSize: theme.fontSizeLarger\n                    onClicked: {\n                        addDocsClicked()\n                    }\n                }\n                Text {\n                    Layout.fillWidth: true\n                    Layout.alignment: Qt.AlignLeft\n                    text: qsTr(\"Select a collection to make it available to the chat model.\")\n            
        font.pixelSize: theme.fontSizeLarger\n                    wrapMode: Text.WordWrap\n                    elide: Text.ElideRight\n                    color: theme.mutedTextColor\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ConfirmationDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\n\nMyDialog {\n    id: confirmationDialog\n    anchors.centerIn: parent\n    modal: true\n    padding: 20\n    property alias dialogTitle: titleText.text\n    property alias description: descriptionText.text\n\n    Theme { id: theme }\n\n    contentItem: ColumnLayout {\n        Text {\n            id: titleText\n            Layout.alignment: Qt.AlignHCenter\n            textFormat: Text.StyledText\n            color: theme.textColor\n            font.pixelSize: theme.fontSizeLarger\n            font.bold: true\n        }\n\n        Text {\n            id: descriptionText\n            Layout.alignment: Qt.AlignHCenter\n            textFormat: Text.StyledText\n            color: theme.textColor\n            font.pixelSize: theme.fontSizeMedium\n        }\n    }\n\n    footer: DialogButtonBox {\n        id: dialogBox\n        padding: 20\n        alignment: Qt.AlignRight\n        spacing: 10\n        MySettingsButton {\n            text: qsTr(\"OK\")\n            textColor: theme.mediumButtonText\n            backgroundColor: theme.mediumButtonBackground\n            backgroundColorHovered: theme.mediumButtonBackgroundHovered\n            DialogButtonBox.buttonRole: DialogButtonBox.AcceptRole\n        }\n        MySettingsButton {\n            text: qsTr(\"Cancel\")\n            DialogButtonBox.buttonRole: DialogButtonBox.RejectRole\n        }\n        background: Rectangle {\n            color: \"transparent\"\n        }\n        Keys.onEnterPressed: confirmationDialog.accept()\n        Keys.onReturnPressed: confirmationDialog.accept()\n    }\n    Component.onCompleted: dialogBox.forceActiveFocus()\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/HomeView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport Qt5Compat.GraphicalEffects\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\n\nRectangle {\n    id: homeView\n\n    Theme {\n        id: theme\n    }\n\n    color: theme.viewBackground\n    signal chatViewRequested()\n    signal localDocsViewRequested()\n    signal settingsViewRequested(int page)\n    signal addModelViewRequested()\n    property bool shouldShowFirstStart: false\n\n    ColumnLayout {\n        id: mainArea\n        anchors.fill: parent\n        anchors.margins: 30\n        spacing: 30\n\n        ColumnLayout {\n            Layout.fillWidth: true\n            Layout.maximumWidth: 1530\n            Layout.alignment: Qt.AlignCenter\n            Layout.topMargin: 20\n            spacing: 30\n\n            ColumnLayout {\n                Layout.alignment: Qt.AlignHCenter\n                spacing: 5\n\n                Text {\n                    id: welcome\n                    Layout.alignment: Qt.AlignHCenter\n                    text: qsTr(\"Welcome to GPT4All\")\n                    font.pixelSize: theme.fontSizeBannerLarge\n                    color: theme.titleTextColor\n                }\n\n                Text {\n                    Layout.alignment: Qt.AlignHCenter\n                    text: qsTr(\"The privacy-first LLM chat application\")\n                    font.pixelSize: theme.fontSizeLarge\n                    color: theme.titleInfoTextColor\n                }\n            }\n\n            MyButton {\n                id: startChat\n                visible: shouldShowFirstStart\n                Layout.alignment: Qt.AlignHCenter\n                text: qsTr(\"Start chatting\")\n                onClicked: {\n                    chatViewRequested()\n                }\n            }\n\n            RowLayout {\n                spacing: 15\n        
        visible: !startChat.visible\n                Layout.alignment: Qt.AlignHCenter\n\n                MyWelcomeButton {\n                    Layout.fillWidth: true\n                    Layout.maximumWidth: 150 + 200 * theme.fontScale\n                    Layout.preferredHeight: 40 + 90 * theme.fontScale\n                    text: qsTr(\"Start Chatting\")\n                    description: qsTr(\"Chat with any LLM\")\n                    imageSource: \"qrc:/gpt4all/icons/chat.svg\"\n                    onClicked: {\n                        chatViewRequested()\n                    }\n                }\n                MyWelcomeButton {\n                    Layout.fillWidth: true\n                    Layout.maximumWidth: 150 + 200 * theme.fontScale\n                    Layout.preferredHeight: 40 + 90 * theme.fontScale\n                    text: qsTr(\"LocalDocs\")\n                    description: qsTr(\"Chat with your local files\")\n                    imageSource: \"qrc:/gpt4all/icons/db.svg\"\n                    onClicked: {\n                        localDocsViewRequested()\n                    }\n                }\n                MyWelcomeButton {\n                    Layout.fillWidth: true\n                    Layout.maximumWidth: 150 + 200 * theme.fontScale\n                    Layout.preferredHeight: 40 + 90 * theme.fontScale\n                    text: qsTr(\"Find Models\")\n                    description: qsTr(\"Explore and download models\")\n                    imageSource: \"qrc:/gpt4all/icons/models.svg\"\n                    onClicked: {\n                        addModelViewRequested()\n                    }\n                }\n            }\n\n            Item {\n                visible: !startChat.visible && Download.latestNews !== \"\"\n                Layout.fillWidth: true\n                Layout.fillHeight: true\n                Layout.minimumHeight: 120\n                Layout.maximumHeight: textAreaNews.height\n\n                Rectangle 
{\n                    id: roundedFrameNews // latest news\n                    anchors.fill: parent\n                    z: 299\n                    radius: 10\n                    border.width: 1\n                    border.color: theme.controlBorder\n                    color: \"transparent\"\n                    clip: true\n                }\n\n                Item {\n                    anchors.fill: parent\n                    layer.enabled: true\n                    layer.effect: OpacityMask {\n                        maskSource: Rectangle {\n                            width: roundedFrameNews.width\n                            height: roundedFrameNews.height\n                            radius: 10\n                        }\n                    }\n\n                    RowLayout {\n                        spacing: 0\n                        anchors.fill: parent\n                        Rectangle {\n                            color: \"transparent\"\n                            width: 82\n                            height: 100\n                            Image {\n                                id: newsImg\n                                anchors.centerIn: parent\n                                sourceSize: Qt.size(48, 48)\n                                mipmap: true\n                                visible: false\n                                source: \"qrc:/gpt4all/icons/gpt4all_transparent.svg\"\n                            }\n\n                            ColorOverlay {\n                                anchors.fill: newsImg\n                                source: newsImg\n                                color: theme.styledTextColor\n                            }\n                        }\n\n                        Item {\n                            id: myItem\n                            Layout.fillWidth: true\n                            Layout.fillHeight: true\n                            Rectangle {\n                                
anchors.fill: parent\n                                color: theme.conversationBackground\n                            }\n\n                            ScrollView {\n                                id: newsScroll\n                                anchors.fill: parent\n                                clip: true\n                                ScrollBar.vertical.policy: ScrollBar.AsNeeded\n                                ScrollBar.horizontal.policy: ScrollBar.AlwaysOff\n                                Text {\n                                    id: textAreaNews\n                                    width: myItem.width\n                                    padding: 20\n                                    color: theme.styledTextColor\n                                    font.pixelSize: theme.fontSizeLarger\n                                    textFormat: TextEdit.MarkdownText\n                                    wrapMode: Text.WordWrap\n                                    text: Download.latestNews\n                                    focus: false\n                                    Accessible.role: Accessible.Paragraph\n                                    Accessible.name: qsTr(\"Latest news\")\n                                    Accessible.description: qsTr(\"Latest news from GPT4All\")\n                                    onLinkActivated: function(link) {\n                                        Qt.openUrlExternally(link);\n                                    }\n                                }\n                            }\n                        }\n                    }\n                }\n            }\n        }\n\n        Rectangle {\n            id: linkBar\n            Layout.alignment: Qt.AlignBottom\n            Layout.fillWidth: true\n            border.width: 1\n            border.color: theme.dividerColor\n            radius: 6\n            z: 200\n            height: 30\n            color: theme.conversationBackground\n\n            RowLayout {\n       
         anchors.fill: parent\n                spacing: 0\n                RowLayout {\n                    Layout.alignment: Qt.AlignLeft | Qt.AlignVCenter\n                    spacing: 4\n\n                    MyFancyLink {\n                        text: qsTr(\"Release Notes\")\n                        imageSource: \"qrc:/gpt4all/icons/notes.svg\"\n                        onClicked: { Qt.openUrlExternally(\"https://github.com/nomic-ai/gpt4all/releases\") }\n                    }\n\n                    MyFancyLink {\n                        text: qsTr(\"Documentation\")\n                        imageSource: \"qrc:/gpt4all/icons/info.svg\"\n                        onClicked: { Qt.openUrlExternally(\"https://docs.gpt4all.io/\") }\n                    }\n\n                    MyFancyLink {\n                        text: qsTr(\"Discord\")\n                        imageSource: \"qrc:/gpt4all/icons/discord.svg\"\n                        onClicked: { Qt.openUrlExternally(\"https://discord.gg/4M2QFmTt2k\") }\n                    }\n\n                    MyFancyLink {\n                        text: qsTr(\"X (Twitter)\")\n                        imageSource: \"qrc:/gpt4all/icons/twitter.svg\"\n                        onClicked: { Qt.openUrlExternally(\"https://twitter.com/nomic_ai\") }\n                    }\n\n                    MyFancyLink {\n                        text: qsTr(\"Github\")\n                        imageSource: \"qrc:/gpt4all/icons/github.svg\"\n                        onClicked: { Qt.openUrlExternally(\"https://github.com/nomic-ai/gpt4all\") }\n                    }\n                }\n\n                RowLayout {\n                    Layout.alignment: Qt.AlignRight | Qt.AlignVCenter\n                    spacing: 40\n\n                    MyFancyLink {\n                        text: qsTr(\"nomic.ai\")\n                        imageSource: \"qrc:/gpt4all/icons/globe.svg\"\n                        onClicked: { 
Qt.openUrlExternally(\"https://www.nomic.ai/gpt4all\") }\n                        rightPadding: 15\n                    }\n                }\n            }\n        }\n    }\n\n    Rectangle {\n        anchors.top: mainArea.top\n        anchors.right: mainArea.right\n        border.width: 1\n        border.color: theme.dividerColor\n        radius: 6\n        z: 200\n        height: 30\n        color: theme.conversationBackground\n        width: subscribeLink.width\n        RowLayout {\n            anchors.centerIn: parent\n            MyFancyLink {\n                id: subscribeLink\n                Layout.alignment: Qt.AlignCenter\n                text: qsTr(\"Subscribe to Newsletter\")\n                imageSource: \"qrc:/gpt4all/icons/email.svg\"\n                onClicked: { Qt.openUrlExternally(\"https://nomic.ai/gpt4all/#newsletter-form\") }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/LocalDocsSettings.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport localdocs\nimport modellist\nimport mysettings\nimport network\n\nMySettingsTab {\n    onRestoreDefaults: {\n        MySettings.restoreLocalDocsDefaults();\n    }\n\n    showRestoreDefaultsButton: true\n\n    title: qsTr(\"LocalDocs\")\n    contentItem: ColumnLayout {\n        id: root\n        spacing: 30\n\n        Label {\n            Layout.bottomMargin: 10\n            color: theme.settingsTitleTextColor\n            font.pixelSize: theme.fontSizeBannerSmall\n            font.bold: true\n            text: qsTr(\"LocalDocs Settings\")\n        }\n\n        ColumnLayout {\n            spacing: 10\n            Label {\n                color: theme.styledTextColor\n                font.pixelSize: theme.fontSizeLarge\n                font.bold: true\n                text: qsTr(\"Indexing\")\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 1\n                color: theme.settingsDivider\n            }\n        }\n\n        RowLayout {\n            MySettingsLabel {\n                id: extsLabel\n                text: qsTr(\"Allowed File Extensions\")\n                helpText: qsTr(\"Comma-separated list. 
LocalDocs will only attempt to process files with these extensions.\")\n            }\n            MyTextField {\n                id: extsField\n                text: MySettings.localDocsFileExtensions.join(',')\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                Layout.alignment: Qt.AlignRight\n                Layout.minimumWidth: 200\n                validator: RegularExpressionValidator {\n                    regularExpression: /([^ ,\\/\"']+,?)*/\n                }\n                onEditingFinished: {\n                    // split and remove empty elements\n                    var exts = text.split(',').filter(e => e);\n                    // normalize and deduplicate\n                    exts = exts.map(e => e.toLowerCase());\n                    exts = Array.from(new Set(exts));\n                    /* Blacklist common unsupported file extensions. We only support plain text and PDFs, and although we\n                     * reject binary data, we don't want to waste time trying to index files that we don't support. 
*/\n                    exts = exts.filter(e => ![\n                        /* Microsoft documents  */ \"rtf\", \"ppt\", \"pptx\", \"xls\", \"xlsx\",\n                        /* OpenOffice           */ \"odt\", \"ods\", \"odp\", \"odg\",\n                        /* photos               */ \"jpg\", \"jpeg\", \"png\", \"gif\", \"bmp\", \"tif\", \"tiff\", \"webp\",\n                        /* audio                */ \"mp3\", \"wma\", \"m4a\", \"wav\", \"flac\",\n                        /* videos               */ \"mp4\", \"mov\", \"webm\", \"mkv\", \"avi\", \"flv\", \"wmv\",\n                        /* executables          */ \"exe\", \"com\", \"dll\", \"so\", \"dylib\", \"msi\",\n                        /* binary images        */ \"iso\", \"img\", \"dmg\",\n                        /* archives             */ \"zip\", \"jar\", \"apk\", \"rar\", \"7z\", \"tar\", \"gz\", \"xz\", \"bz2\", \"tar.gz\",\n                                                   \"tgz\", \"tar.xz\", \"tar.bz2\",\n                        /* misc                 */ \"bin\",\n                    ].includes(e));\n                    MySettings.localDocsFileExtensions = exts;\n                    extsField.text = exts.join(',');\n                    focus = false;\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: extsLabel.text\n                Accessible.description: extsLabel.helpText\n            }\n        }\n\n        ColumnLayout {\n            spacing: 10\n            Label {\n                color: theme.grayRed900\n                font.pixelSize: theme.fontSizeLarge\n                font.bold: true\n                text: qsTr(\"Embedding\")\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 1\n                color: theme.grayRed500\n            }\n        }\n\n        RowLayout {\n            MySettingsLabel {\n                text: qsTr(\"Use Nomic Embed API\")\n                
helpText: qsTr(\"Embed documents using the fast Nomic API instead of a private local model. Requires restart.\")\n            }\n\n            MyCheckBox {\n                id: useNomicAPIBox\n                Component.onCompleted: {\n                    useNomicAPIBox.checked = MySettings.localDocsUseRemoteEmbed;\n                }\n                onClicked: {\n                    MySettings.localDocsUseRemoteEmbed = useNomicAPIBox.checked && MySettings.localDocsNomicAPIKey !== \"\";\n                }\n            }\n        }\n\n        RowLayout {\n            MySettingsLabel {\n                id: apiKeyLabel\n                text: qsTr(\"Nomic API Key\")\n                helpText: qsTr('API key to use for Nomic Embed. Get one from the Atlas <a href=\"https://atlas.nomic.ai/cli-login\">API keys page</a>. Requires restart.')\n                onLinkActivated: function(link) { Qt.openUrlExternally(link) }\n            }\n\n            MyTextField {\n                id: apiKeyField\n\n                property bool isValid: validate()\n                onTextChanged: { isValid = validate(); }\n                function validate() { return /^(nk-[a-zA-Z0-9_-]{43})?$/.test(apiKeyField.text); }\n\n                placeholderText: \"nk-\" + \"X\".repeat(43)\n                text: MySettings.localDocsNomicAPIKey\n                color: apiKeyField.isValid ? 
theme.textColor : theme.textErrorColor\n                font.pixelSize: theme.fontSizeLarge\n                Layout.alignment: Qt.AlignRight\n                Layout.minimumWidth: 200\n                enabled: useNomicAPIBox.checked\n                onEditingFinished: {\n                    if (apiKeyField.isValid) {\n                        MySettings.localDocsNomicAPIKey = apiKeyField.text;\n                        MySettings.localDocsUseRemoteEmbed = useNomicAPIBox.checked && MySettings.localDocsNomicAPIKey !== \"\";\n                    }\n                    focus = false;\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: apiKeyLabel.text\n                Accessible.description: apiKeyLabel.helpText\n            }\n        }\n\n        RowLayout {\n            MySettingsLabel {\n                id: deviceLabel\n                text: qsTr(\"Embeddings Device\")\n                helpText: qsTr('The compute device used for embeddings. 
Requires restart.')\n            }\n            MyComboBox {\n                id: deviceBox\n                enabled: !useNomicAPIBox.checked\n                Layout.minimumWidth: 400\n                Layout.maximumWidth: 400\n                Layout.fillWidth: false\n                Layout.alignment: Qt.AlignRight\n                model: ListModel {\n                    ListElement { text: qsTr(\"Application default\") }\n                    Component.onCompleted: {\n                        MySettings.embeddingsDeviceList.forEach(d => append({\"text\": d}));\n                        deviceBox.updateModel();\n                    }\n                }\n                Accessible.name: deviceLabel.text\n                Accessible.description: deviceLabel.helpText\n                function updateModel() {\n                    var device = MySettings.localDocsEmbedDevice;\n                    // This usage of 'Auto' should not be translated\n                    deviceBox.currentIndex = device === \"Auto\" ? 0 : deviceBox.indexOfValue(device);\n                }\n                Component.onCompleted: {\n                    deviceBox.updateModel();\n                }\n                Connections {\n                    target: MySettings\n                    function onDeviceChanged() {\n                        deviceBox.updateModel();\n                    }\n                }\n                onActivated: {\n                    // This usage of 'Auto' should not be translated\n                    MySettings.localDocsEmbedDevice = deviceBox.currentIndex === 0 ? 
\"Auto\" : deviceBox.currentText;\n                }\n            }\n        }\n\n        ColumnLayout {\n            spacing: 10\n            Label {\n                color: theme.grayRed900\n                font.pixelSize: theme.fontSizeLarge\n                font.bold: true\n                text: qsTr(\"Display\")\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 1\n                color: theme.grayRed500\n            }\n        }\n\n        RowLayout {\n            MySettingsLabel {\n                id: showReferencesLabel\n                text: qsTr(\"Show Sources\")\n                helpText: qsTr(\"Display the sources used for each response.\")\n            }\n            MyCheckBox {\n                id: showReferencesBox\n                checked: MySettings.localDocsShowReferences\n                onClicked: {\n                    MySettings.localDocsShowReferences = !MySettings.localDocsShowReferences\n                }\n            }\n        }\n\n        ColumnLayout {\n            spacing: 10\n            Label {\n                color: theme.styledTextColor\n                font.pixelSize: theme.fontSizeLarge\n                font.bold: true\n                text: qsTr(\"Advanced\")\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 1\n                color: theme.settingsDivider\n            }\n        }\n\n        MySettingsLabel {\n            id: warningLabel\n            Layout.bottomMargin: 15\n            Layout.fillWidth: true\n            color: theme.textErrorColor\n            wrapMode: Text.WordWrap\n            text: qsTr(\"Warning: Advanced usage only.\")\n            helpText: qsTr(\"Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model's context window. 
More info <a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/localdocs.html\\\">here</a>.\")\n            onLinkActivated: function(link) { Qt.openUrlExternally(link) }\n        }\n\n        RowLayout {\n            MySettingsLabel {\n                id: chunkLabel\n                Layout.fillWidth: true\n                text: qsTr(\"Document snippet size (characters)\")\n                helpText: qsTr(\"Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.\")\n            }\n\n            MyTextField {\n                id: chunkSizeTextField\n                text: MySettings.localDocsChunkSize\n                validator: IntValidator {\n                    bottom: 1\n                }\n                onEditingFinished: {\n                    var val = parseInt(text)\n                    if (!isNaN(val)) {\n                        MySettings.localDocsChunkSize = val\n                        focus = false\n                    } else {\n                        text = MySettings.localDocsChunkSize\n                    }\n                }\n            }\n        }\n\n        RowLayout {\n            Layout.topMargin: 15\n            MySettingsLabel {\n                id: contextItemsPerPrompt\n                text: qsTr(\"Max document snippets per prompt\")\n                helpText: qsTr(\"Max best N matches of retrieved document snippets to add to the context for prompt. 
Larger numbers increase likelihood of factual responses, but also result in slower generation.\")\n\n            }\n\n            MyTextField {\n                text: MySettings.localDocsRetrievalSize\n                validator: IntValidator {\n                    bottom: 1\n                }\n                onEditingFinished: {\n                    var val = parseInt(text)\n                    if (!isNaN(val)) {\n                        MySettings.localDocsRetrievalSize = val\n                        focus = false\n                    } else {\n                        text = MySettings.localDocsRetrievalSize\n                    }\n                }\n            }\n        }\n\n        Rectangle {\n            Layout.topMargin: 15\n            Layout.fillWidth: true\n            height: 1\n            color: theme.settingsDivider\n        }\n     }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/LocalDocsView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport Qt5Compat.GraphicalEffects\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\nimport localdocs\n\nRectangle {\n    id: localDocsView\n\n    Theme {\n        id: theme\n    }\n\n    color: theme.viewBackground\n    signal chatViewRequested()\n    signal localDocsViewRequested()\n    signal settingsViewRequested(int page)\n    signal addCollectionViewRequested()\n\n    ColumnLayout {\n        id: mainArea\n        anchors.left: parent.left\n        anchors.right: parent.right\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.margins: 30\n        spacing: 50\n\n        RowLayout {\n            Layout.fillWidth: true\n            Layout.alignment: Qt.AlignTop\n            visible: LocalDocs.databaseValid && LocalDocs.localDocsModel.count !== 0\n            spacing: 50\n\n            ColumnLayout {\n                Layout.fillWidth: true\n                Layout.alignment: Qt.AlignLeft\n                Layout.minimumWidth: 200\n                spacing: 5\n\n                Text {\n                    id: welcome\n                    text: qsTr(\"LocalDocs\")\n                    font.pixelSize: theme.fontSizeBanner\n                    color: theme.titleTextColor\n                }\n\n                Text {\n                    text: qsTr(\"Chat with your local files\")\n                    font.pixelSize: theme.fontSizeLarge\n                    color: theme.titleInfoTextColor\n                }\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 0\n            }\n\n            MyButton {\n                Layout.alignment: Qt.AlignTop | Qt.AlignRight\n                text: qsTr(\"\\uFF0B Add Collection\")\n                onClicked: {\n                    
addCollectionViewRequested()\n                }\n            }\n        }\n\n        Rectangle {\n            id: warning\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n            visible: !LocalDocs.databaseValid\n            Text {\n                anchors.centerIn: parent\n                text: qsTr(\"<h3>ERROR: The LocalDocs database cannot be accessed or is not valid.</h3><br>\"\n                         + \"<i>Note: You will need to restart after trying any of the following suggested fixes.</i><br>\"\n                         + \"<ul><li>Make sure that the folder set as <b>Download Path</b> exists on the file system.</li>\"\n                         + \"<li>Check ownership as well as read and write permissions of the <b>Download Path</b>.</li>\"\n                         + \"<li>If there is a <b>localdocs_v2.db</b> file, check its ownership and read/write \"\n                         + \"permissions, too.</li></ul><br>\"\n                         + \"If the problem persists and there are any 'localdocs_v*.db' files present, as a last resort you can<br>\"\n                         + \"try backing them up and removing them. 
You will have to recreate your collections, however.\")\n                color: theme.textErrorColor\n                font.pixelSize: theme.fontSizeLarger\n            }\n        }\n\n        Item {\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n            visible: LocalDocs.databaseValid && LocalDocs.localDocsModel.count === 0\n            ColumnLayout {\n                id: noInstalledLabel\n                anchors.centerIn: parent\n                spacing: 0\n\n                Text {\n                    Layout.alignment: Qt.AlignCenter\n                    text: qsTr(\"No Collections Installed\")\n                    color: theme.mutedLightTextColor\n                    font.pixelSize: theme.fontSizeBannerSmall\n                }\n\n                Text {\n                    Layout.topMargin: 15\n                    horizontalAlignment: Qt.AlignHCenter\n                    color: theme.mutedLighterTextColor\n                    text: qsTr(\"Install a collection of local documents to get started using this feature\")\n                    font.pixelSize: theme.fontSizeLarge\n                }\n            }\n\n            MyButton {\n                anchors.top: noInstalledLabel.bottom\n                anchors.topMargin: 50\n                anchors.horizontalCenter: noInstalledLabel.horizontalCenter\n                rightPadding: 60\n                leftPadding: 60\n                text: qsTr(\"\\uFF0B Add Doc Collection\")\n                onClicked: {\n                    addCollectionViewRequested()\n                }\n                Accessible.role: Accessible.Button\n                Accessible.name: qsTr(\"Shows the add model view\")\n            }\n        }\n\n        ScrollView {\n            id: scrollView\n            ScrollBar.vertical.policy: ScrollBar.AsNeeded\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n            clip: true\n            visible: LocalDocs.databaseValid && 
LocalDocs.localDocsModel.count !== 0\n\n            ListView {\n                id: collectionListView\n                model: LocalDocs.localDocsModel\n                boundsBehavior: Flickable.StopAtBounds\n                spacing: 30\n\n                delegate: Rectangle {\n                    width: collectionListView.width\n                    height: childrenRect.height + 60\n                    color: theme.conversationBackground\n                    radius: 10\n                    border.width: 1\n                    border.color: theme.controlBorder\n\n                    property bool removing: false\n\n                    ColumnLayout {\n                        anchors.top: parent.top\n                        anchors.left: parent.left\n                        anchors.right: parent.right\n                        anchors.margins: 30\n                        spacing: 10\n\n                        RowLayout {\n                            Layout.fillWidth: true\n                            Text {\n                                Layout.fillWidth: true\n                                Layout.alignment: Qt.AlignLeft\n                                text: collection\n                                elide: Text.ElideRight\n                                color: theme.titleTextColor\n                                font.pixelSize: theme.fontSizeLargest\n                                font.bold: true\n                            }\n\n                            Item {\n                                Layout.alignment: Qt.AlignRight\n                                Layout.preferredWidth: state.contentWidth + 50\n                                Layout.preferredHeight: state.contentHeight + 10\n                                ProgressBar {\n                                    id: itemProgressBar\n                                    anchors.fill: parent\n                                    value: {\n                                        if (model.error !== \"\")\n  
                                          return 0\n\n                                        if (model.indexing)\n                                            return (model.totalBytesToIndex - model.currentBytesToIndex) / model.totalBytesToIndex\n\n                                        if (model.currentEmbeddingsToIndex !== 0)\n                                            return (model.totalEmbeddingsToIndex - model.currentEmbeddingsToIndex) / model.totalEmbeddingsToIndex\n\n                                        return 0\n                                    }\n\n                                    background: Rectangle {\n                                        implicitHeight: 45\n                                        color: {\n                                            if (model.error !== \"\")\n                                                return \"transparent\"\n\n                                            if (model.indexing)\n                                                 return theme.altProgressBackground\n\n                                            if (model.currentEmbeddingsToIndex !== 0)\n                                                 return theme.altProgressBackground\n\n                                            if (model.forceIndexing)\n                                                return theme.red200\n\n                                            return theme.lightButtonBackground\n                                        }\n                                        radius: 6\n                                    }\n                                    contentItem: Item {\n                                        implicitHeight: 40\n\n                                        Rectangle {\n                                            width: itemProgressBar.visualPosition * parent.width\n                                            height: parent.height\n                                            radius: 2\n                                        
    color: theme.altProgressForeground\n                                        }\n                                    }\n                                    Accessible.role: Accessible.ProgressBar\n                                    Accessible.name: qsTr(\"Indexing progressBar\")\n                                    Accessible.description: qsTr(\"Shows the progress made in the indexing\")\n                                    ToolTip.text: model.error\n                                    ToolTip.visible: hovered && model.error !== \"\"\n                                }\n                                Label {\n                                    id: state\n                                    anchors.centerIn: itemProgressBar\n                                    horizontalAlignment: Text.AlignHCenter\n                                    color: {\n                                        if (model.error !== \"\")\n                                            return theme.textErrorColor\n\n                                        if (model.indexing)\n                                            return theme.altProgressText\n\n                                        if (model.currentEmbeddingsToIndex !== 0)\n                                            return theme.altProgressText\n\n                                        if (model.forceIndexing)\n                                            return theme.textErrorColor\n\n                                        return theme.lighterButtonForeground\n                                    }\n                                    text: {\n                                        if (model.error !== \"\")\n                                            return qsTr(\"ERROR\")\n\n                                        // indicates extracting snippets from documents\n                                        if (model.indexing)\n                                            return qsTr(\"INDEXING\")\n\n                                       
 // indicates generating the embeddings for any outstanding snippets\n                                        if (model.currentEmbeddingsToIndex !== 0)\n                                            return qsTr(\"EMBEDDING\")\n\n                                        if (model.forceIndexing)\n                                            return qsTr(\"REQUIRES UPDATE\")\n\n                                        if (model.installed)\n                                            return qsTr(\"READY\")\n\n                                        return qsTr(\"INSTALLING\")\n                                    }\n                                    elide: Text.ElideRight\n                                    font.bold: true\n                                    font.pixelSize: theme.fontSizeSmaller\n                                }\n                            }\n                        }\n\n                        RowLayout {\n                            Layout.fillWidth: true\n                            Text {\n                                Layout.fillWidth: true\n                                Layout.alignment: Qt.AlignLeft\n                                text: folder_path\n                                elide: Text.ElideRight\n                                color: theme.titleTextColor2\n                                font.pixelSize: theme.fontSizeSmall\n                            }\n\n                            Text {\n                                Layout.alignment: Qt.AlignRight\n                                text: {\n                                    if (model.error !== \"\")\n                                        return model.error\n\n                                    if (model.indexing)\n                                        return qsTr(\"Indexing in progress\")\n\n                                    if (model.currentEmbeddingsToIndex !== 0)\n                                        return qsTr(\"Embedding in progress\")\n\n                      
              if (model.forceIndexing)\n                                        return qsTr(\"This collection requires an update after version change\")\n\n                                    if (model.installed)\n                                        return qsTr(\"Automatically reindexes upon changes to the folder\")\n\n                                    return qsTr(\"Installation in progress\")\n                                }\n                                elide: Text.ElideRight\n                                color: theme.mutedDarkTextColor\n                                font.pixelSize: theme.fontSizeSmall\n                            }\n                            Text {\n                                visible: {\n                                    return model.indexing || model.currentEmbeddingsToIndex !== 0\n                                }\n                                Layout.alignment: Qt.AlignRight\n                                text: {\n                                    var percentComplete = Math.round(itemProgressBar.value * 100);\n                                    var formattedPercent = percentComplete < 10 ? 
\" \" + percentComplete : percentComplete.toString();\n                                    return formattedPercent + qsTr(\"%\")\n                                }\n                                elide: Text.ElideRight\n                                color: theme.mutedDarkTextColor\n                                font.family: \"monospace\"\n                                font.pixelSize: theme.fontSizeSmall\n                            }\n                        }\n\n                        RowLayout {\n                            spacing: 7\n                            Text {\n                                text: \"%1 – %2\".arg(qsTr(\"%n file(s)\", \"\", model.totalDocs)).arg(qsTr(\"%n word(s)\", \"\", model.totalWords))\n                                elide: Text.ElideRight\n                                color: theme.styledTextColor2\n                                font.pixelSize: theme.fontSizeSmall\n                            }\n                            Text {\n                                text: model.embeddingModel\n                                elide: Text.ElideRight\n                                color: theme.mutedDarkTextColor\n                                font.bold: true\n                                font.pixelSize: theme.fontSizeSmall\n                            }\n                            Text {\n                                visible: Qt.formatDateTime(model.lastUpdate) !== \"\"\n                                text: Qt.formatDateTime(model.lastUpdate)\n                                elide: Text.ElideRight\n                                color: theme.mutedTextColor\n                                font.pixelSize: theme.fontSizeSmall\n                            }\n                            Text {\n                                visible: model.currentEmbeddingsToIndex !== 0\n                                text: (model.totalEmbeddingsToIndex - model.currentEmbeddingsToIndex) + \" of \"\n                                 
     + model.totalEmbeddingsToIndex + \" embeddings\"\n                                elide: Text.ElideRight\n                                color: theme.mutedTextColor\n                                font.pixelSize: theme.fontSizeSmall\n                            }\n                        }\n\n                        Rectangle {\n                            Layout.fillWidth: true\n                            height: 1\n                            color: theme.dividerColor\n                        }\n\n                        RowLayout {\n                            id: fileProcessingRow\n                            Layout.topMargin: 15\n                            Layout.bottomMargin: 15\n                            visible: model.fileCurrentlyProcessing !== \"\" && (model.indexing || model.currentEmbeddingsToIndex !== 0)\n                            MyBusyIndicator {\n                                Layout.alignment: Qt.AlignCenter\n                                Layout.preferredWidth: 12\n                                Layout.preferredHeight: 12\n                                running: true\n                                size: 12\n                                color: theme.textColor\n                            }\n\n                            Text {\n                                id: filename\n                                Layout.alignment: Qt.AlignCenter\n                                text: model.fileCurrentlyProcessing\n                                elide: Text.ElideRight\n                                color: theme.textColor\n                                font.bold: true\n                                font.pixelSize: theme.fontSizeLarge\n                            }\n                        }\n\n                        Rectangle {\n                            visible: fileProcessingRow.visible\n                            Layout.fillWidth: true\n                            height: 1\n                            color: 
theme.dividerColor\n                        }\n\n                        RowLayout {\n                            Layout.fillWidth: true\n                            spacing: 30\n                            MySettingsButton {\n                                text: qsTr(\"Remove\")\n                                textColor: theme.red500\n                                onClicked: LocalDocs.removeFolder(collection, folder_path)\n                                backgroundColor: \"transparent\"\n                                backgroundColorHovered: theme.lighterButtonBackgroundHoveredRed\n                            }\n                            Item {\n                                Layout.fillWidth: true\n                            }\n                            MySettingsButton {\n                                id: rebuildButton\n                                visible: !model.forceIndexing && !model.indexing && model.currentEmbeddingsToIndex === 0\n                                text: qsTr(\"Rebuild\")\n                                textColor: theme.green500\n                                onClicked: LocalDocs.forceRebuildFolder(folder_path)\n                                toolTip: qsTr(\"Reindex this folder from scratch. This is slow and usually not needed.\")\n                                backgroundColor: \"transparent\"\n                                backgroundColorHovered: theme.lighterButtonBackgroundHovered\n                            }\n                            MySettingsButton {\n                                id: updateButton\n                                visible: model.forceIndexing\n                                text: qsTr(\"Update\")\n                                textColor: theme.green500\n                                onClicked: LocalDocs.forceIndexing(collection)\n                                toolTip: qsTr(\"Update the collection to the new version. 
This is a slow operation.\")\n                                backgroundColor: \"transparent\"\n                                backgroundColorHovered: theme.lighterButtonBackgroundHovered\n                            }\n                        }\n                    }\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ModelSettings.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport modellist\nimport mysettings\nimport chatlistmodel\n\nMySettingsTab {\n    onRestoreDefaults: {\n        MySettings.restoreModelDefaults(root.currentModelInfo);\n    }\n    title: qsTr(\"Model\")\n\n    ConfirmationDialog {\n        id: resetSystemMessageDialog\n        property var index: null\n        property bool resetClears: false\n        dialogTitle: qsTr(\"%1 system message?\").arg(resetClears ? qsTr(\"Clear\") : qsTr(\"Reset\"))\n        description: qsTr(\"The system message will be %1.\").arg(resetClears ? qsTr(\"removed\") : qsTr(\"reset to the default\"))\n        onAccepted: MySettings.resetModelSystemMessage(ModelList.modelInfo(index))\n        function show(index_, resetClears_) { index = index_; resetClears = resetClears_; open(); }\n    }\n\n    ConfirmationDialog {\n        id: resetChatTemplateDialog\n        property bool resetClears: false\n        property var index: null\n        dialogTitle: qsTr(\"%1 chat template?\").arg(resetClears ? qsTr(\"Clear\") : qsTr(\"Reset\"))\n        description: qsTr(\"The chat template will be %1.\").arg(resetClears ? 
qsTr(\"erased\") : qsTr(\"reset to the default\"))\n        onAccepted: {\n            MySettings.resetModelChatTemplate(ModelList.modelInfo(index));\n            templateTextArea.resetText();\n        }\n        function show(index_, resetClears_) { index = index_; resetClears = resetClears_; open(); }\n    }\n\n    contentItem: GridLayout {\n        id: root\n        columns: 3\n        rowSpacing: 10\n        columnSpacing: 10\n        enabled: ModelList.selectableModels.count !== 0\n\n        property var currentModelName: comboBox.currentText\n        property var currentModelId: comboBox.currentValue\n        property var currentModelInfo: ModelList.modelInfo(root.currentModelId)\n\n        Label {\n            Layout.row: 1\n            Layout.column: 0\n            Layout.bottomMargin: 10\n            color: theme.settingsTitleTextColor\n            font.pixelSize: theme.fontSizeBannerSmall\n            font.bold: true\n            text: qsTr(\"Model Settings\")\n        }\n\n        RowLayout {\n            Layout.fillWidth: true\n            Layout.maximumWidth: parent.width\n            Layout.row: 2\n            Layout.column: 0\n            Layout.columnSpan: 2\n            spacing: 10\n\n            MyComboBox {\n                id: comboBox\n                Layout.fillWidth: true\n                model: ModelList.selectableModels\n                valueRole: \"id\"\n                textRole: \"name\"\n                currentIndex: {\n                    var i = comboBox.indexOfValue(ChatListModel.currentChat.modelInfo.id);\n                    if (i >= 0)\n                        return i;\n                    return 0;\n                }\n                contentItem: Text {\n                    leftPadding: 10\n                    rightPadding: 20\n                    text: comboBox.currentText\n                    font: comboBox.font\n                    color: theme.textColor\n                    verticalAlignment: Text.AlignVCenter\n               
     elide: Text.ElideRight\n                }\n                delegate: ItemDelegate {\n                    width: comboBox.width -20\n                    contentItem: Text {\n                        text: name\n                        color: theme.textColor\n                        font: comboBox.font\n                        elide: Text.ElideRight\n                        verticalAlignment: Text.AlignVCenter\n                    }\n                    background: Rectangle {\n                        radius: 10\n                        color: highlighted ? theme.menuHighlightColor : theme.menuBackgroundColor\n                    }\n                    highlighted: comboBox.highlightedIndex === index\n                }\n            }\n\n            MySettingsButton {\n                id: cloneButton\n                text: qsTr(\"Clone\")\n                onClicked: {\n                    var id = ModelList.clone(root.currentModelInfo);\n                    comboBox.currentIndex = comboBox.indexOfValue(id);\n                }\n            }\n\n            MySettingsDestructiveButton {\n                id: removeButton\n                enabled: root.currentModelInfo.isClone\n                text: qsTr(\"Remove\")\n                onClicked: {\n                    ModelList.removeClone(root.currentModelInfo);\n                    comboBox.currentIndex = 0;\n                }\n            }\n        }\n\n        RowLayout {\n            Layout.row: 3\n            Layout.column: 0\n            Layout.topMargin: 15\n            spacing: 10\n            MySettingsLabel {\n                text: qsTr(\"Name\")\n            }\n        }\n\n        MyTextField {\n            id: uniqueNameField\n            text: root.currentModelName\n            font.pixelSize: theme.fontSizeLarge\n            enabled: root.currentModelInfo.isClone || root.currentModelInfo.description === \"\"\n            Layout.row: 4\n            Layout.column: 0\n            Layout.columnSpan: 2\n     
       Layout.fillWidth: true\n            Connections {\n                target: MySettings\n                function onNameChanged() {\n                    uniqueNameField.text = root.currentModelInfo.name;\n                }\n            }\n            Connections {\n                target: root\n                function onCurrentModelInfoChanged() {\n                    uniqueNameField.text = root.currentModelInfo.name;\n                }\n            }\n            onTextChanged: {\n                if (text !== \"\" && ModelList.isUniqueName(text)) {\n                    MySettings.setModelName(root.currentModelInfo, text);\n                }\n            }\n        }\n\n        MySettingsLabel {\n            text: qsTr(\"Model File\")\n            Layout.row: 5\n            Layout.column: 0\n            Layout.topMargin: 15\n        }\n\n        MyTextField {\n            text: root.currentModelInfo.filename\n            font.pixelSize: theme.fontSizeLarge\n            enabled: false\n            Layout.row: 6\n            Layout.column: 0\n            Layout.columnSpan: 2\n            Layout.fillWidth: true\n        }\n\n        RowLayout {\n            Layout.row: 7\n            Layout.columnSpan: 2\n            Layout.topMargin: 15\n            Layout.fillWidth: true\n            Layout.maximumWidth: parent.width\n            spacing: 10\n            MySettingsLabel {\n                id: systemMessageLabel\n                text: qsTr(\"System Message\")\n                helpText: qsTr(\"A message to set the context or guide the behavior of the model. Leave blank for \" +\n                               \"none. 
NOTE: Since GPT4All 3.5, this should not contain control tokens.\")\n                onReset: () => resetSystemMessageDialog.show(root.currentModelId, resetClears)\n                function updateResetButton() {\n                    const info = root.currentModelInfo;\n                    // NOTE: checks if the *override* is set, regardless of whether there is a default\n                    canReset = !!info.id && MySettings.isModelSystemMessageSet(info);\n                    resetClears = !info.defaultSystemMessage;\n                }\n                Component.onCompleted: updateResetButton()\n                Connections {\n                    target: root\n                    function onCurrentModelIdChanged() { systemMessageLabel.updateResetButton(); }\n                }\n                Connections {\n                    target: MySettings\n                    function onSystemMessageChanged(info)\n                    { if (info.id === root.currentModelId) systemMessageLabel.updateResetButton(); }\n                }\n            }\n            Label {\n                id: systemMessageLabelHelp\n                visible: systemMessageArea.errState !== \"ok\"\n                Layout.alignment: Qt.AlignBottom\n                Layout.fillWidth: true\n                Layout.rightMargin: 5\n                Layout.maximumHeight: systemMessageLabel.height\n                text: qsTr(\"System message is not \" +\n                           \"<a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">plain text</a>.\")\n                color: systemMessageArea.errState === \"error\" ? 
theme.textErrorColor : theme.textWarningColor\n                font.pixelSize: theme.fontSizeLarger\n                font.bold: true\n                wrapMode: Text.Wrap\n                elide: Text.ElideRight\n                onLinkActivated: function(link) { Qt.openUrlExternally(link) }\n            }\n        }\n\n        Rectangle {\n            id: systemMessage\n            Layout.row: 8\n            Layout.column: 0\n            Layout.columnSpan: 2\n            Layout.fillWidth: true\n            color: \"transparent\"\n            Layout.minimumHeight: Math.max(100, systemMessageArea.contentHeight + 20)\n            MyTextArea {\n                id: systemMessageArea\n                anchors.fill: parent\n                property bool isBeingReset: false\n                function resetText() {\n                    const info = root.currentModelInfo;\n                    isBeingReset = true;\n                    text = (info.id ? info.systemMessage.value : null) ?? \"\";\n                    isBeingReset = false;\n                }\n                Component.onCompleted: resetText()\n                Connections {\n                    target: MySettings\n                    function onSystemMessageChanged(info)\n                    { if (info.id === root.currentModelId) systemMessageArea.resetText(); }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelIdChanged() { systemMessageArea.resetText(); }\n                }\n                // strict validation, because setModelSystemMessage clears isLegacy\n                readonly property var reLegacyCheck: (\n                    /(?:^|\\s)(?:### *System\\b|S(?:ystem|YSTEM):)|<\\|(?:im_(?:start|end)|(?:start|end)_header_id|eot_id|SYSTEM_TOKEN)\\|>|<<SYS>>/m\n                )\n                onTextChanged: {\n                    const info = root.currentModelInfo;\n                    if (!info.id) {\n                        errState = 
\"ok\";\n                    } else if (info.systemMessage.isLegacy && (isBeingReset || reLegacyCheck.test(text))) {\n                        errState = \"error\";\n                    } else\n                        errState = reLegacyCheck.test(text) ? \"warning\" : \"ok\";\n                    if (info.id && errState !== \"error\" && !isBeingReset)\n                        MySettings.setModelSystemMessage(info, text);\n                    systemMessageLabel.updateResetButton();\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: systemMessageLabel.text\n                Accessible.description: systemMessageLabelHelp.text\n            }\n        }\n\n        RowLayout {\n            Layout.row: 9\n            Layout.columnSpan: 2\n            Layout.topMargin: 15\n            Layout.fillWidth: true\n            Layout.maximumWidth: parent.width\n            spacing: 10\n            MySettingsLabel {\n                id: chatTemplateLabel\n                text: qsTr(\"Chat Template\")\n                helpText: qsTr(\"This Jinja template turns the chat into input for the model.\")\n                onReset: () => resetChatTemplateDialog.show(root.currentModelId, resetClears)\n                function updateResetButton() {\n                    const info = root.currentModelInfo;\n                    canReset = !!info.id && (\n                        MySettings.isModelChatTemplateSet(info)\n                        || templateTextArea.text !== (info.chatTemplate.value ?? 
\"\")\n                    );\n                    resetClears = !info.defaultChatTemplate;\n                }\n                Component.onCompleted: updateResetButton()\n                Connections {\n                    target: root\n                    function onCurrentModelIdChanged() { chatTemplateLabel.updateResetButton(); }\n                }\n                Connections {\n                    target: MySettings\n                    function onChatTemplateChanged(info)\n                    { if (info.id === root.currentModelId) chatTemplateLabel.updateResetButton(); }\n                }\n            }\n            Label {\n                id: chatTemplateLabelHelp\n                visible: templateTextArea.errState !== \"ok\"\n                Layout.alignment: Qt.AlignBottom\n                Layout.fillWidth: true\n                Layout.rightMargin: 5\n                Layout.maximumHeight: chatTemplateLabel.height\n                text: templateTextArea.errMsg\n                color: templateTextArea.errState === \"error\" ? 
theme.textErrorColor : theme.textWarningColor\n                font.pixelSize: theme.fontSizeLarger\n                font.bold: true\n                wrapMode: Text.Wrap\n                elide: Text.ElideRight\n                onLinkActivated: function(link) { Qt.openUrlExternally(link) }\n            }\n        }\n\n        Rectangle {\n            id: chatTemplate\n            Layout.row: 10\n            Layout.column: 0\n            Layout.columnSpan: 2\n            Layout.fillWidth: true\n            Layout.minimumHeight: Math.max(100, templateTextArea.contentHeight + 20)\n            color: \"transparent\"\n            clip: true\n            MyTextArea {\n                id: templateTextArea\n                anchors.fill: parent\n                font: fixedFont\n                property bool isBeingReset: false\n                property var errMsg: null\n                function resetText() {\n                    const info = root.currentModelInfo;\n                    isBeingReset = true;\n                    text = (info.id ? info.chatTemplate.value : null) ?? 
\"\";\n                    isBeingReset = false;\n                }\n                Component.onCompleted: resetText()\n                Connections {\n                    target: MySettings\n                    function onChatTemplateChanged() { templateTextArea.resetText(); }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelIdChanged() { templateTextArea.resetText(); }\n                }\n                function legacyCheck() {\n                    return /%[12]\\b/.test(text) || !/\\{%.*%\\}.*\\{\\{.*\\}\\}.*\\{%.*%\\}/.test(text.replace(/\\n/g, ''))\n                        || !/\\bcontent\\b/.test(text);\n                }\n                onTextChanged: {\n                    const info = root.currentModelInfo;\n                    let jinjaError;\n                    if (!info.id) {\n                        errMsg = null;\n                        errState = \"ok\";\n                    } else if (info.chatTemplate.isLegacy && (isBeingReset || legacyCheck())) {\n                        errMsg = null;\n                        errState = \"error\";\n                    } else if (text === \"\" && !info.chatTemplate.isSet) {\n                        errMsg = qsTr(\"No <a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">\" +\n                                      \"chat template</a> configured.\");\n                        errState = \"error\";\n                    } else if (/^\\s*$/.test(text)) {\n                        errMsg = qsTr(\"The <a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">\" +\n                                      \"chat template</a> cannot be blank.\");\n                        errState = \"error\";\n                    } else if ((jinjaError = MySettings.checkJinjaTemplateError(text)) !== null) {\n                        errMsg = qsTr(\"<a 
href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">Syntax\" +\n                                      \" error</a>: %1\").arg(jinjaError);\n                        errState = \"error\";\n                    } else if (legacyCheck()) {\n                        errMsg = qsTr(\"Chat template is not in \" +\n                                      \"<a href=\\\"https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html\\\">\" +\n                                      \"Jinja format</a>.\")\n                        errState = \"warning\";\n                    } else {\n                        errState = \"ok\";\n                    }\n                    if (info.id && errState !== \"error\" && !isBeingReset)\n                        MySettings.setModelChatTemplate(info, text);\n                    chatTemplateLabel.updateResetButton();\n                }\n                Keys.onPressed: event => {\n                    if (event.key === Qt.Key_Tab) {\n                        const a = templateTextArea;\n                        event.accepted = true;              // suppress tab\n                        a.insert(a.cursorPosition, '    '); // four spaces\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: chatTemplateLabel.text\n                Accessible.description: chatTemplateLabelHelp.text\n            }\n        }\n\n        MySettingsLabel {\n            id: chatNamePromptLabel\n            text: qsTr(\"Chat Name Prompt\")\n            helpText: qsTr(\"Prompt used to automatically generate chat names.\")\n            Layout.row: 11\n            Layout.column: 0\n            Layout.topMargin: 15\n        }\n\n        Rectangle {\n            id: chatNamePrompt\n            Layout.row: 12\n            Layout.column: 0\n            Layout.columnSpan: 2\n            Layout.fillWidth: true\n            Layout.minimumHeight: Math.max(100, 
chatNamePromptTextArea.contentHeight + 20)\n            color: \"transparent\"\n            clip: true\n            MyTextArea {\n                id: chatNamePromptTextArea\n                anchors.fill: parent\n                text: root.currentModelInfo.chatNamePrompt\n                Connections {\n                    target: MySettings\n                    function onChatNamePromptChanged() {\n                        chatNamePromptTextArea.text = root.currentModelInfo.chatNamePrompt;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        chatNamePromptTextArea.text = root.currentModelInfo.chatNamePrompt;\n                    }\n                }\n                onTextChanged: {\n                    MySettings.setModelChatNamePrompt(root.currentModelInfo, text)\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: chatNamePromptLabel.text\n                Accessible.description: chatNamePromptLabel.text\n            }\n        }\n\n        MySettingsLabel {\n            id: suggestedFollowUpPromptLabel\n            text: qsTr(\"Suggested FollowUp Prompt\")\n            helpText: qsTr(\"Prompt used to generate suggested follow-up questions.\")\n            Layout.row: 13\n            Layout.column: 0\n            Layout.topMargin: 15\n        }\n\n        Rectangle {\n            id: suggestedFollowUpPrompt\n            Layout.row: 14\n            Layout.column: 0\n            Layout.columnSpan: 2\n            Layout.fillWidth: true\n            Layout.minimumHeight: Math.max(100, suggestedFollowUpPromptTextArea.contentHeight + 20)\n            color: \"transparent\"\n            clip: true\n            MyTextArea {\n                id: suggestedFollowUpPromptTextArea\n                anchors.fill: parent\n                text: 
root.currentModelInfo.suggestedFollowUpPrompt\n                Connections {\n                    target: MySettings\n                    function onSuggestedFollowUpPromptChanged() {\n                        suggestedFollowUpPromptTextArea.text = root.currentModelInfo.suggestedFollowUpPrompt;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        suggestedFollowUpPromptTextArea.text = root.currentModelInfo.suggestedFollowUpPrompt;\n                    }\n                }\n                onTextChanged: {\n                    MySettings.setModelSuggestedFollowUpPrompt(root.currentModelInfo, text)\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: suggestedFollowUpPromptLabel.text\n                Accessible.description: suggestedFollowUpPromptLabel.text\n            }\n        }\n\n        GridLayout {\n            Layout.row: 15\n            Layout.column: 0\n            Layout.columnSpan: 2\n            Layout.topMargin: 15\n            Layout.fillWidth: true\n            columns: 4\n            rowSpacing: 30\n            columnSpacing: 10\n\n            MySettingsLabel {\n                id: contextLengthLabel\n                visible: !root.currentModelInfo.isOnline\n                text: qsTr(\"Context Length\")\n                helpText: qsTr(\"Number of input and output tokens the model sees.\")\n                Layout.row: 0\n                Layout.column: 0\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            Item {\n                Layout.row: 0\n                Layout.column: 1\n                Layout.fillWidth: true\n                Layout.maximumWidth: 200\n                Layout.margins: 0\n                height: contextLengthField.height\n\n                MyTextField {\n                    id: contextLengthField\n      
              anchors.left: parent.left\n                    anchors.verticalCenter: parent.verticalCenter\n                    visible: !root.currentModelInfo.isOnline\n                    text: root.currentModelInfo.contextLength\n                    font.pixelSize: theme.fontSizeLarge\n                    color: theme.textColor\n                    ToolTip.text: qsTr(\"Maximum combined prompt/response tokens before information is lost.\\nUsing more context than the model was trained on will yield poor results.\\nNOTE: Does not take effect until you reload the model.\")\n                    ToolTip.visible: hovered\n                    Connections {\n                        target: MySettings\n                        function onContextLengthChanged() {\n                            contextLengthField.text = root.currentModelInfo.contextLength;\n                        }\n                    }\n                    Connections {\n                        target: root\n                        function onCurrentModelInfoChanged() {\n                            contextLengthField.text = root.currentModelInfo.contextLength;\n                        }\n                    }\n                    onEditingFinished: {\n                        var val = parseInt(text)\n                        if (isNaN(val)) {\n                            text = root.currentModelInfo.contextLength\n                        } else {\n                            if (val < 8) {\n                                val = 8\n                                contextLengthField.text = val\n                            } else if (val > root.currentModelInfo.maxContextLength) {\n                                val = root.currentModelInfo.maxContextLength\n                                contextLengthField.text = val\n                            }\n                            MySettings.setModelContextLength(root.currentModelInfo, val)\n                            focus = false\n                        }\n    
                }\n                    Accessible.role: Accessible.EditableText\n                    Accessible.name: contextLengthLabel.text\n                    Accessible.description: ToolTip.text\n                }\n            }\n\n            MySettingsLabel {\n                id: tempLabel\n                text: qsTr(\"Temperature\")\n                helpText: qsTr(\"Randomness of model output. Higher -> more variation.\")\n                Layout.row: 1\n                Layout.column: 2\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n\n            MyTextField {\n                id: temperatureField\n                text: root.currentModelInfo.temperature\n                font.pixelSize: theme.fontSizeLarge\n                color: theme.textColor\n                ToolTip.text: qsTr(\"Temperature increases the chances of choosing less likely tokens.\\nNOTE: Higher temperature gives more creative but less predictable outputs.\")\n                ToolTip.visible: hovered\n                Layout.row: 1\n                Layout.column: 3\n                validator: DoubleValidator {\n                    locale: \"C\"\n                }\n                Connections {\n                    target: MySettings\n                    function onTemperatureChanged() {\n                        temperatureField.text = root.currentModelInfo.temperature;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        temperatureField.text = root.currentModelInfo.temperature;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseFloat(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelTemperature(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = 
root.currentModelInfo.temperature\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: tempLabel.text\n                Accessible.description: ToolTip.text\n            }\n            MySettingsLabel {\n                id: topPLabel\n                text: qsTr(\"Top-P\")\n                helpText: qsTr(\"Nucleus Sampling factor. Lower -> more predictable.\")\n                Layout.row: 2\n                Layout.column: 0\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: topPField\n                text: root.currentModelInfo.topP\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                ToolTip.text: qsTr(\"Only the most likely tokens up to a total probability of top_p can be chosen.\\nNOTE: Prevents choosing highly unlikely tokens.\")\n                ToolTip.visible: hovered\n                Layout.row: 2\n                Layout.column: 1\n                validator: DoubleValidator {\n                    locale: \"C\"\n                }\n                Connections {\n                    target: MySettings\n                    function onTopPChanged() {\n                        topPField.text = root.currentModelInfo.topP;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        topPField.text = root.currentModelInfo.topP;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseFloat(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelTopP(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = root.currentModelInfo.topP\n                    }\n                
}\n                Accessible.role: Accessible.EditableText\n                Accessible.name: topPLabel.text\n                Accessible.description: ToolTip.text\n            }\n            MySettingsLabel {\n                id: minPLabel\n                text: qsTr(\"Min-P\")\n                helpText: qsTr(\"Minimum token probability. Higher -> more predictable.\")\n                Layout.row: 3\n                Layout.column: 0\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: minPField\n                text: root.currentModelInfo.minP\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                ToolTip.text: qsTr(\"Sets the minimum relative probability for a token to be considered.\")\n                ToolTip.visible: hovered\n                Layout.row: 3\n                Layout.column: 1\n                validator: DoubleValidator {\n                    locale: \"C\"\n                }\n                Connections {\n                    target: MySettings\n                    function onMinPChanged() {\n                        minPField.text = root.currentModelInfo.minP;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        minPField.text = root.currentModelInfo.minP;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseFloat(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelMinP(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = root.currentModelInfo.minP\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: minPLabel.text\n                
Accessible.description: ToolTip.text\n            }\n\n            MySettingsLabel {\n                id: topKLabel\n                visible: !root.currentModelInfo.isOnline\n                text: qsTr(\"Top-K\")\n                helpText: qsTr(\"Size of selection pool for tokens.\")\n                Layout.row: 2\n                Layout.column: 2\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: topKField\n                visible: !root.currentModelInfo.isOnline\n                text: root.currentModelInfo.topK\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                ToolTip.text: qsTr(\"Only the top K most likely tokens will be chosen from.\")\n                ToolTip.visible: hovered\n                Layout.row: 2\n                Layout.column: 3\n                validator: IntValidator {\n                    bottom: 1\n                }\n                Connections {\n                    target: MySettings\n                    function onTopKChanged() {\n                        topKField.text = root.currentModelInfo.topK;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        topKField.text = root.currentModelInfo.topK;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseInt(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelTopK(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = root.currentModelInfo.topK\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: topKLabel.text\n                Accessible.description: ToolTip.text\n            }\n    
        MySettingsLabel {\n                id: maxLengthLabel\n                visible: !root.currentModelInfo.isOnline\n                text: qsTr(\"Max Length\")\n                helpText: qsTr(\"Maximum response length, in tokens.\")\n                Layout.row: 0\n                Layout.column: 2\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: maxLengthField\n                visible: !root.currentModelInfo.isOnline\n                text: root.currentModelInfo.maxLength\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                Layout.row: 0\n                Layout.column: 3\n                validator: IntValidator {\n                    bottom: 1\n                }\n                Connections {\n                    target: MySettings\n                    function onMaxLengthChanged() {\n                        maxLengthField.text = root.currentModelInfo.maxLength;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        maxLengthField.text = root.currentModelInfo.maxLength;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseInt(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelMaxLength(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = root.currentModelInfo.maxLength\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: maxLengthLabel.text\n                Accessible.description: ToolTip.text\n            }\n\n            MySettingsLabel {\n                id: batchSizeLabel\n                visible: !root.currentModelInfo.isOnline\n             
   text: qsTr(\"Prompt Batch Size\")\n                helpText: qsTr(\"The batch size used for prompt processing.\")\n                Layout.row: 1\n                Layout.column: 0\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: batchSizeField\n                visible: !root.currentModelInfo.isOnline\n                text: root.currentModelInfo.promptBatchSize\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                ToolTip.text: qsTr(\"Amount of prompt tokens to process at once.\\nNOTE: Higher values can speed up reading prompts but will use more RAM.\")\n                ToolTip.visible: hovered\n                Layout.row: 1\n                Layout.column: 1\n                validator: IntValidator {\n                    bottom: 1\n                }\n                Connections {\n                    target: MySettings\n                    function onPromptBatchSizeChanged() {\n                        batchSizeField.text = root.currentModelInfo.promptBatchSize;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        batchSizeField.text = root.currentModelInfo.promptBatchSize;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseInt(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelPromptBatchSize(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = root.currentModelInfo.promptBatchSize\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: batchSizeLabel.text\n                Accessible.description: ToolTip.text\n            }\n            
MySettingsLabel {\n                id: repeatPenaltyLabel\n                visible: !root.currentModelInfo.isOnline\n                text: qsTr(\"Repeat Penalty\")\n                helpText: qsTr(\"Repetition penalty factor. Set to 1 to disable.\")\n                Layout.row: 4\n                Layout.column: 2\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: repeatPenaltyField\n                visible: !root.currentModelInfo.isOnline\n                text: root.currentModelInfo.repeatPenalty\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                Layout.row: 4\n                Layout.column: 3\n                validator: DoubleValidator {\n                    locale: \"C\"\n                }\n                Connections {\n                    target: MySettings\n                    function onRepeatPenaltyChanged() {\n                        repeatPenaltyField.text = root.currentModelInfo.repeatPenalty;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        repeatPenaltyField.text = root.currentModelInfo.repeatPenalty;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseFloat(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelRepeatPenalty(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = root.currentModelInfo.repeatPenalty\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: repeatPenaltyLabel.text\n                Accessible.description: ToolTip.text\n            }\n            MySettingsLabel {\n                id: repeatPenaltyTokensLabel\n  
              visible: !root.currentModelInfo.isOnline\n                text: qsTr(\"Repeat Penalty Tokens\")\n                helpText: qsTr(\"Number of previous tokens used for penalty.\")\n                Layout.row: 3\n                Layout.column: 2\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: repeatPenaltyTokenField\n                visible: !root.currentModelInfo.isOnline\n                text: root.currentModelInfo.repeatPenaltyTokens\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                Layout.row: 3\n                Layout.column: 3\n                validator: IntValidator {\n                    bottom: 1\n                }\n                Connections {\n                    target: MySettings\n                    function onRepeatPenaltyTokensChanged() {\n                        repeatPenaltyTokenField.text = root.currentModelInfo.repeatPenaltyTokens;\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        repeatPenaltyTokenField.text = root.currentModelInfo.repeatPenaltyTokens;\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseInt(text)\n                    if (!isNaN(val)) {\n                        MySettings.setModelRepeatPenaltyTokens(root.currentModelInfo, val)\n                        focus = false\n                    } else {\n                        text = root.currentModelInfo.repeatPenaltyTokens\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: repeatPenaltyTokensLabel.text\n                Accessible.description: ToolTip.text\n            }\n\n            MySettingsLabel {\n                id: gpuLayersLabel\n                
visible: !root.currentModelInfo.isOnline\n                text: qsTr(\"GPU Layers\")\n                helpText: qsTr(\"Number of model layers to load into VRAM.\")\n                Layout.row: 4\n                Layout.column: 0\n                Layout.maximumWidth: 300 * theme.fontScale\n            }\n            MyTextField {\n                id: gpuLayersField\n                visible: !root.currentModelInfo.isOnline\n                text: root.currentModelInfo.gpuLayers\n                font.pixelSize: theme.fontSizeLarge\n                color: theme.textColor\n                ToolTip.text: qsTr(\"How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model.\\nLower values increase CPU load and RAM usage, and make inference slower.\\nNOTE: Does not take effect until you reload the model.\")\n                ToolTip.visible: hovered\n                Layout.row: 4\n                Layout.column: 1\n                Connections {\n                    target: MySettings\n                    function onGpuLayersChanged() {\n                        gpuLayersField.text = root.currentModelInfo.gpuLayers\n                    }\n                }\n                Connections {\n                    target: root\n                    function onCurrentModelInfoChanged() {\n                        if (root.currentModelInfo.gpuLayers === 100) {\n                            gpuLayersField.text = root.currentModelInfo.maxGpuLayers\n                        } else {\n                            gpuLayersField.text = root.currentModelInfo.gpuLayers\n                        }\n                    }\n                }\n                onEditingFinished: {\n                    var val = parseInt(text)\n                    if (isNaN(val)) {\n                        gpuLayersField.text = root.currentModelInfo.gpuLayers\n                    } else {\n                        if (val < 1) {\n                            val = 1\n              
              gpuLayersField.text = val\n                        } else if (val > root.currentModelInfo.maxGpuLayers) {\n                            val = root.currentModelInfo.maxGpuLayers\n                            gpuLayersField.text = val\n                        }\n                        MySettings.setModelGpuLayers(root.currentModelInfo, val)\n                        focus = false\n                    }\n                }\n                Accessible.role: Accessible.EditableText\n                Accessible.name: gpuLayersLabel.text\n                Accessible.description: ToolTip.text\n            }\n        }\n\n        Rectangle {\n            Layout.row: 16\n            Layout.column: 0\n            Layout.columnSpan: 2\n            Layout.topMargin: 15\n            Layout.fillWidth: true\n            height: 1\n            color: theme.settingsDivider\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ModelsView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Dialogs\nimport QtQuick.Layouts\nimport chatlistmodel\nimport download\nimport llm\nimport modellist\nimport network\nimport mysettings\n\nRectangle {\n    id: modelsView\n    color: theme.viewBackground\n\n    signal addModelViewRequested()\n\n    ToastManager {\n        id: messageToast\n    }\n\n    ColumnLayout {\n        anchors.fill: parent\n        anchors.margins: 20\n        spacing: 30\n\n        Item {\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n            visible: ModelList.installedModels.count === 0\n            ColumnLayout {\n                id: noInstalledLabel\n                anchors.centerIn: parent\n                spacing: 0\n\n                Text {\n                    Layout.alignment: Qt.AlignCenter\n                    text: qsTr(\"No Models Installed\")\n                    color: theme.mutedLightTextColor\n                    font.pixelSize: theme.fontSizeBannerSmall\n                }\n\n                Text {\n                    Layout.topMargin: 15\n                    horizontalAlignment: Qt.AlignHCenter\n                    color: theme.mutedLighterTextColor\n                    text: qsTr(\"Install a model to get started using GPT4All\")\n                    font.pixelSize: theme.fontSizeLarge\n                }\n            }\n\n            MyButton {\n                anchors.top: noInstalledLabel.bottom\n                anchors.topMargin: 50\n                anchors.horizontalCenter: noInstalledLabel.horizontalCenter\n                rightPadding: 60\n                leftPadding: 60\n                text: qsTr(\"\\uFF0B Add Model\")\n                onClicked: {\n                    addModelViewRequested()\n                }\n                Accessible.role: Accessible.Button\n                Accessible.name: qsTr(\"Shows the add model view\")\n            }\n        }\n\n        
RowLayout {\n            visible: ModelList.installedModels.count !== 0\n            Layout.fillWidth: true\n            Layout.alignment: Qt.AlignTop\n            spacing: 50\n\n            ColumnLayout {\n                Layout.fillWidth: true\n                Layout.alignment: Qt.AlignLeft\n                Layout.minimumWidth: 200\n                spacing: 5\n\n                Text {\n                    id: welcome\n                    text: qsTr(\"Installed Models\")\n                    font.pixelSize: theme.fontSizeBanner\n                    color: theme.titleTextColor\n                }\n\n                Text {\n                    text: qsTr(\"Locally installed chat models\")\n                    font.pixelSize: theme.fontSizeLarge\n                    color: theme.titleInfoTextColor\n                }\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 0\n            }\n\n            MyButton {\n                Layout.alignment: Qt.AlignTop | Qt.AlignRight\n                text: qsTr(\"\\uFF0B Add Model\")\n                onClicked: {\n                    addModelViewRequested()\n                }\n            }\n        }\n\n        ScrollView {\n            id: scrollView\n            visible: ModelList.installedModels.count !== 0\n            ScrollBar.vertical.policy: ScrollBar.AsNeeded\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n            clip: true\n\n            ListView {\n                id: modelListView\n                model: ModelList.installedModels\n                boundsBehavior: Flickable.StopAtBounds\n                spacing: 30\n\n                delegate: Rectangle {\n                    id: delegateItem\n                    width: modelListView.width\n                    height: childrenRect.height + 60\n                    color: theme.conversationBackground\n                    radius: 10\n                    border.width: 1\n                   
 border.color: theme.controlBorder\n\n                    ColumnLayout {\n                        anchors.top: parent.top\n                        anchors.left: parent.left\n                        anchors.right: parent.right\n                        anchors.margins: 30\n\n                        Text {\n                            Layout.fillWidth: true\n                            Layout.alignment: Qt.AlignLeft\n                            text: name\n                            elide: Text.ElideRight\n                            color: theme.titleTextColor\n                            font.pixelSize: theme.fontSizeLargest\n                            font.bold: true\n                            Accessible.role: Accessible.Paragraph\n                            Accessible.name: qsTr(\"Model file\")\n                            Accessible.description: qsTr(\"Model file to be downloaded\")\n                        }\n\n                        Rectangle {\n                            Layout.fillWidth: true\n                            height: 1\n                            color: theme.dividerColor\n                        }\n\n                        RowLayout {\n                            Layout.topMargin: 10\n                            Layout.fillWidth: true\n                            Text {\n                                id: descriptionText\n                                text: description\n                                font.pixelSize: theme.fontSizeLarge\n                                Layout.fillWidth: true\n                                wrapMode: Text.WordWrap\n                                textFormat: Text.StyledText\n                                color: theme.textColor\n                                linkColor: theme.textColor\n                                Accessible.role: Accessible.Paragraph\n                                Accessible.name: qsTr(\"Description\")\n                                Accessible.description: qsTr(\"File 
description\")\n                                onLinkActivated: function(link) { Qt.openUrlExternally(link); }\n                                MouseArea {\n                                    anchors.fill: parent\n                                    acceptedButtons: Qt.NoButton // pass clicks to parent\n                                    cursorShape: parent.hoveredLink ? Qt.PointingHandCursor : Qt.ArrowCursor\n                                }\n                            }\n\n                            Rectangle {\n                                id: actionBox\n                                width: childrenRect.width + 20\n                                color: \"transparent\"\n                                border.width: 1\n                                border.color: theme.dividerColor\n                                radius: 10\n                                Layout.rightMargin: 20\n                                Layout.bottomMargin: 20\n                                Layout.minimumHeight: childrenRect.height + 20\n                                Layout.alignment: Qt.AlignRight | Qt.AlignTop\n\n                                ColumnLayout {\n                                    spacing: 0\n                                    MySettingsButton {\n                                        id: downloadButton\n                                        text: isDownloading ? 
qsTr(\"Cancel\") : qsTr(\"Resume\")\n                                        font.pixelSize: theme.fontSizeLarge\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        Layout.fillWidth: true\n                                        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        visible: (isDownloading || isIncomplete) && downloadError === \"\" && !isOnline && !calcHash\n                                        Accessible.description: qsTr(\"Stop/restart/start the download\")\n                                        onClicked: {\n                                            if (!isDownloading) {\n                                                Download.downloadModel(filename);\n                                            } else {\n                                                Download.cancelDownload(filename);\n                                            }\n                                        }\n                                    }\n\n                                    MySettingsDestructiveButton {\n                                        id: removeButton\n                                        text: qsTr(\"Remove\")\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        Layout.fillWidth: true\n                                        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        visible: !isDownloading && (installed || isIncomplete)\n                                        Accessible.description: qsTr(\"Remove model from filesystem\")\n                                        onClicked: {\n                                         
   Download.removeModel(filename);\n                                        }\n                                    }\n\n                                    MySettingsButton {\n                                        id: installButton\n                                        visible: !installed && isOnline\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        Layout.fillWidth: true\n                                        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        text: qsTr(\"Install\")\n                                        font.pixelSize: theme.fontSizeLarge\n                                        onClicked: {\n                                            var apiKeyText = apiKey.text.trim(),\n                                                baseUrlText = baseUrl.text.trim(),\n                                                modelNameText = modelName.text.trim();\n\n                                            var apiKeyOk = apiKeyText !== \"\",\n                                                baseUrlOk = !isCompatibleApi || baseUrlText !== \"\",\n                                                modelNameOk = !isCompatibleApi || modelNameText !== \"\";\n\n                                            if (!apiKeyOk)\n                                                apiKey.showError();\n                                            if (!baseUrlOk)\n                                                baseUrl.showError();\n                                            if (!modelNameOk)\n                                                modelName.showError();\n\n                                            if (!apiKeyOk || !baseUrlOk || !modelNameOk)\n                                                return;\n\n                                            if 
(!isCompatibleApi)\n                                                Download.installModel(\n                                                    filename,\n                                                    apiKeyText,\n                                                );\n                                            else\n                                                Download.installCompatibleModel(\n                                                    modelNameText,\n                                                    apiKeyText,\n                                                    baseUrlText,\n                                                );\n                                        }\n                                        Accessible.role: Accessible.Button\n                                        Accessible.name: qsTr(\"Install\")\n                                        Accessible.description: qsTr(\"Install online model\")\n                                    }\n\n                                    ColumnLayout {\n                                        spacing: 0\n                                        Label {\n                                            Layout.topMargin: 20\n                                            Layout.leftMargin: 20\n                                            visible: downloadError !== \"\"\n                                            textFormat: Text.StyledText\n                                            text: qsTr(\"<strong><font size=\\\"1\\\"><a href=\\\"#error\\\">Error</a></strong></font>\")\n                                            color: theme.textColor\n                                            font.pixelSize: theme.fontSizeLarge\n                                            linkColor: theme.textErrorColor\n                                            Accessible.role: Accessible.Paragraph\n                                            Accessible.name: text\n                                            
Accessible.description: qsTr(\"Describes an error that occurred when downloading\")\n                                            onLinkActivated: {\n                                                downloadingErrorPopup.text = downloadError;\n                                                downloadingErrorPopup.open();\n                                            }\n                                        }\n\n                                        Label {\n                                            visible: LLM.systemTotalRAMInGB() < ramrequired\n                                            Layout.topMargin: 20\n                                            Layout.leftMargin: 20\n                                            Layout.maximumWidth: 300\n                                            textFormat: Text.StyledText\n                                            text: qsTr(\"<strong><font size=\\\"2\\\">WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).</strong></font>\").arg(ramrequired).arg(LLM.systemTotalRAMInGBString())\n                                            color: theme.textErrorColor\n                                            font.pixelSize: theme.fontSizeLarge\n                                            wrapMode: Text.WordWrap\n                                            Accessible.role: Accessible.Paragraph\n                                            Accessible.name: text\n                                            Accessible.description: qsTr(\"Error for incompatible hardware\")\n                                            onLinkActivated: {\n                                                downloadingErrorPopup.text = downloadError;\n                                                downloadingErrorPopup.open();\n                                            }\n                                        }\n                                    }\n\n                                    
ColumnLayout {\n                                        visible: isDownloading && !calcHash\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        Layout.fillWidth: true\n                                        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        spacing: 20\n\n                                        ProgressBar {\n                                            id: itemProgressBar\n                                            Layout.fillWidth: true\n                                            width: 200\n                                            value: bytesReceived / bytesTotal\n                                            background: Rectangle {\n                                                implicitHeight: 45\n                                                color: theme.progressBackground\n                                                radius: 3\n                                            }\n                                            contentItem: Item {\n                                                implicitHeight: 40\n\n                                                Rectangle {\n                                                    width: itemProgressBar.visualPosition * parent.width\n                                                    height: parent.height\n                                                    radius: 2\n                                                    color: theme.progressForeground\n                                                }\n                                            }\n                                            Accessible.role: Accessible.ProgressBar\n                                            Accessible.name: qsTr(\"Download progressBar\")\n                                            
Accessible.description: qsTr(\"Shows the progress made in the download\")\n                                        }\n\n                                        Label {\n                                            id: speedLabel\n                                            color: theme.textColor\n                                            Layout.alignment: Qt.AlignRight\n                                            text: speed\n                                            font.pixelSize: theme.fontSizeLarge\n                                            Accessible.role: Accessible.Paragraph\n                                            Accessible.name: qsTr(\"Download speed\")\n                                            Accessible.description: qsTr(\"Download speed in bytes/kilobytes/megabytes per second\")\n                                        }\n                                    }\n\n                                    RowLayout {\n                                        visible: calcHash\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        Layout.maximumWidth: 200\n                                        Layout.fillWidth: true\n                                        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        clip: true\n\n                                        Label {\n                                            id: calcHashLabel\n                                            color: theme.textColor\n                                            text: qsTr(\"Calculating...\")\n                                            font.pixelSize: theme.fontSizeLarge\n                                            Accessible.role: Accessible.Paragraph\n                                            Accessible.name: text\n                                         
   Accessible.description: qsTr(\"Whether the file hash is being calculated\")\n                                        }\n\n                                        MyBusyIndicator {\n                                            id: busyCalcHash\n                                            running: calcHash\n                                            Accessible.role: Accessible.Animation\n                                            Accessible.name: qsTr(\"Busy indicator\")\n                                            Accessible.description: qsTr(\"Displayed when the file hash is being calculated\")\n                                        }\n                                    }\n\n                                    MyTextField {\n                                        id: apiKey\n                                        visible: !installed && isOnline\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        wrapMode: Text.WrapAnywhere\n                                        function showError() {\n                                            messageToast.show(qsTr(\"ERROR: $API_KEY is empty.\"));\n                                            apiKey.placeholderTextColor = theme.textErrorColor;\n                                        }\n                                        onTextChanged: {\n                                            apiKey.placeholderTextColor = theme.mutedTextColor;\n                                        }\n                                        placeholderText: qsTr(\"enter $API_KEY\")\n                                        Accessible.role: Accessible.EditableText\n                                        Accessible.name: placeholderText\n                                        
Accessible.description: qsTr(\"Whether the file hash is being calculated\")\n                                    }\n\n                                    MyTextField {\n                                        id: baseUrl\n                                        visible: !installed && isOnline && isCompatibleApi\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        wrapMode: Text.WrapAnywhere\n                                        function showError() {\n                                            messageToast.show(qsTr(\"ERROR: $BASE_URL is empty.\"));\n                                            baseUrl.placeholderTextColor = theme.textErrorColor;\n                                        }\n                                        onTextChanged: {\n                                            baseUrl.placeholderTextColor = theme.mutedTextColor;\n                                        }\n                                        placeholderText: qsTr(\"enter $BASE_URL\")\n                                        Accessible.role: Accessible.EditableText\n                                        Accessible.name: placeholderText\n                                        Accessible.description: qsTr(\"Whether the file hash is being calculated\")\n                                    }\n\n                                    MyTextField {\n                                        id: modelName\n                                        visible: !installed && isOnline && isCompatibleApi\n                                        Layout.topMargin: 20\n                                        Layout.leftMargin: 20\n                                        Layout.minimumWidth: 200\n                                        
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter\n                                        wrapMode: Text.WrapAnywhere\n                                        function showError() {\n                                            messageToast.show(qsTr(\"ERROR: $MODEL_NAME is empty.\"))\n                                            modelName.placeholderTextColor = theme.textErrorColor;\n                                        }\n                                        onTextChanged: {\n                                            modelName.placeholderTextColor = theme.mutedTextColor;\n                                        }\n                                        placeholderText: qsTr(\"enter $MODEL_NAME\")\n                                        Accessible.role: Accessible.EditableText\n                                        Accessible.name: placeholderText\n                                        Accessible.description: qsTr(\"Whether the file hash is being calculated\")\n                                    }\n                                }\n                            }\n                        }\n\n                        Item  {\n                            Layout.minimumWidth: childrenRect.width\n                            Layout.minimumHeight: childrenRect.height\n                            Layout.bottomMargin: 10\n                            RowLayout {\n                                id: paramRow\n                                anchors.centerIn: parent\n                                ColumnLayout {\n                                    Layout.topMargin: 10\n                                    Layout.bottomMargin: 10\n                                    Layout.leftMargin: 20\n                                    Layout.rightMargin: 20\n                                    Text {\n                                        text: qsTr(\"File size\")\n                                        font.pixelSize: theme.fontSizeSmall\n                                 
       color: theme.mutedDarkTextColor\n                                    }\n                                    Text {\n                                        text: filesize\n                                        color: theme.textColor\n                                        font.pixelSize: theme.fontSizeSmall\n                                        font.bold: true\n                                    }\n                                }\n                                Rectangle {\n                                    width: 1\n                                    Layout.fillHeight: true\n                                    color: theme.dividerColor\n                                }\n                                ColumnLayout {\n                                    Layout.topMargin: 10\n                                    Layout.bottomMargin: 10\n                                    Layout.leftMargin: 20\n                                    Layout.rightMargin: 20\n                                    Text {\n                                        text: qsTr(\"RAM required\")\n                                        font.pixelSize: theme.fontSizeSmall\n                                        color: theme.mutedDarkTextColor\n                                    }\n                                    Text {\n                                        text: ramrequired >= 0 ? 
qsTr(\"%1 GB\").arg(ramrequired) : qsTr(\"?\")\n                                        color: theme.textColor\n                                        font.pixelSize: theme.fontSizeSmall\n                                        font.bold: true\n                                    }\n                                }\n                                Rectangle {\n                                    width: 1\n                                    Layout.fillHeight: true\n                                    color: theme.dividerColor\n                                }\n                                ColumnLayout {\n                                    Layout.topMargin: 10\n                                    Layout.bottomMargin: 10\n                                    Layout.leftMargin: 20\n                                    Layout.rightMargin: 20\n                                    Text {\n                                        text: qsTr(\"Parameters\")\n                                        font.pixelSize: theme.fontSizeSmall\n                                        color: theme.mutedDarkTextColor\n                                    }\n                                    Text {\n                                        text: parameters !== \"\" ? 
parameters : \"?\"\n                                        color: theme.textColor\n                                        font.pixelSize: theme.fontSizeSmall\n                                        font.bold: true\n                                    }\n                                }\n                                Rectangle {\n                                    width: 1\n                                    Layout.fillHeight: true\n                                    color: theme.dividerColor\n                                }\n                                ColumnLayout {\n                                    Layout.topMargin: 10\n                                    Layout.bottomMargin: 10\n                                    Layout.leftMargin: 20\n                                    Layout.rightMargin: 20\n                                    Text {\n                                        text: qsTr(\"Quant\")\n                                        font.pixelSize: theme.fontSizeSmall\n                                        color: theme.mutedDarkTextColor\n                                    }\n                                    Text {\n                                        text: quant\n                                        color: theme.textColor\n                                        font.pixelSize: theme.fontSizeSmall\n                                        font.bold: true\n                                    }\n                                }\n                                Rectangle {\n                                    width: 1\n                                    Layout.fillHeight: true\n                                    color: theme.dividerColor\n                                }\n                                ColumnLayout {\n                                    Layout.topMargin: 10\n                                    Layout.bottomMargin: 10\n                                    Layout.leftMargin: 20\n                               
     Layout.rightMargin: 20\n                                    Text {\n                                        text: qsTr(\"Type\")\n                                        font.pixelSize: theme.fontSizeSmall\n                                        color: theme.mutedDarkTextColor\n                                    }\n                                    Text {\n                                        text: type\n                                        color: theme.textColor\n                                        font.pixelSize: theme.fontSizeSmall\n                                        font.bold: true\n                                    }\n                                }\n                            }\n\n                            Rectangle {\n                                color: \"transparent\"\n                                anchors.fill: paramRow\n                                border.color: theme.dividerColor\n                                border.width: 1\n                                radius: 10\n                            }\n                        }\n\n                        Rectangle {\n                            Layout.fillWidth: true\n                            height: 1\n                            color: theme.dividerColor\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    Connections {\n        target: Download\n        function onToastMessage(message) {\n            messageToast.show(message);\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyBusyIndicator.qml",
    "content": "import QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\n\nBusyIndicator {\n    id: control\n\n    property real size: 48\n    property color color: theme.accentColor\n\n    contentItem: Item {\n        implicitWidth: control.size\n        implicitHeight: control.size\n\n        Item {\n            id: item\n            x: parent.width / 2 - width / 2\n            y: parent.height / 2 - height / 2\n            width: control.size\n            height: control.size\n            opacity: control.running ? 1 : 0\n\n            Behavior on opacity {\n                OpacityAnimator {\n                    duration: 250\n                }\n            }\n\n            RotationAnimator {\n                target: item\n                running: control.visible && control.running\n                from: 0\n                to: 360\n                loops: Animation.Infinite\n                duration: 1750\n            }\n\n            Repeater {\n                id: repeater\n                model: 6\n\n                Rectangle {\n                    id: delegate\n                    x: item.width / 2 - width / 2\n                    y: item.height / 2 - height / 2\n                    implicitWidth: control.size * .2\n                    implicitHeight: control.size * .2\n                    radius: control.size * .1\n                    color: control.color\n\n                    required property int index\n\n                    transform: [\n                        Translate {\n                            y: -Math.min(item.width, item.height) * 0.5 + delegate.radius\n                        },\n                        Rotation {\n                            angle: delegate.index / repeater.count * 360\n                            origin.x: delegate.radius\n                            origin.y: delegate.radius\n                        }\n                    ]\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyButton.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport mysettings\nimport mysettingsenums\n\nButton {\n    id: myButton\n    padding: 10\n    rightPadding: 18\n    leftPadding: 18\n    property color textColor: theme.oppositeTextColor\n    property color mutedTextColor: theme.oppositeMutedTextColor\n    property color backgroundColor: theme.buttonBackground\n    property color backgroundColorHovered: theme.buttonBackgroundHovered\n    property real  backgroundRadius: 10\n    property real  borderWidth: MySettings.chatTheme === MySettingsEnums.ChatTheme.LegacyDark ? 1 : 0\n    property color borderColor: theme.buttonBorder\n    property real  fontPixelSize: theme.fontSizeLarge\n    property bool  fontPixelBold: false\n    property alias textAlignment: textContent.horizontalAlignment\n\n    contentItem: Text {\n        id: textContent\n        text: myButton.text\n        horizontalAlignment: myButton.textAlignment\n        color: myButton.enabled ? textColor : mutedTextColor\n        font.pixelSize: fontPixelSize\n        font.bold: fontPixelBold\n        Accessible.role: Accessible.Button\n        Accessible.name: text\n    }\n    background: Rectangle {\n        radius: myButton.backgroundRadius\n        border.width: myButton.borderWidth\n        border.color: myButton.borderColor\n        color: !myButton.enabled ? theme.mutedTextColor : myButton.hovered ? backgroundColorHovered : backgroundColor\n    }\n    Accessible.role: Accessible.Button\n    Accessible.name: text\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyCheckBox.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\n\nCheckBox {\n    id: myCheckBox\n\n    background: Rectangle {\n        color: \"transparent\"\n    }\n\n    indicator: Rectangle {\n        implicitWidth: 26\n        implicitHeight: 26\n        x: myCheckBox.leftPadding\n        y: parent.height / 2 - height / 2\n        border.color: theme.checkboxBorder\n        color: \"transparent\"\n        radius: 3\n\n        Rectangle {\n            width: 14\n            height: 14\n            x: 6\n            y: 6\n            radius: 2\n            color: theme.checkboxForeground\n            visible: myCheckBox.checked\n        }\n    }\n\n    contentItem: Text {\n        text: myCheckBox.text\n        font: myCheckBox.font\n        opacity: enabled ? 1.0 : 0.3\n        color: theme.textColor\n        verticalAlignment: Text.AlignVCenter\n        leftPadding: myCheckBox.indicator.width + myCheckBox.spacing\n    }\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}"
  },
  {
    "path": "gpt4all-chat/qml/MyComboBox.qml",
    "content": "import QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport Qt5Compat.GraphicalEffects\n\nComboBox {\n    id: comboBox\n    font.pixelSize: theme.fontSizeLarge\n    spacing: 0\n    padding: 10\n    Accessible.role: Accessible.ComboBox\n    contentItem: RowLayout {\n        id: contentRow\n        spacing: 0\n        Text {\n            id: text\n            Layout.fillWidth: true\n            leftPadding: 10\n            rightPadding: 20\n            text: comboBox.displayText\n            font: comboBox.font\n            color: theme.textColor\n            verticalAlignment: Text.AlignLeft\n            elide: Text.ElideRight\n        }\n        Item {\n            Layout.preferredWidth: updown.width\n            Layout.preferredHeight: updown.height\n            Image {\n                id: updown\n                anchors.verticalCenter: parent.verticalCenter\n                sourceSize.width: comboBox.font.pixelSize\n                sourceSize.height: comboBox.font.pixelSize\n                mipmap: true\n                visible: false\n                source: \"qrc:/gpt4all/icons/up_down.svg\"\n            }\n\n            ColorOverlay {\n                anchors.fill: updown\n                source: updown\n                color: theme.textColor\n            }\n        }\n    }\n    delegate: ItemDelegate {\n        width: comboBox.width -20\n        contentItem: Text {\n            text: modelData\n            color: theme.textColor\n            font: comboBox.font\n            elide: Text.ElideRight\n            verticalAlignment: Text.AlignVCenter\n        }\n        background: Rectangle {\n            radius: 10\n            color: highlighted ? 
theme.menuHighlightColor : theme.menuBackgroundColor\n        }\n        highlighted: comboBox.highlightedIndex === index\n    }\n    popup: Popup {\n        y: comboBox.height - 1\n        width: comboBox.width\n        implicitHeight: Math.min(window.height - y, contentItem.implicitHeight + 20)\n        padding: 0\n        contentItem: Rectangle {\n            implicitWidth: comboBox.width\n            implicitHeight: myListView.contentHeight\n            color: \"transparent\"\n            radius: 10\n            ScrollView {\n                anchors.fill: parent\n                anchors.margins: 10\n                clip: true\n                ScrollBar.vertical.policy: ScrollBar.AsNeeded\n                ScrollBar.horizontal.policy: ScrollBar.AlwaysOff\n                ListView {\n                    id: myListView\n                    implicitHeight: contentHeight\n                    model: comboBox.popup.visible ? comboBox.delegateModel : null\n                    currentIndex: comboBox.highlightedIndex\n                    ScrollIndicator.vertical: ScrollIndicator { }\n                }\n            }\n        }\n\n        background: Rectangle {\n            color: theme.menuBackgroundColor//theme.controlBorder\n            border.color: theme.menuBorderColor //theme.controlBorder\n            border.width: 1\n            radius: 10\n        }\n    }\n    indicator: Item {\n    }\n    background: Rectangle {\n        color: theme.controlBackground\n        border.width: 1\n        border.color: theme.controlBorder\n        radius: 10\n    }\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Dialogs\nimport QtQuick.Layouts\n\nDialog {\n    id: myDialog\n    parent: Overlay.overlay\n    property alias closeButtonVisible: myCloseButton.visible\n    background: Rectangle {\n        width: parent.width\n        height: parent.height\n        color: theme.containerBackground\n        border.width: 1\n        border.color: theme.dialogBorder\n        radius: 10\n    }\n\n    Rectangle {\n        id: closeBackground\n        visible: myCloseButton.visible\n        z: 299\n        anchors.centerIn: myCloseButton\n        width: myCloseButton.width + 10\n        height: myCloseButton.height + 10\n        color: theme.containerBackground\n    }\n\n    MyToolButton {\n        id: myCloseButton\n        x: 0 + myDialog.width - myDialog.padding - width - 15\n        y: 0 - myDialog.padding + 15\n        z: 300\n        visible: myDialog.closePolicy != Popup.NoAutoClose\n        width: 24\n        height: 24\n        imageWidth: 24\n        imageHeight: 24\n        padding: 0\n        source: \"qrc:/gpt4all/icons/close.svg\"\n        fillMode: Image.PreserveAspectFit\n        onClicked: {\n            myDialog.close();\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyDirectoryField.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport llm\n\nTextField {\n    id: myDirectoryField\n    padding: 10\n    property bool isValid: LLM.directoryExists(text)\n    color: text === \"\" || isValid ? theme.textColor : theme.textErrorColor\n    background: Rectangle {\n        implicitWidth: 150\n        color: theme.controlBackground\n        border.width: 1\n        border.color: theme.controlBorder\n        radius: 10\n    }\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyFancyLink.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport Qt5Compat.GraphicalEffects\nimport mysettings\n\nMyButton {\n    id: fancyLink\n    property alias imageSource: myimage.source\n\n    Image {\n        id: myimage\n        anchors.verticalCenter: parent.verticalCenter\n        anchors.left: parent.left\n        anchors.leftMargin: 12\n        sourceSize: Qt.size(15, 15)\n        mipmap: true\n        visible: false\n    }\n\n    ColorOverlay {\n        anchors.fill: myimage\n        source: myimage\n        color: fancyLink.hovered ? theme.fancyLinkTextHovered : theme.fancyLinkText\n    }\n\n    borderWidth: 0\n    backgroundColor: \"transparent\"\n    backgroundColorHovered: \"transparent\"\n    fontPixelBold: true\n    leftPadding: 35\n    rightPadding: 8\n    topPadding: 1\n    bottomPadding: 1\n    textColor: fancyLink.hovered ? theme.fancyLinkTextHovered : theme.fancyLinkText\n    fontPixelSize: theme.fontSizeSmall\n    background: Rectangle {\n        color: \"transparent\"\n    }\n\n    Accessible.name: qsTr(\"Fancy link\")\n    Accessible.description: qsTr(\"A stylized link\")\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyFileDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Dialogs\n\nFileDialog {\n    id: fileDialog\n    title: qsTr(\"Please choose a file\")\n    property var acceptedConnection: null\n\n    function openFileDialog(currentFolder, onAccepted) {\n        fileDialog.currentFolder = currentFolder;\n        if (acceptedConnection !== null) {\n            fileDialog.accepted.disconnect(acceptedConnection);\n        }\n        acceptedConnection = function() { onAccepted(fileDialog.selectedFile); };\n        fileDialog.accepted.connect(acceptedConnection);\n        fileDialog.open();\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyFileIcon.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport Qt5Compat.GraphicalEffects\n\nItem {\n    id: fileIcon\n    property real iconSize: 24\n    property string fileName: \"\"\n    implicitWidth: iconSize\n    implicitHeight: iconSize\n\n    Image {\n        id: fileImage\n        anchors.fill: parent\n        visible: false\n        sourceSize.width: iconSize\n        sourceSize.height: iconSize\n        mipmap: true\n        source: {\n            if (fileIcon.fileName.toLowerCase().endsWith(\".txt\"))\n                return \"qrc:/gpt4all/icons/file-txt.svg\"\n            else if (fileIcon.fileName.toLowerCase().endsWith(\".pdf\"))\n                return \"qrc:/gpt4all/icons/file-pdf.svg\"\n            else if (fileIcon.fileName.toLowerCase().endsWith(\".md\"))\n                return \"qrc:/gpt4all/icons/file-md.svg\"\n            else if (fileIcon.fileName.toLowerCase().endsWith(\".xlsx\"))\n                return \"qrc:/gpt4all/icons/file-xls.svg\"\n            else if (fileIcon.fileName.toLowerCase().endsWith(\".docx\"))\n                return \"qrc:/gpt4all/icons/file-docx.svg\"\n            else\n                return \"qrc:/gpt4all/icons/file.svg\"\n        }\n    }\n    ColorOverlay {\n        anchors.fill: fileImage\n        source: fileImage\n        color: theme.textColor\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyFolderDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Dialogs\n\nFolderDialog {\n    id: folderDialog\n    title: qsTr(\"Please choose a directory\")\n\n    function openFolderDialog(currentFolder, onAccepted) {\n        folderDialog.currentFolder = currentFolder;\n        folderDialog.accepted.connect(function() { onAccepted(folderDialog.selectedFolder); });\n        folderDialog.open();\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyMenu.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\n\nMenu {\n    id: menu\n\n    implicitWidth: Math.max(implicitBackgroundWidth + leftInset + rightInset,\n                            contentWidth + leftPadding + rightPadding + 20)\n    implicitHeight: Math.max(implicitBackgroundHeight + topInset + bottomInset,\n                             contentHeight + topPadding + bottomPadding + 20)\n\n    background: Rectangle {\n        implicitWidth: 220\n        implicitHeight: 40\n        color: theme.menuBackgroundColor\n        border.color: theme.menuBorderColor\n        border.width: 1\n        radius: 10\n    }\n\n    contentItem: Rectangle {\n        implicitWidth: myListView.contentWidth\n        implicitHeight: (myTitle.visible ? myTitle.contentHeight + 10: 0) + myListView.contentHeight\n        color: \"transparent\"\n\n        Text {\n            id: myTitle\n            visible: menu.title !== \"\"\n            text: menu.title\n            anchors.margins: 10\n            anchors.top: parent.top\n            anchors.right: parent.right\n            anchors.left: parent.left\n            leftPadding: 15\n            rightPadding: 10\n            padding: 5\n            color: theme.styledTextColor\n            font.pixelSize: theme.fontSizeSmall\n        }\n        ListView {\n            id: myListView\n            anchors.margins: 10\n            anchors.top: myTitle.bottom\n            anchors.bottom: parent.bottom\n            anchors.right: parent.right\n            anchors.left: parent.left\n            implicitHeight: contentHeight\n            model: menu.contentModel\n            interactive: Window.window\n                         ? 
contentHeight + menu.topPadding + menu.bottomPadding > menu.height\n                         : false\n            clip: true\n            currentIndex: menu.currentIndex\n\n            ScrollIndicator.vertical: ScrollIndicator {}\n        }\n    }\n\n    enter: Transition {\n        NumberAnimation {\n            property: \"opacity\"\n            from: 0\n            to: 1\n            easing.type: Easing.InOutQuad\n            duration: 100\n        }\n    }\n\n    exit: Transition {\n        NumberAnimation {\n            property: \"opacity\"\n            from: 1\n            to: 0\n            easing.type: Easing.InOutQuad\n            duration: 100\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyMenuItem.qml",
    "content": "import Qt5Compat.GraphicalEffects\nimport QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\n\nMenuItem {\n    id: item\n    background: Rectangle {\n        radius: 10\n        width: parent.width -20\n        color: item.highlighted ? theme.menuHighlightColor : theme.menuBackgroundColor\n    }\n\n    contentItem: RowLayout {\n        spacing: 0\n        Item {\n            visible: item.icon.source.toString() !== \"\"\n            Layout.leftMargin: 6\n            Layout.preferredWidth: item.icon.width\n            Layout.preferredHeight: item.icon.height\n            Image {\n                id: image\n                anchors.centerIn: parent\n                visible: false\n                fillMode: Image.PreserveAspectFit\n                mipmap: true\n                sourceSize.width: item.icon.width\n                sourceSize.height: item.icon.height\n                source: item.icon.source\n            }\n            ColorOverlay {\n                anchors.fill: image\n                source: image\n                color: theme.textColor\n            }\n        }\n        Text {\n            Layout.alignment: Qt.AlignLeft\n            padding: 5\n            text: item.text\n            color: theme.textColor\n            font.pixelSize: theme.fontSizeLarge\n        }\n        Rectangle {\n            color: \"transparent\"\n            Layout.fillWidth: true\n            height: 1\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyMiniButton.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport Qt5Compat.GraphicalEffects\n\nButton {\n    id: myButton\n    padding: 0\n    property color backgroundColor: theme.iconBackgroundDark\n    property color backgroundColorHovered: theme.iconBackgroundHovered\n    property alias source: image.source\n    property alias fillMode: image.fillMode\n    implicitWidth: 30\n    implicitHeight: 30\n    contentItem: Text {\n        text: myButton.text\n        horizontalAlignment: Text.AlignHCenter\n        color: myButton.enabled ? theme.textColor : theme.mutedTextColor\n        font.pixelSize: theme.fontSizeLarge\n        Accessible.role: Accessible.Button\n        Accessible.name: text\n    }\n\n    background: Item {\n        anchors.fill: parent\n        Rectangle {\n            anchors.fill: parent\n            color: \"transparent\"\n        }\n        Image {\n            id: image\n            anchors.centerIn: parent\n            visible: false\n            mipmap: true\n            sourceSize.width: 16\n            sourceSize.height: 16\n        }\n        ColorOverlay {\n            anchors.fill: image\n            source: image\n            color: myButton.hovered ? backgroundColorHovered : backgroundColor\n        }\n    }\n    Accessible.role: Accessible.Button\n    Accessible.name: text\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MySettingsButton.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport mysettings\n\nButton {\n    id: myButton\n    padding: 10\n    rightPadding: 18\n    leftPadding: 18\n    property color  textColor: theme.lightButtonText\n    property color  mutedTextColor: theme.lightButtonMutedText\n    property color  backgroundColor: theme.lightButtonBackground\n    property color  backgroundColorHovered: enabled ? theme.lightButtonBackgroundHovered : backgroundColor\n    property real   borderWidth: 0\n    property color  borderColor: \"transparent\"\n    property real   fontPixelSize: theme.fontSizeLarge\n    property string toolTip\n    property alias backgroundRadius: background.radius\n\n    contentItem: Text {\n        text: myButton.text\n        horizontalAlignment: Text.AlignHCenter\n        color: myButton.enabled ? textColor : mutedTextColor\n        font.pixelSize: fontPixelSize\n        font.bold: true\n        Accessible.role: Accessible.Button\n        Accessible.name: text\n    }\n    background: Rectangle {\n        id: background\n        radius: 10\n        border.width: borderWidth\n        border.color: borderColor\n        color: myButton.hovered ? backgroundColorHovered : backgroundColor\n    }\n    Accessible.role: Accessible.Button\n    Accessible.name: text\n    ToolTip.text: toolTip\n    ToolTip.visible: toolTip !== \"\" && myButton.hovered\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MySettingsDestructiveButton.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport mysettings\n\nButton {\n    id: myButton\n    padding: 10\n    rightPadding: 18\n    leftPadding: 18\n    font.pixelSize: theme.fontSizeLarge\n    property color textColor: theme.darkButtonText\n    property color mutedTextColor: theme.darkButtonMutedText\n    property color backgroundColor: theme.darkButtonBackground\n    property color backgroundColorHovered: enabled ? theme.darkButtonBackgroundHovered : backgroundColor\n    property real  borderWidth: 0\n    property color borderColor: \"transparent\"\n\n    contentItem: Text {\n        text: myButton.text\n        horizontalAlignment: Text.AlignHCenter\n        color: myButton.enabled ? textColor : mutedTextColor\n        font.pixelSize: theme.fontSizeLarge\n        font.bold: true\n        Accessible.role: Accessible.Button\n        Accessible.name: text\n    }\n    background: Rectangle {\n        radius: 10\n        border.width: borderWidth\n        border.color: borderColor\n        color: myButton.hovered ? backgroundColorHovered : backgroundColor\n    }\n    Accessible.role: Accessible.Button\n    Accessible.name: text\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MySettingsLabel.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\n\nColumnLayout {\n    id: root\n    property alias text: mainTextLabel.text\n    property alias helpText: helpTextLabel.text\n\n    property alias textFormat: mainTextLabel.textFormat\n    property alias wrapMode: mainTextLabel.wrapMode\n    property alias font: mainTextLabel.font\n    property alias horizontalAlignment: mainTextLabel.horizontalAlignment\n    signal linkActivated(link : url);\n    property alias color: mainTextLabel.color\n    property alias linkColor: mainTextLabel.linkColor\n\n    property var onReset: null\n    property alias canReset: resetButton.enabled\n    property bool resetClears: false\n\n    Item {\n        anchors.margins: 5\n        width: childrenRect.width\n        height: mainTextLabel.contentHeight\n\n        Label {\n            id: mainTextLabel\n            anchors.left: parent.left\n            anchors.top: parent.top\n            anchors.bottom: parent.bottom\n            color: theme.settingsTitleTextColor\n            font.pixelSize: theme.fontSizeLarger\n            font.bold: true\n            verticalAlignment: Text.AlignVCenter\n            onLinkActivated: function(link) {\n                root.linkActivated(link);\n            }\n        }\n\n        MySettingsButton {\n            id: resetButton\n            anchors.baseline: mainTextLabel.baseline\n            anchors.left: mainTextLabel.right\n            height: mainTextLabel.contentHeight\n            anchors.leftMargin: 10\n            padding: 2\n            leftPadding: 10\n            rightPadding: 10\n            backgroundRadius: 5\n            text: resetClears ? 
qsTr(\"Clear\") : qsTr(\"Reset\")\n            visible: root.onReset !== null\n            onClicked: root.onReset()\n        }\n    }\n    Label {\n        id: helpTextLabel\n        visible: text !== \"\"\n        Layout.fillWidth: true\n        wrapMode: Text.Wrap\n        color: theme.settingsTitleTextColor\n        font.pixelSize: theme.fontSizeLarge\n        font.bold: false\n\n        onLinkActivated: function(link) {\n            root.linkActivated(link);\n        }\n\n        MouseArea {\n            anchors.fill: parent\n            acceptedButtons: Qt.NoButton // pass clicks to parent\n            cursorShape: parent.hoveredLink ? Qt.PointingHandCursor : Qt.ArrowCursor\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MySettingsStack.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Controls.impl\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport Qt.labs.folderlistmodel\nimport mysettings\n\nItem {\n    id: settingsStack\n\n    Theme {\n        id: theme\n    }\n\n    property ListModel tabTitlesModel: ListModel { }\n    property list<Component> tabs: [ ]\n\n    TabBar {\n        id: settingsTabBar\n        anchors.top: parent.top\n        anchors.horizontalCenter: parent.horizontalCenter\n        width: parent.width / 1.75\n        z: 200\n        visible: tabTitlesModel.count > 1\n        background: Rectangle {\n            color: \"transparent\"\n        }\n        Repeater {\n            model: settingsStack.tabTitlesModel\n            TabButton {\n                id: tabButton\n                padding: 10\n                contentItem: IconLabel {\n                    color: theme.textColor\n                    font.pixelSize: theme.fontSizeLarge\n                    font.bold: tabButton.checked\n                    text: model.title\n                }\n                background: Rectangle {\n                    color: \"transparent\"\n                }\n                Accessible.role: Accessible.Button\n                Accessible.name: model.title\n            }\n        }\n    }\n\n    Rectangle {\n        id: dividerTabBar\n        visible: tabTitlesModel.count > 1\n        anchors.top: settingsTabBar.bottom\n        anchors.topMargin: 15\n        anchors.bottomMargin: 15\n        anchors.leftMargin: 15\n        anchors.rightMargin: 15\n        anchors.left: parent.left\n        anchors.right: parent.right\n        height: 1\n        color: theme.settingsDivider\n    }\n\n    StackLayout {\n        id: stackLayout\n        anchors.top: tabTitlesModel.count > 1 ? 
dividerTabBar.bottom : parent.top\n        anchors.topMargin: 5\n        anchors.left: parent.left\n        anchors.right: parent.right\n        anchors.bottom: parent.bottom\n        currentIndex: settingsTabBar.currentIndex\n\n        Repeater {\n            model: settingsStack.tabs\n            delegate: Loader {\n                id: loader\n                sourceComponent: model.modelData\n                onLoaded: {\n                    settingsStack.tabTitlesModel.append({ \"title\": loader.item.title });\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MySettingsTab.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\n\nItem {\n    id: root\n    property string title: \"\"\n    property Item contentItem: null\n    property bool showRestoreDefaultsButton: true\n    signal restoreDefaults\n\n    onContentItemChanged: function() {\n        if (contentItem) {\n            contentItem.parent = contentInner;\n            contentItem.anchors.left = contentInner.left;\n            contentItem.anchors.right = contentInner.right;\n        }\n    }\n\n    ConfirmationDialog {\n        id: restoreDefaultsDialog\n        dialogTitle: qsTr(\"Restore defaults?\")\n        description: qsTr(\"This page of settings will be reset to the defaults.\")\n        onAccepted: root.restoreDefaults()\n    }\n\n    ScrollView {\n        id: scrollView\n        width: parent.width\n        height: parent.height\n        topPadding: 15\n        leftPadding: 5\n        contentWidth: availableWidth\n        contentHeight: innerColumn.height\n        ScrollBar.vertical: ScrollBar {\n            parent: scrollView.parent\n            anchors.top: scrollView.top\n            anchors.left: scrollView.right\n            anchors.bottom: scrollView.bottom\n        }\n\n        Theme {\n            id: theme\n        }\n\n        ColumnLayout {\n            id: innerColumn\n            anchors.left: parent.left\n            anchors.right: parent.right\n            anchors.margins: 15\n            spacing: 10\n            Column {\n                id: contentInner\n                Layout.fillWidth: true\n                Layout.maximumWidth: parent.width\n            }\n\n            Item {\n                Layout.fillWidth: true\n                Layout.topMargin: 20\n                height: restoreDefaultsButton.height\n                MySettingsButton {\n                    id: restoreDefaultsButton\n                    anchors.left: parent.left\n                    visible: 
showRestoreDefaultsButton\n                    width: implicitWidth\n                    text: qsTr(\"Restore Defaults\")\n                    font.pixelSize: theme.fontSizeLarge\n                    Accessible.role: Accessible.Button\n                    Accessible.name: text\n                    Accessible.description: qsTr(\"Restores settings dialog to a default state\")\n                    onClicked: restoreDefaultsDialog.open()\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MySlug.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\n\nLabel {\n    id: mySlug\n    padding: 3\n    rightPadding: 9\n    leftPadding: 9\n    font.pixelSize: theme.fontSizeSmall\n    background: Rectangle {\n        radius: 6\n        border.width: 1\n        border.color: mySlug.color\n        color: theme.slugBackground\n    }\n    ToolTip.visible: ma.containsMouse && ToolTip.text !== \"\"\n    MouseArea {\n        id: ma\n        anchors.fill: parent\n        hoverEnabled: true\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyTabButton.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport mysettings\nimport mysettingsenums\n\nMySettingsButton {\n    property bool isSelected: false\n    contentItem: Text {\n        text: parent.text\n        horizontalAlignment: Qt.AlignCenter\n        color: isSelected ? theme.titleTextColor : theme.styledTextColor\n        font.pixelSize: theme.fontSizeLarger\n    }\n    background: Item {\n        visible: isSelected || hovered\n        Rectangle {\n            anchors.bottom: parent.bottom\n            anchors.left: parent.left\n            anchors.right: parent.right\n            height: 3\n            color: isSelected ? theme.titleTextColor : theme.styledTextColorLighter\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyTextArea.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\n\nTextArea {\n    id: myTextArea\n\n    property string errState: \"ok\"  // one of \"ok\", \"error\", \"warning\"\n\n    color: enabled ? theme.textColor : theme.mutedTextColor\n    placeholderTextColor: theme.mutedTextColor\n    font.pixelSize: theme.fontSizeLarge\n    background: Rectangle {\n        implicitWidth: 150\n        color: theme.controlBackground\n        border.width: errState === \"ok\" ? 1 : 2\n        border.color: {\n            switch (errState) {\n                case \"ok\":      return theme.controlBorder;\n                case \"warning\": return theme.textWarningColor;\n                case \"error\":   return theme.textErrorColor;\n            }\n        }\n        radius: 10\n    }\n    padding: 10\n    wrapMode: TextArea.Wrap\n\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyTextButton.qml",
    "content": "import QtQuick\nimport QtQuick.Controls\n\nText {\n    id: text\n\n    signal click()\n    property string tooltip\n\n    HoverHandler { id: hoverHandler }\n    TapHandler { onTapped: { click() } }\n\n    font.bold: true\n    font.underline: hoverHandler.hovered\n    font.pixelSize: theme.fontSizeSmall\n    ToolTip.text: tooltip\n    ToolTip.visible: tooltip !== \"\" && hoverHandler.hovered\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyTextField.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\n\nTextField {\n    id: myTextField\n    padding: 10\n    placeholderTextColor: theme.mutedTextColor\n    background: Rectangle {\n        implicitWidth: 150\n        color: myTextField.enabled ? theme.controlBackground : theme.disabledControlBackground\n        border.width: 1\n        border.color: theme.controlBorder\n        radius: 10\n    }\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n    color: enabled ? theme.textColor : theme.mutedTextColor\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyToolButton.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport Qt5Compat.GraphicalEffects\n\nButton {\n    id: myButton\n    padding: 10\n    property color backgroundColor: theme.iconBackgroundDark\n    property color backgroundColorHovered: theme.iconBackgroundHovered\n    property color toggledColor: theme.accentColor\n    property real toggledWidth: 1\n    property bool toggled: false\n    property alias source: image.source\n    property alias fillMode: image.fillMode\n    property alias imageWidth: image.sourceSize.width\n    property alias imageHeight: image.sourceSize.height\n    property alias bgTransform: background.transform\n    contentItem: Text {\n        text: myButton.text\n        horizontalAlignment: Text.AlignHCenter\n        color: myButton.enabled ? theme.textColor : theme.mutedTextColor\n        font.pixelSize: theme.fontSizeLarge\n        Accessible.role: Accessible.Button\n        Accessible.name: text\n    }\n\n    background: Item {\n        id: background\n        anchors.fill: parent\n        Rectangle {\n            anchors.fill: parent\n            color: myButton.toggledColor\n            visible: myButton.toggled\n            border.color: myButton.toggledColor\n            border.width: myButton.toggledWidth\n            radius: 8\n        }\n        Image {\n            id: image\n            anchors.centerIn: parent\n            visible: false\n            fillMode: Image.PreserveAspectFit\n            mipmap: true\n            sourceSize.width: 32\n            sourceSize.height: 32\n        }\n        ColorOverlay {\n            anchors.fill: image\n            source: image\n            color: !myButton.enabled ? theme.mutedTextColor : myButton.hovered ? backgroundColorHovered : backgroundColor\n        }\n    }\n    Accessible.role: Accessible.Button\n    Accessible.name: text\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/MyWelcomeButton.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport Qt5Compat.GraphicalEffects\nimport QtQuick.Layouts\nimport mysettings\n\nButton {\n    id: myButton\n    property alias imageSource: myimage.source\n    property alias description: description.text\n\n    contentItem: Item {\n        id: item\n        anchors.centerIn: parent\n\n        RowLayout {\n            anchors.fill: parent\n            Rectangle {\n                id: rec\n                color: \"transparent\"\n                Layout.preferredWidth: item.width * 1/5.5\n                Layout.preferredHeight: item.width * 1/5.5\n                Layout.alignment: Qt.AlignCenter\n                Image {\n                    id: myimage\n                    anchors.centerIn: parent\n                    sourceSize.width: rec.width\n                    sourceSize.height: rec.height\n                    mipmap: true\n                    visible: false\n                }\n\n                ColorOverlay {\n                    anchors.fill: myimage\n                    source: myimage\n                    color: theme.welcomeButtonBorder\n                }\n            }\n\n            ColumnLayout {\n                Layout.preferredWidth: childrenRect.width\n                Text {\n                    text: myButton.text\n                    horizontalAlignment: Text.AlignHCenter\n                    color: myButton.hovered ? theme.welcomeButtonTextHovered : theme.welcomeButtonText\n                    font.pixelSize: theme.fontSizeBannerSmall\n                    font.bold: true\n                    Accessible.role: Accessible.Button\n                    Accessible.name: text\n                }\n\n                Text {\n                    id: description\n                    horizontalAlignment: Text.AlignHCenter\n                    color: myButton.hovered ? 
theme.welcomeButtonTextHovered : theme.welcomeButtonText\n                    font.pixelSize: theme.fontSizeSmall\n                    font.bold: false\n                    Accessible.role: Accessible.Button\n                    Accessible.name: text\n                }\n            }\n        }\n    }\n\n    background: Rectangle {\n        radius: 10\n        border.width: 1\n        border.color: myButton.hovered ? theme.welcomeButtonBorderHovered : theme.welcomeButtonBorder\n        color: theme.welcomeButtonBackground\n    }\n\n    Accessible.role: Accessible.Button\n    Accessible.name: text\n    ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/NetworkDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport download\nimport network\nimport llm\nimport mysettings\n\nMyDialog {\n    id: networkDialog\n    anchors.centerIn: parent\n    modal: true\n    padding: 20\n\n    Theme {\n        id: theme\n    }\n\n    Column {\n        id: column\n        spacing: 20\n        Item {\n            width: childrenRect.width\n            height: childrenRect.height\n            Image {\n                id: img\n                anchors.top: parent.top\n                anchors.left: parent.left\n                width: 60\n                height: 60\n                source: \"qrc:/gpt4all/icons/gpt4all.svg\"\n            }\n            Text {\n                anchors.left: img.right\n                anchors.leftMargin: 30\n                anchors.verticalCenter: img.verticalCenter\n                text: qsTr(\"Contribute data to the GPT4All Opensource Datalake.\")\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n            }\n        }\n\n        ScrollView {\n            clip: true\n            height: 300\n            width: 1024 - 40\n            ScrollBar.vertical.policy: ScrollBar.AlwaysOn\n            ScrollBar.horizontal.policy: ScrollBar.AlwaysOff\n\n            MyTextArea {\n                id: textOptIn\n                width: 1024 - 40\n                text: qsTr(\"By enabling this feature, you will be able to participate in the democratic process of \"\n                      + \"training a large language model by contributing data for future model improvements.\\n\\n\"\n                      + \"When a GPT4All model responds to you and you have opted-in, your conversation will be sent to \"\n                      + \"the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you \"\n                      + \"dislike a response, you can suggest an alternative response. 
This data will be collected and \"\n                      + \"aggregated in the GPT4All Datalake.\\n\\n\"\n                      + \"NOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source \"\n                      + \"Datalake. You should have no expectation of chat privacy when this feature is enabled. You \"\n                      + \"should, however, have an expectation of an optional attribution if you wish. Your chat data \"\n                      + \"will be openly available for anyone to download and will be used by Nomic AI to improve \"\n                      + \"future GPT4All models. Nomic AI will retain all attribution information attached to your data \"\n                      + \"and you will be credited as a contributor to any GPT4All model release that uses your data!\")\n                focus: false\n                readOnly: true\n                Accessible.role: Accessible.Paragraph\n                Accessible.name: qsTr(\"Terms for opt-in\")\n                Accessible.description: qsTr(\"Describes what will happen when you opt-in\")\n            }\n        }\n\n        MyTextField {\n            id: attribution\n            width: parent.width\n            text: MySettings.networkAttribution\n            placeholderText: qsTr(\"Please provide a name for attribution (optional)\")\n            Accessible.role: Accessible.EditableText\n            Accessible.name: qsTr(\"Attribution (optional)\")\n            Accessible.description: qsTr(\"Provide attribution\")\n            onEditingFinished: {\n                MySettings.networkAttribution = attribution.text;\n            }\n        }\n    }\n\n    footer: DialogButtonBox {\n        id: dialogBox\n        padding: 20\n        alignment: Qt.AlignRight\n        spacing: 10\n        MySettingsButton {\n            text: qsTr(\"Enable\")\n            Accessible.description: qsTr(\"Enable opt-in\")\n            DialogButtonBox.buttonRole: 
DialogButtonBox.AcceptRole\n        }\n        MySettingsButton {\n            text: qsTr(\"Cancel\")\n            Accessible.description: qsTr(\"Cancel opt-in\")\n            DialogButtonBox.buttonRole: DialogButtonBox.RejectRole\n        }\n        background: Rectangle {\n            color: \"transparent\"\n        }\n    }\n\n    onAccepted: {\n        MySettings.networkIsActive = true\n    }\n\n    onRejected: {\n        MySettings.networkIsActive = false\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/NewVersionDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport download\nimport network\nimport llm\n\nMyDialog {\n    id: newVersionDialog\n    anchors.centerIn: parent\n    modal: true\n    width: contentItem.width\n    height: contentItem.height\n    padding: 20\n    closeButtonVisible: false\n\n    Theme {\n        id: theme\n    }\n\n    Item {\n        id: contentItem\n        width: childrenRect.width + 40\n        height: childrenRect.height + 40\n\n        Label {\n            id: label\n            anchors.top: parent.top\n            anchors.left: parent.left\n            topPadding: 20\n            bottomPadding: 20\n            text: qsTr(\"New version is available\")\n            color: theme.titleTextColor\n            font.pixelSize: theme.fontSizeLarge\n            font.bold: true\n        }\n\n        MySettingsButton {\n            id: button\n            anchors.left: label.right\n            anchors.leftMargin: 10\n            anchors.verticalCenter: label.verticalCenter\n            padding: 20\n            text: qsTr(\"Update\")\n            font.pixelSize: theme.fontSizeLarge\n            Accessible.description: qsTr(\"Update to new version\")\n            onClicked: {\n                if (!LLM.checkForUpdates())\n                    checkForUpdatesError.open()\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/PopupDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\n\nDialog {\n    id: popupDialog\n    anchors.centerIn: parent\n    padding: 20\n    property alias text: textField.text\n    property bool shouldTimeOut: true\n    property bool shouldShowBusy: false\n    modal: shouldShowBusy\n    closePolicy: shouldShowBusy ? Popup.NoAutoClose : (Popup.CloseOnEscape | Popup.CloseOnPressOutside)\n\n    Theme {\n        id: theme\n    }\n\n    Row {\n        anchors.centerIn: parent\n        spacing: 20\n\n        Label {\n            id: textField\n            width: Math.min(1024, implicitWidth)\n            height: Math.min(600, implicitHeight)\n            anchors.verticalCenter: shouldShowBusy ? busyIndicator.verticalCenter : parent.verticalCenter\n            horizontalAlignment: Text.AlignLeft\n            verticalAlignment: Text.AlignVCenter\n            textFormat: Text.StyledText\n            wrapMode: Text.WordWrap\n            color: theme.textColor\n            linkColor: theme.linkColor\n            Accessible.role: Accessible.HelpBalloon\n            Accessible.name: text\n            Accessible.description: qsTr(\"Reveals a shortlived help balloon\")\n            onLinkActivated: function(link) { Qt.openUrlExternally(link) }\n        }\n\n        MyBusyIndicator {\n            id: busyIndicator\n            visible: shouldShowBusy\n            running: shouldShowBusy\n\n            Accessible.role: Accessible.Animation\n            Accessible.name: qsTr(\"Busy indicator\")\n            Accessible.description: qsTr(\"Displayed when the popup is showing busy\")\n        }\n    }\n\n    background: Rectangle {\n        anchors.fill: parent\n        color: theme.containerBackground\n        border.width: 1\n        border.color: theme.dialogBorder\n        radius: 10\n    }\n\n    exit: Transition {\n        NumberAnimation { duration: 500; property: \"opacity\"; from: 1.0; to: 0.0 }\n    }\n\n  
  onOpened: {\n        if (shouldTimeOut)\n            timer.start()\n    }\n\n    Timer {\n        id: timer\n        interval: 500; running: false; repeat: false\n        onTriggered: popupDialog.close()\n    }\n}"
  },
  {
    "path": "gpt4all-chat/qml/RemoteModelCard.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport QtQuick.Dialogs\nimport Qt.labs.folderlistmodel\nimport Qt5Compat.GraphicalEffects\n\nimport llm\nimport chatlistmodel\nimport download\nimport modellist\nimport network\nimport gpt4all\nimport mysettings\nimport localdocs\n\n\nRectangle {\n    property alias providerName: providerNameLabel.text\n    property alias providerImage: myimage.source\n    property alias providerDesc: providerDescLabel.text\n    property string providerBaseUrl: \"\"\n    property bool providerIsCustom: false\n    property var modelWhitelist: null\n\n    color: theme.conversationBackground\n    radius: 10\n    border.width: 1\n    border.color: theme.controlBorder\n    implicitHeight: topColumn.height + bottomColumn.height + 33 * theme.fontScale\n\n    ColumnLayout {\n        id: topColumn\n        anchors.left: parent.left\n        anchors.right: parent.right\n        anchors.top: parent.top\n        anchors.margins: 20\n        spacing: 15 * theme.fontScale\n        RowLayout {\n            Layout.alignment: Qt.AlignTop\n            spacing: 10\n            Item {\n                Layout.preferredWidth: 27 * theme.fontScale\n                Layout.preferredHeight: 27 * theme.fontScale\n                Layout.alignment: Qt.AlignLeft\n\n                Image {\n                    id: myimage\n                    anchors.centerIn: parent\n                    sourceSize.width: parent.width\n                    sourceSize.height: parent.height\n                    mipmap: true\n                    fillMode: Image.PreserveAspectFit\n                }\n            }\n\n            Label {\n                id: providerNameLabel\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeBanner\n            }\n        }\n\n        Label {\n            id: providerDescLabel\n            Layout.fillWidth: true\n            wrapMode: 
Text.Wrap\n            color: theme.settingsTitleTextColor\n            font.pixelSize: theme.fontSizeLarge\n            onLinkActivated: function(link) { Qt.openUrlExternally(link); }\n\n            MouseArea {\n                anchors.fill: parent\n                acceptedButtons: Qt.NoButton // pass clicks to parent\n                cursorShape: parent.hoveredLink ? Qt.PointingHandCursor : Qt.ArrowCursor\n            }\n        }\n    }\n\n    ColumnLayout {\n        id: bottomColumn\n        anchors.left: parent.left\n        anchors.right: parent.right\n        anchors.bottom: parent.bottom\n        anchors.margins: 20\n        spacing: 30\n\n        ColumnLayout {\n            MySettingsLabel {\n                text: qsTr(\"API Key\")\n                font.bold: true\n                font.pixelSize: theme.fontSizeLarge\n                color: theme.settingsTitleTextColor\n            }\n\n            MyTextField {\n                id: apiKeyField\n                Layout.fillWidth: true\n                font.pixelSize: theme.fontSizeLarge\n                wrapMode: Text.WrapAnywhere\n                function showError() {\n                    messageToast.show(qsTr(\"ERROR: $API_KEY is empty.\"));\n                    apiKeyField.placeholderTextColor = theme.textErrorColor;\n                }\n                onTextChanged: {\n                    apiKeyField.placeholderTextColor = theme.mutedTextColor;\n                    if (!providerIsCustom) {\n                        let models = ModelList.remoteModelList(apiKeyField.text, providerBaseUrl);\n                        if (modelWhitelist !== null)\n                            models = models.filter(m => modelWhitelist.includes(m));\n                        myModelList.model = models;\n                        myModelList.currentIndex = -1;\n                    }\n                }\n                placeholderText: qsTr(\"enter $API_KEY\")\n                Accessible.role: Accessible.EditableText\n              
  Accessible.name: placeholderText\n                Accessible.description: qsTr(\"Enter the API key for the remote model provider\")\n            }\n        }\n\n        ColumnLayout {\n            visible: providerIsCustom\n            MySettingsLabel {\n                text: qsTr(\"Base Url\")\n                font.bold: true\n                font.pixelSize: theme.fontSizeLarge\n                color: theme.settingsTitleTextColor\n            }\n            MyTextField {\n                id: baseUrlField\n                Layout.fillWidth: true\n                font.pixelSize: theme.fontSizeLarge\n                wrapMode: Text.WrapAnywhere\n                function showError() {\n                    messageToast.show(qsTr(\"ERROR: $BASE_URL is empty.\"));\n                    baseUrlField.placeholderTextColor = theme.textErrorColor;\n                }\n                onTextChanged: {\n                    baseUrlField.placeholderTextColor = theme.mutedTextColor;\n                }\n                placeholderText: qsTr(\"enter $BASE_URL\")\n                Accessible.role: Accessible.EditableText\n                Accessible.name: placeholderText\n            }\n        }\n        ColumnLayout {\n            visible: providerIsCustom\n            MySettingsLabel {\n                text: qsTr(\"Model Name\")\n                font.bold: true\n                font.pixelSize: theme.fontSizeLarge\n                color: theme.settingsTitleTextColor\n            }\n            MyTextField {\n                id: modelNameField\n                Layout.fillWidth: true\n                font.pixelSize: theme.fontSizeLarge\n                wrapMode: Text.WrapAnywhere\n                function showError() {\n                    messageToast.show(qsTr(\"ERROR: $MODEL_NAME is empty.\"))\n                    modelNameField.placeholderTextColor = theme.textErrorColor;\n                }\n                onTextChanged: {\n                    modelNameField.placeholderTextColor = 
theme.mutedTextColor;\n                }\n                placeholderText: qsTr(\"enter $MODEL_NAME\")\n                Accessible.role: Accessible.EditableText\n                Accessible.name: placeholderText\n            }\n        }\n\n        ColumnLayout {\n            visible: myModelList.count > 0 && !providerIsCustom\n\n            MySettingsLabel {\n                text: qsTr(\"Models\")\n                font.bold: true\n                font.pixelSize: theme.fontSizeLarge\n                color: theme.settingsTitleTextColor\n            }\n\n            RowLayout {\n                spacing: 10\n\n                MyComboBox {\n                    Layout.fillWidth: true\n                    id: myModelList\n                    currentIndex: -1;\n                }\n            }\n        }\n\n        MySettingsButton {\n            id: installButton\n            Layout.alignment: Qt.AlignRight\n            text: qsTr(\"Install\")\n            font.pixelSize: theme.fontSizeLarge\n\n            property string apiKeyText: apiKeyField.text.trim()\n            property string baseUrlText: providerIsCustom ? baseUrlField.text.trim() : providerBaseUrl.trim()\n            property string modelNameText: providerIsCustom ? modelNameField.text.trim() : myModelList.currentText.trim()\n\n            enabled: apiKeyText !== \"\" && baseUrlText !== \"\" && modelNameText !== \"\"\n\n            onClicked: {\n                Download.installCompatibleModel(\n                            modelNameText,\n                            apiKeyText,\n                            baseUrlText,\n                            );\n                myModelList.currentIndex = -1;\n            }\n            Accessible.role: Accessible.Button\n            Accessible.name: qsTr(\"Install\")\n            Accessible.description: qsTr(\"Install remote model\")\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/SettingsView.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Dialogs\nimport QtQuick.Layouts\nimport Qt.labs.folderlistmodel\nimport download\nimport modellist\nimport network\nimport llm\nimport mysettings\n\nRectangle {\n    id: settingsDialog\n    color: theme.viewBackground\n\n    property alias pageToDisplay: listView.currentIndex\n\n    Item {\n        Accessible.role: Accessible.Dialog\n        Accessible.name: qsTr(\"Settings\")\n        Accessible.description: qsTr(\"Contains various application settings\")\n    }\n\n    ListModel {\n        id: stacksModel\n        ListElement {\n            title: qsTr(\"Application\")\n        }\n        ListElement {\n            title: qsTr(\"Model\")\n        }\n        ListElement {\n            title: qsTr(\"LocalDocs\")\n        }\n    }\n\n    ColumnLayout {\n        id: mainArea\n        anchors.left: parent.left\n        anchors.right: parent.right\n        anchors.top: parent.top\n        anchors.bottom: parent.bottom\n        anchors.margins: 30\n        spacing: 50\n\n        RowLayout {\n            Layout.fillWidth: true\n            Layout.alignment: Qt.AlignTop\n            spacing: 50\n\n            ColumnLayout {\n                Layout.fillWidth: true\n                Layout.alignment: Qt.AlignLeft\n                Layout.minimumWidth: 200\n                spacing: 5\n\n                Text {\n                    id: welcome\n                    text: qsTr(\"Settings\")\n                    font.pixelSize: theme.fontSizeBanner\n                    color: theme.titleTextColor\n                }\n            }\n\n            Rectangle {\n                Layout.fillWidth: true\n                height: 0\n            }\n        }\n\n        Item {\n            Layout.fillWidth: true\n            Layout.fillHeight: true\n\n            Rectangle {\n                id: stackList\n                anchors.top: parent.top\n                anchors.bottom: 
parent.bottom\n                anchors.left: parent.left\n                width: 220\n                color: theme.viewBackground\n                radius: 10\n\n                ScrollView {\n                    anchors.top: parent.top\n                    anchors.bottom: parent.bottom\n                    anchors.left: parent.left\n                    anchors.right: parent.right\n                    anchors.topMargin: 10\n                    ScrollBar.vertical.policy: ScrollBar.AsNeeded\n                    clip: true\n\n                    ListView {\n                        id: listView\n                        anchors.fill: parent\n                        model: stacksModel\n\n                        delegate: Rectangle {\n                            id: item\n                            width: listView.width\n                            height: titleLabel.height + 10\n                            color: \"transparent\"\n\n                            MyButton {\n                                id: titleLabel\n                                backgroundColor: index === listView.currentIndex ? 
theme.selectedBackground : theme.viewBackground\n                                backgroundColorHovered: backgroundColor\n                                borderColor: \"transparent\"\n                                borderWidth: 0\n                                textColor: theme.titleTextColor\n                                anchors.verticalCenter: parent.verticalCenter\n                                anchors.left: parent.left\n                                anchors.right: parent.right\n                                anchors.margins: 10\n                                font.bold: index === listView.currentIndex\n                                text: title\n                                textAlignment: Qt.AlignLeft\n                                font.pixelSize: theme.fontSizeLarge\n                                onClicked: {\n                                    listView.currentIndex = index\n                                }\n                            }\n                        }\n                    }\n                }\n            }\n\n            StackLayout {\n                id: stackLayout\n                anchors.top: parent.top\n                anchors.bottom: parent.bottom\n                anchors.left: stackList.right\n                anchors.right: parent.right\n                currentIndex: listView.currentIndex\n\n                MySettingsStack {\n                    tabs: [\n                        Component { ApplicationSettings { } }\n                    ]\n                }\n\n                MySettingsStack {\n                    tabs: [\n                        Component { ModelSettings { } }\n                    ]\n                }\n\n                MySettingsStack {\n                    tabs: [\n                        Component { LocalDocsSettings { } }\n                    ]\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/StartupDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport Qt5Compat.GraphicalEffects\nimport download\nimport network\nimport llm\nimport mysettings\n\nMyDialog {\n    id: startupDialog\n    anchors.centerIn: parent\n    modal: true\n    padding: 10\n    width: 1024\n    height: column.height + 20\n    closePolicy: !optInStatisticsRadio.choiceMade || !optInNetworkRadio.choiceMade ? Popup.NoAutoClose : (Popup.CloseOnEscape | Popup.CloseOnPressOutside)\n\n    Theme {\n        id: theme\n    }\n\n    Column {\n        id: column\n        spacing: 20\n        Item {\n            width: childrenRect.width\n            height: childrenRect.height\n            Image {\n                id: img\n                anchors.top: parent.top\n                anchors.left: parent.left\n                sourceSize.width: 60\n                sourceSize.height: 60\n                mipmap: true\n                visible: false\n                source: \"qrc:/gpt4all/icons/globe.svg\"\n            }\n            ColorOverlay {\n                anchors.fill: img\n                source: img\n                color: theme.titleTextColor\n            }\n            Text {\n                anchors.left: img.right\n                anchors.leftMargin: 10\n                anchors.verticalCenter: img.verticalCenter\n                text: qsTr(\"Welcome!\")\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n            }\n        }\n\n        ScrollView {\n            clip: true\n            height: 200\n            width: 1024 - 40\n            ScrollBar.vertical.policy: ScrollBar.AlwaysOn\n            ScrollBar.horizontal.policy: ScrollBar.AlwaysOff\n\n            MyTextArea {\n                id: welcome\n                width: 1024 - 40\n                textFormat: TextEdit.MarkdownText\n                text: qsTr(\"### Release Notes\\n%1<br/>\\n### 
Contributors\\n%2\").arg(Download.releaseInfo.notes).arg(Download.releaseInfo.contributors)\n                focus: false\n                readOnly: true\n                Accessible.role: Accessible.Paragraph\n                Accessible.name: qsTr(\"Release notes\")\n                Accessible.description: qsTr(\"Release notes for this version\")\n            }\n        }\n\n        ScrollView {\n            clip: true\n            height: 150\n            width: 1024 - 40\n            ScrollBar.vertical.policy: ScrollBar.AlwaysOn\n            ScrollBar.horizontal.policy: ScrollBar.AlwaysOff\n\n            MyTextArea {\n                id: optInTerms\n                width: 1024 - 40\n                textFormat: TextEdit.MarkdownText\n                text: qsTr(\n\"### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should, however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!\")\n\n                focus: false\n                readOnly: true\n                Accessible.role: Accessible.Paragraph\n                Accessible.name: qsTr(\"Terms for opt-in\")\n                Accessible.description: qsTr(\"Describes what will happen when you opt-in\")\n            }\n        }\n\n        GridLayout {\n            columns: 2\n            rowSpacing: 10\n            columnSpacing: 10\n            anchors.right: parent.right\n            Label {\n                id: optInStatistics\n                text: qsTr(\"Opt-in to anonymous usage analytics used to improve GPT4All\")\n                Layout.row: 0\n                Layout.column: 0\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                Accessible.role: Accessible.Paragraph\n                Accessible.name: qsTr(\"Opt-in for anonymous usage statistics\")\n            }\n\n            ButtonGroup {\n                buttons: optInStatisticsRadio.children\n                onClicked: {\n                    MySettings.networkUsageStatsActive = optInStatisticsRadio.checked\n                    if (optInNetworkRadio.choiceMade && optInStatisticsRadio.choiceMade)\n                        startupDialog.close();\n                }\n            }\n\n            RowLayout {\n                id: optInStatisticsRadio\n                Layout.alignment: Qt.AlignVCenter\n                Layout.row: 0\n                Layout.column: 1\n                property alias checked: optInStatisticsRadioYes.checked\n                property bool choiceMade: optInStatisticsRadioYes.checked || optInStatisticsRadioNo.checked\n\n                RadioButton {\n                    id: optInStatisticsRadioYes\n                    checked: MySettings.networkUsageStatsActive\n                    text: 
qsTr(\"Yes\")\n                    font.pixelSize: theme.fontSizeLarge\n                    Accessible.role: Accessible.RadioButton\n                    Accessible.name: qsTr(\"Opt-in for anonymous usage statistics\")\n                    Accessible.description: qsTr(\"Allow opt-in for anonymous usage statistics\")\n\n                    background: Rectangle {\n                        color: \"transparent\"\n                    }\n\n                    indicator: Rectangle {\n                        implicitWidth: 26\n                        implicitHeight: 26\n                        x: optInStatisticsRadioYes.leftPadding\n                        y: parent.height / 2 - height / 2\n                        radius: 13\n                        border.color: theme.dialogBorder\n                        color: \"transparent\"\n\n                        Rectangle {\n                            width: 14\n                            height: 14\n                            x: 6\n                            y: 6\n                            radius: 7\n                            color: theme.textColor\n                            visible: optInStatisticsRadioYes.checked\n                        }\n                    }\n\n                    contentItem: Text {\n                        text: optInStatisticsRadioYes.text\n                        font: optInStatisticsRadioYes.font\n                        opacity: enabled ? 
1.0 : 0.3\n                        color: theme.textColor\n                        verticalAlignment: Text.AlignVCenter\n                        leftPadding: optInStatisticsRadioYes.indicator.width + optInStatisticsRadioYes.spacing\n                    }\n                }\n                RadioButton {\n                    id: optInStatisticsRadioNo\n                    checked: MySettings.isNetworkUsageStatsActiveSet() && !MySettings.networkUsageStatsActive\n                    text: qsTr(\"No\")\n                    font.pixelSize: theme.fontSizeLarge\n                    Accessible.role: Accessible.RadioButton\n                    Accessible.name: qsTr(\"Opt-out for anonymous usage statistics\")\n                    Accessible.description: qsTr(\"Allow opt-out for anonymous usage statistics\")\n\n                    background: Rectangle {\n                        color: \"transparent\"\n                    }\n\n                    indicator: Rectangle {\n                        implicitWidth: 26\n                        implicitHeight: 26\n                        x: optInStatisticsRadioNo.leftPadding\n                        y: parent.height / 2 - height / 2\n                        radius: 13\n                        border.color: theme.dialogBorder\n                        color: \"transparent\"\n\n                        Rectangle {\n                            width: 14\n                            height: 14\n                            x: 6\n                            y: 6\n                            radius: 7\n                            color: theme.textColor\n                            visible: optInStatisticsRadioNo.checked\n                        }\n                    }\n\n                    contentItem: Text {\n                        text: optInStatisticsRadioNo.text\n                        font: optInStatisticsRadioNo.font\n                        opacity: enabled ? 
1.0 : 0.3\n                        color: theme.textColor\n                        verticalAlignment: Text.AlignVCenter\n                        leftPadding: optInStatisticsRadioNo.indicator.width + optInStatisticsRadioNo.spacing\n                    }\n                }\n            }\n\n            Label {\n                id: optInNetwork\n                text: qsTr(\"Opt-in to anonymous sharing of chats to the GPT4All Datalake\")\n                Layout.row: 1\n                Layout.column: 0\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n                Accessible.role: Accessible.Paragraph\n                Accessible.name: qsTr(\"Opt-in for network\")\n                Accessible.description: qsTr(\"Allow opt-in for network\")\n            }\n\n            ButtonGroup {\n                buttons: optInNetworkRadio.children\n                onClicked: {\n                    MySettings.networkIsActive = optInNetworkRadio.checked\n                    if (optInNetworkRadio.choiceMade && optInStatisticsRadio.choiceMade)\n                        startupDialog.close();\n                }\n            }\n\n            RowLayout {\n                id: optInNetworkRadio\n                Layout.alignment: Qt.AlignVCenter\n                Layout.row: 1\n                Layout.column: 1\n                property alias checked: optInNetworkRadioYes.checked\n                property bool choiceMade: optInNetworkRadioYes.checked || optInNetworkRadioNo.checked\n\n                RadioButton {\n                    id: optInNetworkRadioYes\n                    checked: MySettings.networkIsActive\n                    text: qsTr(\"Yes\")\n                    font.pixelSize: theme.fontSizeLarge\n                    Accessible.role: Accessible.RadioButton\n                    Accessible.name: qsTr(\"Opt-in for network\")\n                    Accessible.description: qsTr(\"Allow opt-in anonymous sharing of chats to the GPT4All 
Datalake\")\n\n                    background: Rectangle {\n                        color: \"transparent\"\n                    }\n\n                    indicator: Rectangle {\n                        implicitWidth: 26\n                        implicitHeight: 26\n                        x: optInNetworkRadioYes.leftPadding\n                        y: parent.height / 2 - height / 2\n                        radius: 13\n                        border.color: theme.dialogBorder\n                        color: \"transparent\"\n\n                        Rectangle {\n                            width: 14\n                            height: 14\n                            x: 6\n                            y: 6\n                            radius: 7\n                            color: theme.textColor\n                            visible: optInNetworkRadioYes.checked\n                        }\n                    }\n\n                    contentItem: Text {\n                        text: optInNetworkRadioYes.text\n                        font: optInNetworkRadioYes.font\n                        opacity: enabled ? 
1.0 : 0.3\n                        color: theme.textColor\n                        verticalAlignment: Text.AlignVCenter\n                        leftPadding: optInNetworkRadioYes.indicator.width + optInNetworkRadioYes.spacing\n                    }\n                }\n                RadioButton {\n                    id: optInNetworkRadioNo\n                    checked: MySettings.isNetworkIsActiveSet() && !MySettings.networkIsActive\n                    text: qsTr(\"No\")\n                    font.pixelSize: theme.fontSizeLarge\n                    Accessible.role: Accessible.RadioButton\n                    Accessible.name: qsTr(\"Opt-out for network\")\n                    Accessible.description: qsTr(\"Allow opt-out anonymous sharing of chats to the GPT4All Datalake\")\n\n                    background: Rectangle {\n                        color: \"transparent\"\n                    }\n\n                    indicator: Rectangle {\n                        implicitWidth: 26\n                        implicitHeight: 26\n                        x: optInNetworkRadioNo.leftPadding\n                        y: parent.height / 2 - height / 2\n                        radius: 13\n                        border.color: theme.dialogBorder\n                        color: \"transparent\"\n\n                        Rectangle {\n                            width: 14\n                            height: 14\n                            x: 6\n                            y: 6\n                            radius: 7\n                            color: theme.textColor\n                            visible: optInNetworkRadioNo.checked\n                        }\n                    }\n\n                    contentItem: Text {\n                        text: optInNetworkRadioNo.text\n                        font: optInNetworkRadioNo.font\n                        opacity: enabled ? 
1.0 : 0.3\n                        color: theme.textColor\n                        verticalAlignment: Text.AlignVCenter\n                        leftPadding: optInNetworkRadioNo.indicator.width + optInNetworkRadioNo.spacing\n                    }\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/Theme.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls.Basic\nimport mysettings\nimport mysettingsenums\n\nQtObject {\n    // black and white\n    property color black: Qt.hsla(231/360, 0.15, 0.19)\n    property color white: Qt.hsla(0, 0, 1)\n\n    // dark mode black and white\n    property color darkwhite: Qt.hsla(0, 0, 0.85)\n\n    // gray // FIXME: These are slightly less red than what atlas uses. should resolve diff\n    property color gray0: white\n    property color gray50: Qt.hsla(25/360, 0.05, 0.97)\n    property color gray100: Qt.hsla(25/360,0.05, 0.95)\n    property color gray200: Qt.hsla(25/360, 0.05, 0.89)\n    property color gray300: Qt.hsla(25/360, 0.05, 0.82)\n    property color gray400: Qt.hsla(25/360, 0.05, 0.71)\n    property color gray500: Qt.hsla(25/360, 0.05, 0.60)\n    property color gray600: Qt.hsla(25/360, 0.05, 0.51)\n    property color gray700: Qt.hsla(25/360, 0.05, 0.42)\n    property color gray800: Qt.hsla(25/360, 0.05, 0.35)\n    property color gray900: Qt.hsla(25/360, 0.05, 0.31)\n    property color gray950: Qt.hsla(25/360, 0.05, 0.15)\n\n    property color grayRed0: Qt.hsla(0/360, 0.108, 0.89)\n    property color grayRed50: Qt.hsla(0/360, 0.108, 0.85)\n    property color grayRed100: Qt.hsla(0/360, 0.108, 0.80)\n    property color grayRed200: Qt.hsla(0/360, 0.108, 0.76)\n    property color grayRed300: Qt.hsla(0/360, 0.108, 0.72)\n    property color grayRed400: Qt.hsla(0/360, 0.108, 0.68)\n    property color grayRed500: Qt.hsla(0/360, 0.108, 0.60)\n    property color grayRed600: Qt.hsla(0/360, 0.108, 0.56)\n    property color grayRed700: Qt.hsla(0/360, 0.108, 0.52)\n    property color grayRed800: Qt.hsla(0/360, 0.108, 0.48)\n    property color grayRed900: Qt.hsla(0/360, 0.108, 0.42)\n\n    // darkmode\n    property color darkgray0: Qt.hsla(25/360, 0.05, 0.23)\n    property color darkgray50: Qt.hsla(25/360, 0.05, 0.21)\n    property color darkgray100: Qt.hsla(25/360, 0.05, 0.19)\n    property color darkgray200: 
Qt.hsla(25/360, 0.05, 0.17)\n    property color darkgray300: Qt.hsla(25/360, 0.05, 0.15)\n    property color darkgray400: Qt.hsla(25/360, 0.05, 0.13)\n    property color darkgray500: Qt.hsla(25/360, 0.05, 0.11)\n    property color darkgray600: Qt.hsla(25/360, 0.05, 0.09)\n    property color darkgray700: Qt.hsla(25/360, 0.05, 0.07)\n    property color darkgray800: Qt.hsla(25/360, 0.05, 0.05)\n    property color darkgray900: Qt.hsla(25/360, 0.05, 0.03)\n    property color darkgray950: Qt.hsla(25/360, 0.05, 0.01)\n\n    // green\n    property color green50: Qt.hsla(120/360, 0.18, 0.97)\n    property color green100: Qt.hsla(120/360, 0.21, 0.93)\n    property color green200: Qt.hsla(124/360, 0.21, 0.85)\n    property color green300: Qt.hsla(122/360, 0.20, 0.73)\n    property color green400: Qt.hsla(122/360, 0.19, 0.58)\n    property color green500: Qt.hsla(121/360, 0.19, 0.45)\n    property color green600: Qt.hsla(122/360, 0.20, 0.33)\n    property color green700: Qt.hsla(122/360, 0.19, 0.29)\n    property color green800: Qt.hsla(123/360, 0.17, 0.24)\n    property color green900: Qt.hsla(124/360, 0.17, 0.20)\n    property color green950: Qt.hsla(125/360, 0.22, 0.10)\n    property color green300_sat: Qt.hsla(122/360, 0.24, 0.73)\n    property color green400_sat: Qt.hsla(122/360, 0.23, 0.58)\n    property color green450_sat: Qt.hsla(122/360, 0.23, 0.52)\n\n    // yellow\n    property color yellow0: Qt.hsla(47/360, 0.90, 0.99)\n    property color yellow25: Qt.hsla(47/360, 0.90, 0.98)\n    property color yellow50: Qt.hsla(47/360, 0.90, 0.96)\n    property color yellow100: Qt.hsla(46/360, 0.89, 0.89)\n    property color yellow200: Qt.hsla(45/360, 0.90, 0.77)\n    property color yellow300: Qt.hsla(44/360, 0.90, 0.66)\n    property color yellow400: Qt.hsla(41/360, 0.89, 0.56)\n    property color yellow500: Qt.hsla(36/360, 0.85, 0.50)\n    property color yellow600: Qt.hsla(30/360, 0.87, 0.44)\n    property color yellow700: Qt.hsla(24/360, 0.84, 0.37)\n    property color 
yellow800: Qt.hsla(21/360, 0.76, 0.31)\n    property color yellow900: Qt.hsla(20/360, 0.72, 0.26)\n    property color yellow950: Qt.hsla(19/360, 0.86, 0.14)\n\n    // red\n    property color red50: Qt.hsla(0, 0.71, 0.97)\n    property color red100: Qt.hsla(0, 0.87, 0.94)\n    property color red200: Qt.hsla(0, 0.89, 0.89)\n    property color red300: Qt.hsla(0, 0.85, 0.77)\n    property color red400: Qt.hsla(0, 0.83, 0.71)\n    property color red500: Qt.hsla(0, 0.76, 0.60)\n    property color red600: Qt.hsla(0, 0.65, 0.51)\n    property color red700: Qt.hsla(0, 0.67, 0.42)\n    property color red800: Qt.hsla(0, 0.63, 0.35)\n    property color red900: Qt.hsla(0, 0.56, 0.31)\n    property color red950: Qt.hsla(0, 0.67, 0.15)\n\n    // purple // FIXME: These are slightly more uniform than what atlas uses. should resolve diff\n    property color purple50: Qt.hsla(279/360, 1.0, 0.98)\n    property color purple100: Qt.hsla(279/360, 1.0, 0.95)\n    property color purple200: Qt.hsla(279/360, 1.0, 0.91)\n    property color purple300: Qt.hsla(279/360, 1.0, 0.84)\n    property color purple400: Qt.hsla(279/360, 1.0, 0.73)\n    property color purple450: Qt.hsla(279/360, 1.0, 0.68)\n    property color purple500: Qt.hsla(279/360, 1.0, 0.63)\n    property color purple600: Qt.hsla(279/360, 1.0, 0.53)\n    property color purple700: Qt.hsla(279/360, 1.0, 0.47)\n    property color purple800: Qt.hsla(279/360, 1.0, 0.39)\n    property color purple900: Qt.hsla(279/360, 1.0, 0.32)\n    property color purple950: Qt.hsla(279/360, 1.0, 0.22)\n\n    property color blue0: \"#d0d5db\"\n    property color blue100: \"#8e8ea0\"\n    property color blue200: \"#7d7d8e\"\n    property color blue400: \"#444654\"\n    property color blue500: \"#343541\"\n    property color blue600: \"#2c2d37\"\n    property color blue700: \"#26272f\"\n    property color blue800: \"#232628\"\n    property color blue900: \"#222527\"\n    property color blue950: \"#1c1f21\"\n    property color blue1000: \"#0e1011\"\n\n    
property color accentColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue200\n            case MySettingsEnums.ChatTheme.Dark:\n                return yellow300\n            default:\n                return yellow300\n        }\n    }\n\n /*\n  These nolonger apply to anything (remove this?)\n  Replaced by menuHighlightColor & menuBackgroundColor now using different colors.\n\n    property color darkContrast: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue950\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray300\n            default:\n                return gray100\n        }\n    }\n\n    property color lightContrast: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue400\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray0\n            default:\n                return gray0\n        }\n    }\n*/\n    property color controlBorder: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue800\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray0\n            default:\n                return gray300\n        }\n    }\n\n    property color controlBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue950\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray300\n            default:\n                return gray100\n        }\n    }\n\n    property color attachmentBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue900\n            case MySettingsEnums.ChatTheme.Dark:\n            
    return darkgray200\n            default:\n                return gray0\n        }\n    }\n\n    property color disabledControlBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue950\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray200\n            default:\n                return gray200\n        }\n    }\n\n    property color dividerColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue950\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray200\n            default:\n                return grayRed0\n        }\n    }\n\n    property color conversationDivider: {\n        return dividerColor\n    }\n\n    property color settingsDivider: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return dividerColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray400\n            default:\n                return grayRed500\n        }\n    }\n\n    property color viewBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue600\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray100\n            default:\n                return gray50\n        }\n    }\n/*\n  These nolonger apply to anything (remove this?)\n\n    property color containerForeground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue950\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray300\n            default:\n                return gray300\n        }\n    }\n*/\n    property color containerBackground: {\n        switch (MySettings.chatTheme) {\n            case 
MySettingsEnums.ChatTheme.LegacyDark:\n                return blue900\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray200\n            default:\n                return gray100\n        }\n    }\n\n    property color viewBarBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue950\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray400\n            default:\n                return gray100\n        }\n    }\n\n    property color progressForeground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return accentColor\n            default:\n                return green600\n        }\n    }\n\n    property color progressBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue900\n            case MySettingsEnums.ChatTheme.Dark:\n                return green600\n            default:\n                return green100\n        }\n    }\n\n    property color altProgressForeground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return progressForeground\n            default:\n                return \"#fcf0c9\"\n        }\n    }\n\n    property color altProgressBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return progressBackground\n            default:\n                return \"#fff9d2\"\n        }\n    }\n\n    property color altProgressText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return textColor\n            default:\n                return \"#d16f0e\"\n        }\n    }\n\n  
  property color checkboxBorder: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return accentColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return gray200\n            default:\n                return gray600\n        }\n    }\n\n    property color checkboxForeground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return accentColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return green300\n            default:\n                return green600\n        }\n    }\n\n    property color buttonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue950\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray300\n            default:\n                return green600\n        }\n    }\n\n    property color buttonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue900\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray400\n            default:\n                return green500\n        }\n    }\n\n    property color lightButtonText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return textColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return textColor\n            default:\n                return green600\n        }\n    }\n\n    property color lightButtonMutedText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return mutedTextColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return mutedTextColor\n            default:\n                return green300\n        }\n    }\n\n  
  property color lightButtonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBackground\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackground\n            default:\n                return green100\n        }\n    }\n\n    property color lightButtonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBackgroundHovered\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackgroundHovered\n            default:\n                return green200\n        }\n    }\n\n    property color mediumButtonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return green400_sat\n            default:\n                return green400_sat\n        }\n    }\n\n    property color mediumButtonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple450\n            case MySettingsEnums.ChatTheme.Dark:\n                return green450_sat\n            default:\n                return green300_sat\n        }\n    }\n\n    property color mediumButtonText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return textColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return textColor\n            default:\n                return white\n        }\n    }\n\n    property color darkButtonText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return textColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return textColor\n          
  default:\n                return red600\n        }\n    }\n\n    property color darkButtonMutedText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return mutedTextColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return mutedTextColor\n            default:\n                return red300\n        }\n    }\n\n    property color darkButtonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBackground\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackground\n            default:\n                return red200\n        }\n    }\n\n    property color darkButtonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBackgroundHovered\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackgroundHovered\n            default:\n                return red300\n        }\n    }\n\n    property color lighterButtonForeground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return textColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return textColor\n            default:\n                return green600\n        }\n    }\n\n    property color lighterButtonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBackground\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackground\n            default:\n                return green100\n        }\n    }\n\n    property color lighterButtonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return 
buttonBackgroundHovered\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackgroundHovered\n            default:\n                return green50\n        }\n    }\n\n    property color lighterButtonBackgroundHoveredRed: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBackgroundHovered\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackgroundHovered\n            default:\n                return red50\n        }\n    }\n\n    property color sourcesBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return lighterButtonBackground\n            case MySettingsEnums.ChatTheme.Dark:\n                return lighterButtonBackground\n            default:\n                return gray100\n        }\n    }\n\n    property color sourcesBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return lighterButtonBackgroundHovered\n            case MySettingsEnums.ChatTheme.Dark:\n                return lighterButtonBackgroundHovered\n            default:\n                return gray200\n        }\n    }\n\n    property color buttonBorder: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return accentColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return controlBorder\n            default:\n                return yellow200\n        }\n    }\n\n    property color conversationInputButtonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return accentColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return accentColor\n            default:\n                return black\n        }\n    }\n\n    property 
color conversationInputButtonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue0\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkwhite\n            default:\n                return accentColor\n        }\n    }\n\n    property color selectedBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue700\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray200\n            default:\n                return gray0\n        }\n    }\n\n    property color conversationButtonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue500\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray100\n            default:\n                return gray0\n        }\n    }\n   property color conversationBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue500\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray50\n            default:\n                return white\n        }\n    }\n\n    property color conversationProgress: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return green400\n            default:\n                return green400\n        }\n    }\n\n    property color conversationButtonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue400\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray0\n            default:\n                return gray100\n    
    }\n    }\n\n    property color conversationButtonBorder: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return accentColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return yellow200\n            default:\n                return yellow200\n        }\n    }\n\n    property color conversationHeader: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return green400\n            default:\n                return green500\n        }\n    }\n\n    property color collectionsButtonText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return black\n            case MySettingsEnums.ChatTheme.Dark:\n                return black\n            default:\n                return white\n        }\n    }\n\n    property color collectionsButtonProgress: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray400\n            default:\n                return green400\n        }\n    }\n\n    property color collectionsButtonForeground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return green300\n            default:\n                return green600\n        }\n    }\n\n    property color collectionsButtonBackground: {\n        switch (MySettings.chatTheme) {\n            default:\n                return lighterButtonBackground\n        }\n    }\n\n    property color collectionsButtonBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            default:\n    
            return lighterButtonBackgroundHovered\n        }\n    }\n\n    property color welcomeButtonBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBackground\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBackground\n            default:\n                return lighterButtonBackground\n        }\n    }\n\n    property color welcomeButtonBorder: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return buttonBorder\n            case MySettingsEnums.ChatTheme.Dark:\n                return buttonBorder\n            default:\n                return green300\n        }\n    }\n\n    property color welcomeButtonBorderHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple200\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray100\n            default:\n                return green400\n        }\n    }\n\n    property color welcomeButtonText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return textColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return textColor\n            default:\n                return green700\n        }\n    }\n\n    property color welcomeButtonTextHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple200\n            case MySettingsEnums.ChatTheme.Dark:\n                return gray400\n            default:\n                return green800\n        }\n    }\n\n    property color fancyLinkText: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return textColor\n            case 
MySettingsEnums.ChatTheme.Dark:\n                return textColor\n            default:\n                return grayRed900\n        }\n    }\n\n    property color fancyLinkTextHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return mutedTextColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return mutedTextColor\n            default:\n                return textColor\n        }\n    }\n\n    property color iconBackgroundDark: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue200\n            case MySettingsEnums.ChatTheme.Dark:\n                return green400\n            default:\n                return black\n        }\n    }\n\n    property color iconBackgroundLight: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue200\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkwhite\n            default:\n                return gray500\n        }\n    }\n\n    property color iconBackgroundHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue0\n            case MySettingsEnums.ChatTheme.Dark:\n                return gray400\n            default:\n                return accentColor\n        }\n    }\n\n    property color iconBackgroundViewBar: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return iconBackgroundLight\n            case MySettingsEnums.ChatTheme.Dark:\n                return iconBackgroundLight\n            default:\n                return green500\n        }\n    }\n\n    property color iconBackgroundViewBarToggled: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                
return iconBackgroundLight\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray50\n            default:\n                return green200\n        }\n    }\n\n    property color iconBackgroundViewBarHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return iconBackgroundHovered\n            case MySettingsEnums.ChatTheme.Dark:\n                return iconBackgroundHovered\n            default:\n                return green600\n        }\n    }\n\n    property color slugBackground: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue600\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray300\n            default:\n                return gray100\n        }\n    }\n\n    property color textColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue0\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkwhite\n            default:\n                return black\n        }\n    }\n\n    // lighter contrast\n    property color mutedLighterTextColor: {\n        switch (MySettings.chatTheme) {\n            default:\n                return gray300\n        }\n    }\n\n    // light contrast\n    property color mutedLightTextColor: {\n        switch (MySettings.chatTheme) {\n            default:\n                return gray400\n        }\n    }\n\n    // normal contrast\n    property color mutedTextColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue200\n            case MySettingsEnums.ChatTheme.Dark:\n                return gray400\n            default:\n                return gray500\n        }\n    }\n\n    // dark contrast\n    property color mutedDarkTextColor: {\n        switch 
(MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return mutedTextColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return mutedTextColor\n            default:\n                return grayRed500\n        }\n    }\n\n    // dark contrast hovered\n    property color mutedDarkTextColorHovered: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue400\n            default:\n                return grayRed900\n        }\n    }\n\n    property color oppositeTextColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return white\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkwhite\n            default:\n                return white\n        }\n    }\n\n    property color oppositeMutedTextColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return white\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkwhite\n            default:\n                return white\n        }\n    }\n\n    property color textAccent: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return accentColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return accentColor\n            default:\n                return accentColor\n        }\n    }\n\n    readonly property color textErrorColor:   red400\n    readonly property color textWarningColor: yellow400\n\n    property color settingsTitleTextColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue100\n            case MySettingsEnums.ChatTheme.Dark:\n                return green200\n            default:\n                return black\n        }\n    }\n\n 
   property color titleTextColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return green300\n            default:\n                return green700\n        }\n    }\n\n    property color titleTextColor2: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return darkwhite\n            case MySettingsEnums.ChatTheme.Dark:\n                return green200\n            default:\n                return green700\n        }\n    }\n\n    property color titleInfoTextColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue200\n            case MySettingsEnums.ChatTheme.Dark:\n                return gray400\n            default:\n                return gray600\n        }\n    }\n\n    property color styledTextColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple100\n            case MySettingsEnums.ChatTheme.Dark:\n                return yellow25\n            default:\n                return grayRed900\n        }\n    }\n\n    property color styledTextColorLighter: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple50\n            case MySettingsEnums.ChatTheme.Dark:\n                return yellow0\n            default:\n                return grayRed400\n        }\n    }\n\n    property color styledTextColor2: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue0\n            case MySettingsEnums.ChatTheme.Dark:\n                return yellow50\n            default:\n                return green500\n        }\n    }\n\n    property color 
chatDrawerSectionHeader: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple50\n            case MySettingsEnums.ChatTheme.Dark:\n                return yellow0\n            default:\n                return grayRed800\n        }\n    }\n\n    property color dialogBorder: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return accentColor\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray0\n            default:\n                return darkgray0\n        }\n    }\n\n    property color linkColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return yellow600\n            case MySettingsEnums.ChatTheme.Dark:\n                return yellow600\n            default:\n                return yellow600\n        }\n    }\n\n    property color mainHeader: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue900\n            case MySettingsEnums.ChatTheme.Dark:\n                return green600\n            default:\n                return green600\n        }\n    }\n\n    property color mainComboBackground: {\n        switch (MySettings.chatTheme) {\n            default:\n                return \"transparent\"\n        }\n    }\n\n    property color sendGlow: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue1000\n            case MySettingsEnums.ChatTheme.Dark:\n                return green950\n            default:\n                return green300\n        }\n    }\n\n    property color userColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue800\n            case MySettingsEnums.ChatTheme.Dark:\n           
     return green700\n            default:\n                return green700\n        }\n    }\n\n    property color assistantColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return purple400\n            case MySettingsEnums.ChatTheme.Dark:\n                return accentColor\n            default:\n                return accentColor\n        }\n    }\n\n    property color codeDefaultColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n            default:\n                return textColor\n        }\n    }\n\n    property color codeKeywordColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return \"#2e95d3\" // blue\n            default:\n                return \"#195273\" // dark blue\n        }\n    }\n\n    property color codeFunctionColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return\"#f22c3d\" // red\n            default:\n                return\"#7d1721\" // dark red\n        }\n    }\n\n    property color codeFunctionCallColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return \"#e9950c\" // orange\n            default:\n                return \"#815207\" // dark orange\n        }\n    }\n\n    property color codeCommentColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return \"#808080\" // gray\n            default:\n                return \"#474747\" // dark gray\n        }\n    }\n\n    property 
color codeStringColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return \"#00a37d\" // green\n            default:\n                return \"#004a39\" // dark green\n        }\n    }\n\n    property color codeNumberColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return \"#df3079\" // fuchsia\n            default:\n                return \"#761942\" // dark fuchsia\n        }\n    }\n\n    property color codeHeaderColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return containerBackground\n            default:\n                return green50\n        }\n    }\n\n    property color codeBackgroundColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return controlBackground\n            default:\n                return gray100\n        }\n    }\n\n    property color chatNameEditBgColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n            case MySettingsEnums.ChatTheme.Dark:\n                return controlBackground\n            default:\n                return gray100\n        }\n    }\n\n    property color menuBackgroundColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue700\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray200\n            default:\n                return gray50\n        }\n    }\n\n    property color menuHighlightColor: {\n        switch (MySettings.chatTheme) {\n            case 
MySettingsEnums.ChatTheme.LegacyDark:\n                return blue400\n            case MySettingsEnums.ChatTheme.Dark:\n                return darkgray0\n            default:\n                return green100\n        }\n    }\n\n    property color menuBorderColor: {\n        switch (MySettings.chatTheme) {\n            case MySettingsEnums.ChatTheme.LegacyDark:\n                return blue400\n            case MySettingsEnums.ChatTheme.Dark:\n                return gray800\n            default:\n                return gray300\n        }\n    }\n\n    property real fontScale: MySettings.fontSize === MySettingsEnums.FontSize.Small  ? 1 :\n                             MySettings.fontSize === MySettingsEnums.FontSize.Medium ? 1.3 :\n                                                  /* \"Large\" */ 1.8\n\n    property real fontSizeSmallest:     8 * fontScale\n    property real fontSizeSmaller:      9 * fontScale\n    property real fontSizeSmall:       10 * fontScale\n    property real fontSizeMedium:      11 * fontScale\n    property real fontSizeLarge:       12 * fontScale\n    property real fontSizeLarger:      14 * fontScale\n    property real fontSizeLargest:     18 * fontScale\n    property real fontSizeBannerSmall: 24 * fontScale**.8\n    property real fontSizeBanner:      32 * fontScale**.8\n    property real fontSizeBannerLarge: 48 * fontScale**.8\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ThumbsDownDialog.qml",
    "content": "import QtCore\nimport QtQuick\nimport QtQuick.Controls\nimport QtQuick.Controls.Basic\nimport QtQuick.Layouts\nimport download\nimport network\nimport llm\n\nMyDialog {\n    id: thumbsDownDialog\n    modal: true\n    padding: 20\n\n    Theme {\n        id: theme\n    }\n\n    property alias response: thumbsDownNewResponse.text\n\n    Column {\n        anchors.fill: parent\n        spacing: 20\n        Item {\n            width: childrenRect.width\n            height: childrenRect.height\n            Image {\n                id: img\n                anchors.top: parent.top\n                anchors.left: parent.left\n                width: 60\n                height: 60\n                source: \"qrc:/gpt4all/icons/thumbs_down.svg\"\n            }\n            Text {\n                anchors.left: img.right\n                anchors.leftMargin: 30\n                anchors.verticalCenter: img.verticalCenter\n                text: qsTr(\"Please edit the text below to provide a better response. 
(optional)\")\n                color: theme.textColor\n                font.pixelSize: theme.fontSizeLarge\n            }\n        }\n\n        ScrollView {\n            clip: true\n            height: 120\n            width: parent.width\n            ScrollBar.vertical.policy: ScrollBar.AsNeeded\n            ScrollBar.horizontal.policy: ScrollBar.AlwaysOff\n\n            MyTextArea {\n                id: thumbsDownNewResponse\n                placeholderText: qsTr(\"Please provide a better response...\")\n            }\n        }\n    }\n\n    footer: DialogButtonBox {\n        padding: 20\n        alignment: Qt.AlignRight\n        spacing: 10\n        MySettingsButton {\n            text: qsTr(\"Submit\")\n            Accessible.description: qsTr(\"Submits the user's response\")\n            DialogButtonBox.buttonRole: DialogButtonBox.AcceptRole\n        }\n        MySettingsButton {\n            text: qsTr(\"Cancel\")\n            Accessible.description: qsTr(\"Closes the response dialog\")\n            DialogButtonBox.buttonRole: DialogButtonBox.RejectRole\n        }\n        background: Rectangle {\n            color: \"transparent\"\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/Toast.qml",
    "content": "/*\n * SPDX-License-Identifier: MIT\n * Source: https://gist.github.com/jonmcclung/bae669101d17b103e94790341301c129\n * Adapted from StackOverflow: http://stackoverflow.com/questions/26879266/make-toast-in-android-by-qml\n */\n\nimport QtQuick 2.0\n\n/**\n  * @brief An Android-like timed message text in a box that self-destroys when finished if desired\n  */\nRectangle {\n    /**\n      * Public\n      */\n\n    /**\n      * @brief Shows this Toast\n      *\n      * @param {string} text Text to show\n      * @param {real} duration Duration to show in milliseconds, defaults to 3000\n      */\n    function show(text, duration=3000) {\n        message.text = text;\n        if (typeof duration !== \"undefined\") { // checks if parameter was passed\n            time = Math.max(duration, 2 * fadeTime);\n        }\n        else {\n            time = defaultTime;\n        }\n        animation.start();\n    }\n\n    property bool selfDestroying: false  // whether this Toast will self-destroy when it is finished\n\n    /**\n      * Private\n      */\n\n    id: root\n\n    readonly property real defaultTime: 3000\n    property real time: defaultTime\n    readonly property real fadeTime: 300\n\n    property real margin: 10\n\n    anchors {\n        left: parent.left\n        right: parent.right\n        margins: margin\n    }\n\n    height: message.height + margin\n    radius: margin\n\n    opacity: 0\n    color: \"#222222\"\n\n    Text {\n        id: message\n        color: \"white\"\n        wrapMode: Text.Wrap\n        horizontalAlignment: Text.AlignHCenter\n        anchors {\n            top: parent.top\n            left: parent.left\n            right: parent.right\n            margins: margin / 2\n        }\n    }\n\n    SequentialAnimation on opacity {\n        id: animation\n        running: false\n\n\n        NumberAnimation {\n            to: .9\n            duration: fadeTime\n        }\n\n        PauseAnimation {\n            duration: time - 2 * 
fadeTime\n        }\n\n        NumberAnimation {\n            to: 0\n            duration: fadeTime\n        }\n\n        onRunningChanged: {\n            if (!running && selfDestroying) {\n                root.destroy();\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/qml/ToastManager.qml",
    "content": "/*\n * SPDX-License-Identifier: MIT\n * Source: https://gist.github.com/jonmcclung/bae669101d17b103e94790341301c129\n * Adapted from StackOverflow: http://stackoverflow.com/questions/26879266/make-toast-in-android-by-qml\n */\n\nimport QtQuick 2.0\n\n/**\n  * @brief Manager that creates Toasts dynamically\n  */\nListView {\n    /**\n      * Public\n      */\n\n    /**\n      * @brief Shows a Toast\n      *\n      * @param {string} text Text to show\n      * @param {real} duration Duration to show in milliseconds, defaults to 3000\n      */\n    function show(text, duration=3000) {\n        model.insert(0, {text: text, duration: duration});\n    }\n\n    /**\n      * Private\n      */\n\n    id: root\n\n    z: Infinity\n    spacing: 5\n    anchors.fill: parent\n    anchors.bottomMargin: 10\n    verticalLayoutDirection: ListView.BottomToTop\n\n    interactive: false\n\n    displaced: Transition {\n        NumberAnimation {\n            properties: \"y\"\n            easing.type: Easing.InOutQuad\n        }\n    }\n    \n    delegate: Toast {\n        Component.onCompleted: {\n            if (typeof duration === \"undefined\") {\n                show(text);\n            }\n            else {\n                show(text, duration);\n            }\n        }\n    }\n\n    model: ListModel {id: model}\n}\n"
  },
  {
    "path": "gpt4all-chat/resources/gpt4all.rc",
    "content": "IDI_ICON1 ICON \"gpt4all.ico\"\n"
  },
  {
    "path": "gpt4all-chat/src/chat.cpp",
    "content": "#include \"chat.h\"\n\n#include \"chatlistmodel.h\"\n#include \"network.h\"\n#include \"server.h\"\n#include \"tool.h\"\n#include \"toolcallparser.h\"\n#include \"toolmodel.h\"\n\n#include <QByteArray>\n#include <QDataStream>\n#include <QDebug>\n#include <QFile>\n#include <QFileInfo>\n#include <QIODevice>\n#include <QLatin1String>\n#include <QMap>\n#include <QRegularExpression>\n#include <QString>\n#include <Qt>\n#include <QtAssert>\n#include <QtLogging>\n\n#include <optional>\n#include <utility>\n\nusing namespace ToolEnums;\n\n\nChat::Chat(QObject *parent)\n    : QObject(parent)\n    , m_id(Network::globalInstance()->generateUniqueId())\n    , m_name(tr(\"New Chat\"))\n    , m_chatModel(new ChatModel(this))\n    , m_responseState(Chat::ResponseStopped)\n    , m_creationDate(QDateTime::currentSecsSinceEpoch())\n    , m_llmodel(new ChatLLM(this))\n    , m_collectionModel(new LocalDocsCollectionsModel(this))\n{\n    connectLLM();\n}\n\nChat::Chat(server_tag_t, QObject *parent)\n    : QObject(parent)\n    , m_id(Network::globalInstance()->generateUniqueId())\n    , m_name(tr(\"Server Chat\"))\n    , m_chatModel(new ChatModel(this))\n    , m_responseState(Chat::ResponseStopped)\n    , m_creationDate(QDateTime::currentSecsSinceEpoch())\n    , m_llmodel(new Server(this))\n    , m_isServer(true)\n    , m_collectionModel(new LocalDocsCollectionsModel(this))\n{\n    connectLLM();\n}\n\nChat::~Chat()\n{\n    delete m_llmodel;\n    m_llmodel = nullptr;\n}\n\nvoid Chat::connectLLM()\n{\n    // Should be in different threads\n    connect(m_llmodel, &ChatLLM::modelLoadingPercentageChanged, this, &Chat::handleModelLoadingPercentageChanged, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::responseChanged, this, &Chat::handleResponseChanged, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::promptProcessing, this, &Chat::promptProcessing, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::generatingQuestions, this, &Chat::generatingQuestions, 
Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::modelLoadingError, this, &Chat::handleModelLoadingError, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::modelLoadingWarning, this, &Chat::modelLoadingWarning, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::generatedQuestionFinished, this, &Chat::generatedQuestionFinished, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::reportSpeed, this, &Chat::handleTokenSpeedChanged, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::loadedModelInfoChanged, this, &Chat::loadedModelInfoChanged, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::databaseResultsChanged, this, &Chat::handleDatabaseResultsChanged, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::modelInfoChanged, this, &Chat::handleModelChanged, Qt::QueuedConnection);\n    connect(m_llmodel, &ChatLLM::trySwitchContextOfLoadedModelCompleted, this, &Chat::handleTrySwitchContextOfLoadedModelCompleted, Qt::QueuedConnection);\n\n    connect(this, &Chat::promptRequested, m_llmodel, &ChatLLM::prompt, Qt::QueuedConnection);\n    connect(this, &Chat::modelChangeRequested, m_llmodel, &ChatLLM::modelChangeRequested, Qt::QueuedConnection);\n    connect(this, &Chat::loadDefaultModelRequested, m_llmodel, &ChatLLM::loadDefaultModel, Qt::QueuedConnection);\n    connect(this, &Chat::generateNameRequested, m_llmodel, &ChatLLM::generateName, Qt::QueuedConnection);\n    connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::QueuedConnection);\n\n    connect(this, &Chat::collectionListChanged, m_collectionModel, &LocalDocsCollectionsModel::setCollections);\n\n    connect(ModelList::globalInstance(), &ModelList::modelInfoChanged, this, &Chat::handleModelInfoChanged);\n}\n\nvoid 
Chat::reset()\n{\n    stopGenerating();\n    // Erase our current on disk representation as we're completely resetting the chat along with id\n    ChatListModel::globalInstance()->removeChatFile(this);\n    m_id = Network::globalInstance()->generateUniqueId();\n    emit idChanged(m_id);\n    // NOTE: We deliberately do no reset the name or creation date to indicate that this was originally\n    // an older chat that was reset for another purpose. Resetting this data will lead to the chat\n    // name label changing back to 'New Chat' and showing up in the chat model list as a 'New Chat'\n    // further down in the list. This might surprise the user. In the future, we might get rid of\n    // the \"reset context\" button in the UI.\n    m_chatModel->clear();\n    m_needsSave = true;\n}\n\nvoid Chat::resetResponseState()\n{\n    if (m_responseInProgress && m_responseState == Chat::LocalDocsRetrieval)\n        return;\n\n    m_generatedQuestions = QList<QString>();\n    emit generatedQuestionsChanged();\n    m_tokenSpeed = QString();\n    emit tokenSpeedChanged();\n    m_responseInProgress = true;\n    m_responseState = m_collections.empty() ? 
Chat::PromptProcessing : Chat::LocalDocsRetrieval;\n    emit responseInProgressChanged();\n    emit responseStateChanged();\n}\n\nvoid Chat::newPromptResponsePair(const QString &prompt, const QList<QUrl> &attachedUrls)\n{\n    QStringList attachedContexts;\n    QList<PromptAttachment> attachments;\n    for (const QUrl &url : attachedUrls) {\n        Q_ASSERT(url.isLocalFile());\n        const QString localFilePath = url.toLocalFile();\n        const QFileInfo info(localFilePath);\n\n        Q_ASSERT(\n            info.suffix().toLower() == \"xlsx\" ||\n            info.suffix().toLower() == \"txt\" ||\n            info.suffix().toLower() == \"md\" ||\n            info.suffix().toLower() == \"rst\"\n        );\n\n        PromptAttachment attached;\n        attached.url = url;\n\n        QFile file(localFilePath);\n        if (file.open(QIODevice::ReadOnly)) {\n            attached.content = file.readAll();\n            file.close();\n        } else {\n            qWarning() << \"ERROR: Failed to open the attachment:\" << localFilePath;\n            continue;\n        }\n\n        attachments << attached;\n        attachedContexts << attached.processedContent();\n    }\n\n    QString promptPlusAttached = prompt;\n    if (!attachedContexts.isEmpty())\n        promptPlusAttached = attachedContexts.join(\"\\n\\n\") + \"\\n\\n\" + prompt;\n\n    resetResponseState();\n    if (int count = m_chatModel->count())\n        m_chatModel->updateCurrentResponse(count - 1, false);\n    m_chatModel->appendPrompt(prompt, attachments);\n    m_chatModel->appendResponse();\n\n    emit promptRequested(m_collections);\n    m_needsSave = true;\n}\n\nvoid Chat::regenerateResponse(int index)\n{\n    resetResponseState();\n    emit regenerateResponseRequested(index);\n    m_needsSave = true;\n}\n\nQVariant Chat::popPrompt(int index)\n{\n    auto content = m_llmodel->popPrompt(index);\n    m_needsSave = true;\n    if (content) return *content;\n    return 
QVariant::fromValue(nullptr);\n}\n\nvoid Chat::stopGenerating()\n{\n    // In future if we have more than one tool we'll have to keep track of which tools are possibly\n    // running, but for now we only have one\n    Tool *toolInstance = ToolModel::globalInstance()->get(ToolCallConstants::CodeInterpreterFunction);\n    Q_ASSERT(toolInstance);\n    toolInstance->interrupt();\n    m_llmodel->stopGenerating();\n}\n\nChat::ResponseState Chat::responseState() const\n{\n    return m_responseState;\n}\n\nvoid Chat::handleResponseChanged()\n{\n    if (m_responseState != Chat::ResponseGeneration) {\n        m_responseState = Chat::ResponseGeneration;\n        emit responseStateChanged();\n    }\n}\n\nvoid Chat::handleModelLoadingPercentageChanged(float loadingPercentage)\n{\n    if (m_shouldDeleteLater)\n        deleteLater();\n\n    if (loadingPercentage == m_modelLoadingPercentage)\n        return;\n\n    bool wasLoading = isCurrentlyLoading();\n    bool wasLoaded = isModelLoaded();\n\n    m_modelLoadingPercentage = loadingPercentage;\n    emit modelLoadingPercentageChanged();\n\n    if (isCurrentlyLoading() != wasLoading)\n        emit isCurrentlyLoadingChanged();\n\n    if (isModelLoaded() != wasLoaded)\n        emit isModelLoadedChanged();\n}\n\nvoid Chat::promptProcessing()\n{\n    m_responseState = !databaseResults().isEmpty() ? 
Chat::LocalDocsProcessing : Chat::PromptProcessing;\n    emit responseStateChanged();\n}\n\nvoid Chat::generatingQuestions()\n{\n    m_responseState = Chat::GeneratingQuestions;\n    emit responseStateChanged();\n}\n\nvoid Chat::responseStopped(qint64 promptResponseMs)\n{\n    m_tokenSpeed = QString();\n    emit tokenSpeedChanged();\n\n    m_responseInProgress = false;\n    m_responseState = Chat::ResponseStopped;\n    emit responseInProgressChanged();\n    emit responseStateChanged();\n\n    const QString possibleToolcall = m_chatModel->possibleToolcall();\n\n    Network::globalInstance()->trackChatEvent(\"response_stopped\", {\n        {\"first\", m_firstResponse},\n        {\"message_count\", chatModel()->count()},\n        {\"$duration\", promptResponseMs / 1000.},\n    });\n\n    ToolCallParser parser;\n    parser.update(possibleToolcall.toUtf8());\n    if (parser.state() == ToolEnums::ParseState::Complete && parser.startTag() != ToolCallConstants::ThinkStartTag)\n        processToolCall(parser.toolCall());\n    else\n        responseComplete();\n}\n\nvoid Chat::processToolCall(const QString &toolCall)\n{\n    m_responseState = Chat::ToolCallGeneration;\n    emit responseStateChanged();\n    // Regex to remove the formatting around the code\n    static const QRegularExpression regex(\"^\\\\s*```javascript\\\\s*|\\\\s*```\\\\s*$\");\n    QString code = toolCall;\n    code.remove(regex);\n    code = code.trimmed();\n\n    // Right now the code interpreter is the only available tool\n    Tool *toolInstance = ToolModel::globalInstance()->get(ToolCallConstants::CodeInterpreterFunction);\n    Q_ASSERT(toolInstance);\n    connect(toolInstance, &Tool::runComplete, this, &Chat::toolCallComplete, Qt::SingleShotConnection);\n\n    // The param is the code\n    const ToolParam param = { \"code\", ToolEnums::ParamType::String, code };\n    m_responseInProgress = true;\n    emit responseInProgressChanged();\n    toolInstance->run({param});\n}\n\nvoid 
Chat::toolCallComplete(const ToolCallInfo &info)\n{\n    // Update the current response with meta information about toolcall and re-parent\n    m_chatModel->updateToolCall(info);\n\n    ++m_consecutiveToolCalls;\n\n    m_responseInProgress = false;\n    emit responseInProgressChanged();\n\n    // We limit the number of consecutive toolcalls otherwise we get into a potentially endless loop\n    if (m_consecutiveToolCalls < 3 || info.error == ToolEnums::Error::NoError) {\n        resetResponseState();\n        emit promptRequested(m_collections); // triggers a new response\n        return;\n    }\n\n    responseComplete();\n}\n\nvoid Chat::responseComplete()\n{\n    if (m_generatedName.isEmpty())\n        emit generateNameRequested();\n\n    m_responseState = Chat::ResponseStopped;\n    emit responseStateChanged();\n\n    m_consecutiveToolCalls = 0;\n    m_firstResponse = false;\n}\n\nModelInfo Chat::modelInfo() const\n{\n    return m_modelInfo;\n}\n\nvoid Chat::setModelInfo(const ModelInfo &modelInfo)\n{\n    if (m_modelInfo != modelInfo) {\n        m_modelInfo = modelInfo;\n        m_needsSave = true;\n    } else if (isModelLoaded())\n        return;\n\n    emit modelInfoChanged();\n    emit modelChangeRequested(modelInfo);\n}\n\nvoid Chat::unloadAndDeleteLater()\n{\n    if (!isModelLoaded()) {\n        deleteLater();\n        return;\n    }\n\n    m_shouldDeleteLater = true;\n    unloadModel();\n}\n\nvoid Chat::markForDeletion()\n{\n    m_llmodel->setMarkedForDeletion(true);\n}\n\nvoid Chat::unloadModel()\n{\n    stopGenerating();\n    m_llmodel->setShouldBeLoaded(false);\n}\n\nvoid Chat::reloadModel()\n{\n    m_llmodel->setShouldBeLoaded(true);\n}\n\nvoid Chat::forceUnloadModel()\n{\n    stopGenerating();\n    m_llmodel->setForceUnloadModel(true);\n    m_llmodel->setShouldBeLoaded(false);\n}\n\nvoid Chat::forceReloadModel()\n{\n    m_llmodel->setForceUnloadModel(true);\n    m_llmodel->setShouldBeLoaded(true);\n}\n\nvoid Chat::trySwitchContextOfLoadedModel()\n{\n  
  m_trySwitchContextInProgress = 1;\n    emit trySwitchContextInProgressChanged();\n    m_llmodel->requestTrySwitchContext();\n}\n\nvoid Chat::generatedNameChanged(const QString &name)\n{\n    m_generatedName = name;\n    m_name = name;\n    emit nameChanged();\n    m_needsSave = true;\n}\n\nvoid Chat::generatedQuestionFinished(const QString &question)\n{\n    m_generatedQuestions << question;\n    emit generatedQuestionsChanged();\n    m_needsSave = true;\n}\n\nvoid Chat::handleModelLoadingError(const QString &error)\n{\n    if (!error.isEmpty()) {\n        auto stream = qWarning().noquote() << \"ERROR:\" << error << \"id\";\n        stream.quote() << id();\n    }\n    m_modelLoadingError = error;\n    emit modelLoadingErrorChanged();\n}\n\nvoid Chat::handleTokenSpeedChanged(const QString &tokenSpeed)\n{\n    m_tokenSpeed = tokenSpeed;\n    emit tokenSpeedChanged();\n}\n\nQString Chat::deviceBackend() const\n{\n    return m_llmodel->deviceBackend();\n}\n\nQString Chat::device() const\n{\n    return m_llmodel->device();\n}\n\nQString Chat::fallbackReason() const\n{\n    return m_llmodel->fallbackReason();\n}\n\nvoid Chat::handleDatabaseResultsChanged(const QList<ResultInfo> &results)\n{\n    m_databaseResults = results;\n    m_needsSave = true;\n}\n\n// we need to notify listeners of the modelInfo property when its properties are updated,\n// since it's a gadget and can't do that on its own\nvoid Chat::handleModelInfoChanged(const ModelInfo &modelInfo)\n{\n    if (!m_modelInfo.id().isNull() && modelInfo.id() == m_modelInfo.id())\n        emit modelInfoChanged();\n}\n\n// react if a new model is loaded\nvoid Chat::handleModelChanged(const ModelInfo &modelInfo)\n{\n    if (m_modelInfo == modelInfo)\n        return;\n\n    m_modelInfo = modelInfo;\n    emit modelInfoChanged();\n    m_needsSave = true;\n}\n\nvoid Chat::handleTrySwitchContextOfLoadedModelCompleted(int value)\n{\n    m_trySwitchContextInProgress = value;\n    emit 
trySwitchContextInProgressChanged();\n}\n\nbool Chat::serialize(QDataStream &stream, int version) const\n{\n    stream << m_creationDate;\n    stream << m_id;\n    stream << m_name;\n    stream << m_userName;\n    if (version >= 5)\n        stream << m_modelInfo.id();\n    else\n        stream << m_modelInfo.filename();\n    if (version >= 3)\n        stream << m_collections;\n\n    if (!m_llmodel->serialize(stream, version))\n        return false;\n    if (!m_chatModel->serialize(stream, version))\n        return false;\n    return stream.status() == QDataStream::Ok;\n}\n\nbool Chat::deserialize(QDataStream &stream, int version)\n{\n    stream >> m_creationDate;\n    stream >> m_id;\n    emit idChanged(m_id);\n    stream >> m_name;\n    stream >> m_userName;\n    m_generatedName = QLatin1String(\"nonempty\");\n    emit nameChanged();\n\n    QString modelId;\n    stream >> modelId;\n    if (version >= 5) {\n        if (ModelList::globalInstance()->contains(modelId))\n            m_modelInfo = ModelList::globalInstance()->modelInfo(modelId);\n    } else {\n        if (ModelList::globalInstance()->containsByFilename(modelId))\n            m_modelInfo = ModelList::globalInstance()->modelInfoByFilename(modelId);\n    }\n    if (!m_modelInfo.id().isEmpty())\n        emit modelInfoChanged();\n\n    if (version >= 3) {\n        stream >> m_collections;\n        emit collectionListChanged(m_collections);\n    }\n\n    m_llmodel->setModelInfo(m_modelInfo);\n    if (!m_llmodel->deserialize(stream, version))\n        return false;\n    if (!m_chatModel->deserialize(stream, version))\n        return false;\n\n    emit chatModelChanged();\n    if (stream.status() != QDataStream::Ok)\n        return false;\n\n    m_needsSave = false;\n    return true;\n}\n\nQList<QString> Chat::collectionList() const\n{\n    return m_collections;\n}\n\nbool Chat::hasCollection(const QString &collection) const\n{\n    return m_collections.contains(collection);\n}\n\nvoid Chat::addCollection(const 
QString &collection)\n{\n    if (hasCollection(collection))\n        return;\n\n    m_collections.append(collection);\n    emit collectionListChanged(m_collections);\n    m_needsSave = true;\n}\n\nvoid Chat::removeCollection(const QString &collection)\n{\n    if (!hasCollection(collection))\n        return;\n\n    m_collections.removeAll(collection);\n    emit collectionListChanged(m_collections);\n    m_needsSave = true;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/chat.h",
    "content": "#ifndef CHAT_H\n#define CHAT_H\n\n#include \"chatllm.h\"\n#include \"chatmodel.h\"\n#include \"database.h\"\n#include \"localdocsmodel.h\"\n#include \"modellist.h\"\n#include \"tool.h\"\n\n#include <QDateTime>\n#include <QList>\n#include <QObject>\n#include <QQmlEngine> // IWYU pragma: keep\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n#include <QUrl>\n#include <QVariant>\n#include <QtTypes>\n\n// IWYU pragma: no_forward_declare LocalDocsCollectionsModel\n// IWYU pragma: no_forward_declare ToolCallInfo\nclass QDataStream;\n\n\nclass Chat : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(QString id READ id NOTIFY idChanged)\n    Q_PROPERTY(QString name READ name WRITE setName NOTIFY nameChanged)\n    Q_PROPERTY(ChatModel *chatModel READ chatModel NOTIFY chatModelChanged)\n    Q_PROPERTY(bool isModelLoaded READ isModelLoaded NOTIFY isModelLoadedChanged)\n    Q_PROPERTY(bool isCurrentlyLoading READ isCurrentlyLoading NOTIFY isCurrentlyLoadingChanged)\n    Q_PROPERTY(float modelLoadingPercentage READ modelLoadingPercentage NOTIFY modelLoadingPercentageChanged)\n    Q_PROPERTY(ModelInfo modelInfo READ modelInfo WRITE setModelInfo NOTIFY modelInfoChanged)\n    Q_PROPERTY(bool responseInProgress READ responseInProgress NOTIFY responseInProgressChanged)\n    Q_PROPERTY(bool isServer READ isServer NOTIFY isServerChanged)\n    Q_PROPERTY(ResponseState responseState READ responseState NOTIFY responseStateChanged)\n    Q_PROPERTY(QList<QString> collectionList READ collectionList NOTIFY collectionListChanged)\n    Q_PROPERTY(QString modelLoadingError READ modelLoadingError NOTIFY modelLoadingErrorChanged)\n    Q_PROPERTY(QString tokenSpeed READ tokenSpeed NOTIFY tokenSpeedChanged)\n    Q_PROPERTY(QString deviceBackend READ deviceBackend NOTIFY loadedModelInfoChanged)\n    Q_PROPERTY(QString device READ device NOTIFY loadedModelInfoChanged)\n    Q_PROPERTY(QString fallbackReason READ fallbackReason NOTIFY loadedModelInfoChanged)\n    
Q_PROPERTY(LocalDocsCollectionsModel *collectionModel READ collectionModel NOTIFY collectionModelChanged)\n    // 0=no, 1=waiting, 2=working\n    Q_PROPERTY(int trySwitchContextInProgress READ trySwitchContextInProgress NOTIFY trySwitchContextInProgressChanged)\n    Q_PROPERTY(QList<QString> generatedQuestions READ generatedQuestions NOTIFY generatedQuestionsChanged)\n    QML_ELEMENT\n    QML_UNCREATABLE(\"Only creatable from c++!\")\n\npublic:\n    // tag for constructing a server chat\n    struct server_tag_t { explicit server_tag_t() = default; };\n    static inline constexpr server_tag_t server_tag = server_tag_t();\n\n    enum ResponseState {\n        ResponseStopped,\n        LocalDocsRetrieval,\n        LocalDocsProcessing,\n        PromptProcessing,\n        GeneratingQuestions,\n        ResponseGeneration,\n        ToolCallGeneration\n    };\n    Q_ENUM(ResponseState)\n\n    explicit Chat(QObject *parent = nullptr);\n    explicit Chat(server_tag_t, QObject *parent = nullptr);\n    virtual ~Chat();\n    void destroy() { m_llmodel->destroy(); }\n    void connectLLM();\n\n    QString id() const { return m_id; }\n    QString name() const { return m_userName.isEmpty() ? 
m_name : m_userName; }\n    void setName(const QString &name)\n    {\n        m_userName = name;\n        emit nameChanged();\n        m_needsSave = true;\n    }\n    ChatModel *chatModel() { return m_chatModel; }\n\n    bool isNewChat() const { return m_name == tr(\"New Chat\") && !m_chatModel->count(); }\n\n    Q_INVOKABLE void reset();\n    bool  isModelLoaded()          const { return m_modelLoadingPercentage == 1.0f; }\n    bool  isCurrentlyLoading()     const { return m_modelLoadingPercentage > 0.0f && m_modelLoadingPercentage < 1.0f; }\n    float modelLoadingPercentage() const { return m_modelLoadingPercentage; }\n    Q_INVOKABLE void newPromptResponsePair(const QString &prompt, const QList<QUrl> &attachedUrls = {});\n    Q_INVOKABLE void regenerateResponse(int index);\n    Q_INVOKABLE QVariant popPrompt(int index);\n    Q_INVOKABLE void stopGenerating();\n\n    QList<ResultInfo> databaseResults() const { return m_databaseResults; }\n\n    bool responseInProgress() const { return m_responseInProgress; }\n    ResponseState responseState() const;\n    ModelInfo modelInfo() const;\n    void setModelInfo(const ModelInfo &modelInfo);\n\n    Q_INVOKABLE void unloadModel();\n    Q_INVOKABLE void reloadModel();\n    Q_INVOKABLE void forceUnloadModel();\n    Q_INVOKABLE void forceReloadModel();\n    Q_INVOKABLE void trySwitchContextOfLoadedModel();\n    void unloadAndDeleteLater();\n    void markForDeletion();\n\n    QDateTime creationDate() const { return QDateTime::fromSecsSinceEpoch(m_creationDate); }\n    bool serialize(QDataStream &stream, int version) const;\n    bool deserialize(QDataStream &stream, int version);\n    bool isServer() const { return m_isServer; }\n\n    QList<QString> collectionList() const;\n    LocalDocsCollectionsModel *collectionModel() const { return m_collectionModel; }\n\n    Q_INVOKABLE bool hasCollection(const QString &collection) const;\n    Q_INVOKABLE void addCollection(const QString &collection);\n    Q_INVOKABLE void 
removeCollection(const QString &collection);\n\n    QString modelLoadingError() const { return m_modelLoadingError; }\n\n    QString tokenSpeed() const { return m_tokenSpeed; }\n    QString deviceBackend() const;\n    QString device() const;\n    // not loaded -> QString(), no fallback -> QString(\"\")\n    QString fallbackReason() const;\n\n    int trySwitchContextInProgress() const { return m_trySwitchContextInProgress; }\n\n    QList<QString> generatedQuestions() const { return m_generatedQuestions; }\n\n    bool needsSave() const { return m_needsSave; }\n    void setNeedsSave(bool n) { m_needsSave = n; }\n\npublic Q_SLOTS:\n    void resetResponseState();\n\nQ_SIGNALS:\n    void idChanged(const QString &id);\n    void nameChanged();\n    void chatModelChanged();\n    void isModelLoadedChanged();\n    void isCurrentlyLoadingChanged();\n    void modelLoadingPercentageChanged();\n    void modelLoadingWarning(const QString &warning);\n    void responseInProgressChanged();\n    void responseStateChanged();\n    void promptRequested(const QStringList &enabledCollections);\n    void regenerateResponseRequested(int index);\n    void resetResponseRequested();\n    void resetContextRequested();\n    void modelChangeRequested(const ModelInfo &modelInfo);\n    void modelInfoChanged();\n    void loadDefaultModelRequested();\n    void generateNameRequested();\n    void modelLoadingErrorChanged();\n    void isServerChanged();\n    void collectionListChanged(const QList<QString> &collectionList);\n    void tokenSpeedChanged();\n    void deviceChanged();\n    void fallbackReasonChanged();\n    void collectionModelChanged();\n    void trySwitchContextInProgressChanged();\n    void loadedModelInfoChanged();\n    void generatedQuestionsChanged();\n\nprivate Q_SLOTS:\n    void handleResponseChanged();\n    void handleModelLoadingPercentageChanged(float);\n    void promptProcessing();\n    void generatingQuestions();\n    void responseStopped(qint64 promptResponseMs);\n    void 
processToolCall(const QString &toolCall);\n    void toolCallComplete(const ToolCallInfo &info);\n    void responseComplete();\n    void generatedNameChanged(const QString &name);\n    void generatedQuestionFinished(const QString &question);\n    void handleModelLoadingError(const QString &error);\n    void handleTokenSpeedChanged(const QString &tokenSpeed);\n    void handleDatabaseResultsChanged(const QList<ResultInfo> &results);\n    void handleModelInfoChanged(const ModelInfo &modelInfo);\n    void handleModelChanged(const ModelInfo &modelInfo);\n    void handleTrySwitchContextOfLoadedModelCompleted(int value);\n\nprivate:\n    QString m_id;\n    QString m_name;\n    QString m_generatedName;\n    QString m_userName;\n    ModelInfo m_modelInfo;\n    QString m_modelLoadingError;\n    QString m_tokenSpeed;\n    QString m_device;\n    QString m_fallbackReason;\n    QList<QString> m_collections;\n    QList<QString> m_generatedQuestions;\n    ChatModel *m_chatModel;\n    bool m_responseInProgress = false;\n    ResponseState m_responseState;\n    qint64 m_creationDate;\n    ChatLLM *m_llmodel;\n    QList<ResultInfo> m_databaseResults;\n    bool m_isServer = false;\n    bool m_shouldDeleteLater = false;\n    float m_modelLoadingPercentage = 0.0f;\n    LocalDocsCollectionsModel *m_collectionModel;\n    bool m_firstResponse = true;\n    int m_trySwitchContextInProgress = 0;\n    bool m_isCurrentlyLoading = false;\n    // True if we need to serialize the chat to disk, because of one of two reasons:\n    // - The chat was freshly created during this launch.\n    // - The chat was changed after loading it from disk.\n    bool m_needsSave = true;\n    int m_consecutiveToolCalls = 0;\n};\n\n#endif // CHAT_H\n"
  },
  {
    "path": "gpt4all-chat/src/chatapi.cpp",
    "content": "#include \"chatapi.h\"\n\n#include \"utils.h\"\n\n#include <fmt/format.h>\n\n#include <QAnyStringView>\n#include <QCoreApplication>\n#include <QDebug>\n#include <QGuiApplication>\n#include <QJsonArray>\n#include <QJsonDocument>\n#include <QJsonObject>\n#include <QJsonValue>\n#include <QLatin1String>\n#include <QNetworkAccessManager>\n#include <QNetworkRequest>\n#include <QStringView>\n#include <QThread>\n#include <QUrl>\n#include <QUtf8StringView> // IWYU pragma: keep\n#include <QVariant>\n#include <QXmlStreamReader>\n#include <Qt>\n#include <QtAssert>\n#include <QtLogging>\n\n#include <expected>\n#include <functional>\n#include <iostream>\n#include <utility>\n\nusing namespace Qt::Literals::StringLiterals;\n\n//#define DEBUG\n\n\nChatAPI::ChatAPI()\n    : QObject(nullptr)\n    , m_modelName(\"gpt-3.5-turbo\")\n    , m_requestURL(\"\")\n    , m_responseCallback(nullptr)\n{\n}\n\nsize_t ChatAPI::requiredMem(const std::string &modelPath, int n_ctx, int ngl)\n{\n    Q_UNUSED(modelPath);\n    Q_UNUSED(n_ctx);\n    Q_UNUSED(ngl);\n    return 0;\n}\n\nbool ChatAPI::loadModel(const std::string &modelPath, int n_ctx, int ngl)\n{\n    Q_UNUSED(modelPath);\n    Q_UNUSED(n_ctx);\n    Q_UNUSED(ngl);\n    return true;\n}\n\nvoid ChatAPI::setThreadCount(int32_t n_threads)\n{\n    Q_UNUSED(n_threads);\n}\n\nint32_t ChatAPI::threadCount() const\n{\n    return 1;\n}\n\nChatAPI::~ChatAPI()\n{\n}\n\nbool ChatAPI::isModelLoaded() const\n{\n    return true;\n}\n\nstatic auto parsePrompt(QXmlStreamReader &xml) -> std::expected<QJsonArray, QString>\n{\n    QJsonArray messages;\n\n    auto xmlError = [&xml] {\n        return std::unexpected(u\"%1:%2: %3\"_s.arg(xml.lineNumber()).arg(xml.columnNumber()).arg(xml.errorString()));\n    };\n\n    if (xml.hasError())\n        return xmlError();\n    if (xml.atEnd())\n        return messages;\n\n    // skip header\n    bool foundElement = false;\n    do {\n        switch (xml.readNext()) {\n            using enum 
QXmlStreamReader::TokenType;\n        case Invalid:\n            return xmlError();\n        case EndDocument:\n            return messages;\n        default:\n            foundElement = true;\n        case StartDocument:\n        case Comment:\n        case DTD:\n        case ProcessingInstruction:\n            ;\n        }\n    } while (!foundElement);\n\n    // document body loop\n    bool foundRoot = false;\n    for (;;) {\n        switch (xml.tokenType()) {\n            using enum QXmlStreamReader::TokenType;\n        case StartElement:\n            {\n                auto name = xml.name();\n                if (!foundRoot) {\n                    if (name != \"chat\"_L1)\n                        return std::unexpected(u\"unexpected tag: %1\"_s.arg(name));\n                    foundRoot = true;\n                } else {\n                    if (name != \"user\"_L1 && name != \"assistant\"_L1 && name != \"system\"_L1)\n                        return std::unexpected(u\"unknown role: %1\"_s.arg(name));\n                    auto content = xml.readElementText();\n                    if (xml.tokenType() != EndElement)\n                        return xmlError();\n                    messages << makeJsonObject({\n                        { \"role\"_L1,    name.toString().trimmed() },\n                        { \"content\"_L1, content                   },\n                    });\n                }\n                break;\n            }\n        case Characters:\n            if (!xml.isWhitespace())\n                return std::unexpected(u\"unexpected text: %1\"_s.arg(xml.text()));\n        case Comment:\n        case ProcessingInstruction:\n        case EndElement:\n            break;\n        case EndDocument:\n            return messages;\n        case Invalid:\n            return xmlError();\n        default:\n            return std::unexpected(u\"unexpected token: %1\"_s.arg(xml.tokenString()));\n        }\n        xml.readNext();\n    }\n}\n\nvoid 
ChatAPI::prompt(\n    std::string_view        prompt,\n    const PromptCallback   &promptCallback,\n    const ResponseCallback &responseCallback,\n    const PromptContext    &promptCtx\n) {\n    Q_UNUSED(promptCallback)\n\n    if (!isModelLoaded())\n        throw std::invalid_argument(\"Attempted to prompt an unloaded model.\");\n    if (!promptCtx.n_predict)\n        return; // nothing requested\n\n    // FIXME: We don't set the max_tokens on purpose because in order to do so safely without encountering\n    // an error we need to be able to count the tokens in our prompt. The only way to do this is to use\n    // the OpenAI tiktoken library or to implement our own tokenization function that matches precisely\n    // the tokenization used by the OpenAI model we're calling. OpenAI has not introduced any means of\n    // using the REST API to count tokens in a prompt.\n    auto root = makeJsonObject({\n        { \"model\"_L1,       m_modelName     },\n        { \"stream\"_L1,      true            },\n        { \"temperature\"_L1, promptCtx.temp  },\n        { \"top_p\"_L1,       promptCtx.top_p },\n    });\n\n    // conversation history\n    {\n        QUtf8StringView promptUtf8(prompt);\n        QXmlStreamReader xml(promptUtf8);\n        auto messages = parsePrompt(xml);\n        if (!messages) {\n            auto error = fmt::format(\"Failed to parse API model prompt: {}\", messages.error());\n            qDebug().noquote() << \"ChatAPI ERROR:\" << error << \"Prompt:\\n\\n\" << promptUtf8 << '\\n';\n            throw std::invalid_argument(error);\n        }\n        root.insert(\"messages\"_L1, *messages);\n    }\n\n    QJsonDocument doc(root);\n\n#if defined(DEBUG)\n    qDebug().noquote() << \"ChatAPI::prompt begin network request\" << doc.toJson();\n#endif\n\n    m_responseCallback = responseCallback;\n\n    // The following code sets up a worker thread and object to perform the actual api request to\n    // chatgpt and then blocks until it is finished\n    
QThread workerThread;\n    ChatAPIWorker worker(this);\n    worker.moveToThread(&workerThread);\n    connect(&worker, &ChatAPIWorker::finished, &workerThread, &QThread::quit, Qt::DirectConnection);\n    connect(this, &ChatAPI::request, &worker, &ChatAPIWorker::request, Qt::QueuedConnection);\n    workerThread.start();\n    emit request(m_apiKey, doc.toJson(QJsonDocument::Compact));\n    workerThread.wait();\n\n    m_responseCallback = nullptr;\n\n#if defined(DEBUG)\n    qDebug() << \"ChatAPI::prompt end network request\";\n#endif\n}\n\nbool ChatAPI::callResponse(int32_t token, const std::string& string)\n{\n    Q_ASSERT(m_responseCallback);\n    if (!m_responseCallback) {\n        std::cerr << \"ChatAPI ERROR: no response callback!\\n\";\n        return false;\n    }\n    return m_responseCallback(token, string);\n}\n\nvoid ChatAPIWorker::request(const QString &apiKey, const QByteArray &array)\n{\n    QUrl apiUrl(m_chat->url());\n    const QString authorization = u\"Bearer %1\"_s.arg(apiKey).trimmed();\n    QNetworkRequest request(apiUrl);\n    request.setHeader(QNetworkRequest::ContentTypeHeader, \"application/json\");\n    request.setRawHeader(\"Authorization\", authorization.toUtf8());\n#if defined(DEBUG)\n    qDebug() << \"ChatAPI::request\"\n             << \"API URL: \" << apiUrl.toString()\n             << \"Authorization: \" << authorization.toUtf8();\n#endif\n    m_networkManager = new QNetworkAccessManager(this);\n    QNetworkReply *reply = m_networkManager->post(request, array);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort);\n    connect(reply, &QNetworkReply::finished, this, &ChatAPIWorker::handleFinished);\n    connect(reply, &QNetworkReply::readyRead, this, &ChatAPIWorker::handleReadyRead);\n    connect(reply, &QNetworkReply::errorOccurred, this, &ChatAPIWorker::handleErrorOccurred);\n}\n\nvoid ChatAPIWorker::handleFinished()\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply) 
{\n        emit finished();\n        return;\n    }\n\n    QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);\n\n    if (!response.isValid()) {\n        m_chat->callResponse(\n            -1,\n            tr(\"ERROR: Network error occurred while connecting to the API server\")\n                .toStdString()\n        );\n        return;\n    }\n\n    bool ok;\n    int code = response.toInt(&ok);\n    if (!ok || code != 200) {\n        bool isReplyEmpty(reply->readAll().isEmpty());\n        if (isReplyEmpty)\n            m_chat->callResponse(\n                -1,\n                tr(\"ChatAPIWorker::handleFinished got HTTP Error %1 %2\")\n                    .arg(code)\n                    .arg(reply->errorString())\n                    .toStdString()\n            );\n        qWarning().noquote() << \"ERROR: ChatAPIWorker::handleFinished got HTTP Error\" << code << \"response:\"\n                             << reply->errorString();\n    }\n    reply->deleteLater();\n    emit finished();\n}\n\nvoid ChatAPIWorker::handleReadyRead()\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply) {\n        emit finished();\n        return;\n    }\n\n    QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);\n\n    if (!response.isValid())\n        return;\n\n    bool ok;\n    int code = response.toInt(&ok);\n    if (!ok || code != 200) {\n        m_chat->callResponse(\n            -1,\n            u\"ERROR: ChatAPIWorker::handleReadyRead got HTTP Error %1 %2: %3\"_s\n                .arg(code).arg(reply->errorString(), reply->readAll()).toStdString()\n        );\n        emit finished();\n        return;\n    }\n\n    while (reply->canReadLine()) {\n        QString jsonData = reply->readLine().trimmed();\n        if (jsonData.startsWith(\"data:\"))\n            jsonData.remove(0, 5);\n        jsonData = jsonData.trimmed();\n        if (jsonData.isEmpty())\n            continue;\n        if 
(jsonData == \"[DONE]\")\n            continue;\n#if defined(DEBUG)\n        qDebug().noquote() << \"line\" << jsonData;\n#endif\n        QJsonParseError err;\n        const QJsonDocument document = QJsonDocument::fromJson(jsonData.toUtf8(), &err);\n        if (err.error != QJsonParseError::NoError) {\n            m_chat->callResponse(-1, u\"ERROR: ChatAPI responded with invalid json \\\"%1\\\"\"_s\n                                         .arg(err.errorString()).toStdString());\n            continue;\n        }\n\n        const QJsonObject root = document.object();\n        const QJsonArray choices = root.value(\"choices\").toArray();\n        const QJsonObject choice = choices.first().toObject();\n        const QJsonObject delta = choice.value(\"delta\").toObject();\n        const QString content = delta.value(\"content\").toString();\n        m_currentResponse += content;\n        if (!m_chat->callResponse(0, content.toStdString())) {\n            reply->abort();\n            emit finished();\n            return;\n        }\n    }\n}\n\nvoid ChatAPIWorker::handleErrorOccurred(QNetworkReply::NetworkError code)\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply || reply->error() == QNetworkReply::OperationCanceledError /*when we call abort on purpose*/) {\n        emit finished();\n        return;\n    }\n\n    qWarning().noquote() << \"ERROR: ChatAPIWorker::handleErrorOccurred got HTTP Error\" << code << \"response:\"\n                         << reply->errorString();\n    emit finished();\n}\n"
  },
  {
    "path": "gpt4all-chat/src/chatapi.h",
    "content": "#ifndef CHATAPI_H\n#define CHATAPI_H\n\n#include <gpt4all-backend/llmodel.h>\n\n#include <QByteArray>\n#include <QNetworkReply>\n#include <QObject>\n#include <QString>\n#include <QtPreprocessorSupport>\n\n#include <cstddef>\n#include <cstdint>\n#include <span>\n#include <stdexcept>\n#include <string>\n#include <string_view>\n#include <unordered_map>\n#include <vector>\n\n// IWYU pragma: no_forward_declare QByteArray\nclass ChatAPI;\nclass QNetworkAccessManager;\n\n\nclass ChatAPIWorker : public QObject {\n    Q_OBJECT\npublic:\n    ChatAPIWorker(ChatAPI *chatAPI)\n        : QObject(nullptr)\n        , m_networkManager(nullptr)\n        , m_chat(chatAPI) {}\n    virtual ~ChatAPIWorker() {}\n\n    QString currentResponse() const { return m_currentResponse; }\n\n    void request(const QString &apiKey, const QByteArray &array);\n\nQ_SIGNALS:\n    void finished();\n\nprivate Q_SLOTS:\n    void handleFinished();\n    void handleReadyRead();\n    void handleErrorOccurred(QNetworkReply::NetworkError code);\n\nprivate:\n    ChatAPI *m_chat;\n    QNetworkAccessManager *m_networkManager;\n    QString m_currentResponse;\n};\n\nclass ChatAPI : public QObject, public LLModel {\n    Q_OBJECT\npublic:\n    ChatAPI();\n    virtual ~ChatAPI();\n\n    bool supportsEmbedding() const override { return false; }\n    bool supportsCompletion() const override { return true; }\n    bool loadModel(const std::string &modelPath, int n_ctx, int ngl) override;\n    bool isModelLoaded() const override;\n    size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) override;\n\n    // All three of the state virtual functions are handled custom inside of chatllm save/restore\n    size_t stateSize() const override\n    { throwNotImplemented(); }\n    size_t saveState(std::span<uint8_t> stateOut, std::vector<Token> &inputTokensOut) const override\n    { Q_UNUSED(stateOut); Q_UNUSED(inputTokensOut); throwNotImplemented(); }\n    size_t restoreState(std::span<const uint8_t> 
state, std::span<const Token> inputTokens) override\n    { Q_UNUSED(state); Q_UNUSED(inputTokens); throwNotImplemented(); }\n\n    void prompt(std::string_view        prompt,\n                const PromptCallback   &promptCallback,\n                const ResponseCallback &responseCallback,\n                const PromptContext    &ctx) override;\n\n    [[noreturn]]\n    int32_t countPromptTokens(std::string_view prompt) const override\n    { Q_UNUSED(prompt); throwNotImplemented(); }\n\n    void setThreadCount(int32_t n_threads) override;\n    int32_t threadCount() const override;\n\n    void setModelName(const QString &modelName) { m_modelName = modelName; }\n    void setAPIKey(const QString &apiKey) { m_apiKey = apiKey; }\n    void setRequestURL(const QString &requestURL) { m_requestURL = requestURL; }\n    QString url() const { return m_requestURL; }\n\n    bool callResponse(int32_t token, const std::string &string);\n\n    [[noreturn]]\n    int32_t contextLength() const override\n    { throwNotImplemented(); }\n\n    auto specialTokens() -> std::unordered_map<std::string, std::string> const override\n    { return {}; }\n\nQ_SIGNALS:\n    void request(const QString &apiKey, const QByteArray &array);\n\nprotected:\n    // We have to implement these as they are pure virtual in base class, but we don't actually use\n    // them as they are only called from the default implementation of 'prompt' which we override and\n    // completely replace\n\n    [[noreturn]]\n    static void throwNotImplemented() { throw std::logic_error(\"not implemented\"); }\n\n    [[noreturn]]\n    std::vector<Token> tokenize(std::string_view str) const override\n    { Q_UNUSED(str); throwNotImplemented(); }\n\n    [[noreturn]]\n    bool isSpecialToken(Token id) const override\n    { Q_UNUSED(id); throwNotImplemented(); }\n\n    [[noreturn]]\n    std::string tokenToString(Token id) const override\n    { Q_UNUSED(id); throwNotImplemented(); }\n\n    [[noreturn]]\n    void initSampler(const 
PromptContext &ctx) override\n    { Q_UNUSED(ctx); throwNotImplemented(); }\n\n    [[noreturn]]\n    Token sampleToken() const override\n    { throwNotImplemented(); }\n\n    [[noreturn]]\n    bool evalTokens(int32_t nPast, std::span<const Token> tokens) const override\n    { Q_UNUSED(nPast); Q_UNUSED(tokens); throwNotImplemented(); }\n\n    [[noreturn]]\n    void shiftContext(const PromptContext &promptCtx, int32_t *nPast) override\n    { Q_UNUSED(promptCtx); Q_UNUSED(nPast); throwNotImplemented(); }\n\n    [[noreturn]]\n    int32_t inputLength() const override\n    { throwNotImplemented(); }\n\n    [[noreturn]]\n    int32_t computeModelInputPosition(std::span<const Token> input) const override\n    { Q_UNUSED(input); throwNotImplemented(); }\n\n    [[noreturn]]\n    void setModelInputPosition(int32_t pos) override\n    { Q_UNUSED(pos); throwNotImplemented(); }\n\n    [[noreturn]]\n    void appendInputToken(Token tok) override\n    { Q_UNUSED(tok); throwNotImplemented(); }\n\n    [[noreturn]]\n    const std::vector<Token> &endTokens() const override\n    { throwNotImplemented(); }\n\n    [[noreturn]]\n    bool shouldAddBOS() const override\n    { throwNotImplemented(); }\n\n    [[noreturn]]\n    std::span<const Token> inputTokens() const override\n    { throwNotImplemented(); }\n\nprivate:\n    ResponseCallback m_responseCallback;\n    QString          m_modelName;\n    QString          m_apiKey;\n    QString          m_requestURL;\n};\n\n#endif // CHATAPI_H\n"
  },
  {
    "path": "gpt4all-chat/src/chatlistmodel.cpp",
    "content": "#include \"chatlistmodel.h\"\n\n#include \"mysettings.h\"\n\n#include <QCoreApplication>\n#include <QDataStream>\n#include <QDir>\n#include <QElapsedTimer>\n#include <QEvent>\n#include <QFile>\n#include <QFileInfo>\n#include <QGlobalStatic>\n#include <QGuiApplication>\n#include <QIODevice>\n#include <QSettings>\n#include <QStringList> // IWYU pragma: keep\n#include <Qt>\n#include <QtTypes>\n\n#include <algorithm>\n\n\nstatic constexpr quint32 CHAT_FORMAT_MAGIC   = 0xF5D553CC;\nstatic constexpr qint32  CHAT_FORMAT_VERSION = 12;\n\nclass MyChatListModel: public ChatListModel { };\nQ_GLOBAL_STATIC(MyChatListModel, chatListModelInstance)\nChatListModel *ChatListModel::globalInstance()\n{\n    return chatListModelInstance();\n}\n\nChatListModel::ChatListModel()\n    : QAbstractListModel(nullptr) {\n\n        QCoreApplication::instance()->installEventFilter(this);\n}\n\nbool ChatListModel::eventFilter(QObject *obj, QEvent *ev)\n{\n    if (obj == QCoreApplication::instance() && ev->type() == QEvent::LanguageChange)\n        emit dataChanged(index(0, 0), index(m_chats.size() - 1, 0));\n    return false;\n}\n\nvoid ChatListModel::loadChats()\n{\n    addChat();\n\n    ChatsRestoreThread *thread = new ChatsRestoreThread;\n    connect(thread, &ChatsRestoreThread::chatRestored, this, &ChatListModel::restoreChat, Qt::QueuedConnection);\n    connect(thread, &ChatsRestoreThread::finished, this, &ChatListModel::chatsRestoredFinished, Qt::QueuedConnection);\n    connect(thread, &ChatsRestoreThread::finished, thread, &QObject::deleteLater);\n    thread->start();\n\n    m_chatSaver = std::make_unique<ChatSaver>();\n    connect(this, &ChatListModel::requestSaveChats, m_chatSaver.get(), &ChatSaver::saveChats, Qt::QueuedConnection);\n    connect(m_chatSaver.get(), &ChatSaver::saveChatsFinished, this, &ChatListModel::saveChatsFinished, Qt::QueuedConnection);\n    // save chats on application quit\n    connect(QCoreApplication::instance(), &QCoreApplication::aboutToQuit, 
this, &ChatListModel::saveChatsSync);\n\n    connect(MySettings::globalInstance(), &MySettings::serverChatChanged, this, &ChatListModel::handleServerEnabledChanged);\n}\n\nvoid ChatListModel::removeChatFile(Chat *chat) const\n{\n    Q_ASSERT(chat != m_serverChat);\n    const QString savePath = MySettings::globalInstance()->modelPath();\n    QFile file(savePath + \"/gpt4all-\" + chat->id() + \".chat\");\n    if (!file.exists())\n        return;\n    bool success = file.remove();\n    if (!success)\n        qWarning() << \"ERROR: Couldn't remove chat file:\" << file.fileName();\n}\n\nChatSaver::ChatSaver()\n    : QObject(nullptr)\n{\n    moveToThread(&m_thread);\n    m_thread.start();\n}\n\nChatSaver::~ChatSaver()\n{\n    m_thread.quit();\n    m_thread.wait();\n}\n\nQVector<Chat *> ChatListModel::getChatsToSave() const\n{\n    QVector<Chat *> toSave;\n    for (auto *chat : m_chats)\n        if (chat != m_serverChat && !chat->isNewChat())\n            toSave << chat;\n    return toSave;\n}\n\nvoid ChatListModel::saveChats()\n{\n    auto toSave = getChatsToSave();\n    if (toSave.isEmpty()) {\n        emit saveChatsFinished();\n        return;\n    }\n\n    emit requestSaveChats(toSave);\n}\n\nvoid ChatListModel::saveChatsForQuit()\n{\n    saveChats();\n    m_startedFinalSave = true;\n}\n\nvoid ChatListModel::saveChatsSync()\n{\n    auto toSave = getChatsToSave();\n    if (!m_startedFinalSave && !toSave.isEmpty())\n        m_chatSaver->saveChats(toSave);\n}\n\nvoid ChatSaver::saveChats(const QVector<Chat *> &chats)\n{\n    // we can be called from the main thread instead of a worker thread at quit time, so take a lock\n    QMutexLocker locker(&m_mutex);\n\n    QElapsedTimer timer;\n    timer.start();\n    const QString savePath = MySettings::globalInstance()->modelPath();\n    qsizetype nSavedChats = 0;\n    for (Chat *chat : chats) {\n        if (!chat->needsSave())\n            continue;\n        ++nSavedChats;\n\n        QString fileName = \"gpt4all-\" + chat->id() 
+ \".chat\";\n        QString filePath = savePath + \"/\" + fileName;\n        QFile originalFile(filePath);\n        QFile tempFile(filePath + \".tmp\"); // Temporary file\n\n        bool success = tempFile.open(QIODevice::WriteOnly);\n        if (!success) {\n            qWarning() << \"ERROR: Couldn't save chat to temporary file:\" << tempFile.fileName();\n            continue;\n        }\n        QDataStream out(&tempFile);\n\n        out << CHAT_FORMAT_MAGIC;\n        out << CHAT_FORMAT_VERSION;\n        out.setVersion(QDataStream::Qt_6_2);\n\n        qDebug() << \"serializing chat\" << fileName;\n        if (!chat->serialize(out, CHAT_FORMAT_VERSION)) {\n            qWarning() << \"ERROR: Couldn't serialize chat to file:\" << tempFile.fileName();\n            tempFile.remove();\n            continue;\n        }\n\n        chat->setNeedsSave(false);\n        if (originalFile.exists())\n            originalFile.remove();\n        tempFile.rename(filePath);\n    }\n\n    qint64 elapsedTime = timer.elapsed();\n    qDebug() << \"serializing chats took\" << elapsedTime << \"ms, saved\" << nSavedChats << \"/\" << chats.size() << \"chats\";\n    emit saveChatsFinished();\n}\n\nvoid ChatsRestoreThread::run()\n{\n    QElapsedTimer timer;\n    timer.start();\n    struct FileInfo {\n        bool oldFile;\n        qint64 creationDate;\n        QString file;\n    };\n    QList<FileInfo> files;\n    {\n        // Look for any files in the original spot which was the settings config directory\n        QSettings settings;\n        QFileInfo settingsInfo(settings.fileName());\n        QString settingsPath = settingsInfo.absolutePath();\n        QDir dir(settingsPath);\n        dir.setNameFilters(QStringList() << \"gpt4all-*.chat\");\n        QStringList fileNames = dir.entryList();\n        for (const QString &f : fileNames) {\n            QString filePath = settingsPath + \"/\" + f;\n            QFile file(filePath);\n            bool success = 
file.open(QIODevice::ReadOnly);\n            if (!success) {\n                qWarning() << \"ERROR: Couldn't restore chat from file:\" << file.fileName();\n                continue;\n            }\n            QDataStream in(&file);\n            FileInfo info;\n            info.oldFile = true;\n            info.file = filePath;\n            in >> info.creationDate;\n            files.append(info);\n            file.close();\n        }\n    }\n    {\n        const QString savePath = MySettings::globalInstance()->modelPath();\n        QDir dir(savePath);\n        dir.setNameFilters(QStringList() << \"gpt4all-*.chat\");\n        QStringList fileNames = dir.entryList();\n        for (const QString &f : fileNames) {\n            QString filePath = savePath + \"/\" + f;\n            QFile file(filePath);\n            bool success = file.open(QIODevice::ReadOnly);\n            if (!success) {\n                qWarning() << \"ERROR: Couldn't restore chat from file:\" << file.fileName();\n                continue;\n            }\n            QDataStream in(&file);\n            // Read and check the header\n            quint32 magic;\n            in >> magic;\n            if (magic != CHAT_FORMAT_MAGIC) {\n                qWarning() << \"ERROR: Chat file has bad magic:\" << file.fileName();\n                continue;\n            }\n\n            // Read the version\n            qint32 version;\n            in >> version;\n            if (version < 1) {\n                qWarning() << \"WARNING: Chat file version\" << version << \"is not supported:\" << file.fileName();\n                continue;\n            }\n            if (version > CHAT_FORMAT_VERSION) {\n                qWarning().nospace() << \"WARNING: Chat file is from a future version (have \" << version << \" want \"\n                                     << CHAT_FORMAT_VERSION << \"): \" << file.fileName();\n                continue;\n            }\n\n            if (version < 2)\n                
in.setVersion(QDataStream::Qt_6_2);\n\n            FileInfo info;\n            info.oldFile = false;\n            info.file = filePath;\n            in >> info.creationDate;\n            files.append(info);\n            file.close();\n        }\n    }\n    std::sort(files.begin(), files.end(), [](const FileInfo &a, const FileInfo &b) {\n        return a.creationDate > b.creationDate;\n    });\n\n    for (FileInfo &f : files) {\n        QFile file(f.file);\n        bool success = file.open(QIODevice::ReadOnly);\n        if (!success) {\n            qWarning() << \"ERROR: Couldn't restore chat from file:\" << file.fileName();\n            continue;\n        }\n        QDataStream in(&file);\n\n        qint32 version = 0;\n        if (!f.oldFile) {\n            // Read and check the header\n            quint32 magic;\n            in >> magic;\n            if (magic != CHAT_FORMAT_MAGIC) {\n                qWarning() << \"ERROR: Chat file has bad magic:\" << file.fileName();\n                continue;\n            }\n\n            // Read the version\n            in >> version;\n            if (version < 1) {\n                qWarning() << \"ERROR: Chat file has non supported version:\" << file.fileName();\n                continue;\n            }\n\n            if (version < 2)\n                in.setVersion(QDataStream::Qt_6_2);\n        }\n\n        qDebug() << \"deserializing chat\" << f.file;\n\n        auto chat = std::make_unique<Chat>();\n        chat->moveToThread(qGuiApp->thread());\n        bool ok = chat->deserialize(in, version);\n        if (!ok) {\n            qWarning() << \"ERROR: Couldn't deserialize chat from file:\" << file.fileName();\n        } else if (!in.atEnd()) {\n            qWarning().nospace() << \"error loading chat from \" << file.fileName() << \": extra data at end of file\";\n        } else {\n            emit chatRestored(chat.release());\n        }\n        if (f.oldFile)\n           file.remove(); // No longer storing in this 
directory\n        file.close();\n    }\n\n    qint64 elapsedTime = timer.elapsed();\n    qDebug() << \"deserializing chats took:\" << elapsedTime << \"ms\";\n}\n\nvoid ChatListModel::restoreChat(Chat *chat)\n{\n    chat->setParent(this);\n    connect(chat, &Chat::nameChanged, this, &ChatListModel::nameChanged);\n\n    beginInsertRows(QModelIndex(), m_chats.size(), m_chats.size());\n    m_chats.append(chat);\n    endInsertRows();\n}\n\nvoid ChatListModel::chatsRestoredFinished()\n{\n    addServerChat();\n}\n\nvoid ChatListModel::handleServerEnabledChanged()\n{\n    if (MySettings::globalInstance()->serverChat() || m_serverChat != m_currentChat)\n        return;\n\n    Chat *nextChat = get(0);\n    Q_ASSERT(nextChat && nextChat != m_serverChat);\n    setCurrentChat(nextChat);\n}\n"
  },
  {
    "path": "gpt4all-chat/src/chatlistmodel.h",
    "content": "#ifndef CHATLISTMODEL_H\n#define CHATLISTMODEL_H\n\n#include \"chat.h\"\n#include \"chatllm.h\"\n#include \"chatmodel.h\"\n\n#include <QAbstractListModel>\n#include <QByteArray>\n#include <QDate>\n#include <QDebug>\n#include <QHash>\n#include <QList>\n#include <QMutex>\n#include <QObject>\n#include <QString>\n#include <QThread>\n#include <QVariant>\n#include <QVector> // IWYU pragma: keep\n#include <Qt>\n#include <QtAssert>\n#include <QtLogging>\n#include <QtPreprocessorSupport>\n\n#include <memory>\n\n\nclass ChatsRestoreThread : public QThread\n{\n    Q_OBJECT\npublic:\n    void run() override;\n\nQ_SIGNALS:\n    void chatRestored(Chat *chat);\n};\n\nclass ChatSaver : public QObject\n{\n    Q_OBJECT\npublic:\n    explicit ChatSaver();\n    ~ChatSaver() override;\n\nQ_SIGNALS:\n    void saveChatsFinished();\n\npublic Q_SLOTS:\n    void saveChats(const QVector<Chat*> &chats);\n\nprivate:\n    QThread m_thread;\n    QMutex  m_mutex;\n};\n\nclass ChatListModel : public QAbstractListModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\n    Q_PROPERTY(Chat *currentChat READ currentChat WRITE setCurrentChat NOTIFY currentChatChanged)\n\npublic:\n    static ChatListModel *globalInstance();\n\n    enum Roles {\n        IdRole = Qt::UserRole + 1,\n        NameRole,\n        SectionRole\n    };\n\n    int rowCount(const QModelIndex &parent = QModelIndex()) const override\n    {\n        Q_UNUSED(parent)\n        return m_chats.size();\n    }\n\n    QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override\n    {\n        if (!index.isValid() || index.row() < 0 || index.row() >= m_chats.size())\n            return QVariant();\n\n        const Chat *item = m_chats.at(index.row());\n        switch (role) {\n            case IdRole:\n                return item->id();\n            case NameRole:\n                return item->name();\n            case SectionRole: {\n                if (item == m_serverChat)\n  
                  return QString();\n                const QDate date = QDate::currentDate();\n                const QDate itemDate = item->creationDate().date();\n                if (date == itemDate)\n                    return tr(\"TODAY\");\n                else if (itemDate >= date.addDays(-7))\n                    return tr(\"THIS WEEK\");\n                else if (itemDate >= date.addMonths(-1))\n                    return tr(\"THIS MONTH\");\n                else if (itemDate >= date.addMonths(-6))\n                    return tr(\"LAST SIX MONTHS\");\n                else if (itemDate.year() == date.year())\n                    return tr(\"THIS YEAR\");\n                else if (itemDate.year() == date.year() - 1)\n                    return tr(\"LAST YEAR\");\n                else\n                    return QString::number(itemDate.year());\n            }\n        }\n\n        return QVariant();\n    }\n\n    QHash<int, QByteArray> roleNames() const override\n    {\n        QHash<int, QByteArray> roles;\n        roles[IdRole] = \"id\";\n        roles[NameRole] = \"name\";\n        roles[SectionRole] = \"section\";\n        return roles;\n    }\n\n    bool shouldSaveChats() const;\n    void setShouldSaveChats(bool b);\n\n    bool shouldSaveChatGPTChats() const;\n    void setShouldSaveChatGPTChats(bool b);\n\n    Q_INVOKABLE void loadChats();\n\n    Q_INVOKABLE void addChat()\n    {\n        // Select the existing new chat if we already have one\n        if (m_newChat) {\n            setCurrentChat(m_newChat);\n            return;\n        }\n\n        // Create a new chat pointer and connect it to determine when it is populated\n        m_newChat = new Chat(this);\n        connect(m_newChat->chatModel(), &ChatModel::countChanged,\n            this, &ChatListModel::newChatCountChanged);\n        connect(m_newChat, &Chat::nameChanged,\n            this, &ChatListModel::nameChanged);\n\n        beginInsertRows(QModelIndex(), 0, 0);\n        
m_chats.prepend(m_newChat);\n        endInsertRows();\n        emit countChanged();\n        setCurrentChat(m_newChat);\n    }\n\n    Q_INVOKABLE void addServerChat()\n    {\n        // Create a new dummy chat pointer and don't connect it\n        if (m_serverChat)\n            return;\n\n        m_serverChat = new Chat(Chat::server_tag, this);\n        beginInsertRows(QModelIndex(), m_chats.size(), m_chats.size());\n        m_chats.append(m_serverChat);\n        endInsertRows();\n        emit countChanged();\n    }\n\n    Q_INVOKABLE void removeChat(Chat* chat)\n    {\n        Q_ASSERT(chat != m_serverChat);\n        if (!m_chats.contains(chat)) {\n            qWarning() << \"WARNING: Removing chat failed with id\" << chat->id();\n            return;\n        }\n\n        removeChatFile(chat);\n\n        if (chat == m_newChat) {\n            m_newChat->disconnect(this);\n            m_newChat = nullptr;\n        }\n\n        chat->markForDeletion();\n\n        const int index = m_chats.indexOf(chat);\n        if (m_chats.count() < 3 /*m_serverChat included*/) {\n            addChat();\n        } else {\n            int nextIndex;\n            if (index == m_chats.count() - 2 /*m_serverChat is last*/)\n                nextIndex = index - 1;\n            else\n                nextIndex = index + 1;\n            Chat *nextChat = get(nextIndex);\n            Q_ASSERT(nextChat);\n            setCurrentChat(nextChat);\n        }\n\n        const int newIndex = m_chats.indexOf(chat);\n        beginRemoveRows(QModelIndex(), newIndex, newIndex);\n        m_chats.removeAll(chat);\n        endRemoveRows();\n        chat->unloadAndDeleteLater();\n    }\n\n    Chat *currentChat() const\n    {\n        return m_currentChat;\n    }\n\n    void setCurrentChat(Chat *chat)\n    {\n        if (!m_chats.contains(chat)) {\n            qWarning() << \"ERROR: Setting current chat failed with id\" << chat->id();\n            return;\n        }\n\n        if (m_currentChat && 
m_currentChat != m_serverChat)\n            m_currentChat->unloadModel();\n        m_currentChat = chat;\n        emit currentChatChanged();\n        if (!m_currentChat->isModelLoaded() && m_currentChat != m_serverChat)\n            m_currentChat->trySwitchContextOfLoadedModel();\n    }\n\n    Q_INVOKABLE Chat* get(int index)\n    {\n        if (index < 0 || index >= m_chats.size()) return nullptr;\n        return m_chats.at(index);\n    }\n\n    int count() const { return m_chats.size(); }\n\n    // stop ChatLLM threads for clean shutdown\n    void destroyChats()\n    {\n        for (auto *chat: m_chats) { chat->destroy(); }\n        ChatLLM::destroyStore();\n    }\n\n    void removeChatFile(Chat *chat) const;\n    Q_INVOKABLE void saveChats();\n    Q_INVOKABLE void saveChatsForQuit();\n    void restoreChat(Chat *chat);\n    void chatsRestoredFinished();\n\npublic Q_SLOTS:\n    void handleServerEnabledChanged();\n\nQ_SIGNALS:\n    void countChanged();\n    void currentChatChanged();\n    void requestSaveChats(const QVector<Chat*> &);\n    void saveChatsFinished();\n\nprotected:\n    bool eventFilter(QObject *obj, QEvent *ev) override;\n\nprivate Q_SLOTS:\n    // Used with QCoreApplication::aboutToQuit. Does not require an event loop.\n    void saveChatsSync();\n\n    void newChatCountChanged()\n    {\n        Q_ASSERT(m_newChat && m_newChat->chatModel()->count());\n        m_newChat->chatModel()->disconnect(this);\n        m_newChat = nullptr;\n    }\n\n    void nameChanged()\n    {\n        Chat *chat = qobject_cast<Chat *>(sender());\n        if (!chat)\n            return;\n\n        int row = m_chats.indexOf(chat);\n        if (row < 0 || row >= m_chats.size())\n            return;\n\n        QModelIndex index = createIndex(row, 0);\n        emit dataChanged(index, index, {NameRole});\n    }\n\n    void printChats()\n    {\n        for (auto c : m_chats) {\n            qDebug() << c->name()\n                << (c == m_currentChat ? 
\"currentChat: true\" : \"currentChat: false\")\n                << (c == m_newChat ? \"newChat: true\" : \"newChat: false\");\n        }\n    }\n\nprivate:\n    QVector<Chat *> getChatsToSave() const;\n\nprivate:\n    Chat* m_newChat = nullptr;\n    Chat* m_serverChat = nullptr;\n    Chat* m_currentChat = nullptr;\n    QList<Chat*> m_chats;\n    std::unique_ptr<ChatSaver> m_chatSaver;\n    bool m_startedFinalSave = false;\n\nprivate:\n    explicit ChatListModel();\n    ~ChatListModel() {}\n    friend class MyChatListModel;\n};\n\n#endif // CHATITEMMODEL_H\n"
  },
  {
    "path": "gpt4all-chat/src/chatllm.cpp",
    "content": "#include \"chatllm.h\"\n\n#include \"chat.h\"\n#include \"chatapi.h\"\n#include \"chatmodel.h\"\n#include \"jinja_helpers.h\"\n#include \"localdocs.h\"\n#include \"mysettings.h\"\n#include \"network.h\"\n#include \"tool.h\"\n#include \"toolmodel.h\"\n#include \"toolcallparser.h\"\n\n#include <fmt/format.h>\n#include <minja/minja.hpp>\n#include <nlohmann/json.hpp>\n\n#include <QChar>\n#include <QDataStream>\n#include <QDebug>\n#include <QFile>\n#include <QGlobalStatic>\n#include <QIODevice> // IWYU pragma: keep\n#include <QJsonDocument>\n#include <QJsonObject>\n#include <QJsonValue>\n#include <QMap>\n#include <QMutex> // IWYU pragma: keep\n#include <QMutexLocker> // IWYU pragma: keep\n#include <QRegularExpression> // IWYU pragma: keep\n#include <QRegularExpressionMatch> // IWYU pragma: keep\n#include <QSet>\n#include <QStringView>\n#include <QTextStream>\n#include <QUrl>\n#include <QVariant>\n#include <QWaitCondition>\n#include <Qt>\n#include <QtAssert>\n#include <QtLogging>\n#include <QtTypes> // IWYU pragma: keep\n\n#include <algorithm>\n#include <chrono>\n#include <cmath>\n#include <concepts>\n#include <cstddef>\n#include <cstdint>\n#include <ctime>\n#include <exception>\n#include <functional>\n#include <iomanip>\n#include <limits>\n#include <optional>\n#include <ranges>\n#include <regex>\n#include <span>\n#include <sstream>\n#include <stdexcept>\n#include <string_view>\n#include <tuple>\n#include <utility>\n#include <vector>\n\nusing namespace Qt::Literals::StringLiterals;\nusing namespace ToolEnums;\nnamespace ranges = std::ranges;\nusing json = nlohmann::ordered_json;\n\n//#define DEBUG\n//#define DEBUG_MODEL_LOADING\n\n// NOTE: not threadsafe\nstatic const std::shared_ptr<minja::Context> &jinjaEnv()\n{\n    static std::shared_ptr<minja::Context> environment;\n    if (!environment) {\n        environment = minja::Context::builtins();\n        environment->set(\"strftime_now\", minja::simple_function(\n            \"strftime_now\", { \"format\" 
},\n            [](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {\n                auto format = args.at(\"format\").get<std::string>();\n                using Clock = std::chrono::system_clock;\n                time_t nowUnix = Clock::to_time_t(Clock::now());\n                auto localDate = *std::localtime(&nowUnix);\n                std::ostringstream ss;\n                ss << std::put_time(&localDate, format.c_str());\n                return ss.str();\n            }\n        ));\n        environment->set(\"regex_replace\", minja::simple_function(\n            \"regex_replace\", { \"str\", \"pattern\", \"repl\" },\n            [](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {\n                auto str     = args.at(\"str\"    ).get<std::string>();\n                auto pattern = args.at(\"pattern\").get<std::string>();\n                auto repl    = args.at(\"repl\"   ).get<std::string>();\n                return std::regex_replace(str, std::regex(pattern), repl);\n            }\n        ));\n    }\n    return environment;\n}\n\nclass BaseResponseHandler {\npublic:\n    virtual void onSplitIntoTwo    (const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) = 0;\n    virtual void onSplitIntoThree  (const QString &secondBuffer, const QString &thirdBuffer)                          = 0;\n    // \"old-style\" responses, with all of the implementation details left in\n    virtual void onOldResponseChunk(const QByteArray &chunk)                                                          = 0;\n    // notify of a \"new-style\" response that has been cleaned of tool calling\n    virtual bool onBufferResponse  (const QString &response, int bufferIdx)                                           = 0;\n    // notify of a \"new-style\" response, no tool calling applicable\n    virtual bool onRegularResponse ()                                                                                 
= 0;\n    virtual bool getStopGenerating () const                                                                           = 0;\n};\n\nstatic auto promptModelWithTools(\n    LLModel *model, const LLModel::PromptCallback &promptCallback, BaseResponseHandler &respHandler,\n    const LLModel::PromptContext &ctx, const QByteArray &prompt, const QStringList &toolNames\n) -> std::pair<QStringList, bool>\n{\n    ToolCallParser toolCallParser(toolNames);\n    auto handleResponse = [&toolCallParser, &respHandler](LLModel::Token token, std::string_view piece) -> bool {\n        Q_UNUSED(token)\n\n        toolCallParser.update(piece.data());\n\n        // Split the response into two if needed\n        if (toolCallParser.numberOfBuffers() < 2 && toolCallParser.splitIfPossible()) {\n            const auto parseBuffers = toolCallParser.buffers();\n            Q_ASSERT(parseBuffers.size() == 2);\n            respHandler.onSplitIntoTwo(toolCallParser.startTag(), parseBuffers.at(0), parseBuffers.at(1));\n        }\n\n        // Split the response into three if needed\n        if (toolCallParser.numberOfBuffers() < 3 && toolCallParser.startTag() == ToolCallConstants::ThinkStartTag\n            && toolCallParser.splitIfPossible()) {\n            const auto parseBuffers = toolCallParser.buffers();\n            Q_ASSERT(parseBuffers.size() == 3);\n            respHandler.onSplitIntoThree(parseBuffers.at(1), parseBuffers.at(2));\n        }\n\n        respHandler.onOldResponseChunk(QByteArray::fromRawData(piece.data(), piece.size()));\n\n        bool ok;\n        const auto parseBuffers = toolCallParser.buffers();\n        if (parseBuffers.size() > 1) {\n            ok = respHandler.onBufferResponse(parseBuffers.last(), parseBuffers.size() - 1);\n        } else {\n            ok = respHandler.onRegularResponse();\n        }\n        if (!ok)\n            return false;\n\n        const bool shouldExecuteToolCall = toolCallParser.state() == ToolEnums::ParseState::Complete\n            && 
toolCallParser.startTag() != ToolCallConstants::ThinkStartTag;\n\n        return !shouldExecuteToolCall && !respHandler.getStopGenerating();\n    };\n    model->prompt(std::string_view(prompt), promptCallback, handleResponse, ctx);\n\n    const bool shouldExecuteToolCall = toolCallParser.state() == ToolEnums::ParseState::Complete\n        && toolCallParser.startTag() != ToolCallConstants::ThinkStartTag;\n\n    return { toolCallParser.buffers(), shouldExecuteToolCall };\n}\n\nclass LLModelStore {\npublic:\n    static LLModelStore *globalInstance();\n\n    LLModelInfo acquireModel(); // will block until llmodel is ready\n    void releaseModel(LLModelInfo &&info); // must be called when you are done\n    void destroy();\n\nprivate:\n    LLModelStore()\n    {\n        // seed with empty model\n        m_availableModel = LLModelInfo();\n    }\n    ~LLModelStore() {}\n    std::optional<LLModelInfo> m_availableModel;\n    QMutex m_mutex;\n    QWaitCondition m_condition;\n    friend class MyLLModelStore;\n};\n\nclass MyLLModelStore : public LLModelStore { };\nQ_GLOBAL_STATIC(MyLLModelStore, storeInstance)\nLLModelStore *LLModelStore::globalInstance()\n{\n    return storeInstance();\n}\n\nLLModelInfo LLModelStore::acquireModel()\n{\n    QMutexLocker locker(&m_mutex);\n    while (!m_availableModel)\n        m_condition.wait(locker.mutex());\n    auto first = std::move(*m_availableModel);\n    m_availableModel.reset();\n    return first;\n}\n\nvoid LLModelStore::releaseModel(LLModelInfo &&info)\n{\n    QMutexLocker locker(&m_mutex);\n    Q_ASSERT(!m_availableModel);\n    m_availableModel = std::move(info);\n    m_condition.wakeAll();\n}\n\nvoid LLModelStore::destroy()\n{\n    QMutexLocker locker(&m_mutex);\n    m_availableModel.reset();\n}\n\nvoid LLModelInfo::resetModel(ChatLLM *cllm, LLModel *model) {\n    this->model.reset(model);\n    fallbackReason.reset();\n    emit cllm->loadedModelInfoChanged();\n}\n\nChatLLM::ChatLLM(Chat *parent, bool isServer)\n    : 
QObject{nullptr}\n    , m_chat(parent)\n    , m_shouldBeLoaded(false)\n    , m_forceUnloadModel(false)\n    , m_markedForDeletion(false)\n    , m_stopGenerating(false)\n    , m_timer(nullptr)\n    , m_isServer(isServer)\n    , m_forceMetal(MySettings::globalInstance()->forceMetal())\n    , m_reloadingToChangeVariant(false)\n    , m_chatModel(parent->chatModel())\n{\n    moveToThread(&m_llmThread);\n    connect(this, &ChatLLM::shouldBeLoadedChanged, this, &ChatLLM::handleShouldBeLoadedChanged,\n        Qt::QueuedConnection); // explicitly queued\n    connect(this, &ChatLLM::trySwitchContextRequested, this, &ChatLLM::trySwitchContextOfLoadedModel,\n        Qt::QueuedConnection); // explicitly queued\n    connect(parent, &Chat::idChanged, this, &ChatLLM::handleChatIdChanged);\n    connect(&m_llmThread, &QThread::started, this, &ChatLLM::handleThreadStarted);\n    connect(MySettings::globalInstance(), &MySettings::forceMetalChanged, this, &ChatLLM::handleForceMetalChanged);\n    connect(MySettings::globalInstance(), &MySettings::deviceChanged, this, &ChatLLM::handleDeviceChanged);\n\n    // The following are blocking operations and will block the llm thread\n    connect(this, &ChatLLM::requestRetrieveFromDB, LocalDocs::globalInstance()->database(), &Database::retrieveFromDB,\n        Qt::BlockingQueuedConnection);\n\n    m_llmThread.setObjectName(parent->id());\n    m_llmThread.start();\n}\n\nChatLLM::~ChatLLM()\n{\n    destroy();\n}\n\nvoid ChatLLM::destroy()\n{\n    m_stopGenerating = true;\n    m_llmThread.quit();\n    m_llmThread.wait();\n\n    // The only time we should have a model loaded here is on shutdown\n    // as we explicitly unload the model in all other circumstances\n    if (isModelLoaded()) {\n        m_llModelInfo.resetModel(this);\n    }\n}\n\nvoid ChatLLM::destroyStore()\n{\n    LLModelStore::globalInstance()->destroy();\n}\n\nvoid ChatLLM::handleThreadStarted()\n{\n    m_timer = new TokenTimer(this);\n    connect(m_timer, &TokenTimer::report, this, 
&ChatLLM::reportSpeed);\n    emit threadStarted();\n}\n\nvoid ChatLLM::handleForceMetalChanged(bool forceMetal)\n{\n#if defined(Q_OS_MAC) && defined(__aarch64__)\n    m_forceMetal = forceMetal;\n    if (isModelLoaded() && m_shouldBeLoaded) {\n        m_reloadingToChangeVariant = true;\n        unloadModel();\n        reloadModel();\n        m_reloadingToChangeVariant = false;\n    }\n#else\n    Q_UNUSED(forceMetal);\n#endif\n}\n\nvoid ChatLLM::handleDeviceChanged()\n{\n    if (isModelLoaded() && m_shouldBeLoaded) {\n        m_reloadingToChangeVariant = true;\n        unloadModel();\n        reloadModel();\n        m_reloadingToChangeVariant = false;\n    }\n}\n\nbool ChatLLM::loadDefaultModel()\n{\n    ModelInfo defaultModel = ModelList::globalInstance()->defaultModelInfo();\n    if (defaultModel.filename().isEmpty()) {\n        emit modelLoadingError(u\"Could not find any model to load\"_s);\n        return false;\n    }\n    return loadModel(defaultModel);\n}\n\nvoid ChatLLM::trySwitchContextOfLoadedModel(const ModelInfo &modelInfo)\n{\n    // We're trying to see if the store already has the model fully loaded that we wish to use\n    // and if so we just acquire it from the store and switch the context and return true. 
If the\n    // store doesn't have it or we're already loaded or in any other case just return false.\n\n    // If we're already loaded or a server or we're reloading to change the variant/device or the\n    // modelInfo is empty, then this should fail\n    if (\n        isModelLoaded() || m_isServer || m_reloadingToChangeVariant || modelInfo.name().isEmpty() || !m_shouldBeLoaded\n    ) {\n        emit trySwitchContextOfLoadedModelCompleted(0);\n        return;\n    }\n\n    QString filePath = modelInfo.dirpath + modelInfo.filename();\n    QFileInfo fileInfo(filePath);\n\n    acquireModel();\n#if defined(DEBUG_MODEL_LOADING)\n        qDebug() << \"acquired model from store\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n\n    // The store gave us no already loaded model, the wrong type of model, then give it back to the\n    // store and fail\n    if (!m_llModelInfo.model || m_llModelInfo.fileInfo != fileInfo || !m_shouldBeLoaded) {\n        LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n        emit trySwitchContextOfLoadedModelCompleted(0);\n        return;\n    }\n\n#if defined(DEBUG_MODEL_LOADING)\n    qDebug() << \"store had our model\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n\n    emit trySwitchContextOfLoadedModelCompleted(2);\n    emit modelLoadingPercentageChanged(1.0f);\n    emit trySwitchContextOfLoadedModelCompleted(0);\n}\n\nbool ChatLLM::loadModel(const ModelInfo &modelInfo)\n{\n    // This is a complicated method because N different possible threads are interested in the outcome\n    // of this method. Why? Because we have a main/gui thread trying to monitor the state of N different\n    // possible chat threads all vying for a single resource - the currently loaded model - as the user\n    // switches back and forth between chats. 
It is important for our main/gui thread to never block\n    // but simultaneously always have up2date information with regards to which chat has the model loaded\n    // and what the type and name of that model is. I've tried to comment extensively in this method\n    // to provide an overview of what we're doing here.\n\n    if (isModelLoaded() && this->modelInfo() == modelInfo) {\n        // already acquired -> keep it\n        return true; // already loaded\n    }\n\n    // reset status\n    emit modelLoadingPercentageChanged(std::numeric_limits<float>::min()); // small non-zero positive value\n    emit modelLoadingError(\"\");\n\n    QString filePath = modelInfo.dirpath + modelInfo.filename();\n    QFileInfo fileInfo(filePath);\n\n    // We have a live model, but it isn't the one we want\n    bool alreadyAcquired = isModelLoaded();\n    if (alreadyAcquired) {\n#if defined(DEBUG_MODEL_LOADING)\n        qDebug() << \"already acquired model deleted\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n        m_llModelInfo.resetModel(this);\n    } else if (!m_isServer) {\n        // This is a blocking call that tries to retrieve the model we need from the model store.\n        // If it succeeds, then we just have to restore state. If the store has never had a model\n        // returned to it, then the modelInfo.model pointer should be null which will happen on startup\n        acquireModel();\n#if defined(DEBUG_MODEL_LOADING)\n        qDebug() << \"acquired model from store\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n        // At this point it is possible that while we were blocked waiting to acquire the model from the\n        // store, that our state was changed to not be loaded. 
If this is the case, release the model\n        // back into the store and quit loading\n        if (!m_shouldBeLoaded) {\n#if defined(DEBUG_MODEL_LOADING)\n            qDebug() << \"no longer need model\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n            LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n            emit modelLoadingPercentageChanged(0.0f);\n            return false;\n        }\n\n        // Check if the store just gave us exactly the model we were looking for\n        if (m_llModelInfo.model && m_llModelInfo.fileInfo == fileInfo && !m_reloadingToChangeVariant) {\n#if defined(DEBUG_MODEL_LOADING)\n            qDebug() << \"store had our model\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n            emit modelLoadingPercentageChanged(1.0f);\n            setModelInfo(modelInfo);\n            Q_ASSERT(!m_modelInfo.filename().isEmpty());\n            if (m_modelInfo.filename().isEmpty())\n                emit modelLoadingError(u\"Modelinfo is left null for %1\"_s.arg(modelInfo.filename()));\n            return true;\n        } else {\n            // Release the memory since we have to switch to a different model.\n#if defined(DEBUG_MODEL_LOADING)\n            qDebug() << \"deleting model\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n            m_llModelInfo.resetModel(this);\n        }\n    }\n\n    // Guarantee we've released the previous models memory\n    Q_ASSERT(!m_llModelInfo.model);\n\n    // Store the file info in the modelInfo in case we have an error loading\n    m_llModelInfo.fileInfo = fileInfo;\n\n    if (fileInfo.exists()) {\n        QVariantMap modelLoadProps;\n        if (modelInfo.isOnline) {\n            QString apiKey;\n            QString requestUrl;\n            QString modelName;\n            {\n                QFile file(filePath);\n                bool success = file.open(QIODeviceBase::ReadOnly);\n                (void)success;\n 
               Q_ASSERT(success);\n                QJsonDocument doc = QJsonDocument::fromJson(file.readAll());\n                QJsonObject obj = doc.object();\n                apiKey = obj[\"apiKey\"].toString();\n                modelName = obj[\"modelName\"].toString();\n                if (modelInfo.isCompatibleApi) {\n                    QString baseUrl(obj[\"baseUrl\"].toString());\n                    QUrl apiUrl(QUrl::fromUserInput(baseUrl));\n                    if (!Network::isHttpUrlValid(apiUrl)) {\n                        return false;\n                    }\n                    QString currentPath(apiUrl.path());\n                    QString suffixPath(\"%1/chat/completions\");\n                    apiUrl.setPath(suffixPath.arg(currentPath));\n                    requestUrl = apiUrl.toString();\n                } else {\n                    requestUrl = modelInfo.url();\n                }\n            }\n            m_llModelType = LLModelTypeV1::API;\n            ChatAPI *model = new ChatAPI();\n            model->setModelName(modelName);\n            model->setRequestURL(requestUrl);\n            model->setAPIKey(apiKey);\n            m_llModelInfo.resetModel(this, model);\n        } else if (!loadNewModel(modelInfo, modelLoadProps)) {\n            return false; // m_shouldBeLoaded became false\n        }\n#if defined(DEBUG_MODEL_LOADING)\n        qDebug() << \"new model\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n#if defined(DEBUG)\n        qDebug() << \"modelLoadedChanged\" << m_llmThread.objectName();\n        fflush(stdout);\n#endif\n        emit modelLoadingPercentageChanged(isModelLoaded() ? 
1.0f : 0.0f);\n        emit loadedModelInfoChanged();\n\n        modelLoadProps.insert(\"requestedDevice\", MySettings::globalInstance()->device());\n        modelLoadProps.insert(\"model\", modelInfo.filename());\n        Network::globalInstance()->trackChatEvent(\"model_load\", modelLoadProps);\n    } else {\n        if (!m_isServer)\n            LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo)); // release back into the store\n        resetModel();\n        emit modelLoadingError(u\"Could not find file for model %1\"_s.arg(modelInfo.filename()));\n    }\n\n    if (m_llModelInfo.model)\n        setModelInfo(modelInfo);\n    return bool(m_llModelInfo.model);\n}\n\n/* Returns false if the model should no longer be loaded (!m_shouldBeLoaded).\n * Otherwise returns true, even on error. */\nbool ChatLLM::loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelLoadProps)\n{\n    QElapsedTimer modelLoadTimer;\n    modelLoadTimer.start();\n\n    QString requestedDevice = MySettings::globalInstance()->device();\n    int n_ctx = MySettings::globalInstance()->modelContextLength(modelInfo);\n    int ngl = MySettings::globalInstance()->modelGpuLayers(modelInfo);\n\n    std::string backend = \"auto\";\n#ifdef Q_OS_MAC\n    if (requestedDevice == \"CPU\") {\n        backend = \"cpu\";\n    } else if (m_forceMetal) {\n#ifdef __aarch64__\n        backend = \"metal\";\n#endif\n    }\n#else // !defined(Q_OS_MAC)\n    if (requestedDevice.startsWith(\"CUDA: \"))\n        backend = \"cuda\";\n#endif\n\n    QString filePath = modelInfo.dirpath + modelInfo.filename();\n\n    auto construct = [this, &filePath, &modelInfo, &modelLoadProps, n_ctx](std::string const &backend) {\n        QString constructError;\n        m_llModelInfo.resetModel(this);\n        try {\n            auto *model = LLModel::Implementation::construct(filePath.toStdString(), backend, n_ctx);\n            m_llModelInfo.resetModel(this, model);\n        } catch (const 
LLModel::MissingImplementationError &e) {\n            modelLoadProps.insert(\"error\", \"missing_model_impl\");\n            constructError = e.what();\n        } catch (const LLModel::UnsupportedModelError &e) {\n            modelLoadProps.insert(\"error\", \"unsupported_model_file\");\n            constructError = e.what();\n        } catch (const LLModel::BadArchError &e) {\n            constructError = e.what();\n            modelLoadProps.insert(\"error\", \"unsupported_model_arch\");\n            modelLoadProps.insert(\"model_arch\", QString::fromStdString(e.arch()));\n        }\n\n        if (!m_llModelInfo.model) {\n            if (!m_isServer)\n                LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n            resetModel();\n            emit modelLoadingError(u\"Error loading %1: %2\"_s.arg(modelInfo.filename(), constructError));\n            return false;\n        }\n\n        m_llModelInfo.model->setProgressCallback([this](float progress) -> bool {\n            progress = std::max(progress, std::numeric_limits<float>::min()); // keep progress above zero\n            emit modelLoadingPercentageChanged(progress);\n            return m_shouldBeLoaded;\n        });\n        return true;\n    };\n\n    if (!construct(backend))\n        return true;\n\n    if (m_llModelInfo.model->isModelBlacklisted(filePath.toStdString())) {\n        static QSet<QString> warned;\n        auto fname = modelInfo.filename();\n        if (!warned.contains(fname)) {\n            emit modelLoadingWarning(\n                u\"%1 is known to be broken. 
Please get a replacement via the download dialog.\"_s.arg(fname)\n            );\n            warned.insert(fname); // don't warn again until restart\n        }\n    }\n\n    auto approxDeviceMemGB = [](const LLModel::GPUDevice *dev) {\n        float memGB = dev->heapSize / float(1024 * 1024 * 1024);\n        return std::floor(memGB * 10.f) / 10.f; // truncate to 1 decimal place\n    };\n\n    std::vector<LLModel::GPUDevice> availableDevices;\n    const LLModel::GPUDevice *defaultDevice = nullptr;\n    {\n        const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString(), n_ctx, ngl);\n        availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory);\n        // Pick the best device\n        // NB: relies on the fact that Kompute devices are listed first\n        if (!availableDevices.empty() && availableDevices.front().type == 2 /*a discrete gpu*/) {\n            defaultDevice = &availableDevices.front();\n            float memGB = defaultDevice->heapSize / float(1024 * 1024 * 1024);\n            memGB = std::floor(memGB * 10.f) / 10.f; // truncate to 1 decimal place\n            modelLoadProps.insert(\"default_device\", QString::fromStdString(defaultDevice->name));\n            modelLoadProps.insert(\"default_device_mem\", approxDeviceMemGB(defaultDevice));\n            modelLoadProps.insert(\"default_device_backend\", QString::fromStdString(defaultDevice->backendName()));\n        }\n    }\n\n    bool actualDeviceIsCPU = true;\n\n#if defined(Q_OS_MAC) && defined(__aarch64__)\n    if (m_llModelInfo.model->implementation().buildVariant() == \"metal\")\n        actualDeviceIsCPU = false;\n#else\n    if (requestedDevice != \"CPU\") {\n        const auto *device = defaultDevice;\n        if (requestedDevice != \"Auto\") {\n            // Use the selected device\n            for (const LLModel::GPUDevice &d : availableDevices) {\n                if (QString::fromStdString(d.selectionName()) == requestedDevice) {\n          
          device = &d;\n                    break;\n                }\n            }\n        }\n\n        std::string unavail_reason;\n        if (!device) {\n            // GPU not available\n        } else if (!m_llModelInfo.model->initializeGPUDevice(device->index, &unavail_reason)) {\n            m_llModelInfo.fallbackReason = QString::fromStdString(unavail_reason);\n        } else {\n            actualDeviceIsCPU = false;\n            modelLoadProps.insert(\"requested_device_mem\", approxDeviceMemGB(device));\n        }\n    }\n#endif\n\n    bool success = m_llModelInfo.model->loadModel(filePath.toStdString(), n_ctx, ngl);\n\n    if (!m_shouldBeLoaded) {\n        m_llModelInfo.resetModel(this);\n        if (!m_isServer)\n            LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n        resetModel();\n        emit modelLoadingPercentageChanged(0.0f);\n        return false;\n    }\n\n    if (actualDeviceIsCPU) {\n        // we asked llama.cpp to use the CPU\n    } else if (!success) {\n        // llama_init_from_file returned nullptr\n        m_llModelInfo.fallbackReason = \"GPU loading failed (out of VRAM?)\";\n        modelLoadProps.insert(\"cpu_fallback_reason\", \"gpu_load_failed\");\n\n        // For CUDA, make sure we don't use the GPU at all - ngl=0 still offloads matmuls\n        if (backend == \"cuda\" && !construct(\"auto\"))\n            return true;\n\n        success = m_llModelInfo.model->loadModel(filePath.toStdString(), n_ctx, 0);\n\n        if (!m_shouldBeLoaded) {\n            m_llModelInfo.resetModel(this);\n            if (!m_isServer)\n                LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n            resetModel();\n            emit modelLoadingPercentageChanged(0.0f);\n            return false;\n        }\n    } else if (!m_llModelInfo.model->usingGPUDevice()) {\n        // ggml_vk_init was not called in llama.cpp\n        // We might have had to fallback to CPU after load if the 
model is not possible to accelerate\n        // for instance if the quantization method is not supported on Vulkan yet\n        m_llModelInfo.fallbackReason = \"model or quant has no GPU support\";\n        modelLoadProps.insert(\"cpu_fallback_reason\", \"gpu_unsupported_model\");\n    }\n\n    if (!success) {\n        m_llModelInfo.resetModel(this);\n        if (!m_isServer)\n            LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n        resetModel();\n        emit modelLoadingError(u\"Could not load model due to invalid model file for %1\"_s.arg(modelInfo.filename()));\n        modelLoadProps.insert(\"error\", \"loadmodel_failed\");\n        return true;\n    }\n\n    switch (m_llModelInfo.model->implementation().modelType()[0]) {\n    case 'L': m_llModelType = LLModelTypeV1::LLAMA; break;\n    default:\n        {\n            m_llModelInfo.resetModel(this);\n            if (!m_isServer)\n                LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n            resetModel();\n            emit modelLoadingError(u\"Could not determine model type for %1\"_s.arg(modelInfo.filename()));\n        }\n    }\n\n    modelLoadProps.insert(\"$duration\", modelLoadTimer.elapsed() / 1000.);\n    return true;\n}\n\nbool ChatLLM::isModelLoaded() const\n{\n    return m_llModelInfo.model && m_llModelInfo.model->isModelLoaded();\n}\n\nstatic QString &removeLeadingWhitespace(QString &s)\n{\n    auto firstNonSpace = ranges::find_if_not(s, [](auto c) { return c.isSpace(); });\n    s.remove(0, firstNonSpace - s.begin());\n    return s;\n}\n\ntemplate <ranges::input_range R>\n    requires std::convertible_to<ranges::range_reference_t<R>, QChar>\nbool isAllSpace(R &&r)\n{\n    return ranges::all_of(std::forward<R>(r), [](QChar c) { return c.isSpace(); });\n}\n\nvoid ChatLLM::regenerateResponse(int index)\n{\n    Q_ASSERT(m_chatModel);\n    if (m_chatModel->regenerateResponse(index)) {\n        emit responseChanged();\n        
prompt(m_chat->collectionList());\n    }\n}\n\nstd::optional<QString> ChatLLM::popPrompt(int index)\n{\n    Q_ASSERT(m_chatModel);\n    return m_chatModel->popPrompt(index);\n}\n\nModelInfo ChatLLM::modelInfo() const\n{\n    return m_modelInfo;\n}\n\nvoid ChatLLM::setModelInfo(const ModelInfo &modelInfo)\n{\n    m_modelInfo = modelInfo;\n    emit modelInfoChanged(modelInfo);\n}\n\nvoid ChatLLM::acquireModel()\n{\n    m_llModelInfo = LLModelStore::globalInstance()->acquireModel();\n    emit loadedModelInfoChanged();\n}\n\nvoid ChatLLM::resetModel()\n{\n    m_llModelInfo = {};\n    emit loadedModelInfoChanged();\n}\n\nvoid ChatLLM::modelChangeRequested(const ModelInfo &modelInfo)\n{\n    // ignore attempts to switch to the same model twice\n    if (!isModelLoaded() || this->modelInfo() != modelInfo) {\n        m_shouldBeLoaded = true;\n        loadModel(modelInfo);\n    }\n}\n\nstatic LLModel::PromptContext promptContextFromSettings(const ModelInfo &modelInfo)\n{\n    auto *mySettings = MySettings::globalInstance();\n    return {\n        .n_predict      = mySettings->modelMaxLength          (modelInfo),\n        .top_k          = mySettings->modelTopK               (modelInfo),\n        .top_p          = float(mySettings->modelTopP         (modelInfo)),\n        .min_p          = float(mySettings->modelMinP         (modelInfo)),\n        .temp           = float(mySettings->modelTemperature  (modelInfo)),\n        .n_batch        = mySettings->modelPromptBatchSize    (modelInfo),\n        .repeat_penalty = float(mySettings->modelRepeatPenalty(modelInfo)),\n        .repeat_last_n  = mySettings->modelRepeatPenaltyTokens(modelInfo),\n    };\n}\n\nvoid ChatLLM::prompt(const QStringList &enabledCollections)\n{\n    if (!isModelLoaded()) {\n        emit responseStopped(0);\n        return;\n    }\n\n    try {\n        promptInternalChat(enabledCollections, promptContextFromSettings(m_modelInfo));\n    } catch (const std::exception &e) {\n        // FIXME(jared): this is 
neither translated nor serialized\n        m_chatModel->setResponseValue(u\"Error: %1\"_s.arg(QString::fromUtf8(e.what())));\n        m_chatModel->setError();\n        emit responseStopped(0);\n    }\n}\n\nstd::vector<MessageItem> ChatLLM::forkConversation(const QString &prompt) const\n{\n    Q_ASSERT(m_chatModel);\n    if (m_chatModel->hasError())\n        throw std::logic_error(\"cannot continue conversation with an error\");\n\n    std::vector<MessageItem> conversation;\n    {\n        auto items = m_chatModel->messageItems();\n        // It is possible the main thread could have erased the conversation while the llm thread,\n        // is busy forking the conversatoin but it must have set stop generating first\n        Q_ASSERT(items.size() >= 2 || m_stopGenerating); // should be prompt/response pairs\n        conversation.reserve(items.size() + 1);\n        conversation.assign(items.begin(), items.end());\n    }\n    qsizetype nextIndex = conversation.empty() ? 0 : conversation.back().index().value() + 1;\n    conversation.emplace_back(nextIndex, MessageItem::Type::Prompt, prompt.toUtf8());\n    return conversation;\n}\n\n// version 0 (default): HF compatible\n// version 1: explicit LocalDocs formatting\nstatic uint parseJinjaTemplateVersion(QStringView tmpl)\n{\n    static uint MAX_VERSION = 1;\n    static QRegularExpression reVersion(uR\"(\\A{#-?\\s+gpt4all v(\\d+)-?#}\\s*$)\"_s, QRegularExpression::MultilineOption);\n    if (auto match = reVersion.matchView(tmpl); match.hasMatch()) {\n        uint ver = match.captured(1).toUInt();\n        if (ver > MAX_VERSION)\n            throw std::out_of_range(fmt::format(\"Unknown template version: {}\", ver));\n        return ver;\n    }\n    return 0;\n}\n\nstatic std::shared_ptr<minja::TemplateNode> loadJinjaTemplate(const std::string &source)\n{\n    return minja::Parser::parse(source, { .trim_blocks = true, .lstrip_blocks = true, .keep_trailing_newline = false });\n}\n\nstd::optional<std::string> 
ChatLLM::checkJinjaTemplateError(const std::string &source)\n{\n    try {\n        loadJinjaTemplate(source);\n    } catch (const std::runtime_error &e) {\n        return e.what();\n    }\n    return std::nullopt;\n}\n\nstd::string ChatLLM::applyJinjaTemplate(std::span<const MessageItem> items) const\n{\n    Q_ASSERT(items.size() >= 1);\n\n    auto *mySettings = MySettings::globalInstance();\n    auto &model      = m_llModelInfo.model;\n\n    QString chatTemplate, systemMessage;\n    auto chatTemplateSetting = mySettings->modelChatTemplate(m_modelInfo);\n    if (auto tmpl = chatTemplateSetting.asModern()) {\n        chatTemplate = *tmpl;\n    } else if (chatTemplateSetting.isLegacy()) {\n        throw std::logic_error(\"cannot apply Jinja to a legacy prompt template\");\n    } else {\n        throw std::logic_error(\"cannot apply Jinja without setting a chat template first\");\n    }\n    if (isAllSpace(chatTemplate)) {\n        throw std::logic_error(\"cannot apply Jinja with a blank chat template\");\n    }\n    if (auto tmpl = mySettings->modelSystemMessage(m_modelInfo).asModern()) {\n        systemMessage = *tmpl;\n    } else {\n        throw std::logic_error(\"cannot apply Jinja with a legacy system message\");\n    }\n\n    uint version = parseJinjaTemplateVersion(chatTemplate);\n\n    auto makeMap = [version](const MessageItem &item) {\n        return JinjaMessage(version, item).AsJson();\n    };\n\n    std::unique_ptr<MessageItem> systemItem;\n    bool useSystem = !isAllSpace(systemMessage);\n\n    json::array_t messages;\n    messages.reserve(useSystem + items.size());\n    if (useSystem) {\n        systemItem = std::make_unique<MessageItem>(MessageItem::system_tag, systemMessage.toUtf8());\n        messages.emplace_back(makeMap(*systemItem));\n    }\n    for (auto &item : items)\n        messages.emplace_back(makeMap(item));\n\n    json::array_t toolList;\n    const int toolCount = ToolModel::globalInstance()->count();\n    for (int i = 0; i < toolCount; 
++i) {\n        Tool *t = ToolModel::globalInstance()->get(i);\n        toolList.push_back(t->jinjaValue());\n    }\n\n    json::object_t params {\n        { \"messages\",              std::move(messages) },\n        { \"add_generation_prompt\", true                },\n        { \"toolList\",              toolList            },\n    };\n    for (auto &[name, token] : model->specialTokens())\n        params.emplace(std::move(name), std::move(token));\n\n    try {\n        auto tmpl = loadJinjaTemplate(chatTemplate.toStdString());\n        auto context = minja::Context::make(minja::Value(std::move(params)), jinjaEnv());\n        return tmpl->render(context);\n    } catch (const std::runtime_error &e) {\n        throw std::runtime_error(fmt::format(\"Failed to parse chat template: {}\", e.what()));\n    }\n    Q_UNREACHABLE();\n}\n\nauto ChatLLM::promptInternalChat(const QStringList &enabledCollections, const LLModel::PromptContext &ctx,\n                                 qsizetype startOffset) -> ChatPromptResult\n{\n    Q_ASSERT(isModelLoaded());\n    Q_ASSERT(m_chatModel);\n\n    // Return a vector of relevant messages for this chat.\n    // \"startOffset\" is used to select only local server messages from the current chat session.\n    auto getChat = [&]() {\n        auto items = m_chatModel->messageItems();\n        if (startOffset > 0)\n            items.erase(items.begin(), items.begin() + startOffset);\n        Q_ASSERT(items.size() >= 2);\n        return items;\n    };\n\n    QList<ResultInfo> databaseResults;\n    if (!enabledCollections.isEmpty()) {\n        std::optional<std::pair<int, QString>> query;\n        {\n            // Find the prompt that represents the query. 
Server chats are flexible and may not have one.\n            auto items = getChat();\n            if (auto peer = m_chatModel->getPeer(items, items.end() - 1)) // peer of response\n                query = { (*peer)->index().value(), (*peer)->content() };\n        }\n\n        if (query) {\n            auto &[promptIndex, queryStr] = *query;\n            const int retrievalSize = MySettings::globalInstance()->localDocsRetrievalSize();\n            emit requestRetrieveFromDB(enabledCollections, queryStr, retrievalSize, &databaseResults); // blocks\n            m_chatModel->updateSources(promptIndex, databaseResults);\n            emit databaseResultsChanged(databaseResults);\n        }\n    }\n\n    auto messageItems = getChat();\n    messageItems.pop_back(); // exclude new response\n\n    auto result = promptInternal(messageItems, ctx, !databaseResults.isEmpty());\n    return {\n        /*PromptResult*/ {\n            .response       = std::move(result.response),\n            .promptTokens   = result.promptTokens,\n            .responseTokens = result.responseTokens,\n        },\n        /*databaseResults*/ std::move(databaseResults),\n    };\n}\n\nclass ChatViewResponseHandler : public BaseResponseHandler {\npublic:\n    ChatViewResponseHandler(ChatLLM *cllm, QElapsedTimer *totalTime, ChatLLM::PromptResult *result)\n        : m_cllm(cllm), m_totalTime(totalTime), m_result(result) {}\n\n    void onSplitIntoTwo(const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) override\n    {\n        if (startTag == ToolCallConstants::ThinkStartTag)\n            m_cllm->m_chatModel->splitThinking({ firstBuffer, secondBuffer });\n        else\n            m_cllm->m_chatModel->splitToolCall({ firstBuffer, secondBuffer });\n    }\n\n    void onSplitIntoThree(const QString &secondBuffer, const QString &thirdBuffer) override\n    {\n        m_cllm->m_chatModel->endThinking({ secondBuffer, thirdBuffer }, m_totalTime->elapsed());\n    }\n\n    void 
onOldResponseChunk(const QByteArray &chunk) override\n    {\n        m_result->responseTokens++;\n        m_cllm->m_timer->inc();\n        m_result->response.append(chunk);\n    }\n\n    bool onBufferResponse(const QString &response, int bufferIdx) override\n    {\n        Q_UNUSED(bufferIdx)\n        try {\n            QString r = response;\n            m_cllm->m_chatModel->setResponseValue(removeLeadingWhitespace(r));\n        } catch (const std::exception &e) {\n            // We have a try/catch here because the main thread might have removed the response from\n            // the chatmodel by erasing the conversation during the response... the main thread sets\n            // m_stopGenerating before doing so, but it doesn't wait after that to reset the chatmodel\n            Q_ASSERT(m_cllm->m_stopGenerating);\n            return false;\n        }\n        emit m_cllm->responseChanged();\n        return true;\n    }\n\n    bool onRegularResponse() override\n    {\n        auto respStr = QString::fromUtf8(m_result->response);\n        return onBufferResponse(respStr, 0);\n    }\n\n    bool getStopGenerating() const override\n    { return m_cllm->m_stopGenerating; }\n\nprivate:\n    ChatLLM               *m_cllm;\n    QElapsedTimer         *m_totalTime;\n    ChatLLM::PromptResult *m_result;\n};\n\nauto ChatLLM::promptInternal(\n    const std::variant<std::span<const MessageItem>, std::string_view> &prompt,\n    const LLModel::PromptContext &ctx,\n    bool usedLocalDocs\n) -> PromptResult\n{\n    Q_ASSERT(isModelLoaded());\n\n    auto *mySettings = MySettings::globalInstance();\n\n    // unpack prompt argument\n    const std::span<const MessageItem> *messageItems = nullptr;\n    std::string                      jinjaBuffer;\n    std::string_view                 conversation;\n    if (auto *nonChat = std::get_if<std::string_view>(&prompt)) {\n        conversation = *nonChat; // complete the string without a template\n    } else {\n        messageItems    = 
&std::get<std::span<const MessageItem>>(prompt);\n        jinjaBuffer  = applyJinjaTemplate(*messageItems);\n        conversation = jinjaBuffer;\n    }\n\n    // check for overlength last message\n    if (!dynamic_cast<const ChatAPI *>(m_llModelInfo.model.get())) {\n        auto nCtx = m_llModelInfo.model->contextLength();\n        std::string jinjaBuffer2;\n        auto lastMessageRendered = (messageItems && messageItems->size() > 1)\n            ? std::string_view(jinjaBuffer2 = applyJinjaTemplate({ &messageItems->back(), 1 }))\n            : conversation;\n        int32_t lastMessageLength = m_llModelInfo.model->countPromptTokens(lastMessageRendered);\n        if (auto limit = nCtx - 4; lastMessageLength > limit) {\n            throw std::invalid_argument(\n                tr(\"Your message was too long and could not be processed (%1 > %2). \"\n                   \"Please try again with something shorter.\").arg(lastMessageLength).arg(limit).toUtf8().constData()\n            );\n        }\n    }\n\n    PromptResult result {};\n\n    auto handlePrompt = [this, &result](std::span<const LLModel::Token> batch, bool cached) -> bool {\n        Q_UNUSED(cached)\n        result.promptTokens += batch.size();\n        m_timer->start();\n        return !m_stopGenerating;\n    };\n\n    QElapsedTimer totalTime;\n    totalTime.start();\n    ChatViewResponseHandler respHandler(this, &totalTime, &result);\n\n    m_timer->start();\n    QStringList finalBuffers;\n    bool        shouldExecuteTool;\n    try {\n        emit promptProcessing();\n        m_llModelInfo.model->setThreadCount(mySettings->threadCount());\n        m_stopGenerating = false;\n        std::tie(finalBuffers, shouldExecuteTool) = promptModelWithTools(\n            m_llModelInfo.model.get(), handlePrompt, respHandler, ctx,\n            QByteArray::fromRawData(conversation.data(), conversation.size()),\n            ToolCallConstants::AllTagNames\n        );\n    } catch (...) 
{\n        m_timer->stop();\n        throw;\n    }\n\n    m_timer->stop();\n    qint64 elapsed = totalTime.elapsed();\n\n    // trim trailing whitespace\n    auto respStr = QString::fromUtf8(result.response);\n    if (!respStr.isEmpty() && (std::as_const(respStr).back().isSpace() || finalBuffers.size() > 1)) {\n        if (finalBuffers.size() > 1)\n            m_chatModel->setResponseValue(finalBuffers.last().trimmed());\n        else\n            m_chatModel->setResponseValue(respStr.trimmed());\n        emit responseChanged();\n    }\n\n    bool doQuestions = false;\n    if (!m_isServer && messageItems && !shouldExecuteTool) {\n        switch (mySettings->suggestionMode()) {\n            case SuggestionMode::On:            doQuestions = true;          break;\n            case SuggestionMode::LocalDocsOnly: doQuestions = usedLocalDocs; break;\n            case SuggestionMode::Off:           ;\n        }\n    }\n    if (doQuestions)\n        generateQuestions(elapsed);\n    else\n        emit responseStopped(elapsed);\n\n    return result;\n}\n\nvoid ChatLLM::setShouldBeLoaded(bool b)\n{\n#if defined(DEBUG_MODEL_LOADING)\n    qDebug() << \"setShouldBeLoaded\" << m_llmThread.objectName() << b << m_llModelInfo.model.get();\n#endif\n    m_shouldBeLoaded = b; // atomic\n    emit shouldBeLoadedChanged();\n}\n\nvoid ChatLLM::requestTrySwitchContext()\n{\n    m_shouldBeLoaded = true; // atomic\n    emit trySwitchContextRequested(modelInfo());\n}\n\nvoid ChatLLM::handleShouldBeLoadedChanged()\n{\n    if (m_shouldBeLoaded)\n        reloadModel();\n    else\n        unloadModel();\n}\n\nvoid ChatLLM::unloadModel()\n{\n    if (!isModelLoaded() || m_isServer)\n        return;\n\n    if (!m_forceUnloadModel || !m_shouldBeLoaded)\n        emit modelLoadingPercentageChanged(0.0f);\n    else\n        emit modelLoadingPercentageChanged(std::numeric_limits<float>::min()); // small non-zero positive value\n\n#if defined(DEBUG_MODEL_LOADING)\n    qDebug() << \"unloadModel\" << 
m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n\n    if (m_forceUnloadModel) {\n        m_llModelInfo.resetModel(this);\n        m_forceUnloadModel = false;\n    }\n\n    LLModelStore::globalInstance()->releaseModel(std::move(m_llModelInfo));\n}\n\nvoid ChatLLM::reloadModel()\n{\n    if (isModelLoaded() && m_forceUnloadModel)\n        unloadModel(); // we unload first if we are forcing an unload\n\n    if (isModelLoaded() || m_isServer)\n        return;\n\n#if defined(DEBUG_MODEL_LOADING)\n    qDebug() << \"reloadModel\" << m_llmThread.objectName() << m_llModelInfo.model.get();\n#endif\n    const ModelInfo m = modelInfo();\n    if (m.name().isEmpty())\n        loadDefaultModel();\n    else\n        loadModel(m);\n}\n\n// This class throws discards the text within thinking tags, for use with chat names and follow-up questions.\nclass SimpleResponseHandler : public BaseResponseHandler {\npublic:\n    SimpleResponseHandler(ChatLLM *cllm)\n        : m_cllm(cllm) {}\n\n    void onSplitIntoTwo(const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) override\n    { /* no-op */ }\n\n    void onSplitIntoThree(const QString &secondBuffer, const QString &thirdBuffer) override\n    { /* no-op */ }\n\n    void onOldResponseChunk(const QByteArray &chunk) override\n    { m_response.append(chunk); }\n\n    bool onBufferResponse(const QString &response, int bufferIdx) override\n    {\n        if (bufferIdx == 1)\n            return true; // ignore \"think\" content\n        return onSimpleResponse(response);\n    }\n\n    bool onRegularResponse() override\n    { return onBufferResponse(QString::fromUtf8(m_response), 0); }\n\n    bool getStopGenerating() const override\n    { return m_cllm->m_stopGenerating; }\n\nprotected:\n    virtual bool onSimpleResponse(const QString &response) = 0;\n\nprotected:\n    ChatLLM    *m_cllm;\n    QByteArray  m_response;\n};\n\nclass NameResponseHandler : public SimpleResponseHandler {\nprivate:\n    // max 
length of chat names, in words\n    static constexpr qsizetype MAX_WORDS = 3;\n\npublic:\n    using SimpleResponseHandler::SimpleResponseHandler;\n\nprotected:\n    bool onSimpleResponse(const QString &response) override\n    {\n        QTextStream stream(const_cast<QString *>(&response), QIODeviceBase::ReadOnly);\n        QStringList words;\n        while (!stream.atEnd() && words.size() < MAX_WORDS) {\n            QString word;\n            stream >> word;\n            words << word;\n        }\n\n        emit m_cllm->generatedNameChanged(words.join(u' '));\n        return words.size() < MAX_WORDS || stream.atEnd();\n    }\n};\n\nvoid ChatLLM::generateName()\n{\n    Q_ASSERT(isModelLoaded());\n    if (!isModelLoaded() || m_isServer)\n        return;\n\n    Q_ASSERT(m_chatModel);\n\n    auto *mySettings = MySettings::globalInstance();\n\n    const QString chatNamePrompt = mySettings->modelChatNamePrompt(m_modelInfo);\n    if (isAllSpace(chatNamePrompt)) {\n        qWarning() << \"ChatLLM: not generating chat name because prompt is empty\";\n        return;\n    }\n\n    NameResponseHandler respHandler(this);\n\n    try {\n        promptModelWithTools(\n            m_llModelInfo.model.get(),\n            /*promptCallback*/ [this](auto &&...) 
{ return !m_stopGenerating; },\n            respHandler, promptContextFromSettings(m_modelInfo),\n            applyJinjaTemplate(forkConversation(chatNamePrompt)).c_str(),\n            { ToolCallConstants::ThinkTagName }\n        );\n    } catch (const std::exception &e) {\n        qWarning() << \"ChatLLM failed to generate name:\" << e.what();\n    }\n}\n\nvoid ChatLLM::handleChatIdChanged(const QString &id)\n{\n    m_llmThread.setObjectName(id);\n}\n\nclass QuestionResponseHandler : public SimpleResponseHandler {\npublic:\n    using SimpleResponseHandler::SimpleResponseHandler;\n\nprotected:\n    bool onSimpleResponse(const QString &response) override\n    {\n        auto responseUtf8Bytes = response.toUtf8().slice(m_offset);\n        auto responseUtf8 = std::string(responseUtf8Bytes.begin(), responseUtf8Bytes.end());\n        // extract all questions from response\n        ptrdiff_t lastMatchEnd = -1;\n        auto it = std::sregex_iterator(responseUtf8.begin(), responseUtf8.end(), s_reQuestion);\n        auto end = std::sregex_iterator();\n        for (; it != end; ++it) {\n            auto pos = it->position();\n            auto len = it->length();\n            lastMatchEnd = pos + len;\n            emit m_cllm->generatedQuestionFinished(QString::fromUtf8(&responseUtf8[pos], len));\n        }\n\n        // remove processed input from buffer\n        if (lastMatchEnd != -1)\n            m_offset += lastMatchEnd;\n        return true;\n    }\n\nprivate:\n    // FIXME: This only works with response by the model in english which is not ideal for a multi-language\n    // model.\n    // match whole question sentences\n    static inline const std::regex s_reQuestion { R\"(\\b(?:What|Where|How|Why|When|Who|Which|Whose|Whom)\\b[^?]*\\?)\" };\n\n    qsizetype m_offset = 0;\n};\n\nvoid ChatLLM::generateQuestions(qint64 elapsed)\n{\n    Q_ASSERT(isModelLoaded());\n    if (!isModelLoaded()) {\n        emit responseStopped(elapsed);\n        return;\n    }\n\n    auto 
*mySettings = MySettings::globalInstance();\n\n    QString suggestedFollowUpPrompt = mySettings->modelSuggestedFollowUpPrompt(m_modelInfo);\n    if (isAllSpace(suggestedFollowUpPrompt)) {\n        qWarning() << \"ChatLLM: not generating follow-up questions because prompt is empty\";\n        emit responseStopped(elapsed);\n        return;\n    }\n\n    emit generatingQuestions();\n\n    QuestionResponseHandler respHandler(this);\n\n    QElapsedTimer totalTime;\n    totalTime.start();\n    try {\n        promptModelWithTools(\n            m_llModelInfo.model.get(),\n            /*promptCallback*/ [this](auto &&...) { return !m_stopGenerating; },\n            respHandler, promptContextFromSettings(m_modelInfo),\n            applyJinjaTemplate(forkConversation(suggestedFollowUpPrompt)).c_str(),\n            { ToolCallConstants::ThinkTagName }\n        );\n    } catch (const std::exception &e) {\n        qWarning() << \"ChatLLM failed to generate follow-up questions:\" << e.what();\n    }\n    elapsed += totalTime.elapsed();\n    emit responseStopped(elapsed);\n}\n\n// this function serialized the cached model state to disk.\n// we want to also serialize n_ctx, and read it at load time.\nbool ChatLLM::serialize(QDataStream &stream, int version)\n{\n    if (version < 11) {\n        if (version >= 6) {\n            stream << false; // serializeKV\n        }\n        if (version >= 2) {\n            if (m_llModelType == LLModelTypeV1::NONE) {\n                qWarning() << \"ChatLLM ERROR: attempted to serialize a null model for chat id\" << m_chat->id()\n                           << \"name\" << m_chat->name();\n                return false;\n            }\n            stream << m_llModelType;\n            stream << 0; // state version\n        }\n        {\n            QString dummy;\n            stream << dummy; // response\n            stream << dummy; // generated name\n        }\n        stream << quint32(0); // prompt + response tokens\n\n        if (version < 6) { 
// serialize binary state\n            if (version < 4) {\n                stream << 0; // responseLogits\n            }\n            stream << int32_t(0); // n_past\n            stream << quint64(0); // input token count\n            stream << QByteArray(); // KV cache state\n        }\n    }\n    return stream.status() == QDataStream::Ok;\n}\n\nbool ChatLLM::deserialize(QDataStream &stream, int version)\n{\n    // discard all state since we are initialized from the ChatModel as of v11\n    if (version < 11) {\n        union { int intval; quint32 u32; quint64 u64; };\n\n        bool deserializeKV = true;\n        if (version >= 6)\n            stream >> deserializeKV;\n\n        if (version >= 2) {\n            stream >> intval; // model type\n            auto llModelType = (version >= 6 ? parseLLModelTypeV1 : parseLLModelTypeV0)(intval);\n            if (llModelType == LLModelTypeV1::NONE) {\n                qWarning().nospace() << \"error loading chat id \" << m_chat->id() << \": unrecognized model type: \"\n                                     << intval;\n                return false;\n            }\n\n            /* note: prior to chat version 10, API models and chats with models removed in v2.5.0 only wrote this because of\n             * undefined behavior in Release builds */\n            stream >> intval; // state version\n            if (intval) {\n                qWarning().nospace() << \"error loading chat id \" << m_chat->id() << \": unrecognized internal state version\";\n                return false;\n            }\n        }\n\n        {\n            QString dummy;\n            stream >> dummy; // response\n            stream >> dummy; // name response\n        }\n        stream >> u32; // prompt + response token count\n\n        // We don't use the raw model state anymore.\n        if (deserializeKV) {\n            if (version < 4) {\n                stream >> u32; // response logits\n            }\n            stream >> u32; // n_past\n            
if (version >= 7) {\n                stream >> u32; // n_ctx\n            }\n            if (version < 9) {\n                stream >> u64; // logits size\n                stream.skipRawData(u64 * sizeof(float)); // logits\n            }\n            stream >> u64; // token cache size\n            stream.skipRawData(u64 * sizeof(int)); // token cache\n            QByteArray dummy;\n            stream >> dummy; // state\n        }\n    }\n    return stream.status() == QDataStream::Ok;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/chatllm.h",
    "content": "#ifndef CHATLLM_H\n#define CHATLLM_H\n\n#include \"chatmodel.h\"\n#include \"database.h\"\n#include \"modellist.h\"\n\n#include <gpt4all-backend/llmodel.h>\n\n#include <QByteArray>\n#include <QElapsedTimer>\n#include <QFileInfo>\n#include <QList>\n#include <QObject>\n#include <QPointer>\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n#include <QThread>\n#include <QVariantMap> // IWYU pragma: keep\n#include <QtNumeric>\n\n#include <atomic>\n#include <memory>\n#include <optional>\n#include <span>\n#include <string>\n#include <string_view>\n#include <variant>\n#include <vector>\n\nusing namespace Qt::Literals::StringLiterals;\n\nclass ChatLLM;\nclass QDataStream;\n\n\n// NOTE: values serialized to disk, do not change or reuse\nenum class LLModelTypeV0 { // chat versions 2-5\n    MPT       = 0,\n    GPTJ      = 1,\n    LLAMA     = 2,\n    CHATGPT   = 3,\n    REPLIT    = 4,\n    FALCON    = 5,\n    BERT      = 6, // not used\n    STARCODER = 7,\n};\nenum class LLModelTypeV1 { // since chat version 6 (v2.5.0)\n    GPTJ      = 0, // not for new chats\n    LLAMA     = 1,\n    API       = 2,\n    BERT      = 3, // not used\n    // none of the below are used in new chats\n    REPLIT    = 4,\n    FALCON    = 5,\n    MPT       = 6,\n    STARCODER = 7,\n    NONE      = -1, // no state\n};\n\ninline LLModelTypeV1 parseLLModelTypeV1(int type)\n{\n    switch (LLModelTypeV1(type)) {\n    case LLModelTypeV1::GPTJ:\n    case LLModelTypeV1::LLAMA:\n    case LLModelTypeV1::API:\n    // case LLModelTypeV1::BERT: -- not used\n    case LLModelTypeV1::REPLIT:\n    case LLModelTypeV1::FALCON:\n    case LLModelTypeV1::MPT:\n    case LLModelTypeV1::STARCODER:\n        return LLModelTypeV1(type);\n    default:\n        return LLModelTypeV1::NONE;\n    }\n}\n\ninline LLModelTypeV1 parseLLModelTypeV0(int v0)\n{\n    switch (LLModelTypeV0(v0)) {\n    case LLModelTypeV0::MPT:       return LLModelTypeV1::MPT;\n    case LLModelTypeV0::GPTJ:      return 
LLModelTypeV1::GPTJ;\n    case LLModelTypeV0::LLAMA:     return LLModelTypeV1::LLAMA;\n    case LLModelTypeV0::CHATGPT:   return LLModelTypeV1::API;\n    case LLModelTypeV0::REPLIT:    return LLModelTypeV1::REPLIT;\n    case LLModelTypeV0::FALCON:    return LLModelTypeV1::FALCON;\n    // case LLModelTypeV0::BERT: -- not used\n    case LLModelTypeV0::STARCODER: return LLModelTypeV1::STARCODER;\n    default:                       return LLModelTypeV1::NONE;\n    }\n}\n\nstruct LLModelInfo {\n    std::unique_ptr<LLModel> model;\n    QFileInfo fileInfo;\n    std::optional<QString> fallbackReason;\n\n    // NOTE: This does not store the model type or name on purpose as this is left for ChatLLM which\n    // must be able to serialize the information even if it is in the unloaded state\n\n    void resetModel(ChatLLM *cllm, LLModel *model = nullptr);\n};\n\nclass TokenTimer : public QObject {\n    Q_OBJECT\npublic:\n    explicit TokenTimer(QObject *parent)\n        : QObject(parent)\n        , m_elapsed(0) {}\n\n    static int rollingAverage(int oldAvg, int newNumber, int n)\n    {\n        // i.e. 
to calculate the new average after the nth number,\n        // you multiply the old average by n−1, add the new number, and divide the total by n.\n        return qRound(((float(oldAvg) * (n - 1)) + newNumber) / float(n));\n    }\n\n    void start() { m_tokens = 0; m_elapsed = 0; m_time.invalidate(); }\n    void stop() { handleTimeout(); }\n    void inc() {\n        if (!m_time.isValid())\n            m_time.start();\n        ++m_tokens;\n        if (m_time.elapsed() > 999)\n            handleTimeout();\n    }\n\nQ_SIGNALS:\n    void report(const QString &speed);\n\nprivate Q_SLOTS:\n    void handleTimeout()\n    {\n        m_elapsed += m_time.restart();\n        emit report(u\"%1 tokens/sec\"_s.arg(m_tokens / float(m_elapsed / 1000.0f), 0, 'g', 2));\n    }\n\nprivate:\n    QElapsedTimer m_time;\n    qint64 m_elapsed;\n    quint32 m_tokens;\n};\n\nclass Chat;\nclass ChatLLM : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(QString deviceBackend READ deviceBackend NOTIFY loadedModelInfoChanged)\n    Q_PROPERTY(QString device READ device NOTIFY loadedModelInfoChanged)\n    Q_PROPERTY(QString fallbackReason READ fallbackReason NOTIFY loadedModelInfoChanged)\npublic:\n    ChatLLM(Chat *parent, bool isServer = false);\n    virtual ~ChatLLM();\n\n    static void destroyStore();\n    static std::optional<std::string> checkJinjaTemplateError(const std::string &source);\n\n    void destroy();\n    bool isModelLoaded() const;\n    void regenerateResponse(int index);\n    // used to implement edit functionality\n    std::optional<QString> popPrompt(int index);\n\n    void stopGenerating() { m_stopGenerating = true; }\n\n    bool shouldBeLoaded() const { return m_shouldBeLoaded; }\n    void setShouldBeLoaded(bool b);\n    void requestTrySwitchContext();\n    void setForceUnloadModel(bool b) { m_forceUnloadModel = b; }\n    void setMarkedForDeletion(bool b) { m_markedForDeletion = b; }\n\n    ModelInfo modelInfo() const;\n    void setModelInfo(const ModelInfo &info);\n\n    
void acquireModel();\n    void resetModel();\n\n    QString deviceBackend() const\n    {\n        if (!isModelLoaded()) return QString();\n        std::string name = LLModel::GPUDevice::backendIdToName(m_llModelInfo.model->backendName());\n        return QString::fromStdString(name);\n    }\n\n    QString device() const\n    {\n        if (!isModelLoaded()) return QString();\n        const char *name = m_llModelInfo.model->gpuDeviceName();\n        return name ? QString(name) : u\"CPU\"_s;\n    }\n\n    // not loaded -> QString(), no fallback -> QString(\"\")\n    QString fallbackReason() const\n    {\n        if (!isModelLoaded()) return QString();\n        return m_llModelInfo.fallbackReason.value_or(u\"\"_s);\n    }\n\n    bool serialize(QDataStream &stream, int version);\n    bool deserialize(QDataStream &stream, int version);\n\npublic Q_SLOTS:\n    void prompt(const QStringList &enabledCollections);\n    bool loadDefaultModel();\n    void trySwitchContextOfLoadedModel(const ModelInfo &modelInfo);\n    bool loadModel(const ModelInfo &modelInfo);\n    void modelChangeRequested(const ModelInfo &modelInfo);\n    void unloadModel();\n    void reloadModel();\n    void generateName();\n    void handleChatIdChanged(const QString &id);\n    void handleShouldBeLoadedChanged();\n    void handleThreadStarted();\n    void handleForceMetalChanged(bool forceMetal);\n    void handleDeviceChanged();\n\nQ_SIGNALS:\n    void loadedModelInfoChanged();\n    void modelLoadingPercentageChanged(float);\n    void modelLoadingError(const QString &error);\n    void modelLoadingWarning(const QString &warning);\n    void responseChanged();\n    void responseFailed();\n    void promptProcessing();\n    void generatingQuestions();\n    void responseStopped(qint64 promptResponseMs);\n    void generatedNameChanged(const QString &name);\n    void generatedQuestionFinished(const QString &generatedQuestion);\n    void stateChanged();\n    void threadStarted();\n    void 
shouldBeLoadedChanged();\n    void trySwitchContextRequested(const ModelInfo &modelInfo);\n    void trySwitchContextOfLoadedModelCompleted(int value);\n    void requestRetrieveFromDB(const QList<QString> &collections, const QString &text, int retrievalSize, QList<ResultInfo> *results);\n    void reportSpeed(const QString &speed);\n    void reportDevice(const QString &device);\n    void reportFallbackReason(const QString &fallbackReason);\n    void databaseResultsChanged(const QList<ResultInfo>&);\n    void modelInfoChanged(const ModelInfo &modelInfo);\n\nprotected:\n    struct PromptResult {\n        QByteArray response;       // raw UTF-8\n        int        promptTokens;   // note: counts *entire* history, even if cached\n        int        responseTokens;\n    };\n\n    struct ChatPromptResult : PromptResult {\n        QList<ResultInfo> databaseResults;\n    };\n\n    ChatPromptResult promptInternalChat(const QStringList &enabledCollections, const LLModel::PromptContext &ctx,\n                                        qsizetype startOffset = 0);\n    // passing a string_view directly skips templating and uses the raw string\n    PromptResult promptInternal(const std::variant<std::span<const MessageItem>, std::string_view> &prompt,\n                                const LLModel::PromptContext &ctx,\n                                bool usedLocalDocs);\n\nprivate:\n    bool loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelLoadProps);\n\n    std::vector<MessageItem> forkConversation(const QString &prompt) const;\n\n    // Applies the Jinja template. 
Query mode returns only the last message without special tokens.\n    // Returns a (# of messages, rendered prompt) pair.\n    std::string applyJinjaTemplate(std::span<const MessageItem> items) const;\n\n    void generateQuestions(qint64 elapsed);\n\nprotected:\n    QPointer<ChatModel> m_chatModel;\n\nprivate:\n    const Chat *m_chat;\n    LLModelInfo m_llModelInfo;\n    LLModelTypeV1 m_llModelType = LLModelTypeV1::NONE;\n    ModelInfo m_modelInfo;\n    TokenTimer *m_timer;\n    QThread m_llmThread;\n    std::atomic<bool> m_stopGenerating;\n    std::atomic<bool> m_shouldBeLoaded;\n    std::atomic<bool> m_forceUnloadModel;\n    std::atomic<bool> m_markedForDeletion;\n    bool m_isServer;\n    bool m_forceMetal;\n    bool m_reloadingToChangeVariant;\n    friend class ChatViewResponseHandler;\n    friend class SimpleResponseHandler;\n};\n\n#endif // CHATLLM_H\n"
  },
  {
    "path": "gpt4all-chat/src/chatmodel.cpp",
    "content": "#include \"chatmodel.h\"\n\n#include <QDebug>\n#include <QMap>\n#include <QTextStream>\n#include <QtLogging>\n\n#include <exception>\n\n\nQList<ResultInfo> ChatItem::consolidateSources(const QList<ResultInfo> &sources)\n{\n    QMap<QString, ResultInfo> groupedData;\n    for (const ResultInfo &info : sources) {\n        if (groupedData.contains(info.file)) {\n            groupedData[info.file].text += \"\\n---\\n\" + info.text;\n        } else {\n            groupedData[info.file] = info;\n        }\n    }\n    QList<ResultInfo> consolidatedSources = groupedData.values();\n    return consolidatedSources;\n}\n\nvoid ChatItem::serializeResponse(QDataStream &stream, int version)\n{\n    stream << value;\n}\n\nvoid ChatItem::serializeToolCall(QDataStream &stream, int version)\n{\n    stream << value;\n    toolCallInfo.serialize(stream, version);\n}\n\nvoid ChatItem::serializeToolResponse(QDataStream &stream, int version)\n{\n    stream << value;\n}\n\nvoid ChatItem::serializeText(QDataStream &stream, int version)\n{\n    stream << value;\n}\n\nvoid ChatItem::serializeThink(QDataStream &stream, int version)\n{\n    stream << value;\n    stream << thinkingTime;\n}\n\nvoid ChatItem::serializeSubItems(QDataStream &stream, int version)\n{\n    stream << name;\n    switch (auto typ = type()) {\n        using enum ChatItem::Type;\n        case Response:      { serializeResponse(stream, version);       break; }\n        case ToolCall:      { serializeToolCall(stream, version);       break; }\n        case ToolResponse:  { serializeToolResponse(stream, version);   break; }\n        case Text:          { serializeText(stream, version);           break; }\n        case Think:         { serializeThink(stream, version);          break; }\n        case System:\n        case Prompt:\n            throw std::invalid_argument(fmt::format(\"cannot serialize subitem type {}\", int(typ)));\n    }\n\n    stream << qsizetype(subItems.size());\n    for (ChatItem *item 
:subItems)\n        item->serializeSubItems(stream, version);\n}\n\nvoid ChatItem::serialize(QDataStream &stream, int version)\n{\n    stream << name;\n    stream << value;\n    stream << newResponse;\n    stream << isCurrentResponse;\n    stream << stopped;\n    stream << thumbsUpState;\n    stream << thumbsDownState;\n    if (version >= 11 && type() == ChatItem::Type::Response)\n        stream << isError;\n    if (version >= 8) {\n        stream << sources.size();\n        for (const ResultInfo &info : sources) {\n            Q_ASSERT(!info.file.isEmpty());\n            stream << info.collection;\n            stream << info.path;\n            stream << info.file;\n            stream << info.title;\n            stream << info.author;\n            stream << info.date;\n            stream << info.text;\n            stream << info.page;\n            stream << info.from;\n            stream << info.to;\n        }\n    } else if (version >= 3) {\n        QList<QString> references;\n        QList<QString> referencesContext;\n        int validReferenceNumber = 1;\n        for (const ResultInfo &info : sources) {\n            if (info.file.isEmpty())\n                continue;\n\n            QString reference;\n            {\n                QTextStream stream(&reference);\n                stream << (validReferenceNumber++) << \". \";\n                if (!info.title.isEmpty())\n                    stream << \"\\\"\" << info.title << \"\\\". \";\n                if (!info.author.isEmpty())\n                    stream << \"By \" << info.author << \". \";\n                if (!info.date.isEmpty())\n                    stream << \"Date: \" << info.date << \". \";\n                stream << \"In \" << info.file << \". \";\n                if (info.page != -1)\n                    stream << \"Page \" << info.page << \". 
\";\n                if (info.from != -1) {\n                    stream << \"Lines \" << info.from;\n                    if (info.to != -1)\n                        stream << \"-\" << info.to;\n                    stream << \". \";\n                }\n                stream << \"[Context](context://\" << validReferenceNumber - 1 << \")\";\n            }\n            references.append(reference);\n            referencesContext.append(info.text);\n        }\n\n        stream << references.join(\"\\n\");\n        stream << referencesContext;\n    }\n    if (version >= 10) {\n        stream << promptAttachments.size();\n        for (const PromptAttachment &a : promptAttachments) {\n            Q_ASSERT(!a.url.isEmpty());\n            stream << a.url;\n            stream << a.content;\n        }\n    }\n\n    if (version >= 12) {\n        stream << qsizetype(subItems.size());\n        for (ChatItem *item :subItems)\n            item->serializeSubItems(stream, version);\n    }\n}\n\nbool ChatItem::deserializeToolCall(QDataStream &stream, int version)\n{\n    stream >> value;\n    return toolCallInfo.deserialize(stream, version);\n}\n\nbool ChatItem::deserializeToolResponse(QDataStream &stream, int version)\n{\n    stream >> value;\n    return true;\n}\n\nbool ChatItem::deserializeText(QDataStream &stream, int version)\n{\n    stream >> value;\n    return true;\n}\n\nbool ChatItem::deserializeResponse(QDataStream &stream, int version)\n{\n    stream >> value;\n    return true;\n}\n\nbool ChatItem::deserializeThink(QDataStream &stream, int version)\n{\n    stream >> value;\n    stream >> thinkingTime;\n    return true;\n}\n\nbool ChatItem::deserializeSubItems(QDataStream &stream, int version)\n{\n    stream >> name;\n    try {\n        type(); // check name\n    } catch (const std::exception &e) {\n        qWarning() << \"ChatModel ERROR:\" << e.what();\n        return false;\n    }\n    switch (auto typ = type()) {\n        using enum ChatItem::Type;\n        case 
Response:      { deserializeResponse(stream, version); break; }\n        case ToolCall:      { deserializeToolCall(stream, version); break; }\n        case ToolResponse:  { deserializeToolResponse(stream, version); break; }\n        case Text:          { deserializeText(stream, version); break; }\n        case Think:         { deserializeThink(stream, version); break; }\n        case System:\n        case Prompt:\n            throw std::invalid_argument(fmt::format(\"cannot serialize subitem type {}\", int(typ)));\n    }\n\n    qsizetype count;\n    stream >> count;\n    for (int i = 0; i < count; ++i) {\n        ChatItem *c = new ChatItem(this);\n        if (!c->deserializeSubItems(stream, version)) {\n            delete c;\n            return false;\n        }\n        subItems.push_back(c);\n    }\n\n    return true;\n}\n\nbool ChatItem::deserialize(QDataStream &stream, int version)\n{\n    if (version < 12) {\n        int id;\n        stream >> id;\n    }\n    stream >> name;\n    try {\n        type(); // check name\n    } catch (const std::exception &e) {\n        qWarning() << \"ChatModel ERROR:\" << e.what();\n        return false;\n    }\n    stream >> value;\n    if (version < 10) {\n        // This is deprecated and no longer used\n        QString prompt;\n        stream >> prompt;\n    }\n    stream >> newResponse;\n    stream >> isCurrentResponse;\n    stream >> stopped;\n    stream >> thumbsUpState;\n    stream >> thumbsDownState;\n    if (version >= 11 && type() == ChatItem::Type::Response)\n        stream >> isError;\n    if (version >= 8) {\n        qsizetype count;\n        stream >> count;\n        for (int i = 0; i < count; ++i) {\n            ResultInfo info;\n            stream >> info.collection;\n            stream >> info.path;\n            stream >> info.file;\n            stream >> info.title;\n            stream >> info.author;\n            stream >> info.date;\n            stream >> info.text;\n            stream >> info.page;\n         
   stream >> info.from;\n            stream >> info.to;\n            sources.append(info);\n        }\n        consolidatedSources = ChatItem::consolidateSources(sources);\n    } else if (version >= 3) {\n        QString references;\n        QList<QString> referencesContext;\n        stream >> references;\n        stream >> referencesContext;\n\n        if (!references.isEmpty()) {\n            QList<QString> referenceList = references.split(\"\\n\");\n\n            // Ignore empty lines and those that begin with \"---\" which is no longer used\n            for (auto it = referenceList.begin(); it != referenceList.end();) {\n                if (it->trimmed().isEmpty() || it->trimmed().startsWith(\"---\"))\n                    it = referenceList.erase(it);\n                else\n                    ++it;\n            }\n\n            Q_ASSERT(referenceList.size() == referencesContext.size());\n            for (int j = 0; j < referenceList.size(); ++j) {\n                QString reference = referenceList[j];\n                QString context = referencesContext[j];\n                ResultInfo info;\n                QTextStream refStream(&reference);\n                QString dummy;\n                int validReferenceNumber;\n                refStream >> validReferenceNumber >> dummy;\n                // Extract title (between quotes)\n                if (reference.contains(\"\\\"\")) {\n                    int startIndex = reference.indexOf('\"') + 1;\n                    int endIndex = reference.indexOf('\"', startIndex);\n                    info.title = reference.mid(startIndex, endIndex - startIndex);\n                }\n\n                // Extract author (after \"By \" and before the next period)\n                if (reference.contains(\"By \")) {\n                    int startIndex = reference.indexOf(\"By \") + 3;\n                    int endIndex = reference.indexOf('.', startIndex);\n                    info.author = reference.mid(startIndex, endIndex - 
startIndex).trimmed();\n                }\n\n                // Extract date (after \"Date: \" and before the next period)\n                if (reference.contains(\"Date: \")) {\n                    int startIndex = reference.indexOf(\"Date: \") + 6;\n                    int endIndex = reference.indexOf('.', startIndex);\n                    info.date = reference.mid(startIndex, endIndex - startIndex).trimmed();\n                }\n\n                // Extract file name (after \"In \" and before the \"[Context]\")\n                if (reference.contains(\"In \") && reference.contains(\". [Context]\")) {\n                    int startIndex = reference.indexOf(\"In \") + 3;\n                    int endIndex = reference.indexOf(\". [Context]\", startIndex);\n                    info.file = reference.mid(startIndex, endIndex - startIndex).trimmed();\n                }\n\n                // Extract page number (after \"Page \" and before the next space)\n                if (reference.contains(\"Page \")) {\n                    int startIndex = reference.indexOf(\"Page \") + 5;\n                    int endIndex = reference.indexOf(' ', startIndex);\n                    if (endIndex == -1) endIndex = reference.length();\n                    info.page = reference.mid(startIndex, endIndex - startIndex).toInt();\n                }\n\n                // Extract lines (after \"Lines \" and before the next space or hyphen)\n                if (reference.contains(\"Lines \")) {\n                    int startIndex = reference.indexOf(\"Lines \") + 6;\n                    int endIndex = reference.indexOf(' ', startIndex);\n                    if (endIndex == -1) endIndex = reference.length();\n                    int hyphenIndex = reference.indexOf('-', startIndex);\n                    if (hyphenIndex != -1 && hyphenIndex < endIndex) {\n                        info.from = reference.mid(startIndex, hyphenIndex - startIndex).toInt();\n                        info.to = 
reference.mid(hyphenIndex + 1, endIndex - hyphenIndex - 1).toInt();\n                    } else {\n                        info.from = reference.mid(startIndex, endIndex - startIndex).toInt();\n                    }\n                }\n                info.text = context;\n                sources.append(info);\n            }\n\n            consolidatedSources = ChatItem::consolidateSources(sources);\n        }\n    }\n    if (version >= 10) {\n        qsizetype count;\n        stream >> count;\n        QList<PromptAttachment> attachments;\n        for (int i = 0; i < count; ++i) {\n            PromptAttachment a;\n            stream >> a.url;\n            stream >> a.content;\n            attachments.append(a);\n        }\n        promptAttachments = attachments;\n    }\n\n    if (version >= 12) {\n        qsizetype count;\n        stream >> count;\n        for (int i = 0; i < count; ++i) {\n            ChatItem *c = new ChatItem(this);\n            if (!c->deserializeSubItems(stream, version)) {\n                delete c;\n                return false;\n            }\n            subItems.push_back(c);\n        }\n    }\n    return true;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/chatmodel.h",
    "content": "#ifndef CHATMODEL_H\n#define CHATMODEL_H\n\n#include \"database.h\"\n#include \"tool.h\"\n#include \"toolcallparser.h\"\n#include \"utils.h\" // IWYU pragma: keep\n#include \"xlsxtomd.h\"\n\n#include <fmt/format.h>\n\n#include <QAbstractListModel>\n#include <QBuffer>\n#include <QByteArray>\n#include <QClipboard>\n#include <QDataStream>\n#include <QFileInfo>\n#include <QGuiApplication>\n#include <QIODevice>\n#include <QHash>\n#include <QList>\n#include <QMutex>\n#include <QMutexLocker> // IWYU pragma: keep\n#include <QObject>\n#include <QPair> // IWYU pragma: keep\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n#include <QUrl>\n#include <QVariant>\n#include <Qt>\n#include <QtAssert>\n#include <QtPreprocessorSupport>\n#include <QtTypes>\n\n#include <algorithm>\n#include <iterator>\n#include <list>\n#include <optional>\n#include <ranges>\n#include <span>\n#include <stdexcept>\n#include <utility>\n#include <vector>\n\nusing namespace Qt::Literals::StringLiterals;\nnamespace ranges = std::ranges;\nnamespace views  = std::views;\n\n\nstruct PromptAttachment {\n    Q_GADGET\n    Q_PROPERTY(QUrl url MEMBER url)\n    Q_PROPERTY(QByteArray content MEMBER content)\n    Q_PROPERTY(QString file READ file)\n    Q_PROPERTY(QString processedContent READ processedContent)\n\npublic:\n    QUrl url;\n    QByteArray content;\n\n    QString file() const\n    {\n        if (!url.isLocalFile())\n            return QString();\n        const QString localFilePath = url.toLocalFile();\n        const QFileInfo info(localFilePath);\n        return info.fileName();\n    }\n\n    QString processedContent() const\n    {\n        const QString localFilePath = url.toLocalFile();\n        const QFileInfo info(localFilePath);\n        if (info.suffix().toLower() != \"xlsx\")\n            return u\"## Attached: %1\\n\\n%2\"_s.arg(file(), content);\n\n        QBuffer buffer;\n        buffer.setData(content);\n        buffer.open(QIODevice::ReadOnly);\n        const 
QString md = XLSXToMD::toMarkdown(&buffer);\n        buffer.close();\n        return u\"## Attached: %1\\n\\n%2\"_s.arg(file(), md);\n    }\n\n    bool operator==(const PromptAttachment &other) const { return url == other.url; }\n};\nQ_DECLARE_METATYPE(PromptAttachment)\n\n// Used by Server to represent a message from the client.\nstruct MessageInput\n{\n    enum class Type { System, Prompt, Response };\n    Type    type;\n    QString content;\n};\n\nclass MessageItem\n{\n    Q_GADGET\n    Q_PROPERTY(Type    type    READ type    CONSTANT)\n    Q_PROPERTY(QString content READ content CONSTANT)\n\npublic:\n    enum class Type { System, Prompt, Response, ToolResponse };\n\n    struct system_tag_t { explicit system_tag_t() = default; };\n    static inline constexpr system_tag_t system_tag = system_tag_t{};\n\n    MessageItem(qsizetype index, Type type, QString content)\n        : m_index(index), m_type(type), m_content(std::move(content))\n    {\n        Q_ASSERT(type != Type::System); // use system_tag constructor\n    }\n\n    // Construct a system message with no index, since they are never stored in the chat\n    MessageItem(system_tag_t, QString content)\n        : m_type(Type::System), m_content(std::move(content)) {}\n\n    MessageItem(qsizetype index, Type type, QString content, const QList<ResultInfo> &sources, const QList<PromptAttachment> &promptAttachments)\n        : m_index(index)\n        , m_type(type)\n        , m_content(std::move(content))\n        , m_sources(sources)\n        , m_promptAttachments(promptAttachments) {}\n\n    // index of the parent ChatItem (system, prompt, response) in its container\n    std::optional<qsizetype> index()   const { return m_index; }\n\n    Type           type()    const { return m_type;    }\n    const QString &content() const { return m_content; }\n\n    QList<ResultInfo>       sources()           const { return m_sources;           }\n    QList<PromptAttachment> promptAttachments() const { return 
m_promptAttachments; }\n\n    // used with version 0 Jinja templates\n    QString bakedPrompt() const\n    {\n        if (type() != Type::Prompt)\n            throw std::logic_error(\"bakedPrompt() called on non-prompt item\");\n        QStringList parts;\n        if (!m_sources.isEmpty()) {\n            parts << u\"### Context:\\n\"_s;\n            for (auto &source : std::as_const(m_sources))\n                parts << u\"Collection: \"_s << source.collection\n                      << u\"\\nPath: \"_s     << source.path\n                      << u\"\\nExcerpt: \"_s  << source.text << u\"\\n\\n\"_s;\n        }\n        for (auto &attached : std::as_const(m_promptAttachments))\n            parts << attached.processedContent() << u\"\\n\\n\"_s;\n        parts << m_content;\n        return parts.join(QString());\n    }\n\nprivate:\n    std::optional<qsizetype> m_index;\n    Type                     m_type;\n    QString                  m_content;\n    QList<ResultInfo>        m_sources;\n    QList<PromptAttachment>  m_promptAttachments;\n};\nQ_DECLARE_METATYPE(MessageItem)\n\nclass ChatItem : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(QString name  MEMBER name )\n    Q_PROPERTY(QString value MEMBER value)\n\n    // prompts and responses\n    Q_PROPERTY(QString content   READ content NOTIFY contentChanged)\n\n    // prompts\n    Q_PROPERTY(QList<PromptAttachment> promptAttachments MEMBER promptAttachments)\n\n    // responses\n    Q_PROPERTY(bool                isCurrentResponse   MEMBER isCurrentResponse NOTIFY isCurrentResponseChanged)\n    Q_PROPERTY(bool                isError             MEMBER isError          )\n    Q_PROPERTY(QList<ChatItem *>   childItems          READ   childItems       )\n\n    // toolcall\n    Q_PROPERTY(bool                isToolCallError     READ isToolCallError     NOTIFY isTooCallErrorChanged)\n\n    // responses (DataLake)\n    Q_PROPERTY(QString newResponse     MEMBER newResponse    )\n    Q_PROPERTY(bool    stopped         
MEMBER stopped        )\n    Q_PROPERTY(bool    thumbsUpState   MEMBER thumbsUpState  )\n    Q_PROPERTY(bool    thumbsDownState MEMBER thumbsDownState)\n\n    // thinking\n    Q_PROPERTY(int     thinkingTime    MEMBER thinkingTime NOTIFY thinkingTimeChanged)\n\npublic:\n    enum class Type { System, Prompt, Response, Text, ToolCall, ToolResponse, Think };\n\n    // tags for constructing ChatItems\n    struct prompt_tag_t        { explicit prompt_tag_t       () = default; };\n    struct response_tag_t      { explicit response_tag_t     () = default; };\n    struct system_tag_t        { explicit system_tag_t       () = default; };\n    struct text_tag_t          { explicit text_tag_t         () = default; };\n    struct tool_call_tag_t     { explicit tool_call_tag_t    () = default; };\n    struct tool_response_tag_t { explicit tool_response_tag_t() = default; };\n    struct think_tag_t         { explicit think_tag_t        () = default; };\n    static inline constexpr prompt_tag_t        prompt_tag        = prompt_tag_t        {};\n    static inline constexpr response_tag_t      response_tag      = response_tag_t      {};\n    static inline constexpr system_tag_t        system_tag        = system_tag_t        {};\n    static inline constexpr text_tag_t          text_tag          = text_tag_t          {};\n    static inline constexpr tool_call_tag_t     tool_call_tag     = tool_call_tag_t     {};\n    static inline constexpr tool_response_tag_t tool_response_tag = tool_response_tag_t {};\n    static inline constexpr think_tag_t         think_tag         = think_tag_t         {};\n\npublic:\n    ChatItem(QObject *parent)\n        : QObject(nullptr)\n    {\n        moveToThread(parent->thread());\n        // setParent must be called from the thread the object lives in\n        QMetaObject::invokeMethod(this, [this, parent]() { this->setParent(parent); });\n    }\n\n    // NOTE: System messages are currently never serialized and only *stored* by the local server.\n    
//       ChatLLM prepends a system MessageItem on-the-fly.\n    ChatItem(QObject *parent, system_tag_t, const QString &value)\n        : ChatItem(parent)\n    { this->name = u\"System: \"_s; this->value = value; }\n\n    ChatItem(QObject *parent, prompt_tag_t, const QString &value, const QList<PromptAttachment> &attachments = {})\n        : ChatItem(parent)\n    { this->name = u\"Prompt: \"_s; this->value = value; this->promptAttachments = attachments; }\n\nprivate:\n    ChatItem(QObject *parent, response_tag_t, bool isCurrentResponse, const QString &value = {})\n        : ChatItem(parent)\n    { this->name = u\"Response: \"_s; this->value = value; this->isCurrentResponse = isCurrentResponse; }\n\npublic:\n    // A new response, to be filled in\n    ChatItem(QObject *parent, response_tag_t)\n        : ChatItem(parent, response_tag, true) {}\n\n    // An existing response, from Server\n    ChatItem(QObject *parent, response_tag_t, const QString &value)\n        : ChatItem(parent, response_tag, false, value) {}\n\n    ChatItem(QObject *parent, text_tag_t, const QString &value)\n        : ChatItem(parent)\n    { this->name = u\"Text: \"_s; this->value = value; }\n\n    ChatItem(QObject *parent, tool_call_tag_t, const QString &value)\n        : ChatItem(parent)\n    { this->name = u\"ToolCall: \"_s; this->value = value; }\n\n    ChatItem(QObject *parent, tool_response_tag_t, const QString &value)\n        : ChatItem(parent)\n    { this->name = u\"ToolResponse: \"_s; this->value = value; }\n\n    ChatItem(QObject *parent, think_tag_t, const QString &value)\n        : ChatItem(parent)\n    { this->name = u\"Think: \"_s; this->value = value; }\n\n    Type type() const\n    {\n        if (name == u\"System: \"_s)\n            return Type::System;\n        if (name == u\"Prompt: \"_s)\n            return Type::Prompt;\n        if (name == u\"Response: \"_s)\n            return Type::Response;\n        if (name == u\"Text: \"_s)\n            return Type::Text;\n        if 
(name == u\"ToolCall: \"_s)\n            return Type::ToolCall;\n        if (name == u\"ToolResponse: \"_s)\n            return Type::ToolResponse;\n        if (name == u\"Think: \"_s)\n            return Type::Think;\n        throw std::invalid_argument(fmt::format(\"Chat item has unknown label: {:?}\", name));\n    }\n\n    QString flattenedContent() const\n    {\n        if (subItems.empty())\n            return value;\n\n        // We only flatten one level\n        QString content;\n        for (ChatItem *item : subItems)\n            content += item->value;\n        return content;\n    }\n\n    QString content() const\n    {\n        if (type() == Type::Response) {\n            // We parse if this contains any part of a partial toolcall\n            ToolCallParser parser;\n            parser.update(value.toUtf8());\n\n            // If no tool call is detected, return the original value\n            if (parser.startIndex() < 0)\n                return value;\n\n            // Otherwise we only return the text before and any partial tool call\n            const QString beforeToolCall = value.left(parser.startIndex());\n            return beforeToolCall;\n        }\n\n        if (type() == Type::Think)\n            return thinkContent(value);\n\n        if (type() == Type::ToolCall)\n            return toolCallContent(value);\n\n        // We don't show any of content from the tool response in the GUI\n        if (type() == Type::ToolResponse)\n            return QString();\n\n        return value;\n    }\n\n    QString thinkContent(const QString &value) const\n    {\n        ToolCallParser parser;\n        parser.update(value.toUtf8());\n\n        // Extract the content\n        QString content = parser.toolCall();\n        content = content.trimmed();\n        return content;\n    }\n\n    QString toolCallContent(const QString &value) const\n    {\n        ToolCallParser parser;\n        parser.update(value.toUtf8());\n\n        // Extract the code\n        
QString code = parser.toolCall();\n        code = code.trimmed();\n\n        QString result;\n\n        // If we've finished the tool call then extract the result from meta information\n        if (toolCallInfo.name == ToolCallConstants::CodeInterpreterFunction)\n            result = \"```\\n\" + toolCallInfo.result + \"```\";\n\n        // Return the formatted code and the result if available\n        return code + result;\n    }\n\n    QString clipboardContent() const\n    {\n        QStringList clipContent;\n        for (const ChatItem *item : subItems)\n            clipContent << item->clipboardContent();\n        clipContent << content();\n        return clipContent.join(\"\");\n    }\n\n    QList<ChatItem *> childItems() const\n    {\n        // We currently have leaf nodes at depth 3 with nodes at depth 2 as mere containers we don't\n        // care about in GUI\n        QList<ChatItem *> items;\n        for (const ChatItem *item : subItems) {\n            items.reserve(items.size() + item->subItems.size());\n            ranges::copy(item->subItems, std::back_inserter(items));\n        }\n        return items;\n    }\n\n    QString possibleToolCall() const\n    {\n        if (!subItems.empty())\n            return subItems.back()->possibleToolCall();\n        if (type() == Type::ToolCall)\n            return value;\n        else\n            return QString();\n    }\n\n    void setCurrentResponse(bool b)\n    {\n        if (!subItems.empty())\n            subItems.back()->setCurrentResponse(b);\n        isCurrentResponse = b;\n        emit isCurrentResponseChanged();\n    }\n\n    void setValue(const QString &v)\n    {\n        if (!subItems.empty() && subItems.back()->isCurrentResponse) {\n            subItems.back()->setValue(v);\n            return;\n        }\n\n        value = v;\n        emit contentChanged();\n    }\n\n    void setToolCallInfo(const ToolCallInfo &info)\n    {\n        toolCallInfo = info;\n        emit contentChanged();\n        emit 
isTooCallErrorChanged();\n    }\n\n    bool isToolCallError() const\n    {\n        return toolCallInfo.error != ToolEnums::Error::NoError;\n    }\n\n    void setThinkingTime(int t)\n    {\n        thinkingTime = t;\n        emit thinkingTimeChanged();\n    }\n\n    // NB: Assumes response is not current.\n    static ChatItem *fromMessageInput(QObject *parent, const MessageInput &message)\n    {\n        switch (message.type) {\n            using enum MessageInput::Type;\n            case Prompt:   return new ChatItem(parent, prompt_tag,   message.content);\n            case Response: return new ChatItem(parent, response_tag, message.content);\n            case System:   return new ChatItem(parent, system_tag,   message.content);\n        }\n        Q_UNREACHABLE();\n    }\n\n    MessageItem asMessageItem(qsizetype index) const\n    {\n        MessageItem::Type msgType;\n        switch (auto typ = type()) {\n            using enum ChatItem::Type;\n            case System:       msgType = MessageItem::Type::System;       break;\n            case Prompt:       msgType = MessageItem::Type::Prompt;       break;\n            case Response:     msgType = MessageItem::Type::Response;     break;\n            case ToolResponse: msgType = MessageItem::Type::ToolResponse; break;\n            case Text:\n            case ToolCall:\n            case Think:\n                throw std::invalid_argument(fmt::format(\"cannot convert ChatItem type {} to message item\", int(typ)));\n        }\n        return { index, msgType, flattenedContent(), sources, promptAttachments };\n    }\n\n    static QList<ResultInfo> consolidateSources(const QList<ResultInfo> &sources);\n\n    void serializeResponse(QDataStream &stream, int version);\n    void serializeToolCall(QDataStream &stream, int version);\n    void serializeToolResponse(QDataStream &stream, int version);\n    void serializeText(QDataStream &stream, int version);\n    void serializeThink(QDataStream &stream, int version);\n    void 
serializeSubItems(QDataStream &stream, int version); // recursive\n    void serialize(QDataStream &stream, int version);\n\n\n    bool deserializeResponse(QDataStream &stream, int version);\n    bool deserializeToolCall(QDataStream &stream, int version);\n    bool deserializeToolResponse(QDataStream &stream, int version);\n    bool deserializeText(QDataStream &stream, int version);\n    bool deserializeThink(QDataStream &stream, int version);\n    bool deserializeSubItems(QDataStream &stream, int version); // recursive\n    bool deserialize(QDataStream &stream, int version);\n\nQ_SIGNALS:\n    void contentChanged();\n    void isTooCallErrorChanged();\n    void isCurrentResponseChanged();\n    void thinkingTimeChanged();\n\npublic:\n\n    // TODO: Maybe we should include the model name here as well as timestamp?\n    QString name;\n    QString value;\n\n    // prompts\n    QList<ResultInfo>       sources;\n    QList<ResultInfo>       consolidatedSources;\n    QList<PromptAttachment> promptAttachments;\n\n    // responses\n    bool isCurrentResponse = false;\n    bool isError           = false;\n    ToolCallInfo toolCallInfo;\n    std::list<ChatItem *> subItems;\n\n    // responses (DataLake)\n    QString newResponse;\n    bool    stopped         = false;\n    bool    thumbsUpState   = false;\n    bool    thumbsDownState = false;\n\n    // thinking time in ms\n    int     thinkingTime  = 0;\n};\n\nclass ChatModel : public QAbstractListModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\n    Q_PROPERTY(bool hasError READ hasError NOTIFY hasErrorChanged)\n\npublic:\n    explicit ChatModel(QObject *parent = nullptr)\n        : QAbstractListModel(parent) {}\n\n    // FIXME(jared): can't this start at Qt::UserRole (no +1)?\n    enum Roles {\n        NameRole = Qt::UserRole + 1,\n        ValueRole,\n\n        // prompts and responses\n        ContentRole,\n\n        // prompts\n        PromptAttachmentsRole,\n\n        // responses\n        // 
NOTE: sources are stored on the *prompts*, but in the model, they are only on the *responses*!\n        SourcesRole,\n        ConsolidatedSourcesRole,\n        IsCurrentResponseRole,\n        IsErrorRole,\n        ChildItemsRole,\n\n        // responses (DataLake)\n        NewResponseRole,\n        StoppedRole,\n        ThumbsUpStateRole,\n        ThumbsDownStateRole,\n    };\n\n    int rowCount(const QModelIndex &parent = QModelIndex()) const override\n    {\n        QMutexLocker locker(&m_mutex);\n        Q_UNUSED(parent)\n        return m_chatItems.size();\n    }\n\n    /* a \"peer\" is a bidirectional 1:1 link between a prompt and the response that would cite its LocalDocs\n     * sources. Return std::nullopt if there is none, which is possible for e.g. server chats. */\n    template <typename T>\n    static std::optional<qsizetype> getPeer(const T *arr, qsizetype size, qsizetype index)\n    {\n        Q_ASSERT(index >= 0);\n        Q_ASSERT(index < size);\n        return getPeerInternal(arr, size, index);\n    }\n\nprivate:\n    static std::optional<qsizetype> getPeerInternal(const ChatItem * const *arr, qsizetype size, qsizetype index)\n    {\n        qsizetype peer;\n        ChatItem::Type expected;\n        switch (arr[index]->type()) {\n            using enum ChatItem::Type;\n            case Prompt:   peer = index + 1; expected = Response; break;\n            case Response: peer = index - 1; expected = Prompt;   break;\n            default: throw std::invalid_argument(\"getPeer() called on item that is not a prompt or response\");\n        }\n        if (peer >= 0 && peer < size && arr[peer]->type() == expected)\n            return peer;\n        return std::nullopt;\n    }\n\n    // FIXME(jared): this should really be done at the parent level, not the sub-item level\n    static std::optional<qsizetype> getPeerInternal(const MessageItem *arr, qsizetype size, qsizetype index)\n    {\n        qsizetype peer;\n        MessageItem::Type expected;\n        
switch (arr[index].type()) {\n            using enum MessageItem::Type;\n            case Prompt:   peer = index + 1; expected = Response; break;\n            case Response: peer = index - 1; expected = Prompt;   break;\n            default: throw std::invalid_argument(\"getPeer() called on item that is not a prompt or response\");\n        }\n        if (peer >= 0 && peer < size && arr[peer].type() == expected)\n            return peer;\n        return std::nullopt;\n    }\n\npublic:\n    template <ranges::contiguous_range R>\n    static auto getPeer(R &&range, ranges::iterator_t<R> item) -> std::optional<ranges::iterator_t<R>>\n    {\n        auto begin = ranges::begin(range);\n        return getPeer(ranges::data(range), ranges::size(range), item - begin)\n            .transform([&](auto i) { return begin + i; });\n    }\n\n    auto getPeerUnlocked(QList<ChatItem *>::const_iterator item) const -> std::optional<QList<ChatItem *>::const_iterator>\n    { return getPeer(m_chatItems, item); }\n\n    std::optional<qsizetype> getPeerUnlocked(qsizetype index) const\n    { return getPeer(m_chatItems.constData(), m_chatItems.size(), index); }\n\n    QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override\n    {\n        QMutexLocker locker(&m_mutex);\n        if (!index.isValid() || index.row() < 0 || index.row() >= m_chatItems.size())\n            return QVariant();\n\n        auto itemIt = m_chatItems.cbegin() + index.row();\n        auto *item = *itemIt;\n        switch (role) {\n            case NameRole:\n                return item->name;\n            case ValueRole:\n                return item->value;\n            case PromptAttachmentsRole:\n                return QVariant::fromValue(item->promptAttachments);\n            case SourcesRole:\n                {\n                    QList<ResultInfo> data;\n                    if (item->type() == ChatItem::Type::Response) {\n                        if (auto prompt = 
getPeerUnlocked(itemIt))\n                            data = (**prompt)->sources;\n                    }\n                    return QVariant::fromValue(data);\n                }\n            case ConsolidatedSourcesRole:\n                {\n                    QList<ResultInfo> data;\n                    if (item->type() == ChatItem::Type::Response) {\n                        if (auto prompt = getPeerUnlocked(itemIt))\n                            data = (**prompt)->consolidatedSources;\n                    }\n                    return QVariant::fromValue(data);\n                }\n            case IsCurrentResponseRole:\n                return item->isCurrentResponse;\n            case NewResponseRole:\n                return item->newResponse;\n            case StoppedRole:\n                return item->stopped;\n            case ThumbsUpStateRole:\n                return item->thumbsUpState;\n            case ThumbsDownStateRole:\n                return item->thumbsDownState;\n            case IsErrorRole:\n                return item->type() == ChatItem::Type::Response && item->isError;\n            case ContentRole:\n                return item->content();\n            case ChildItemsRole:\n                return QVariant::fromValue(item->childItems());\n        }\n\n        return QVariant();\n    }\n\n    QHash<int, QByteArray> roleNames() const override\n    {\n        return {\n            { NameRole,                \"name\"                },\n            { ValueRole,               \"value\"               },\n            { PromptAttachmentsRole,   \"promptAttachments\"   },\n            { SourcesRole,             \"sources\"             },\n            { ConsolidatedSourcesRole, \"consolidatedSources\" },\n            { IsCurrentResponseRole,   \"isCurrentResponse\"   },\n            { IsErrorRole,             \"isError\"             },\n            { NewResponseRole,         \"newResponse\"         },\n            { StoppedRole,             \"stopped\"   
          },\n            { ThumbsUpStateRole,       \"thumbsUpState\"       },\n            { ThumbsDownStateRole,     \"thumbsDownState\"     },\n            { ContentRole,             \"content\"             },\n            { ChildItemsRole,          \"childItems\"          },\n        };\n    }\n\n    void appendPrompt(const QString &value, const QList<PromptAttachment> &attachments = {})\n    {\n        qsizetype count;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (hasErrorUnlocked())\n                throw std::logic_error(\"cannot append to a failed chat\");\n            count = m_chatItems.count();\n        }\n\n        beginInsertRows(QModelIndex(), count, count);\n        {\n            QMutexLocker locker(&m_mutex);\n            m_chatItems << new ChatItem(this, ChatItem::prompt_tag, value, attachments);\n        }\n        endInsertRows();\n        emit countChanged();\n    }\n\n    void appendResponse()\n    {\n        qsizetype count;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (hasErrorUnlocked())\n                throw std::logic_error(\"cannot append to a failed chat\");\n            count = m_chatItems.count();\n        }\n\n        beginInsertRows(QModelIndex(), count, count);\n        {\n            QMutexLocker locker(&m_mutex);\n            m_chatItems << new ChatItem(this, ChatItem::response_tag);\n        }\n        endInsertRows();\n        emit countChanged();\n    }\n\n    // Used by Server to append a new conversation to the chat log.\n    // Returns the offset of the appended items.\n    qsizetype appendResponseWithHistory(std::span<const MessageInput> history)\n    {\n        if (history.empty())\n            throw std::invalid_argument(\"at least one message is required\");\n\n        m_mutex.lock();\n        qsizetype startIndex = m_chatItems.size();\n        m_mutex.unlock();\n\n        qsizetype nNewItems = history.size() + 1;\n        qsizetype endIndex  = startIndex + 
nNewItems;\n        beginInsertRows(QModelIndex(), startIndex, endIndex - 1 /*inclusive*/);\n        bool hadError;\n        QList<ChatItem> newItems;\n        {\n            QMutexLocker locker(&m_mutex);\n            startIndex = m_chatItems.size(); // just in case\n            hadError = hasErrorUnlocked();\n            m_chatItems.reserve(m_chatItems.count() + nNewItems);\n            for (auto &message : history)\n                m_chatItems << ChatItem::fromMessageInput(this, message);\n            m_chatItems << new ChatItem(this, ChatItem::response_tag);\n        }\n        endInsertRows();\n        emit countChanged();\n        // Server can add messages when there is an error because each call is a new conversation\n        if (hadError)\n            emit hasErrorChanged(false);\n        return startIndex;\n    }\n\n    void truncate(qsizetype size)\n    {\n        qsizetype oldSize;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (size >= (oldSize = m_chatItems.size()))\n                return;\n            if (size && m_chatItems.at(size - 1)->type() != ChatItem::Type::Response)\n                throw std::invalid_argument(\n                    fmt::format(\"chat model truncated to {} items would not end in a response\", size)\n                );\n        }\n\n        bool oldHasError;\n        beginRemoveRows(QModelIndex(), size, oldSize - 1 /*inclusive*/);\n        {\n            QMutexLocker locker(&m_mutex);\n            oldHasError = hasErrorUnlocked();\n            Q_ASSERT(size < m_chatItems.size());\n            m_chatItems.resize(size);\n        }\n        endRemoveRows();\n        emit countChanged();\n        if (oldHasError)\n            emit hasErrorChanged(false);\n    }\n\n    QString popPrompt(int index)\n    {\n        QString content;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size() || m_chatItems[index]->type() != ChatItem::Type::Prompt)\n           
     throw std::logic_error(\"attempt to pop a prompt, but this is not a prompt\");\n            content = m_chatItems[index]->content();\n        }\n        truncate(index);\n        return content;\n    }\n\n    bool regenerateResponse(int index)\n    {\n        int promptIdx;\n        {\n            QMutexLocker locker(&m_mutex);\n            auto items = m_chatItems; // holds lock\n            if (index < 1 || index >= items.size() || items[index]->type() != ChatItem::Type::Response)\n                return false;\n            promptIdx = getPeerUnlocked(index).value_or(-1);\n        }\n\n        truncate(index + 1);\n        clearSubItems(index);\n        setResponseValue({});\n        updateCurrentResponse(index, true );\n        updateNewResponse    (index, {}   );\n        updateStopped        (index, false);\n        updateThumbsUpState  (index, false);\n        updateThumbsDownState(index, false);\n        setError(false);\n        if (promptIdx >= 0)\n            updateSources(promptIdx, {});\n        return true;\n    }\n\n    Q_INVOKABLE void clear()\n    {\n        {\n            QMutexLocker locker(&m_mutex);\n            if (m_chatItems.isEmpty()) return;\n        }\n\n        bool oldHasError;\n        beginResetModel();\n        {\n            QMutexLocker locker(&m_mutex);\n            oldHasError = hasErrorUnlocked();\n            m_chatItems.clear();\n        }\n        endResetModel();\n        emit countChanged();\n        if (oldHasError)\n            emit hasErrorChanged(false);\n    }\n\n    Q_INVOKABLE QString possibleToolcall() const\n    {\n        QMutexLocker locker(&m_mutex);\n        if (m_chatItems.empty()) return QString();\n        return m_chatItems.back()->possibleToolCall();\n    }\n\n    Q_INVOKABLE void updateCurrentResponse(int index, bool b)\n    {\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size()) return;\n\n            ChatItem *item = m_chatItems[index];\n   
         item->setCurrentResponse(b);\n        }\n\n        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {IsCurrentResponseRole});\n    }\n\n    Q_INVOKABLE void updateStopped(int index, bool b)\n    {\n        bool changed = false;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size()) return;\n\n            ChatItem *item = m_chatItems[index];\n            if (item->stopped != b) {\n                item->stopped = b;\n                changed = true;\n            }\n        }\n        if (changed) emit dataChanged(createIndex(index, 0), createIndex(index, 0), {StoppedRole});\n    }\n\n    Q_INVOKABLE void setResponseValue(const QString &value)\n    {\n        qsizetype index;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (m_chatItems.isEmpty() || m_chatItems.cend()[-1]->type() != ChatItem::Type::Response)\n                throw std::logic_error(\"we only set this on a response\");\n\n            index = m_chatItems.count() - 1;\n            ChatItem *item = m_chatItems.back();\n            item->setValue(value);\n        }\n        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ValueRole, ContentRole});\n    }\n\n    Q_INVOKABLE void updateSources(int index, const QList<ResultInfo> &sources)\n    {\n        int responseIndex = -1;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size()) return;\n\n            auto promptItem = m_chatItems.begin() + index;\n            if ((*promptItem)->type() != ChatItem::Type::Prompt)\n                throw std::invalid_argument(fmt::format(\"item at index {} is not a prompt\", index));\n            if (auto peer = getPeerUnlocked(promptItem))\n                responseIndex = *peer - m_chatItems.cbegin();\n            (*promptItem)->sources = sources;\n            (*promptItem)->consolidatedSources = ChatItem::consolidateSources(sources);\n        }\n      
  if (responseIndex >= 0) {\n            emit dataChanged(createIndex(responseIndex, 0), createIndex(responseIndex, 0), {SourcesRole});\n            emit dataChanged(createIndex(responseIndex, 0), createIndex(responseIndex, 0), {ConsolidatedSourcesRole});\n        }\n    }\n\n    Q_INVOKABLE void updateThumbsUpState(int index, bool b)\n    {\n        bool changed = false;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size()) return;\n\n            ChatItem *item = m_chatItems[index];\n            if (item->thumbsUpState != b) {\n                item->thumbsUpState = b;\n                changed = true;\n            }\n        }\n        if (changed) emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ThumbsUpStateRole});\n    }\n\n    Q_INVOKABLE void updateThumbsDownState(int index, bool b)\n    {\n        bool changed = false;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size()) return;\n\n            ChatItem *item = m_chatItems[index];\n            if (item->thumbsDownState != b) {\n                item->thumbsDownState = b;\n                changed = true;\n            }\n        }\n        if (changed) emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ThumbsDownStateRole});\n    }\n\n    Q_INVOKABLE void updateNewResponse(int index, const QString &newResponse)\n    {\n        bool changed = false;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size()) return;\n\n            ChatItem *item = m_chatItems[index];\n            if (item->newResponse != newResponse) {\n                item->newResponse = newResponse;\n                changed = true;\n            }\n        }\n        if (changed) emit dataChanged(createIndex(index, 0), createIndex(index, 0), {NewResponseRole});\n    }\n\n    Q_INVOKABLE void splitThinking(const QPair<QString, QString> &split)\n    
{\n        qsizetype index;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (m_chatItems.isEmpty() || m_chatItems.cend()[-1]->type() != ChatItem::Type::Response)\n                throw std::logic_error(\"can only set thinking on a chat that ends with a response\");\n\n            index = m_chatItems.count() - 1;\n            ChatItem *currentResponse = m_chatItems.back();\n            Q_ASSERT(currentResponse->isCurrentResponse);\n\n            // Create a new response container for any text and the thinking\n            ChatItem *newResponse = new ChatItem(this, ChatItem::response_tag);\n\n            // Add preceding text if any\n            if (!split.first.isEmpty()) {\n                ChatItem *textItem = new ChatItem(this, ChatItem::text_tag, split.first);\n                newResponse->subItems.push_back(textItem);\n            }\n\n            // Add the thinking item\n            Q_ASSERT(!split.second.isEmpty());\n            ChatItem *thinkingItem = new ChatItem(this, ChatItem::think_tag, split.second);\n            thinkingItem->isCurrentResponse = true;\n            newResponse->subItems.push_back(thinkingItem);\n\n            // Add new response and reset our value\n            currentResponse->subItems.push_back(newResponse);\n            currentResponse->value = QString();\n        }\n\n        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ChildItemsRole, ContentRole});\n    }\n\n    Q_INVOKABLE void endThinking(const QPair<QString, QString> &split, int thinkingTime)\n    {\n        qsizetype index;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (m_chatItems.isEmpty() || m_chatItems.cend()[-1]->type() != ChatItem::Type::Response)\n                throw std::logic_error(\"can only end thinking on a chat that ends with a response\");\n\n            index = m_chatItems.count() - 1;\n            ChatItem *currentResponse = m_chatItems.back();\n            
Q_ASSERT(currentResponse->isCurrentResponse);\n\n            ChatItem *subResponse = currentResponse->subItems.back();\n            Q_ASSERT(subResponse->type() == ChatItem::Type::Response);\n            Q_ASSERT(subResponse->isCurrentResponse);\n            subResponse->setCurrentResponse(false);\n\n            ChatItem *thinkingItem = subResponse->subItems.back();\n            Q_ASSERT(thinkingItem->type() == ChatItem::Type::Think);\n            thinkingItem->setCurrentResponse(false);\n            thinkingItem->setValue(split.first);\n            thinkingItem->setThinkingTime(thinkingTime);\n\n            currentResponse->setValue(split.second);\n        }\n\n        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ChildItemsRole, ContentRole});\n    }\n\n    Q_INVOKABLE void splitToolCall(const QPair<QString, QString> &split)\n    {\n        qsizetype index;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (m_chatItems.isEmpty() || m_chatItems.cend()[-1]->type() != ChatItem::Type::Response)\n                throw std::logic_error(\"can only set toolcall on a chat that ends with a response\");\n\n            index = m_chatItems.count() - 1;\n            ChatItem *currentResponse = m_chatItems.back();\n            Q_ASSERT(currentResponse->isCurrentResponse);\n\n            // Create a new response container for any text and the tool call\n            ChatItem *newResponse = new ChatItem(this, ChatItem::response_tag);\n\n            // Add preceding text if any\n            if (!split.first.isEmpty()) {\n                ChatItem *textItem = new ChatItem(this, ChatItem::text_tag, split.first);\n                newResponse->subItems.push_back(textItem);\n            }\n\n            // Add the toolcall\n            Q_ASSERT(!split.second.isEmpty());\n            ChatItem *toolCallItem = new ChatItem(this, ChatItem::tool_call_tag, split.second);\n            toolCallItem->isCurrentResponse = true;\n            
newResponse->subItems.push_back(toolCallItem);\n\n            // Add new response and reset our value\n            currentResponse->subItems.push_back(newResponse);\n            currentResponse->value = QString();\n        }\n\n        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ChildItemsRole, ContentRole});\n    }\n\n    Q_INVOKABLE void updateToolCall(const ToolCallInfo &toolCallInfo)\n    {\n        qsizetype index;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (m_chatItems.isEmpty() || m_chatItems.cend()[-1]->type() != ChatItem::Type::Response)\n                throw std::logic_error(\"can only set toolcall on a chat that ends with a response\");\n\n            index = m_chatItems.count() - 1;\n            ChatItem *currentResponse = m_chatItems.back();\n            Q_ASSERT(currentResponse->isCurrentResponse);\n\n            ChatItem *subResponse = currentResponse->subItems.back();\n            Q_ASSERT(subResponse->type() == ChatItem::Type::Response);\n            Q_ASSERT(subResponse->isCurrentResponse);\n\n            ChatItem *toolCallItem = subResponse->subItems.back();\n            Q_ASSERT(toolCallItem->type() == ChatItem::Type::ToolCall);\n            toolCallItem->setToolCallInfo(toolCallInfo);\n            toolCallItem->setCurrentResponse(false);\n\n            // Add tool response\n            ChatItem *toolResponseItem = new ChatItem(this, ChatItem::tool_response_tag, toolCallInfo.result);\n            currentResponse->subItems.push_back(toolResponseItem);\n        }\n\n        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ChildItemsRole, ContentRole});\n    }\n\n    void clearSubItems(int index)\n    {\n        bool changed = false;\n        {\n            QMutexLocker locker(&m_mutex);\n            if (index < 0 || index >= m_chatItems.size()) return;\n            if (m_chatItems.isEmpty() || m_chatItems[index]->type() != ChatItem::Type::Response)\n                throw 
std::logic_error(\"can only clear subitems on a chat that ends with a response\");\n\n            ChatItem *item = m_chatItems.back();\n            if (!item->subItems.empty()) {\n                item->subItems.clear();\n                changed = true;\n            }\n        }\n        if (changed) {\n            emit dataChanged(createIndex(index, 0), createIndex(index, 0), {ChildItemsRole, ContentRole});\n        }\n    }\n\n    Q_INVOKABLE void setError(bool value = true)\n    {\n        qsizetype index;\n        {\n            QMutexLocker locker(&m_mutex);\n\n            if (m_chatItems.isEmpty() || m_chatItems.cend()[-1]->type() != ChatItem::Type::Response)\n                throw std::logic_error(\"can only set error on a chat that ends with a response\");\n\n            index = m_chatItems.count() - 1;\n            auto &last = m_chatItems.back();\n            if (last->isError == value)\n                return; // already set\n            last->isError = value;\n        }\n        emit dataChanged(createIndex(index, 0), createIndex(index, 0), {IsErrorRole});\n        emit hasErrorChanged(value);\n    }\n\n    Q_INVOKABLE void copyToClipboard()\n    {\n        QMutexLocker locker(&m_mutex);\n        QString conversation;\n        for (ChatItem *item : m_chatItems) {\n            QString string = item->name;\n            string += item->clipboardContent();\n            string += \"\\n\";\n            conversation += string;\n        }\n        QClipboard *clipboard = QGuiApplication::clipboard();\n        clipboard->setText(conversation, QClipboard::Clipboard);\n    }\n\n    Q_INVOKABLE void copyToClipboard(int index)\n    {\n        QMutexLocker locker(&m_mutex);\n        if (index < 0 || index >= m_chatItems.size())\n            return;\n        ChatItem *item = m_chatItems.at(index);\n        QClipboard *clipboard = QGuiApplication::clipboard();\n        clipboard->setText(item->clipboardContent(), QClipboard::Clipboard);\n    }\n\n    qsizetype count() 
const { QMutexLocker locker(&m_mutex); return m_chatItems.size(); }\n\n    std::vector<MessageItem> messageItems() const\n    {\n        // A flattened version of the chat item tree used by the backend and jinja\n        QMutexLocker locker(&m_mutex);\n        std::vector<MessageItem> chatItems;\n        for (qsizetype i : views::iota(0, m_chatItems.size())) {\n            auto *parent = m_chatItems.at(i);\n            chatItems.reserve(chatItems.size() + parent->subItems.size() + 1);\n            ranges::copy(parent->subItems | views::transform([&](auto *s) { return s->asMessageItem(i); }),\n                         std::back_inserter(chatItems));\n            chatItems.push_back(parent->asMessageItem(i));\n        }\n        return chatItems;\n    }\n\n    bool hasError() const { QMutexLocker locker(&m_mutex); return hasErrorUnlocked(); }\n\n    bool serialize(QDataStream &stream, int version) const\n    {\n        // FIXME: need to serialize new chatitem tree\n        QMutexLocker locker(&m_mutex);\n        stream << int(m_chatItems.size());\n        for (auto itemIt = m_chatItems.cbegin(); itemIt < m_chatItems.cend(); ++itemIt) {\n            auto c = *itemIt; // NB: copies\n            if (version < 11) {\n                // move sources from their prompt to the next response\n                switch (c->type()) {\n                    using enum ChatItem::Type;\n                case Prompt:\n                    c->sources.clear();\n                    c->consolidatedSources.clear();\n                    break;\n                case Response:\n                    // note: we drop sources for responseless prompts\n                    if (auto peer = getPeerUnlocked(itemIt)) {\n                        c->sources             = (**peer)->sources;\n                        c->consolidatedSources = (**peer)->consolidatedSources;\n                    }\n                default:\n                    ;\n                }\n            }\n\n            c->serialize(stream, 
version);\n        }\n        return stream.status() == QDataStream::Ok;\n    }\n\n    bool deserialize(QDataStream &stream, int version)\n    {\n        clear(); // reset to known state\n\n        int size;\n        stream >> size;\n        int lastPromptIndex = -1;\n        QList<ChatItem*> chatItems;\n        for (int i = 0; i < size; ++i) {\n            ChatItem *c = new ChatItem(this);\n            if (!c->deserialize(stream, version)) {\n                delete c;\n                return false;\n            }\n            if (version < 11 && c->type() == ChatItem::Type::Response) {\n                // move sources from the response to their last prompt\n                if (lastPromptIndex >= 0) {\n                    auto &prompt = chatItems[lastPromptIndex];\n                    prompt->sources             = std::move(c->sources            );\n                    prompt->consolidatedSources = std::move(c->consolidatedSources);\n                    lastPromptIndex = -1;\n                } else {\n                    // drop sources for promptless responses\n                    c->sources.clear();\n                    c->consolidatedSources.clear();\n                }\n            }\n\n            chatItems << c;\n            if (c->type() == ChatItem::Type::Prompt)\n                lastPromptIndex = chatItems.size() - 1;\n        }\n\n        bool hasError;\n        beginInsertRows(QModelIndex(), 0, chatItems.size() - 1 /*inclusive*/);\n        {\n            QMutexLocker locker(&m_mutex);\n            m_chatItems = chatItems;\n            hasError = hasErrorUnlocked();\n        }\n        endInsertRows();\n        emit countChanged();\n        if (hasError)\n            emit hasErrorChanged(true);\n        return stream.status() == QDataStream::Ok;\n    }\n\nQ_SIGNALS:\n    void countChanged();\n    void hasErrorChanged(bool value);\n\nprivate:\n    bool hasErrorUnlocked() const\n    {\n        if (m_chatItems.isEmpty())\n            return false;\n        
auto &last = m_chatItems.back();\n        return last->type() == ChatItem::Type::Response && last->isError;\n    }\n\nprivate:\n    mutable QMutex m_mutex;\n    QList<ChatItem *> m_chatItems;\n};\n\n#endif // CHATMODEL_H\n"
  },
  {
    "path": "gpt4all-chat/src/chatviewtextprocessor.cpp",
    "content": "#include \"chatviewtextprocessor.h\"\n\n#include <QAbstractTextDocumentLayout>\n#include <QBrush>\n#include <QChar>\n#include <QClipboard>\n#include <QDebug>\n#include <QFlag>\n#include <QFont>\n#include <QGuiApplication>\n#include <QList> // IWYU pragma: keep\n#include <QPair>\n#include <QQuickTextDocument>\n#include <QRegularExpression>\n#include <QStringList> // IWYU pragma: keep\n#include <QTextBlock> // IWYU pragma: keep\n#include <QTextCharFormat> // IWYU pragma: keep\n#include <QTextCursor>\n#include <QTextDocument>\n#include <QTextDocumentFragment>\n#include <QTextFrame> // IWYU pragma: keep\n#include <QTextFrameFormat> // IWYU pragma: keep\n#include <QTextTableCell>\n#include <QtAssert>\n#include <QtLogging>\n\n#include <algorithm>\n#include <utility>\n\n\nenum Language {\n    None,\n    Python,\n    Cpp,\n    Bash,\n    TypeScript,\n    Java,\n    Go,\n    Json,\n    Csharp,\n    Latex,\n    Html,\n    Php,\n    Markdown\n};\n\nstatic Language stringToLanguage(const QString &language)\n{\n    if (language == \"python\")\n        return Python;\n    if (language == \"cpp\")\n        return Cpp;\n    if (language == \"c++\")\n        return Cpp;\n    if (language == \"csharp\")\n        return Csharp;\n    if (language == \"c#\")\n        return Csharp;\n    if (language == \"c\")\n        return Cpp;\n    if (language == \"bash\")\n        return Bash;\n    if (language == \"javascript\")\n        return TypeScript;\n    if (language == \"typescript\")\n        return TypeScript;\n    if (language == \"java\")\n        return Java;\n    if (language == \"go\")\n        return Go;\n    if (language == \"golang\")\n        return Go;\n    if (language == \"json\")\n        return Json;\n    if (language == \"latex\")\n        return Latex;\n    if (language == \"html\")\n        return Html;\n    if (language == \"php\")\n        return Php;\n    return None;\n}\n\nenum Code {\n    Default,\n    Keyword,\n    Function,\n    FunctionCall,\n    
Comment,\n    String,\n    Number,\n    Header,\n    Preprocessor,\n    Type,\n    Arrow,\n    Command,\n    Variable,\n    Key,\n    Value,\n    Parameter,\n    AttributeName,\n    AttributeValue,\n    SpecialCharacter,\n    DocType\n};\n\nstruct HighlightingRule {\n    QRegularExpression pattern;\n    Code format;\n};\n\nstatic QColor formatToColor(Code c, const CodeColors &colors)\n{\n    switch (c) {\n    case Default: return colors.defaultColor;\n    case Keyword: return colors.keywordColor;\n    case Function: return colors.functionColor;\n    case FunctionCall: return colors.functionCallColor;\n    case Comment: return colors.commentColor;\n    case String: return colors.stringColor;\n    case Number: return colors.numberColor;\n    case Header: return colors.headerColor;\n    case Preprocessor: return colors.preprocessorColor;\n    case Type: return colors.typeColor;\n    case Arrow: return colors.arrowColor;\n    case Command: return colors.commandColor;\n    case Variable: return colors.variableColor;\n    case Key: return colors.keyColor;\n    case Value: return colors.valueColor;\n    case Parameter: return colors.parameterColor;\n    case AttributeName: return colors.attributeNameColor;\n    case AttributeValue: return colors.attributeValueColor;\n    case SpecialCharacter: return colors.specialCharacterColor;\n    case DocType: return colors.doctypeColor;\n    default: Q_UNREACHABLE();\n    }\n    return QColor();\n}\n\nstatic QVector<HighlightingRule> pythonHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*(?=\\\\()\");\n        rule.format = FunctionCall;\n        highlightingRules.append(rule);\n\n        rule.pattern = 
QRegularExpression(\"\\\\bdef\\\\s+(\\\\w+)\\\\b\");\n        rule.format = Function;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        QStringList keywordPatterns = {\n            \"\\\\bdef\\\\b\", \"\\\\bclass\\\\b\", \"\\\\bif\\\\b\", \"\\\\belse\\\\b\", \"\\\\belif\\\\b\",\n            \"\\\\bwhile\\\\b\", \"\\\\bfor\\\\b\", \"\\\\breturn\\\\b\", \"\\\\bprint\\\\b\", \"\\\\bimport\\\\b\",\n            \"\\\\bfrom\\\\b\", \"\\\\bas\\\\b\", \"\\\\btry\\\\b\", \"\\\\bexcept\\\\b\", \"\\\\braise\\\\b\",\n            \"\\\\bwith\\\\b\", \"\\\\bfinally\\\\b\", \"\\\\bcontinue\\\\b\", \"\\\\bbreak\\\\b\", \"\\\\bpass\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\'.*?\\'\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"#[^\\n]*\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> csharpHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        // Function call highlighting\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*(?=\\\\()\");\n        rule.format = FunctionCall;\n        highlightingRules.append(rule);\n\n        // Function 
definition highlighting\n        rule.pattern = QRegularExpression(\"\\\\bvoid|int|double|string|bool\\\\s+(\\\\w+)\\\\s*(?=\\\\()\");\n        rule.format = Function;\n        highlightingRules.append(rule);\n\n        // Number highlighting\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        // Keyword highlighting\n        QStringList keywordPatterns = {\n            \"\\\\bvoid\\\\b\", \"\\\\bint\\\\b\", \"\\\\bdouble\\\\b\", \"\\\\bstring\\\\b\", \"\\\\bbool\\\\b\",\n            \"\\\\bclass\\\\b\", \"\\\\bif\\\\b\", \"\\\\belse\\\\b\", \"\\\\bwhile\\\\b\", \"\\\\bfor\\\\b\",\n            \"\\\\breturn\\\\b\", \"\\\\bnew\\\\b\", \"\\\\bthis\\\\b\", \"\\\\bpublic\\\\b\", \"\\\\bprivate\\\\b\",\n            \"\\\\bprotected\\\\b\", \"\\\\bstatic\\\\b\", \"\\\\btrue\\\\b\", \"\\\\bfalse\\\\b\", \"\\\\bnull\\\\b\",\n            \"\\\\bnamespace\\\\b\", \"\\\\busing\\\\b\", \"\\\\btry\\\\b\", \"\\\\bcatch\\\\b\", \"\\\\bfinally\\\\b\",\n            \"\\\\bthrow\\\\b\", \"\\\\bvar\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        // String highlighting\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        // Single-line comment highlighting\n        rule.pattern = QRegularExpression(\"//[^\\n]*\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        // Multi-line comment highlighting\n        rule.pattern = QRegularExpression(\"/\\\\*.*?\\\\*/\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> cppHighlightingRules()\n{\n    static 
QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*(?=\\\\()\");\n        rule.format = FunctionCall;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b[a-zA-Z_][a-zA-Z0-9_]*\\\\s+(\\\\w+)\\\\s*\\\\(\");\n        rule.format = Function;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        QStringList keywordPatterns = {\n            \"\\\\bauto\\\\b\", \"\\\\bbool\\\\b\", \"\\\\bbreak\\\\b\", \"\\\\bcase\\\\b\", \"\\\\bcatch\\\\b\",\n            \"\\\\bchar\\\\b\", \"\\\\bclass\\\\b\", \"\\\\bconst\\\\b\", \"\\\\bconstexpr\\\\b\", \"\\\\bcontinue\\\\b\",\n            \"\\\\bdefault\\\\b\", \"\\\\bdelete\\\\b\", \"\\\\bdo\\\\b\", \"\\\\bdouble\\\\b\", \"\\\\belse\\\\b\",\n            \"\\\\belifdef\\\\b\", \"\\\\belifndef\\\\b\", \"\\\\bembed\\\\b\", \"\\\\benum\\\\b\", \"\\\\bexplicit\\\\b\",\n            \"\\\\bextern\\\\b\", \"\\\\bfalse\\\\b\", \"\\\\bfloat\\\\b\", \"\\\\bfor\\\\b\", \"\\\\bfriend\\\\b\", \"\\\\bgoto\\\\b\",\n            \"\\\\bif\\\\b\", \"\\\\binline\\\\b\", \"\\\\bint\\\\b\", \"\\\\blong\\\\b\", \"\\\\bmutable\\\\b\", \"\\\\bnamespace\\\\b\",\n            \"\\\\bnew\\\\b\", \"\\\\bnoexcept\\\\b\", \"\\\\bnullptr\\\\b\", \"\\\\boperator\\\\b\", \"\\\\boverride\\\\b\",\n            \"\\\\bprivate\\\\b\", \"\\\\bprotected\\\\b\", \"\\\\bpublic\\\\b\", \"\\\\bregister\\\\b\", \"\\\\breinterpret_cast\\\\b\",\n            \"\\\\breturn\\\\b\", \"\\\\bshort\\\\b\", \"\\\\bsigned\\\\b\", \"\\\\bsizeof\\\\b\", \"\\\\bstatic\\\\b\", \"\\\\bstatic_assert\\\\b\",\n            \"\\\\bstatic_cast\\\\b\", 
\"\\\\bstruct\\\\b\", \"\\\\bswitch\\\\b\", \"\\\\btemplate\\\\b\", \"\\\\bthis\\\\b\",\n            \"\\\\bthrow\\\\b\", \"\\\\btrue\\\\b\", \"\\\\btry\\\\b\", \"\\\\btypedef\\\\b\", \"\\\\btypeid\\\\b\", \"\\\\btypename\\\\b\",\n            \"\\\\bunion\\\\b\", \"\\\\bunsigned\\\\b\", \"\\\\busing\\\\b\", \"\\\\bvirtual\\\\b\", \"\\\\bvoid\\\\b\",\n            \"\\\\bvolatile\\\\b\", \"\\\\bwchar_t\\\\b\", \"\\\\bwhile\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\'.*?\\'\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"//[^\\n]*\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"/\\\\*.*?\\\\*/\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"#(?:include|define|undef|ifdef|ifndef|if|else|elif|endif|error|pragma)\\\\b.*\");\n        rule.format = Preprocessor;\n        highlightingRules.append(rule);\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> typescriptHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*(?=\\\\()\");\n        rule.format = FunctionCall;\n        highlightingRules.append(rule);\n\n        rule.pattern = 
QRegularExpression(\"\\\\bfunction\\\\s+(\\\\w+)\\\\b\");\n        rule.format = Function;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        QStringList keywordPatterns = {\n            \"\\\\bfunction\\\\b\", \"\\\\bvar\\\\b\", \"\\\\blet\\\\b\", \"\\\\bconst\\\\b\", \"\\\\bif\\\\b\", \"\\\\belse\\\\b\",\n            \"\\\\bfor\\\\b\", \"\\\\bwhile\\\\b\", \"\\\\breturn\\\\b\", \"\\\\btry\\\\b\", \"\\\\bcatch\\\\b\", \"\\\\bfinally\\\\b\",\n            \"\\\\bthrow\\\\b\", \"\\\\bnew\\\\b\", \"\\\\bdelete\\\\b\", \"\\\\btypeof\\\\b\", \"\\\\binstanceof\\\\b\",\n            \"\\\\bdo\\\\b\", \"\\\\bswitch\\\\b\", \"\\\\bcase\\\\b\", \"\\\\bbreak\\\\b\", \"\\\\bcontinue\\\\b\",\n            \"\\\\bpublic\\\\b\", \"\\\\bprivate\\\\b\", \"\\\\bprotected\\\\b\", \"\\\\bstatic\\\\b\", \"\\\\breadonly\\\\b\",\n            \"\\\\benum\\\\b\", \"\\\\binterface\\\\b\", \"\\\\bextends\\\\b\", \"\\\\bimplements\\\\b\", \"\\\\bexport\\\\b\",\n            \"\\\\bimport\\\\b\", \"\\\\btype\\\\b\", \"\\\\bnamespace\\\\b\", \"\\\\babstract\\\\b\", \"\\\\bas\\\\b\",\n            \"\\\\basync\\\\b\", \"\\\\bawait\\\\b\", \"\\\\bclass\\\\b\", \"\\\\bconstructor\\\\b\", \"\\\\bget\\\\b\",\n            \"\\\\bset\\\\b\", \"\\\\bnull\\\\b\", \"\\\\bundefined\\\\b\", \"\\\\btrue\\\\b\", \"\\\\bfalse\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        QStringList typePatterns = {\n            \"\\\\bstring\\\\b\", \"\\\\bnumber\\\\b\", \"\\\\bboolean\\\\b\", \"\\\\bany\\\\b\", \"\\\\bvoid\\\\b\",\n            \"\\\\bnever\\\\b\", \"\\\\bunknown\\\\b\", \"\\\\bObject\\\\b\", \"\\\\bArray\\\\b\"\n        };\n\n        for (const QString &pattern : 
typePatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Type;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"|'.*?'|`.*?`\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"//[^\\n]*\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"/\\\\*.*?\\\\*/\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"=>\");\n        rule.format = Arrow;\n        highlightingRules.append(rule);\n\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> javaHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*(?=\\\\()\");\n        rule.format = FunctionCall;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\bvoid\\\\s+(\\\\w+)\\\\b\");\n        rule.format = Function;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        QStringList keywordPatterns = {\n            \"\\\\bpublic\\\\b\", \"\\\\bprivate\\\\b\", \"\\\\bprotected\\\\b\", \"\\\\bstatic\\\\b\", \"\\\\bfinal\\\\b\",\n            \"\\\\bclass\\\\b\", \"\\\\bif\\\\b\", \"\\\\belse\\\\b\", \"\\\\bwhile\\\\b\", \"\\\\bfor\\\\b\",\n            \"\\\\breturn\\\\b\", \"\\\\bnew\\\\b\", \"\\\\bimport\\\\b\", \"\\\\bpackage\\\\b\", \"\\\\btry\\\\b\",\n            \"\\\\bcatch\\\\b\", \"\\\\bthrow\\\\b\", 
\"\\\\bthrows\\\\b\", \"\\\\bfinally\\\\b\", \"\\\\binterface\\\\b\",\n            \"\\\\bextends\\\\b\", \"\\\\bimplements\\\\b\", \"\\\\bsuper\\\\b\", \"\\\\bthis\\\\b\", \"\\\\bvoid\\\\b\",\n            \"\\\\bboolean\\\\b\", \"\\\\bbyte\\\\b\", \"\\\\bchar\\\\b\", \"\\\\bdouble\\\\b\", \"\\\\bfloat\\\\b\",\n            \"\\\\bint\\\\b\", \"\\\\blong\\\\b\", \"\\\\bshort\\\\b\", \"\\\\bswitch\\\\b\", \"\\\\bcase\\\\b\",\n            \"\\\\bdefault\\\\b\", \"\\\\bcontinue\\\\b\", \"\\\\bbreak\\\\b\", \"\\\\babstract\\\\b\", \"\\\\bassert\\\\b\",\n            \"\\\\benum\\\\b\", \"\\\\binstanceof\\\\b\", \"\\\\bnative\\\\b\", \"\\\\bstrictfp\\\\b\", \"\\\\bsynchronized\\\\b\",\n            \"\\\\btransient\\\\b\", \"\\\\bvolatile\\\\b\", \"\\\\bconst\\\\b\", \"\\\\bgoto\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\'.*?\\'\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"//[^\\n]*\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"/\\\\*.*?\\\\*/\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> goHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*(?=\\\\()\");\n       
 rule.format = FunctionCall;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\bfunc\\\\s+(\\\\w+)\\\\b\");\n        rule.format = Function;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        QStringList keywordPatterns = {\n            \"\\\\bfunc\\\\b\", \"\\\\bpackage\\\\b\", \"\\\\bimport\\\\b\", \"\\\\bvar\\\\b\", \"\\\\bconst\\\\b\",\n            \"\\\\btype\\\\b\", \"\\\\bstruct\\\\b\", \"\\\\binterface\\\\b\", \"\\\\bfor\\\\b\", \"\\\\bif\\\\b\",\n            \"\\\\belse\\\\b\", \"\\\\bswitch\\\\b\", \"\\\\bcase\\\\b\", \"\\\\bdefault\\\\b\", \"\\\\breturn\\\\b\",\n            \"\\\\bbreak\\\\b\", \"\\\\bcontinue\\\\b\", \"\\\\bgoto\\\\b\", \"\\\\bfallthrough\\\\b\",\n            \"\\\\bdefer\\\\b\", \"\\\\bchan\\\\b\", \"\\\\bmap\\\\b\", \"\\\\brange\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"`.*?`\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"//[^\\n]*\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"/\\\\*.*?\\\\*/\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> bashHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = 
QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        QStringList commandPatterns = {\n            \"\\\\b(grep|awk|sed|ls|cat|echo|rm|mkdir|cp|break|alias|eval|cd|exec|head|tail|strings|printf|touch|mv|chmod)\\\\b\"\n        };\n\n        for (const QString &pattern : commandPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Command;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        QStringList keywordPatterns = {\n            \"\\\\bif\\\\b\", \"\\\\bthen\\\\b\", \"\\\\belse\\\\b\", \"\\\\bfi\\\\b\", \"\\\\bfor\\\\b\",\n            \"\\\\bin\\\\b\", \"\\\\bdo\\\\b\", \"\\\\bdone\\\\b\", \"\\\\bwhile\\\\b\", \"\\\\buntil\\\\b\",\n            \"\\\\bcase\\\\b\", \"\\\\besac\\\\b\", \"\\\\bfunction\\\\b\", \"\\\\breturn\\\\b\",\n            \"\\\\blocal\\\\b\", \"\\\\bdeclare\\\\b\", \"\\\\bunset\\\\b\", \"\\\\bexport\\\\b\",\n            \"\\\\breadonly\\\\b\", \"\\\\bshift\\\\b\", \"\\\\bexit\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\'.*?\\'\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\$(\\\\w+|\\\\{[^}]+\\\\})\");\n        rule.format = Variable;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"#[^\\n]*\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n    }\n    return 
highlightingRules;\n}\n\nstatic QVector<HighlightingRule> latexHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\\\\\[A-Za-z]+\"); // Pattern for LaTeX commands\n        rule.format = Command;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"%[^\\n]*\"); // Pattern for LaTeX comments\n        rule.format = Comment;\n        highlightingRules.append(rule);\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> htmlHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*=\");\n        rule.format = AttributeName;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"|'.*?'\");\n        rule.format = AttributeValue;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"<!--.*?-->\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"&[a-zA-Z0-9#]*;\");\n        rule.format = SpecialCharacter;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"<!DOCTYPE.*?>\");\n        rule.format = DocType;\n        highlightingRules.append(rule);\n    }\n    return highlightingRules;\n}\n\nstatic QVector<HighlightingRule> phpHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule 
rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b(\\\\w+)\\\\s*(?=\\\\()\");\n        rule.format = FunctionCall;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\bfunction\\\\s+(\\\\w+)\\\\b\");\n        rule.format = Function;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\\\b[0-9]*\\\\.?[0-9]+\\\\b\");\n        rule.format = Number;\n        highlightingRules.append(rule);\n\n        QStringList keywordPatterns = {\n            \"\\\\bif\\\\b\", \"\\\\belse\\\\b\", \"\\\\belseif\\\\b\", \"\\\\bwhile\\\\b\", \"\\\\bfor\\\\b\",\n            \"\\\\bforeach\\\\b\", \"\\\\breturn\\\\b\", \"\\\\bprint\\\\b\", \"\\\\binclude\\\\b\", \"\\\\brequire\\\\b\",\n            \"\\\\binclude_once\\\\b\", \"\\\\brequire_once\\\\b\", \"\\\\btry\\\\b\", \"\\\\bcatch\\\\b\",\n            \"\\\\bfinally\\\\b\", \"\\\\bcontinue\\\\b\", \"\\\\bbreak\\\\b\", \"\\\\bclass\\\\b\", \"\\\\bfunction\\\\b\",\n            \"\\\\bnew\\\\b\", \"\\\\bthrow\\\\b\", \"\\\\barray\\\\b\", \"\\\\bpublic\\\\b\", \"\\\\bprivate\\\\b\",\n            \"\\\\bprotected\\\\b\", \"\\\\bstatic\\\\b\", \"\\\\bglobal\\\\b\", \"\\\\bisset\\\\b\", \"\\\\bunset\\\\b\",\n            \"\\\\bnull\\\\b\", \"\\\\btrue\\\\b\", \"\\\\bfalse\\\\b\"\n        };\n\n        for (const QString &pattern : keywordPatterns) {\n            rule.pattern = QRegularExpression(pattern);\n            rule.format = Keyword;\n            highlightingRules.append(rule);\n        }\n\n        rule.pattern = QRegularExpression(\"\\\".*?\\\"\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"\\'.*?\\'\");\n        rule.format = String;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"//[^\\n]*\");\n        
rule.format = Comment;\n        highlightingRules.append(rule);\n\n        rule.pattern = QRegularExpression(\"/\\\\*.*?\\\\*/\");\n        rule.format = Comment;\n        highlightingRules.append(rule);\n    }\n    return highlightingRules;\n}\n\n\nstatic QVector<HighlightingRule> jsonHighlightingRules()\n{\n    static QVector<HighlightingRule> highlightingRules;\n    if (highlightingRules.isEmpty()) {\n\n        HighlightingRule rule;\n\n        rule.pattern = QRegularExpression(\".*\");\n        rule.format = Default;\n        highlightingRules.append(rule);\n\n        // Key string rule\n        rule.pattern = QRegularExpression(\"\\\".*?\\\":\");  // keys are typically in the \"key\": format\n        rule.format = Key;\n        highlightingRules.append(rule);\n\n        // Value string rule\n        rule.pattern = QRegularExpression(\":\\\\s*(\\\".*?\\\")\");  // values are typically in the : \"value\" format\n        rule.format = Value;\n        highlightingRules.append(rule);\n    }\n    return highlightingRules;\n}\n\nSyntaxHighlighter::SyntaxHighlighter(QObject *parent)\n    : QSyntaxHighlighter(parent)\n{\n}\n\nSyntaxHighlighter::~SyntaxHighlighter()\n{\n}\n\nvoid SyntaxHighlighter::highlightBlock(const QString &text)\n{\n    QTextBlock block = this->currentBlock();\n\n    // Search the first block of the frame we're in for the code to use for highlighting\n    int userState = block.userState();\n    if (QTextFrame *frame = block.document()->frameAt(block.position())) {\n        QTextBlock firstBlock = frame->begin().currentBlock();\n        if (firstBlock.isValid())\n            userState = firstBlock.userState();\n    }\n\n    QVector<HighlightingRule> rules;\n    switch (userState) {\n    case Python:\n        rules = pythonHighlightingRules(); break;\n    case Cpp:\n        rules = cppHighlightingRules(); break;\n    case Csharp:\n        rules = csharpHighlightingRules(); break;\n    case Bash:\n        rules = bashHighlightingRules(); break;\n    
case TypeScript:\n        rules = typescriptHighlightingRules(); break;\n    case Java:\n        rules = javaHighlightingRules(); break;\n    case Go:\n        rules = goHighlightingRules(); break;\n    case Json:\n        rules = jsonHighlightingRules(); break;\n    case Latex:\n        rules = latexHighlightingRules(); break;\n    case Html:\n        rules = htmlHighlightingRules(); break;\n    case Php:\n        rules = phpHighlightingRules(); break;\n    default: break;\n    }\n\n    for (const HighlightingRule &rule : std::as_const(rules)) {\n        QRegularExpressionMatchIterator matchIterator = rule.pattern.globalMatch(text);\n        while (matchIterator.hasNext()) {\n            QRegularExpressionMatch match = matchIterator.next();\n            int startIndex = match.capturedStart();\n            int length = match.capturedLength();\n            QTextCharFormat format;\n            format.setForeground(formatToColor(rule.format, m_codeColors));\n            setFormat(startIndex, length, format);\n        }\n    }\n}\n\n// TODO (Adam) This class replaces characters in the text in order to provide markup and syntax highlighting\n// which destroys the original text in favor of the replaced text. This is a problem when we select\n// text and then the user tries to 'copy' the text: the original text should be placed in the clipboard\n// not the replaced text. 
A possible solution is to have this class keep a mapping of the original\n// indices and the replacement indices and then use the original text that is stored in memory in the\n// chat class to populate the clipboard.\nChatViewTextProcessor::ChatViewTextProcessor(QObject *parent)\n    : QObject{parent}\n    , m_quickTextDocument(nullptr)\n    , m_syntaxHighlighter(new SyntaxHighlighter(this))\n    , m_shouldProcessText(true)\n    , m_fontPixelSize(QGuiApplication::font().pointSizeF())\n{\n}\n\nQQuickTextDocument* ChatViewTextProcessor::textDocument() const\n{\n    return m_quickTextDocument;\n}\n\nvoid ChatViewTextProcessor::setTextDocument(QQuickTextDocument* quickTextDocument)\n{\n    m_quickTextDocument = quickTextDocument;\n    m_syntaxHighlighter->setDocument(m_quickTextDocument->textDocument());\n    handleTextChanged();\n}\n\nvoid ChatViewTextProcessor::setValue(const QString &value)\n{\n    m_quickTextDocument->textDocument()->setPlainText(value);\n    handleTextChanged();\n}\n\nbool ChatViewTextProcessor::tryCopyAtPosition(int position) const\n{\n    for (const auto &copy : m_copies) {\n        if (position >= copy.startPos && position <= copy.endPos) {\n            QClipboard *clipboard = QGuiApplication::clipboard();\n            clipboard->setText(copy.text);\n            return true;\n        }\n    }\n    return false;\n}\n\nbool ChatViewTextProcessor::shouldProcessText() const\n{\n    return m_shouldProcessText;\n}\n\nvoid ChatViewTextProcessor::setShouldProcessText(bool b)\n{\n    if (m_shouldProcessText == b)\n        return;\n    m_shouldProcessText = b;\n    emit shouldProcessTextChanged();\n    handleTextChanged();\n}\n\nqreal ChatViewTextProcessor::fontPixelSize() const\n{\n    return m_fontPixelSize;\n}\n\nvoid ChatViewTextProcessor::setFontPixelSize(qreal sz)\n{\n    if (m_fontPixelSize == sz)\n        return;\n    m_fontPixelSize = sz;\n    emit fontPixelSizeChanged();\n    handleTextChanged();\n}\n\nCodeColors 
ChatViewTextProcessor::codeColors() const\n{\n    return m_syntaxHighlighter->codeColors();\n}\n\nvoid ChatViewTextProcessor::setCodeColors(const CodeColors &colors)\n{\n    m_syntaxHighlighter->setCodeColors(colors);\n    emit codeColorsChanged();\n}\n\nvoid traverseDocument(QTextDocument *doc, QTextFrame *frame)\n{\n    QTextFrame *rootFrame = frame ? frame : doc->rootFrame();\n    QTextFrame::iterator rootIt;\n\n    if (!frame)\n        qDebug() << \"Begin traverse\";\n\n    for (rootIt = rootFrame->begin(); !rootIt.atEnd(); ++rootIt) {\n        QTextFrame *childFrame = rootIt.currentFrame();\n        QTextBlock childBlock = rootIt.currentBlock();\n\n        if (childFrame) {\n            qDebug() << \"Frame from\" << childFrame->firstPosition() << \"to\" << childFrame->lastPosition();\n            traverseDocument(doc, childFrame);\n        } else if (childBlock.isValid()) {\n            qDebug() << QString(\"    Block %1 position:\").arg(childBlock.userState()) << childBlock.position();\n            qDebug() << QString(\"    Block %1 text:\").arg(childBlock.userState()) << childBlock.text();\n\n            // Iterate over lines within the block\n            for (QTextBlock::iterator blockIt = childBlock.begin(); !(blockIt.atEnd()); ++blockIt) {\n                QTextFragment fragment = blockIt.fragment();\n                if (fragment.isValid()) {\n                    qDebug() << \"    Fragment text:\" << fragment.text();\n                }\n            }\n        }\n    }\n\n    if (!frame)\n        qDebug() << \"End traverse\";\n}\n\nvoid ChatViewTextProcessor::handleTextChanged()\n{\n    if (!m_quickTextDocument || !m_shouldProcessText)\n        return;\n\n    // Force full layout of the text document to work around a bug in Qt\n    // TODO(jared): report the Qt bug and link to the report here\n    QTextDocument* doc = m_quickTextDocument->textDocument();\n    (void)doc->documentLayout()->documentSize();\n\n    handleCodeBlocks();\n    handleMarkdown();\n\n 
   // We insert an invisible char at the end to make sure the document goes back to the default\n    // text format\n    QTextCursor cursor(doc);\n    QString invisibleCharacter = QString(QChar(0xFEFF));\n    cursor.insertText(invisibleCharacter, QTextCharFormat());\n}\n\nvoid ChatViewTextProcessor::handleCodeBlocks()\n{\n    QTextDocument* doc = m_quickTextDocument->textDocument();\n    QTextCursor cursor(doc);\n\n    QTextCharFormat textFormat;\n    textFormat.setFontFamilies(QStringList() << \"Monospace\");\n    textFormat.setForeground(QColor(\"white\"));\n\n    QTextFrameFormat frameFormatBase;\n    frameFormatBase.setBackground(codeColors().backgroundColor);\n\n    QTextTableFormat tableFormat;\n    tableFormat.setMargin(0);\n    tableFormat.setPadding(0);\n    tableFormat.setBorder(0);\n    tableFormat.setBorderCollapse(true);\n    QList<QTextLength> constraints;\n    constraints << QTextLength(QTextLength::PercentageLength, 100);\n    tableFormat.setColumnWidthConstraints(constraints);\n\n    QTextTableFormat headerTableFormat;\n    headerTableFormat.setBackground(codeColors().headerColor);\n    headerTableFormat.setPadding(0);\n    headerTableFormat.setBorder(0);\n    headerTableFormat.setBorderCollapse(true);\n    headerTableFormat.setTopMargin(10);\n    headerTableFormat.setBottomMargin(10);\n    headerTableFormat.setLeftMargin(15);\n    headerTableFormat.setRightMargin(15);\n    QList<QTextLength> headerConstraints;\n    headerConstraints << QTextLength(QTextLength::PercentageLength, 80);\n    headerConstraints << QTextLength(QTextLength::PercentageLength, 20);\n    headerTableFormat.setColumnWidthConstraints(headerConstraints);\n\n    QTextTableFormat codeBlockTableFormat;\n    codeBlockTableFormat.setBackground(codeColors().backgroundColor);\n    codeBlockTableFormat.setPadding(0);\n    codeBlockTableFormat.setBorder(0);\n    codeBlockTableFormat.setBorderCollapse(true);\n    codeBlockTableFormat.setTopMargin(15);\n    
codeBlockTableFormat.setBottomMargin(15);\n    codeBlockTableFormat.setLeftMargin(15);\n    codeBlockTableFormat.setRightMargin(15);\n    codeBlockTableFormat.setColumnWidthConstraints(constraints);\n\n    QTextImageFormat copyImageFormat;\n    copyImageFormat.setWidth(24);\n    copyImageFormat.setHeight(24);\n    copyImageFormat.setName(\"qrc:/gpt4all/icons/copy.svg\");\n\n    // Regex for code blocks\n    static const QRegularExpression reCode(\"```(.*?)(```|$)\", QRegularExpression::DotMatchesEverythingOption);\n    QRegularExpressionMatchIterator iCode = reCode.globalMatch(doc->toPlainText());\n\n    QList<QRegularExpressionMatch> matchesCode;\n    while (iCode.hasNext())\n        matchesCode.append(iCode.next());\n\n    QVector<CodeCopy> newCopies;\n    QVector<QTextFrame*> frames;\n\n    for(int index = matchesCode.count() - 1; index >= 0; --index) {\n        cursor.setPosition(matchesCode[index].capturedStart());\n        cursor.setPosition(matchesCode[index].capturedEnd(), QTextCursor::KeepAnchor);\n        cursor.removeSelectedText();\n\n        QTextFrameFormat frameFormat = frameFormatBase;\n        QString capturedText = matchesCode[index].captured(1);\n        QString codeLanguage;\n\n        QStringList lines = capturedText.split('\\n');\n        if (lines.last().isEmpty()) {\n            lines.removeLast();\n        }\n\n        if (lines.count() >= 2) {\n            const auto &firstWord = lines.first();\n            if (firstWord == \"python\"\n                || firstWord == \"cpp\"\n                || firstWord == \"c++\"\n                || firstWord == \"csharp\"\n                || firstWord == \"c#\"\n                || firstWord == \"c\"\n                || firstWord == \"bash\"\n                || firstWord == \"javascript\"\n                || firstWord == \"typescript\"\n                || firstWord == \"java\"\n                || firstWord == \"go\"\n                || firstWord == \"golang\"\n                || firstWord == \"json\"\n   
             || firstWord == \"latex\"\n                || firstWord == \"html\"\n                || firstWord == \"php\") {\n                codeLanguage = firstWord;\n            }\n            lines.removeFirst();\n        }\n\n        QTextFrame *mainFrame = cursor.currentFrame();\n        cursor.setCharFormat(textFormat);\n\n        cursor.insertFrame(frameFormat);\n        QTextTable *table = cursor.insertTable(codeLanguage.isEmpty() ? 1 : 2, 1, tableFormat);\n\n        if (!codeLanguage.isEmpty()) {\n            QTextTableCell headerCell = table->cellAt(0, 0);\n            QTextCursor headerCellCursor = headerCell.firstCursorPosition();\n            QTextTable *headerTable = headerCellCursor.insertTable(1, 2, headerTableFormat);\n            QTextTableCell header = headerTable->cellAt(0, 0);\n            QTextCursor headerCursor = header.firstCursorPosition();\n            headerCursor.insertText(codeLanguage);\n            QTextTableCell copy = headerTable->cellAt(0, 1);\n            QTextCursor copyCursor = copy.firstCursorPosition();\n            CodeCopy newCopy;\n            newCopy.text = lines.join(\"\\n\");\n            newCopy.startPos = copyCursor.position();\n            newCopy.endPos = newCopy.startPos + 1;\n            newCopies.append(newCopy);\n// FIXME: There are two reasons this is commented out. Odd drawing behavior is seen when this is added\n// and one selects with the mouse the code language in a code block. The other reason is the code that\n// tries to do a hit test for the image is just very broken and buggy and does not always work. 
So I'm\n// disabling this code and included functionality for v3.0.0 until I can figure out how to make this much\n// less buggy\n#if 0\n//            QTextBlockFormat blockFormat;\n//            blockFormat.setAlignment(Qt::AlignRight);\n//            copyCursor.setBlockFormat(blockFormat);\n//            copyCursor.insertImage(copyImageFormat, QTextFrameFormat::FloatRight);\n#endif\n        }\n\n        QTextTableCell codeCell = table->cellAt(codeLanguage.isEmpty() ? 0 : 1, 0);\n        QTextCursor codeCellCursor = codeCell.firstCursorPosition();\n        QTextTable *codeTable = codeCellCursor.insertTable(1, 1, codeBlockTableFormat);\n        QTextTableCell code = codeTable->cellAt(0, 0);\n\n        QTextCharFormat codeBlockCharFormat;\n        codeBlockCharFormat.setForeground(codeColors().defaultColor);\n\n        QFont monospaceFont(\"Courier\");\n        monospaceFont.setPointSize(m_fontPixelSize);\n        if (monospaceFont.family() != \"Courier\") {\n            monospaceFont.setFamily(\"Monospace\"); // Fallback if Courier isn't available\n        }\n\n        QTextCursor codeCursor = code.firstCursorPosition();\n        codeBlockCharFormat.setFont(monospaceFont); // Update the font for the codeblock\n        codeCursor.setCharFormat(codeBlockCharFormat);\n\n        codeCursor.block().setUserState(stringToLanguage(codeLanguage));\n        codeCursor.insertText(lines.join('\\n'));\n\n        cursor = mainFrame->lastCursorPosition();\n        cursor.setCharFormat(QTextCharFormat());\n    }\n\n    m_copies = newCopies;\n}\n\nvoid replaceAndInsertMarkdown(int startIndex, int endIndex, QTextDocument *doc)\n{\n    QTextCursor cursor(doc);\n    cursor.setPosition(startIndex);\n    cursor.setPosition(endIndex, QTextCursor::KeepAnchor);\n    QTextDocumentFragment fragment(cursor);\n    const QString plainText = fragment.toPlainText();\n    cursor.removeSelectedText();\n    QTextDocument::MarkdownFeatures features = static_cast<QTextDocument::MarkdownFeatures>(\n    
    QTextDocument::MarkdownNoHTML | QTextDocument::MarkdownDialectGitHub);\n    cursor.insertMarkdown(plainText, features);\n    cursor.block().setUserState(Markdown);\n}\n\nvoid ChatViewTextProcessor::handleMarkdown()\n{\n    QTextDocument* doc = m_quickTextDocument->textDocument();\n    QTextCursor cursor(doc);\n\n    QVector<QPair<int, int>> codeBlockPositions;\n\n    QTextFrame *rootFrame = doc->rootFrame();\n    QTextFrame::iterator rootIt;\n\n    bool hasAlreadyProcessedMarkdown = false;\n    for (rootIt = rootFrame->begin(); !rootIt.atEnd(); ++rootIt) {\n        QTextFrame *childFrame = rootIt.currentFrame();\n        QTextBlock childBlock = rootIt.currentBlock();\n        if (childFrame) {\n            codeBlockPositions.append(qMakePair(childFrame->firstPosition()-1, childFrame->lastPosition()+1));\n\n            for (QTextFrame::iterator frameIt = childFrame->begin(); !frameIt.atEnd(); ++frameIt) {\n                QTextBlock block = frameIt.currentBlock();\n                if (block.isValid() && block.userState() == Markdown)\n                    hasAlreadyProcessedMarkdown = true;\n            }\n        } else if (childBlock.isValid() && childBlock.userState() == Markdown)\n            hasAlreadyProcessedMarkdown = true;\n    }\n\n\n    if (!hasAlreadyProcessedMarkdown) {\n        std::sort(codeBlockPositions.begin(), codeBlockPositions.end(), [](const QPair<int, int> &a, const QPair<int, int> &b) {\n            return a.first > b.first;\n        });\n\n        int lastIndex = doc->characterCount() - 1;\n        for (const auto &pos : codeBlockPositions) {\n            int nonCodeStart = pos.second;\n            int nonCodeEnd = lastIndex;\n            if (nonCodeEnd > nonCodeStart) {\n                replaceAndInsertMarkdown(nonCodeStart, nonCodeEnd, doc);\n            }\n            lastIndex = pos.first;\n        }\n\n        if (lastIndex > 0)\n            replaceAndInsertMarkdown(0, lastIndex, doc);\n    }\n}\n"
  },
  {
    "path": "gpt4all-chat/src/chatviewtextprocessor.h",
    "content": "#ifndef CHATVIEWTEXTPROCESSOR_H\n#define CHATVIEWTEXTPROCESSOR_H\n\n#include <QColor>\n#include <QObject>\n#include <QQmlEngine> // IWYU pragma: keep\n#include <QQuickTextDocument>\n#include <QString>\n#include <QSyntaxHighlighter>\n#include <QVector> // IWYU pragma: keep\n#include <QtTypes>\n\n// IWYU pragma: no_forward_declare QQuickTextDocument\n\n\nstruct CodeColors {\n    Q_GADGET\n    Q_PROPERTY(QColor defaultColor MEMBER defaultColor)\n    Q_PROPERTY(QColor keywordColor MEMBER keywordColor)\n    Q_PROPERTY(QColor functionColor MEMBER functionColor)\n    Q_PROPERTY(QColor functionCallColor MEMBER functionCallColor)\n    Q_PROPERTY(QColor commentColor MEMBER commentColor)\n    Q_PROPERTY(QColor stringColor MEMBER stringColor)\n    Q_PROPERTY(QColor numberColor MEMBER numberColor)\n    Q_PROPERTY(QColor headerColor MEMBER headerColor)\n    Q_PROPERTY(QColor backgroundColor MEMBER backgroundColor)\n\npublic:\n    QColor defaultColor;\n    QColor keywordColor;\n    QColor functionColor;\n    QColor functionCallColor;\n    QColor commentColor;\n    QColor stringColor;\n    QColor numberColor;\n    QColor headerColor;\n    QColor backgroundColor;\n\n    QColor preprocessorColor = keywordColor;\n    QColor typeColor = numberColor;\n    QColor arrowColor = functionColor;\n    QColor commandColor = functionCallColor;\n    QColor variableColor = numberColor;\n    QColor keyColor = functionColor;\n    QColor valueColor = stringColor;\n    QColor parameterColor = stringColor;\n    QColor attributeNameColor = numberColor;\n    QColor attributeValueColor = stringColor;\n    QColor specialCharacterColor = functionColor;\n    QColor doctypeColor = commentColor;\n};\n\nQ_DECLARE_METATYPE(CodeColors)\n\nclass SyntaxHighlighter : public QSyntaxHighlighter {\n    Q_OBJECT\npublic:\n    SyntaxHighlighter(QObject *parent);\n    ~SyntaxHighlighter();\n    void highlightBlock(const QString &text) override;\n\n    CodeColors codeColors() const { return m_codeColors; 
}\n    void setCodeColors(const CodeColors &colors) { m_codeColors = colors; }\n\nprivate:\n    CodeColors m_codeColors;\n};\n\nstruct ContextLink {\n    int startPos = -1;\n    int endPos = -1;\n    QString text;\n    QString href;\n};\n\nstruct CodeCopy {\n    int startPos = -1;\n    int endPos = -1;\n    QString text;\n};\n\nclass ChatViewTextProcessor : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(QQuickTextDocument* textDocument READ textDocument WRITE setTextDocument NOTIFY textDocumentChanged())\n    Q_PROPERTY(bool shouldProcessText READ shouldProcessText WRITE setShouldProcessText NOTIFY shouldProcessTextChanged())\n    Q_PROPERTY(qreal fontPixelSize READ fontPixelSize WRITE setFontPixelSize NOTIFY fontPixelSizeChanged())\n    Q_PROPERTY(CodeColors codeColors READ codeColors WRITE setCodeColors NOTIFY codeColorsChanged())\n    QML_ELEMENT\npublic:\n    explicit ChatViewTextProcessor(QObject *parent = nullptr);\n\n    QQuickTextDocument* textDocument() const;\n    void setTextDocument(QQuickTextDocument* textDocument);\n\n    Q_INVOKABLE void setValue(const QString &value);\n    Q_INVOKABLE bool tryCopyAtPosition(int position) const;\n\n    bool shouldProcessText() const;\n    void setShouldProcessText(bool b);\n\n    qreal fontPixelSize() const;\n    void setFontPixelSize(qreal b);\n\n    CodeColors codeColors() const;\n    void setCodeColors(const CodeColors &colors);\n\nQ_SIGNALS:\n    void textDocumentChanged();\n    void shouldProcessTextChanged();\n    void fontPixelSizeChanged();\n    void codeColorsChanged();\n\nprivate Q_SLOTS:\n    void handleTextChanged();\n    void handleCodeBlocks();\n    void handleMarkdown();\n\nprivate:\n    QQuickTextDocument *m_quickTextDocument;\n    SyntaxHighlighter *m_syntaxHighlighter;\n    QVector<ContextLink> m_links;\n    QVector<CodeCopy> m_copies;\n    bool m_shouldProcessText = false;\n    qreal m_fontPixelSize;\n};\n\n#endif // CHATVIEWTEXTPROCESSOR_H\n"
  },
  {
    "path": "gpt4all-chat/src/codeinterpreter.cpp",
    "content": "#include \"codeinterpreter.h\"\n\n#include <QJSEngine>\n#include <QJSValue>\n#include <QList>\n#include <QStringList> // IWYU pragma: keep\n#include <QThread>\n#include <QVariant>\n#include <Qt>\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nCodeInterpreter::CodeInterpreter()\n    : Tool()\n    , m_error(ToolEnums::Error::NoError)\n{\n    m_worker = new CodeInterpreterWorker;\n    connect(this, &CodeInterpreter::request, m_worker, &CodeInterpreterWorker::request, Qt::QueuedConnection);\n}\n\nvoid CodeInterpreter::run(const QList<ToolParam> &params)\n{\n    m_error = ToolEnums::Error::NoError;\n    m_errorString = QString();\n\n    Q_ASSERT(params.count() == 1\n          && params.first().name == \"code\"\n          && params.first().type == ToolEnums::ParamType::String);\n\n    const QString code = params.first().value.toString();\n    connect(m_worker, &CodeInterpreterWorker::finished, [this, params] {\n        m_error = m_worker->error();\n        m_errorString = m_worker->errorString();\n        emit runComplete({\n        ToolCallConstants::CodeInterpreterFunction,\n            params,\n            m_worker->response(),\n            m_error,\n            m_errorString\n        });\n    });\n\n    emit request(code);\n}\n\nbool CodeInterpreter::interrupt()\n{\n    return m_worker->interrupt();\n}\n\nQList<ToolParamInfo> CodeInterpreter::parameters() const\n{\n    return {{\n        \"code\",\n        ToolEnums::ParamType::String,\n        \"javascript code to compute\",\n        true\n    }};\n}\n\nQString CodeInterpreter::symbolicFormat() const\n{\n    return \"{human readable plan to complete the task}\\n\" + ToolCallConstants::CodeInterpreterPrefix + \"{code}\\n\" + ToolCallConstants::CodeInterpreterSuffix;\n}\n\nQString CodeInterpreter::examplePrompt() const\n{\n    return R\"(Write code to check if a number is prime, use that to see if the number 7 is prime)\";\n}\n\nQString CodeInterpreter::exampleCall() const\n{\n    static const 
QString example = R\"(function isPrime(n) {\n    if (n <= 1) {\n        return false;\n    }\n    for (let i = 2; i <= Math.sqrt(n); i++) {\n        if (n % i === 0) {\n            return false;\n        }\n    }\n    return true;\n}\n\nconst number = 7;\nconsole.log(`The number ${number} is prime: ${isPrime(number)}`);\n)\";\n\n    return \"Certainly! Let's compute the answer to whether the number 7 is prime.\\n\" + ToolCallConstants::CodeInterpreterPrefix + example + ToolCallConstants::CodeInterpreterSuffix;\n}\n\nQString CodeInterpreter::exampleReply() const\n{\n    return R\"(\"The computed result shows that 7 is a prime number.)\";\n}\n\nCodeInterpreterWorker::CodeInterpreterWorker()\n    : QObject(nullptr)\n    , m_engine(new QJSEngine(this))\n{\n    moveToThread(&m_thread);\n\n    QJSValue consoleInternalObject = m_engine->newQObject(&m_consoleCapture);\n    m_engine->globalObject().setProperty(\"console_internal\", consoleInternalObject);\n\n    // preprocess console.log args in JS since Q_INVOKE doesn't support varargs\n    auto consoleObject = m_engine->evaluate(uR\"(\n        class Console {\n            log(...args) {\n                if (args.length == 0)\n                    return;\n                if (args.length >= 2 && typeof args[0] === 'string')\n                    throw new Error('console.log string formatting not supported');\n                let cat = '';\n                for (const arg of args) {\n                    cat += String(arg);\n                }\n                console_internal.log(cat);\n            }\n        }\n\n        new Console();\n    )\"_s);\n    m_engine->globalObject().setProperty(\"console\", consoleObject);\n    m_thread.start();\n}\n\nvoid CodeInterpreterWorker::reset()\n{\n    m_response.clear();\n    m_error = ToolEnums::Error::NoError;\n    m_errorString.clear();\n    m_consoleCapture.output.clear();\n    m_engine->setInterrupted(false);\n}\n\nvoid CodeInterpreterWorker::request(const QString &code)\n{\n    
reset();\n    const QJSValue result = m_engine->evaluate(code);\n    QString resultString;\n\n    if (m_engine->isInterrupted()) {\n        resultString = QString(\"Error: code execution was interrupted or timed out.\");\n   } else if (result.isError()) {\n        // NOTE: We purposely do not set the m_error or m_errorString for the code interpreter since\n        // we *want* the model to see the response has an error so it can hopefully correct itself. The\n        // error member variables are intended for tools that have error conditions that cannot be corrected.\n        // For instance, a tool depending upon the network might set these error variables if the network\n        // is not available.\n        const QStringList lines = code.split('\\n');\n        const int line = result.property(\"lineNumber\").toInt();\n        const int index = line - 1;\n        const QString lineContent = (index >= 0 && index < lines.size()) ? lines.at(index) : \"Line not found in code.\";\n            resultString = QString(\"Uncaught exception at line %1: %2\\n\\t%3\")\n                .arg(line)\n                .arg(result.toString())\n                .arg(lineContent);\n        m_error = ToolEnums::Error::UnknownError;\n        m_errorString = resultString;\n    } else {\n        resultString = result.isUndefined() ? QString() : result.toString();\n    }\n\n    if (resultString.isEmpty())\n        resultString = m_consoleCapture.output;\n    else if (!m_consoleCapture.output.isEmpty())\n        resultString += \"\\n\" + m_consoleCapture.output;\n    m_response = resultString;\n    emit finished();\n}\n\nbool CodeInterpreterWorker::interrupt()\n{\n    m_error = ToolEnums::Error::TimeoutError;\n    m_engine->setInterrupted(true);\n    return true;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/codeinterpreter.h",
    "content": "#ifndef CODEINTERPRETER_H\n#define CODEINTERPRETER_H\n\n#include \"tool.h\"\n#include \"toolcallparser.h\"\n\n#include <QObject>\n#include <QString>\n#include <QThread>\n#include <QtAssert>\n\nclass QJSEngine;\n\n\nclass JavaScriptConsoleCapture : public QObject\n{\n    Q_OBJECT\npublic:\n    QString output;\n    Q_INVOKABLE void log(const QString &message)\n    {\n        const int maxLength = 1024;\n        if (output.length() >= maxLength)\n            return;\n\n        if (output.length() + message.length() + 1 > maxLength) {\n            static const QString trunc = \"\\noutput truncated at \" + QString::number(maxLength) + \" characters...\";\n            int remainingLength = maxLength - output.length();\n            if (remainingLength > 0)\n                output.append(message.left(remainingLength));\n            output.append(trunc);\n            Q_ASSERT(output.length() > maxLength);\n        } else {\n            output.append(message + \"\\n\");\n        }\n    }\n};\n\nclass CodeInterpreterWorker : public QObject {\n    Q_OBJECT\npublic:\n    CodeInterpreterWorker();\n    virtual ~CodeInterpreterWorker() {}\n\n    void reset();\n    QString response() const { return m_response; }\n    ToolEnums::Error error() const { return m_error; }\n    QString errorString() const { return m_errorString; }\n    bool interrupt();\n\npublic Q_SLOTS:\n    void request(const QString &code);\n\nQ_SIGNALS:\n    void finished();\n\nprivate:\n    QString m_response;\n    ToolEnums::Error m_error = ToolEnums::Error::NoError;\n    QString m_errorString;\n    QThread m_thread;\n    JavaScriptConsoleCapture m_consoleCapture;\n    QJSEngine *m_engine = nullptr;\n};\n\nclass CodeInterpreter : public Tool\n{\n    Q_OBJECT\npublic:\n    explicit CodeInterpreter();\n    virtual ~CodeInterpreter() {}\n\n    void run(const QList<ToolParam> &params) override;\n    bool interrupt() override;\n\n    ToolEnums::Error error() const override { return m_error; }\n    
QString errorString() const override { return m_errorString; }\n\n    QString name() const override { return tr(\"Code Interpreter\"); }\n    QString description() const override { return tr(\"compute javascript code using console.log as output\"); }\n    QString function() const override { return ToolCallConstants::CodeInterpreterFunction; }\n    QList<ToolParamInfo> parameters() const override;\n    virtual QString symbolicFormat() const override;\n    QString examplePrompt() const override;\n    QString exampleCall() const override;\n    QString exampleReply() const override;\n\nQ_SIGNALS:\n    void request(const QString &code);\n\nprivate:\n    ToolEnums::Error m_error = ToolEnums::Error::NoError;\n    QString m_errorString;\n    CodeInterpreterWorker *m_worker;\n};\n\n#endif // CODEINTERPRETER_H\n"
  },
  {
    "path": "gpt4all-chat/src/config.h.in",
    "content": "#pragma once\n\n#define APP_VERSION \"@APP_VERSION@\"\n\n#define G4A_CONFIG(name) (1/G4A_CONFIG_##name == 1)\n\n#define G4A_CONFIG_force_d3d12 @GPT4ALL_CONFIG_FORCE_D3D12@\n"
  },
  {
    "path": "gpt4all-chat/src/database.cpp",
    "content": "#include \"database.h\"\n\n#include \"mysettings.h\"\n#include \"utils.h\" // IWYU pragma: keep\n\n#include <duckx/duckx.hpp>\n#include <fmt/format.h>\n#include <usearch/index.hpp>\n#include <usearch/index_plugins.hpp>\n\n#include <QDebug>\n#include <QDir>\n#include <QDirIterator>\n#include <QFile>\n#include <QFileSystemWatcher>\n#include <QFlags>\n#include <QIODevice>\n#include <QKeyValueIterator>\n#include <QRegularExpression>\n#include <QSqlError>\n#include <QSqlQuery>\n#include <QTextStream>\n#include <QTimer>\n#include <QMap>\n#include <QUtf8StringView>\n#include <QVariant>\n#include <QtLogging>\n#include <QtMinMax>\n#include <QtTypes>\n\n#include <algorithm>\n#include <cmath>\n#include <optional>\n#include <stdexcept>\n\n#ifdef GPT4ALL_USE_QTPDF\n#   include <QPdfDocument>\n#   include <QPdfSelection>\n#else\n#   include <fpdfview.h>\n#   include <fpdf_doc.h>\n#   include <fpdf_text.h>\n#endif\n\nusing namespace Qt::Literals::StringLiterals;\nnamespace ranges = std::ranges;\nnamespace us = unum::usearch;\n\n//#define DEBUG\n//#define DEBUG_EXAMPLE\n\n\nnamespace {\n\n/* QFile that checks input for binary data. If seen, it fails the read and returns true\n * for binarySeen(). 
*/\nclass BinaryDetectingFile: public QFile {\npublic:\n    using QFile::QFile;\n\n    bool binarySeen() const { return m_binarySeen; }\n\nprotected:\n    qint64 readData(char *data, qint64 maxSize) override {\n        qint64 res = QFile::readData(data, maxSize);\n        return checkData(data, res);\n    }\n\n    qint64 readLineData(char *data, qint64 maxSize) override {\n        qint64 res = QFile::readLineData(data, maxSize);\n        return checkData(data, res);\n    }\n\nprivate:\n    qint64 checkData(const char *data, qint64 size) {\n        Q_ASSERT(!isTextModeEnabled()); // We need raw bytes from the underlying QFile\n        if (size != -1 && !m_binarySeen) {\n            for (qint64 i = 0; i < size; i++) {\n                /* Control characters we should never see in plain text:\n                 * 0x00 NUL - 0x06 ACK\n                 * 0x0E SO  - 0x1A SUB\n                 * 0x1C FS  - 0x1F US */\n                auto c = static_cast<unsigned char>(data[i]);\n                if (c < 0x07 || (c >= 0x0E && c < 0x1B) || (c >= 0x1C && c < 0x20)) {\n                    m_binarySeen = true;\n                    break;\n                }\n            }\n        }\n        return m_binarySeen ? 
-1 : size;\n    }\n\n    bool m_binarySeen = false;\n};\n\n} // namespace\n\nstatic int s_batchSize = 100;\n\nstatic const QString INIT_DB_SQL[] = {\n    // automatically free unused disk space\n    u\"pragma auto_vacuum = FULL;\"_s,\n    // create tables\n    uR\"(\n        create table chunks(\n            id            integer primary key autoincrement,\n            document_id   integer not null,\n            chunk_text    text not null,\n            file          text not null,\n            title         text,\n            author        text,\n            subject       text,\n            keywords      text,\n            page          integer,\n            line_from     integer,\n            line_to       integer,\n            words         integer default 0 not null,\n            tokens        integer default 0 not null,\n            foreign key(document_id) references documents(id)\n        );\n    )\"_s, uR\"(\n        create virtual table chunks_fts using fts5(\n            id unindexed,\n            document_id unindexed,\n            chunk_text,\n            file,\n            title,\n            author,\n            subject,\n            keywords,\n            content='chunks',\n            content_rowid='id',\n            tokenize='porter'\n        );\n    )\"_s, uR\"(\n        create table collections(\n            id                  integer primary key,\n            name                text unique not null,\n            start_update_time   integer,\n            last_update_time    integer,\n            embedding_model     text\n        );\n    )\"_s, uR\"(\n        create table folders(\n            id   integer primary key autoincrement,\n            path text unique not null\n        );\n    )\"_s, uR\"(\n        create table collection_items(\n            collection_id integer not null,\n            folder_id     integer not null,\n            foreign key(collection_id) references collections(id)\n            foreign key(folder_id)     references 
folders(id),\n            unique(collection_id, folder_id)\n        );\n    )\"_s, uR\"(\n        create table documents(\n            id            integer primary key,\n            folder_id     integer not null,\n            document_time integer not null,\n            document_path text unique not null,\n            foreign key(folder_id) references folders(id)\n        );\n    )\"_s, uR\"(\n        create table embeddings(\n            model         text not null,\n            folder_id     integer not null,\n            chunk_id      integer not null,\n            embedding     blob not null,\n            primary key(model, folder_id, chunk_id),\n            foreign key(folder_id) references folders(id),\n            foreign key(chunk_id)  references chunks(id),\n            unique(model, chunk_id)\n        );\n    )\"_s,\n};\n\nstatic const QString INSERT_CHUNK_SQL = uR\"(\n    insert into chunks(document_id, chunk_text,\n        file, title, author, subject, keywords, page, line_from, line_to, words)\n        values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n        returning id;\n)\"_s;\n\nstatic const QString INSERT_CHUNK_FTS_SQL = uR\"(\n        insert into chunks_fts(document_id, chunk_text,\n            file, title, author, subject, keywords)\n            values(?, ?, ?, ?, ?, ?, ?);\n)\"_s;\n\nstatic const QString SELECT_CHUNKED_DOCUMENTS_SQL[] = {\n    uR\"(\n        select distinct document_id from chunks;\n    )\"_s, uR\"(\n        select distinct document_id from chunks_fts;\n    )\"_s,\n};\n\nstatic const QString DELETE_CHUNKS_SQL[] = {\n    uR\"(\n        delete from embeddings\n        where chunk_id in (\n            select id from chunks where document_id = ?\n        );\n    )\"_s, uR\"(\n        delete from chunks where document_id = ?;\n    )\"_s, uR\"(\n        delete from chunks_fts where document_id = ?;\n    )\"_s,\n};\n\nstatic const QString SELECT_CHUNKS_BY_DOCUMENT_SQL = uR\"(\n    select id from chunks WHERE document_id = 
?;
)"_s;

// Fetch chunk rows plus their document/collection context.
// %1: comma-separated list of chunk ids (built by selectChunk()).
static const QString SELECT_CHUNKS_SQL = uR"(
    select c.id, d.document_time, d.document_path, c.chunk_text, c.file, c.title, c.author, c.page, c.line_from, c.line_to, co.name
    from chunks c
    join documents d on d.id = c.document_id
    join folders f on f.id = d.folder_id
    join collection_items ci on ci.folder_id = f.id
    join collections co on co.id = ci.collection_id
    where c.id in (%1);
)"_s;

// Chunks that do not yet have an embedding for their collection's embedding
// model (collections with a null model are excluded by the join condition).
static const QString SELECT_UNCOMPLETED_CHUNKS_SQL = uR"(
    select co.name, co.embedding_model, c.id, d.folder_id, c.chunk_text
    from chunks c
    join documents d on d.id = c.document_id
    join folders f on f.id = d.folder_id
    join collection_items ci on ci.folder_id = f.id
    join collections co on co.id = ci.collection_id and co.embedding_model is not null
    where not exists(
        select 1
        from embeddings e
        where e.chunk_id = c.id and e.model = co.embedding_model
    );
)"_s;

// Number of chunks belonging to a single folder.
static const QString SELECT_COUNT_CHUNKS_SQL = uR"(
    select count(c.id)
    from chunks c
    join documents d on d.id = c.document_id
    where d.folder_id = ?;
)"_s;

// Full-text search ranked by bm25. %1: quoted collection-name list; %2: limit.
// NOTE(review): %1/%2 are spliced in via QString::arg(), not bound parameters —
// this assumes collection names never contain a single quote; verify at call site.
static const QString SELECT_CHUNKS_FTS_SQL = uR"(
    select fts.id, bm25(chunks_fts) as score
    from chunks_fts fts
    join documents d on fts.document_id = d.id
    join collection_items ci on d.folder_id = ci.folder_id
    join collections co on ci.collection_id = co.id
    where chunks_fts match ?
    and co.name in ('%1')
    order by score limit %2;
)"_s;


// Defines a two-field struct with operator== and qHash so it can key a QHash.
#define NAMED_PAIR(name, typea, a, typeb, b) \
    struct name { typea a; typeb b; }; \
    static bool operator==(const name &x, const name &y) { return x.a == y.a && x.b == y.b; } \
    static size_t qHash(const name &x, size_t seed) { return qHashMulti(seed, x.a, x.b); }

// struct compared by embedding key, can be extended with additional unique data
NAMED_PAIR(EmbeddingKey, QString, embedding_model, int, chunk_id)

namespace {
    struct 
IncompleteChunk: EmbeddingKey { int folder_id; QString text; };\n} // namespace\n\nstatic bool selectAllUncompletedChunks(QSqlQuery &q, QHash<IncompleteChunk, QStringList> &chunks)\n{\n    if (!q.exec(SELECT_UNCOMPLETED_CHUNKS_SQL))\n        return false;\n    while (q.next()) {\n        QString collection = q.value(0).toString();\n        IncompleteChunk ic {\n            /*EmbeddingKey*/ {\n                .embedding_model = q.value(1).toString(),\n                .chunk_id        = q.value(2).toInt(),\n            },\n            /*folder_id =*/ q.value(3).toInt(),\n            /*text      =*/ q.value(4).toString(),\n        };\n        chunks[ic] << collection;\n    }\n    return true;\n}\n\nstatic bool selectCountChunks(QSqlQuery &q, int folder_id, int &count)\n{\n    if (!q.prepare(SELECT_COUNT_CHUNKS_SQL))\n        return false;\n    q.addBindValue(folder_id);\n    if (!q.exec())\n        return false;\n    if (!q.next()) {\n        count = 0;\n        return false;\n    }\n    count = q.value(0).toInt();\n    return true;\n}\n\nstatic bool selectChunk(QSqlQuery &q, const QList<int> &chunk_ids)\n{\n    QString chunk_ids_str = QString::number(chunk_ids[0]);\n    for (size_t i = 1; i < chunk_ids.size(); ++i)\n        chunk_ids_str += \",\" + QString::number(chunk_ids[i]);\n    const QString formatted_query = SELECT_CHUNKS_SQL.arg(chunk_ids_str);\n    if (!q.prepare(formatted_query))\n        return false;\n    return q.exec();\n}\n\nstatic const QString INSERT_COLLECTION_SQL = uR\"(\n    insert into collections(name, start_update_time, last_update_time, embedding_model)\n        values(?, ?, ?, ?)\n        returning id;\n    )\"_s;\n\nstatic const QString SELECT_FOLDERS_FROM_COLLECTIONS_SQL = uR\"(\n    select f.id, f.path\n    from collections c\n    join collection_items ci on ci.collection_id = c.id\n    join folders f on ci.folder_id = f.id\n    where c.name = ?;\n    )\"_s;\n\nstatic const QString SELECT_COLLECTIONS_SQL_V1 = uR\"(\n    select 
c.collection_name, f.folder_path, f.id
    from collections c
    join folders f on c.folder_id = f.id
    order by c.collection_name asc, f.folder_path asc;
    )"_s;

// v2+ schema: collections and folders are many-to-many via collection_items.
static const QString SELECT_COLLECTIONS_SQL_V2 = uR"(
    select c.id, c.name, f.path, f.id, c.start_update_time, c.last_update_time, c.embedding_model
    from collections c
    join collection_items ci on ci.collection_id = c.id
    join folders f on ci.folder_id = f.id
    order by c.name asc, f.path asc;
    )"_s;

// Column order matches sqlExtractCollections() with with_folder = false.
static const QString SELECT_COLLECTION_BY_NAME_SQL = uR"(
    select id, name, start_update_time, last_update_time, embedding_model
    from collections c
    where name = ?;
    )"_s;

static const QString SET_COLLECTION_EMBEDDING_MODEL_SQL = uR"(
    update collections
    set embedding_model = ?
    where name = ?;
    )"_s;

static const QString UPDATE_START_UPDATE_TIME_SQL = uR"(
    update collections set start_update_time = ? where id = ?;
)"_s;

static const QString UPDATE_LAST_UPDATE_TIME_SQL = uR"(
    update collections set last_update_time = ? 
where id = ?;\n)\"_s;\n\nstatic const QString FTS_INTEGRITY_SQL = uR\"(\n    insert into chunks_fts(chunks_fts, rank) values('integrity-check', 1);\n)\"_s;\n\nstatic const QString FTS_REBUILD_SQL = uR\"(\n    insert into chunks_fts(chunks_fts) values('rebuild');\n)\"_s;\n\nstatic bool addCollection(QSqlQuery &q, const QString &collection_name, const QDateTime &start_update,\n                          const QDateTime &last_update, const QString &embedding_model, CollectionItem &item)\n{\n    if (!q.prepare(INSERT_COLLECTION_SQL))\n        return false;\n    q.addBindValue(collection_name);\n    q.addBindValue(start_update);\n    q.addBindValue(last_update);\n    q.addBindValue(embedding_model);\n    if (!q.exec() || !q.next())\n        return false;\n    item.collection_id = q.value(0).toInt();\n    item.collection = collection_name;\n    item.embeddingModel = embedding_model;\n    return true;\n}\n\nstatic bool selectFoldersFromCollection(QSqlQuery &q, const QString &collection_name, QList<QPair<int, QString>> *folders)\n{\n    if (!q.prepare(SELECT_FOLDERS_FROM_COLLECTIONS_SQL))\n        return false;\n    q.addBindValue(collection_name);\n    if (!q.exec())\n        return false;\n    while (q.next())\n        folders->append({q.value(0).toInt(), q.value(1).toString()});\n    return true;\n}\n\nstatic QList<CollectionItem> sqlExtractCollections(QSqlQuery &q, bool with_folder = false, int version = LOCALDOCS_VERSION)\n{\n    QList<CollectionItem> collections;\n    while (q.next()) {\n        CollectionItem i;\n        int idx = 0;\n        if (version >= 2)\n            i.collection_id = q.value(idx++).toInt();\n        i.collection = q.value(idx++).toString();\n        if (with_folder) {\n            i.folder_path = q.value(idx++).toString();\n            i.folder_id = q.value(idx++).toInt();\n        }\n        i.indexing = false;\n        i.installed = true;\n\n        if (version >= 2) {\n            bool ok;\n            const qint64 start_update = 
q.value(idx++).toLongLong(&ok);\n            if (ok) i.startUpdate = QDateTime::fromMSecsSinceEpoch(start_update);\n            const qint64 last_update = q.value(idx++).toLongLong(&ok);\n            if (ok) i.lastUpdate = QDateTime::fromMSecsSinceEpoch(last_update);\n\n            i.embeddingModel = q.value(idx++).toString();\n        }\n        if (i.embeddingModel.isNull()) {\n            // unknown embedding model -> need to re-index\n            i.forceIndexing = true;\n        }\n\n        collections << i;\n    }\n    return collections;\n}\n\nstatic bool selectAllFromCollections(QSqlQuery &q, QList<CollectionItem> *collections, int version = LOCALDOCS_VERSION)\n{\n\n    switch (version) {\n    case 1:\n        if (!q.prepare(SELECT_COLLECTIONS_SQL_V1))\n            return false;\n        break;\n    case 2:\n    case 3:\n        if (!q.prepare(SELECT_COLLECTIONS_SQL_V2))\n            return false;\n        break;\n    default:\n        Q_UNREACHABLE();\n        return false;\n    }\n\n    if (!q.exec())\n        return false;\n    *collections = sqlExtractCollections(q, true, version);\n    return true;\n}\n\nstatic bool selectCollectionByName(QSqlQuery &q, const QString &name, std::optional<CollectionItem> &collection)\n{\n    if (!q.prepare(SELECT_COLLECTION_BY_NAME_SQL))\n        return false;\n    q.addBindValue(name);\n    if (!q.exec())\n        return false;\n    QList<CollectionItem> collections = sqlExtractCollections(q);\n    Q_ASSERT(collections.count() <= 1);\n    collection.reset();\n    if (!collections.isEmpty())\n        collection = collections.first();\n    return true;\n}\n\nstatic bool setCollectionEmbeddingModel(QSqlQuery &q, const QString &collection_name, const QString &embedding_model)\n{\n    if (!q.prepare(SET_COLLECTION_EMBEDDING_MODEL_SQL))\n        return false;\n    q.addBindValue(embedding_model);\n    q.addBindValue(collection_name);\n    return q.exec();\n}\n\nstatic bool updateStartUpdateTime(QSqlQuery &q, int id, qint64 
update_time)\n{\n    if (!q.prepare(UPDATE_START_UPDATE_TIME_SQL))\n        return false;\n    q.addBindValue(update_time);\n    q.addBindValue(id);\n    return q.exec();\n}\n\nstatic bool updateLastUpdateTime(QSqlQuery &q, int id, qint64 update_time)\n{\n    if (!q.prepare(UPDATE_LAST_UPDATE_TIME_SQL))\n        return false;\n    q.addBindValue(update_time);\n    q.addBindValue(id);\n    return q.exec();\n}\n\nstatic const QString INSERT_FOLDERS_SQL = uR\"(\n    insert into folders(path) values(?);\n    )\"_s;\n\nstatic const QString DELETE_FOLDERS_SQL = uR\"(\n    delete from folders where id = ?;\n    )\"_s;\n\nstatic const QString SELECT_FOLDERS_FROM_PATH_SQL = uR\"(\n    select id from folders where path = ?;\n    )\"_s;\n\nstatic const QString GET_FOLDER_EMBEDDING_MODEL_SQL = uR\"(\n    select co.embedding_model\n    from collections co\n    join collection_items ci on ci.collection_id = co.id\n    where ci.folder_id = ?;\n    )\"_s;\n\nstatic const QString FOLDER_REMOVE_ALL_DOCS_SQL[] = {\n    uR\"(\n        delete from embeddings\n        where chunk_id in (\n            select c.id\n            from chunks c\n            join documents d on d.id = c.document_id\n            join folders f on f.id = d.folder_id\n            where f.path = ?\n        );\n    )\"_s, uR\"(\n        delete from chunks\n        where document_id in (\n            select d.id\n            from documents d\n            join folders f on f.id = d.folder_id\n            where f.path = ?\n        );\n    )\"_s, uR\"(\n        delete from documents\n        where id in (\n            select d.id\n            from documents d\n            join folders f on f.id = d.folder_id\n            where f.path = ?\n        );\n    )\"_s,\n};\n\nstatic bool addFolderToDB(QSqlQuery &q, const QString &folder_path, int *folder_id)\n{\n    if (!q.prepare(INSERT_FOLDERS_SQL))\n        return false;\n    q.addBindValue(folder_path);\n    if (!q.exec())\n        return false;\n    *folder_id = 
q.lastInsertId().toInt();\n    return true;\n}\n\nstatic bool removeFolderFromDB(QSqlQuery &q, int folder_id)\n{\n    if (!q.prepare(DELETE_FOLDERS_SQL))\n        return false;\n    q.addBindValue(folder_id);\n    return q.exec();\n}\n\nstatic bool selectFolder(QSqlQuery &q, const QString &folder_path, int *id)\n{\n    if (!q.prepare(SELECT_FOLDERS_FROM_PATH_SQL))\n        return false;\n    q.addBindValue(folder_path);\n    if (!q.exec())\n        return false;\n    Q_ASSERT(q.size() < 2);\n    if (q.next())\n        *id = q.value(0).toInt();\n    return true;\n}\n\nstatic bool sqlGetFolderEmbeddingModel(QSqlQuery &q, int id, QString &embedding_model)\n{\n    if (!q.prepare(GET_FOLDER_EMBEDDING_MODEL_SQL))\n        return false;\n    q.addBindValue(id);\n    if (!q.exec() || !q.next())\n        return false;\n    // FIXME(jared): there may be more than one if a folder is shared between collections\n    Q_ASSERT(q.size() < 2);\n    embedding_model = q.value(0).toString();\n    return true;\n}\n\nstatic const QString INSERT_COLLECTION_ITEM_SQL = uR\"(\n    insert into collection_items(collection_id, folder_id)\n    values(?, ?)\n    on conflict do nothing;\n)\"_s;\n\nstatic const QString DELETE_COLLECTION_FOLDER_SQL = uR\"(\n    delete from collection_items\n    where collection_id = (select id from collections where name = :name) and folder_id = :folder_id\n    returning (select count(*) from collection_items where folder_id = :folder_id);\n)\"_s;\n\nstatic const QString PRUNE_COLLECTIONS_SQL = uR\"(\n    delete from collections\n    where id not in (select collection_id from collection_items);\n)\"_s;\n\n// 0 = already exists, 1 = added, -1 = error\nstatic int addCollectionItem(QSqlQuery &q, int collection_id, int folder_id)\n{\n    if (!q.prepare(INSERT_COLLECTION_ITEM_SQL))\n        return -1;\n    q.addBindValue(collection_id);\n    q.addBindValue(folder_id);\n    if (q.exec())\n        return q.numRowsAffected();\n    return -1;\n}\n\n// returns the number of 
remaining references to the folder, or -1 on error\nstatic int removeCollectionFolder(QSqlQuery &q, const QString &collection_name, int folder_id)\n{\n    if (!q.prepare(DELETE_COLLECTION_FOLDER_SQL))\n        return -1;\n    q.bindValue(\":name\", collection_name);\n    q.bindValue(\":folder_id\", folder_id);\n    if (!q.exec() || !q.next())\n        return -1;\n    return q.value(0).toInt();\n}\n\nstatic bool sqlPruneCollections(QSqlQuery &q)\n{\n    return q.exec(PRUNE_COLLECTIONS_SQL);\n}\n\nstatic const QString INSERT_DOCUMENTS_SQL = uR\"(\n    insert into documents(folder_id, document_time, document_path) values(?, ?, ?);\n    )\"_s;\n\nstatic const QString UPDATE_DOCUMENT_TIME_SQL = uR\"(\n    update documents set document_time = ? where id = ?;\n    )\"_s;\n\nstatic const QString DELETE_DOCUMENTS_SQL = uR\"(\n    delete from documents where id = ?;\n    )\"_s;\n\nstatic const QString SELECT_DOCUMENT_SQL = uR\"(\n    select id, document_time from documents where document_path = ?;\n    )\"_s;\n\nstatic const QString SELECT_DOCUMENTS_SQL = uR\"(\n    select id from documents where folder_id = ?;\n    )\"_s;\n\nstatic const QString SELECT_ALL_DOCUMENTS_SQL = uR\"(\n    select id, document_path from documents;\n    )\"_s;\n\nstatic const QString SELECT_COUNT_STATISTICS_SQL = uR\"(\n    select count(distinct d.id), sum(c.words), sum(c.tokens)\n    from documents d\n    left join chunks c on d.id = c.document_id\n    where d.folder_id = ?;\n    )\"_s;\n\nstatic bool addDocument(QSqlQuery &q, int folder_id, qint64 document_time, const QString &document_path, int *document_id)\n{\n    if (!q.prepare(INSERT_DOCUMENTS_SQL))\n        return false;\n    q.addBindValue(folder_id);\n    q.addBindValue(document_time);\n    q.addBindValue(document_path);\n    if (!q.exec())\n        return false;\n    *document_id = q.lastInsertId().toInt();\n    return true;\n}\n\nstatic bool removeDocument(QSqlQuery &q, int document_id)\n{\n    if (!q.prepare(DELETE_DOCUMENTS_SQL))\n     
   return false;\n    q.addBindValue(document_id);\n    return q.exec();\n}\n\nstatic bool updateDocument(QSqlQuery &q, int id, qint64 document_time)\n{\n    if (!q.prepare(UPDATE_DOCUMENT_TIME_SQL))\n        return false;\n    q.addBindValue(document_time);\n    q.addBindValue(id);\n    return q.exec();\n}\n\nstatic bool selectDocument(QSqlQuery &q, const QString &document_path, int *id, qint64 *document_time)\n{\n    if (!q.prepare(SELECT_DOCUMENT_SQL))\n        return false;\n    q.addBindValue(document_path);\n    if (!q.exec())\n        return false;\n    Q_ASSERT(q.size() < 2);\n    if (q.next()) {\n        *id = q.value(0).toInt();\n        *document_time = q.value(1).toLongLong();\n    }\n    return true;\n}\n\nstatic bool selectDocuments(QSqlQuery &q, int folder_id, QList<int> *documentIds)\n{\n    if (!q.prepare(SELECT_DOCUMENTS_SQL))\n        return false;\n    q.addBindValue(folder_id);\n    if (!q.exec())\n        return false;\n    while (q.next())\n        documentIds->append(q.value(0).toInt());\n    return true;\n}\n\nstatic bool selectCountStatistics(QSqlQuery &q, int folder_id, int *total_docs, int *total_words, int *total_tokens)\n{\n    if (!q.prepare(SELECT_COUNT_STATISTICS_SQL))\n        return false;\n    q.addBindValue(folder_id);\n    if (!q.exec())\n        return false;\n    if (q.next()) {\n        *total_docs = q.value(0).toInt();\n        *total_words = q.value(1).toInt();\n        *total_tokens = q.value(2).toInt();\n    }\n    return true;\n}\n\n// insert embedding only if still needed\nstatic const QString INSERT_EMBEDDING_SQL = uR\"(\n    insert into embeddings(model, folder_id, chunk_id, embedding)\n    select :model, d.folder_id, :chunk_id, :embedding\n    from chunks c\n    join documents d on d.id = c.document_id\n    join folders f on f.id = d.folder_id\n    join collection_items ci on ci.folder_id = f.id\n    join collections co on co.id = ci.collection_id\n    where co.embedding_model = :model and c.id = :chunk_id\n    
limit 1;\n)\"_s;\n\nstatic const QString GET_COLLECTION_EMBEDDINGS_SQL = uR\"(\n    select e.chunk_id, e.embedding\n    from embeddings e\n    join collections co on co.embedding_model = e.model\n    join collection_items ci on ci.folder_id = e.folder_id and ci.collection_id = co.id\n    where co.name in ('%1');\n)\"_s;\n\nstatic const QString GET_CHUNK_EMBEDDINGS_SQL = uR\"(\n    select e.chunk_id, e.embedding\n    from embeddings e\n    where e.chunk_id in (%1);\n)\"_s;\n\nstatic const QString GET_CHUNK_FILE_SQL = uR\"(\n    select file from chunks where id = ?;\n)\"_s;\n\nnamespace {\n    struct Embedding { QString model; int folder_id; int chunk_id; QByteArray data; };\n    struct EmbeddingStat { QString lastFile; int nAdded; int nSkipped; };\n} // namespace\n\nNAMED_PAIR(EmbeddingFolder, QString, embedding_model, int, folder_id)\n\nstatic bool sqlAddEmbeddings(QSqlQuery &q, const QList<Embedding> &embeddings, QHash<EmbeddingFolder, EmbeddingStat> &embeddingStats)\n{\n    if (!q.prepare(INSERT_EMBEDDING_SQL))\n        return false;\n\n    // insert embedding if needed\n    for (const auto &e: embeddings) {\n        q.bindValue(\":model\", e.model);\n        q.bindValue(\":chunk_id\", e.chunk_id);\n        q.bindValue(\":embedding\", e.data);\n        if (!q.exec())\n            return false;\n\n        auto &stat = embeddingStats[{ e.model, e.folder_id }];\n        if (q.numRowsAffected()) {\n            stat.nAdded++; // embedding added\n        } else {\n            stat.nSkipped++; // embedding no longer needed\n        }\n    }\n\n    if (!q.prepare(GET_CHUNK_FILE_SQL))\n        return false;\n\n    // populate statistics for each collection item\n    for (const auto &e: embeddings) {\n        auto &stat = embeddingStats[{ e.model, e.folder_id }];\n        if (stat.nAdded && stat.lastFile.isNull()) {\n            q.addBindValue(e.chunk_id);\n            if (!q.exec() || !q.next())\n                return false;\n            stat.lastFile = 
q.value(0).toString();
        }
    }

    return true;
}

// Begin a transaction on the member connection; failure only asserts (debug builds).
void Database::transaction()
{
    bool ok = m_db.transaction();
    Q_ASSERT(ok);
}

// Commit the current transaction; failure only asserts (debug builds).
void Database::commit()
{
    bool ok = m_db.commit();
    Q_ASSERT(ok);
}

// Roll back the current transaction; failure only asserts (debug builds).
void Database::rollback()
{
    bool ok = m_db.rollback();
    Q_ASSERT(ok);
}

// Rebuild m_documentIdCache from the ids present in chunks and chunks_fts.
// Returns false if either select fails (cache may then be partially filled).
bool Database::refreshDocumentIdCache(QSqlQuery &q)
{
    m_documentIdCache.clear();
    for (const auto &cmd: SELECT_CHUNKED_DOCUMENTS_SQL) {
        if (!q.exec(cmd))
            return false;
        while (q.next())
            m_documentIdCache << q.value(0).toInt();
    }
    return true;
}

// Insert one chunk into both the chunks table (RETURNING its id into *chunk_id)
// and the chunks_fts index, then record the document id in the cache.
// Bind order must match INSERT_CHUNK_SQL / INSERT_CHUNK_FTS_SQL exactly.
// Returns false on any prepare/exec failure; on failure after the first insert
// the chunks row exists without an FTS row — caller presumably rolls back.
bool Database::addChunk(QSqlQuery &q, int document_id, const QString &chunk_text, const QString &file,
                        const QString &title, const QString &author, const QString &subject, const QString &keywords,
                        int page, int from, int to, int words, int *chunk_id)
{
    if (!q.prepare(INSERT_CHUNK_SQL))
        return false;
    q.addBindValue(document_id);
    q.addBindValue(chunk_text);
    q.addBindValue(file);
    q.addBindValue(title);
    q.addBindValue(author);
    q.addBindValue(subject);
    q.addBindValue(keywords);
    q.addBindValue(page);
    q.addBindValue(from);
    q.addBindValue(to);
    q.addBindValue(words);
    if (!q.exec() || !q.next())
        return false;
    *chunk_id = q.value(0).toInt();

    if (!q.prepare(INSERT_CHUNK_FTS_SQL))
        return false;
    q.addBindValue(document_id);
    q.addBindValue(chunk_text);
    q.addBindValue(file);
    q.addBindValue(title);
    q.addBindValue(author);
    q.addBindValue(subject);
    q.addBindValue(keywords);
    if (!q.exec())
        return false;
    m_documentIdCache << document_id;
    return true;
}

// Delete a document's embeddings, chunks, and FTS rows (DELETE_CHUNKS_SQL),
// then drop the document id from the cache.
bool Database::removeChunksByDocumentId(QSqlQuery &q, int document_id)
{
    for (const auto &cmd: DELETE_CHUNKS_SQL) {
        if (!q.prepare(cmd))
            return false;
        
q.addBindValue(document_id);\n        if (!q.exec())\n            return false;\n    }\n    m_documentIdCache.remove(document_id);\n    return true;\n}\n\nbool Database::sqlRemoveDocsByFolderPath(QSqlQuery &q, const QString &path)\n{\n    for (const auto &cmd: FOLDER_REMOVE_ALL_DOCS_SQL) {\n        if (!q.prepare(cmd))\n            return false;\n        q.addBindValue(path);\n        if (!q.exec())\n            return false;\n    }\n    return refreshDocumentIdCache(q);\n}\n\nbool Database::hasContent()\n{\n    return m_db.tables().contains(\"chunks\", Qt::CaseInsensitive);\n}\n\nint Database::openDatabase(const QString &modelPath, bool create, int ver)\n{\n    if (!QFileInfo(modelPath).isDir()) {\n        qWarning() << \"ERROR: invalid download path\" << modelPath;\n        return -1;\n    }\n    if (m_db.isOpen())\n        m_db.close();\n    auto dbPath = u\"%1/localdocs_v%2.db\"_s.arg(modelPath).arg(ver);\n    if (!create && !QFileInfo::exists(dbPath))\n        return 0;\n    m_db.setDatabaseName(dbPath);\n    if (!m_db.open()) {\n        qWarning() << \"ERROR: opening db\" << dbPath << m_db.lastError();\n        return -1;\n    }\n    return hasContent();\n}\n\nbool Database::openLatestDb(const QString &modelPath, QList<CollectionItem> &oldCollections)\n{\n    /*\n     * Support upgrade path from older versions:\n     *\n     *  1. Detect and load dbPath with older versions\n     *  2. Provide versioned SQL select statements\n     *  3. Upgrade the tables to the new version\n     *  4. 
By default mark all collections of older versions as force indexing and present to the user\n     *     the an 'update' button letting them know a breaking change happened and that the collection\n     *     will need to be indexed again\n     */\n\n    int dbVer;\n    for (dbVer = LOCALDOCS_VERSION;; dbVer--) {\n        if (dbVer < LOCALDOCS_MIN_VER) return true; // create a new db\n        int res = openDatabase(modelPath, false, dbVer);\n        if (res == 1) break; // found one with content\n        if (res == -1) return false; // error\n    }\n\n    if (dbVer == LOCALDOCS_VERSION) return true; // already up-to-date\n\n    // If we're upgrading, then we need to do a select on the current version of the collections table,\n    // then create the new one and populate the collections table and mark them as needing forced\n    // indexing\n\n#if defined(DEBUG)\n    qDebug() << \"Older localdocs version found\" << dbVer << \"upgrade to\" << LOCALDOCS_VERSION;\n#endif\n\n    // Select the current collections which will be marked to force indexing\n    QSqlQuery q(m_db);\n    if (!selectAllFromCollections(q, &oldCollections, dbVer)) {\n        qWarning() << \"ERROR: Could not open select old collections\" << q.lastError();\n        return false;\n    }\n\n    m_db.close();\n    return true;\n}\n\nbool Database::initDb(const QString &modelPath, const QList<CollectionItem> &oldCollections)\n{\n    if (!m_db.isOpen()) {\n        int res = openDatabase(modelPath);\n        if (res == 1) return true; // already populated\n        if (res == -1) return false; // error\n    } else if (hasContent()) {\n        return true; // already populated\n    }\n\n    transaction();\n\n    QSqlQuery q(m_db);\n    for (const auto &cmd: INIT_DB_SQL) {\n        if (!q.exec(cmd)) {\n            qWarning() << \"ERROR: failed to create tables\" << q.lastError();\n            rollback();\n            return false;\n        }\n    }\n\n    /* These are collection items that came from an older 
version of localdocs which\n     * require forced indexing that should only be done when the user has explicitly asked\n     * for them to be indexed again */\n    for (const CollectionItem &item : oldCollections) {\n        if (!addFolder(item.collection, item.folder_path, QString())) {\n            qWarning() << \"ERROR: failed to add previous collections to new database\";\n            rollback();\n            return false;\n        }\n    }\n\n    commit();\n    return true;\n}\n\nDatabase::Database(int chunkSize, QStringList extensions)\n    : QObject(nullptr)\n    , m_chunkSize(chunkSize)\n    , m_scannedFileExtensions(std::move(extensions))\n    , m_scanIntervalTimer(new QTimer(this))\n    , m_watcher(new QFileSystemWatcher(this))\n    , m_embLLM(new EmbeddingLLM)\n    , m_databaseValid(true)\n    , m_chunkStreamer(this)\n{\n    m_db = QSqlDatabase::database(QSqlDatabase::defaultConnection, false);\n    if (!m_db.isValid())\n        m_db = QSqlDatabase::addDatabase(\"QSQLITE\");\n    Q_ASSERT(m_db.isValid());\n\n    moveToThread(&m_dbThread);\n    m_dbThread.setObjectName(\"database\");\n    m_dbThread.start();\n}\n\nDatabase::~Database()\n{\n    m_dbThread.quit();\n    m_dbThread.wait();\n    delete m_embLLM;\n}\n\nvoid Database::setStartUpdateTime(CollectionItem &item)\n{\n    QSqlQuery q(m_db);\n    const qint64 update_time = QDateTime::currentMSecsSinceEpoch();\n    if (!updateStartUpdateTime(q, item.collection_id, update_time))\n        qWarning() << \"Database ERROR: failed to set start update time:\" << q.lastError();\n    else\n        item.startUpdate = QDateTime::fromMSecsSinceEpoch(update_time);\n}\n\nvoid Database::setLastUpdateTime(CollectionItem &item)\n{\n    QSqlQuery q(m_db);\n    const qint64 update_time = QDateTime::currentMSecsSinceEpoch();\n    if (!updateLastUpdateTime(q, item.collection_id, update_time))\n        qWarning() << \"Database ERROR: failed to set last update time:\" << q.lastError();\n    else\n        item.lastUpdate = 
QDateTime::fromMSecsSinceEpoch(update_time);\n}\n\nCollectionItem Database::guiCollectionItem(int folder_id) const\n{\n    Q_ASSERT(m_collectionMap.contains(folder_id));\n    return m_collectionMap.value(folder_id);\n}\n\nvoid Database::updateGuiForCollectionItem(const CollectionItem &item)\n{\n    m_collectionMap.insert(item.folder_id, item);\n    emit requestUpdateGuiForCollectionItem(item);\n}\n\nvoid Database::addGuiCollectionItem(const CollectionItem &item)\n{\n    m_collectionMap.insert(item.folder_id, item);\n    emit requestAddGuiCollectionItem(item);\n}\n\nvoid Database::removeGuiFolderById(const QString &collection, int folder_id)\n{\n    emit requestRemoveGuiFolderById(collection, folder_id);\n}\n\nvoid Database::guiCollectionListUpdated(const QList<CollectionItem> &collectionList)\n{\n    for (const auto &i : collectionList)\n        m_collectionMap.insert(i.folder_id, i);\n    emit requestGuiCollectionListUpdated(collectionList);\n}\n\nvoid Database::updateFolderToIndex(int folder_id, size_t countForFolder, bool sendChunks)\n{\n    CollectionItem item = guiCollectionItem(folder_id);\n    item.currentDocsToIndex = countForFolder;\n    if (!countForFolder) {\n        if (sendChunks && !m_chunkList.isEmpty())\n            sendChunkList(); // send any remaining embedding chunks to llm\n        item.indexing = false;\n        item.installed = true;\n\n        // Set the last update if we are done\n        if (item.startUpdate > item.lastUpdate && item.currentEmbeddingsToIndex == 0)\n            setLastUpdateTime(item);\n    }\n    updateGuiForCollectionItem(item);\n}\n\nstatic void handleDocumentError(const QString &errorMessage, int document_id, const QString &document_path,\n                                const QSqlError &error)\n{\n    qWarning() << errorMessage << document_id << document_path << error;\n}\n\nclass DocumentReader {\npublic:\n    struct Metadata { QString title, author, subject, keywords; };\n\n    static std::unique_ptr<DocumentReader> 
fromDocument(DocumentInfo info);\n\n    const DocumentInfo           &doc     () const { return m_info; }\n    const Metadata               &metadata() const { return m_metadata; }\n    const std::optional<QString> &word    () const { return m_word; }\n    const std::optional<QString> &nextWord()       { m_word = advance(); return m_word; }\n    virtual std::optional<ChunkStreamer::Status> getError() const { return std::nullopt; }\n    virtual int page() const { return -1; }\n\n    virtual ~DocumentReader() = default;\n\nprotected:\n    explicit DocumentReader(DocumentInfo info)\n        : m_info(std::move(info)) {}\n\n    void postInit(Metadata &&metadata = {})\n    {\n        m_metadata = std::move(metadata);\n        m_word = advance();\n    }\n\n    virtual std::optional<QString> advance() = 0;\n\n    DocumentInfo           m_info;\n    Metadata               m_metadata;\n    std::optional<QString> m_word;\n};\n\nnamespace {\n\n#ifdef GPT4ALL_USE_QTPDF\nclass PdfDocumentReader final : public DocumentReader {\npublic:\n    explicit PdfDocumentReader(DocumentInfo info)\n        : DocumentReader(std::move(info))\n    {\n        QString path = info.file.canonicalFilePath();\n        if (m_doc.load(path) != QPdfDocument::Error::None)\n            throw std::runtime_error(fmt::format(\"Failed to load PDF: {}\", path));\n        Metadata metadata {\n            .title    = m_doc.metaData(QPdfDocument::MetaDataField::Title   ).toString(),\n            .author   = m_doc.metaData(QPdfDocument::MetaDataField::Author  ).toString(),\n            .subject  = m_doc.metaData(QPdfDocument::MetaDataField::Subject ).toString(),\n            .keywords = m_doc.metaData(QPdfDocument::MetaDataField::Keywords).toString(),\n        };\n        postInit(std::move(metadata));\n    }\n\n    int page() const override { return m_currentPage; }\n\nprivate:\n    std::optional<QString> advance() override\n    {\n        QString word;\n        do {\n            while (!m_stream || 
m_stream->atEnd()) {\n                if (m_currentPage >= m_doc.pageCount())\n                    return std::nullopt;\n                m_pageText = m_doc.getAllText(m_currentPage++).text();\n                m_stream.emplace(&m_pageText);\n            }\n            *m_stream >> word;\n        } while (word.isEmpty());\n        return word;\n    }\n\n    QPdfDocument               m_doc;\n    int                        m_currentPage = 0;\n    QString                    m_pageText;\n    std::optional<QTextStream> m_stream;\n};\n#else\nclass PdfDocumentReader final : public DocumentReader {\npublic:\n    explicit PdfDocumentReader(DocumentInfo info)\n        : DocumentReader(std::move(info))\n    {\n        QString path = info.file.canonicalFilePath();\n        m_doc = FPDF_LoadDocument(path.toUtf8().constData(), nullptr);\n        if (!m_doc)\n            throw std::runtime_error(fmt::format(\"Failed to load PDF: {}\", path));\n\n        // Extract metadata\n        Metadata metadata {\n            .title    = getMetadata(\"Title\"   ),\n            .author   = getMetadata(\"Author\"  ),\n            .subject  = getMetadata(\"Subject\" ),\n            .keywords = getMetadata(\"Keywords\"),\n        };\n        postInit(std::move(metadata));\n    }\n\n    ~PdfDocumentReader() override\n    {\n        if (m_page)\n            FPDF_ClosePage(m_page);\n        if (m_doc)\n            FPDF_CloseDocument(m_doc);\n    }\n\n    int page() const override { return m_currentPage; }\n\nprivate:\n    std::optional<QString> advance() override\n    {\n        QString word;\n        do {\n            while (!m_stream || m_stream->atEnd()) {\n                if (m_currentPage >= FPDF_GetPageCount(m_doc))\n                    return std::nullopt;\n\n                if (m_page)\n                    FPDF_ClosePage(std::exchange(m_page, nullptr));\n                m_page = FPDF_LoadPage(m_doc, m_currentPage++);\n                if (!m_page)\n                    throw 
std::runtime_error(\"Failed to load page.\");\n\n                m_pageText = extractTextFromPage(m_page);\n                m_stream.emplace(&m_pageText);\n            }\n            *m_stream >> word;\n        } while (word.isEmpty());\n        return word;\n    }\n\n    QString getMetadata(FPDF_BYTESTRING key)\n    {\n        // FPDF_GetMetaText includes a 2-byte null terminator\n        ulong nBytes = FPDF_GetMetaText(m_doc, key, nullptr, 0);\n        if (nBytes <= sizeof (FPDF_WCHAR))\n            return { \"\" };\n        QByteArray buffer(nBytes, Qt::Uninitialized);\n        ulong nResultBytes = FPDF_GetMetaText(m_doc, key, buffer.data(), buffer.size());\n        Q_ASSERT(nResultBytes % 2 == 0);\n        Q_ASSERT(nResultBytes <= nBytes);\n        return QString::fromUtf16(reinterpret_cast<const char16_t *>(buffer.data()), nResultBytes / 2 - 1);\n    }\n\n    QString extractTextFromPage(FPDF_PAGE page)\n    {\n        FPDF_TEXTPAGE textPage = FPDFText_LoadPage(page);\n        if (!textPage)\n            throw std::runtime_error(\"Failed to load text page.\");\n\n        int nChars = FPDFText_CountChars(textPage);\n        if (!nChars)\n            return {};\n        // FPDFText_GetText includes a 2-byte null terminator\n        QByteArray buffer((nChars + 1) * sizeof (FPDF_WCHAR), Qt::Uninitialized);\n        int nResultChars = FPDFText_GetText(textPage, 0, nChars, reinterpret_cast<ushort *>(buffer.data()));\n        Q_ASSERT(nResultChars <= nChars + 1);\n\n        FPDFText_ClosePage(textPage);\n        return QString::fromUtf16(reinterpret_cast<const char16_t *>(buffer.data()), nResultChars - 1);\n    }\n\n    FPDF_DOCUMENT              m_doc = nullptr;\n    FPDF_PAGE                  m_page = nullptr;\n    int                        m_currentPage = 0;\n    QString                    m_pageText;\n    std::optional<QTextStream> m_stream;\n};\n#endif // !defined(GPT4ALL_USE_QTPDF)\n\nclass WordDocumentReader final : public DocumentReader {\npublic:\n    
explicit WordDocumentReader(DocumentInfo info)\n        : DocumentReader(std::move(info))\n        , m_doc(info.file.canonicalFilePath().toStdString())\n    {\n        m_doc.open();\n        if (!m_doc.is_open())\n            throw std::runtime_error(fmt::format(\"Failed to open DOCX: {}\", info.file.canonicalFilePath()));\n\n        m_paragraph = &m_doc.paragraphs();\n        m_run       = &m_paragraph->runs();\n        // TODO(jared): metadata for Word documents?\n        postInit();\n    }\n\nprotected:\n    std::optional<QString> advance() override\n    {\n        // find non-space char\n        qsizetype wordStart = 0;\n        while (m_buffer.isEmpty() || m_buffer[wordStart].isSpace()) {\n            if (m_buffer.isEmpty() && !fillBuffer())\n                return std::nullopt;\n            if (m_buffer[wordStart].isSpace() && ++wordStart >= m_buffer.size()) {\n                m_buffer.clear();\n                wordStart = 0;\n            }\n        }\n\n        // find space char\n        qsizetype wordEnd = wordStart + 1;\n        while (wordEnd >= m_buffer.size() || !m_buffer[wordEnd].isSpace()) {\n            if (wordEnd >= m_buffer.size() && !fillBuffer())\n                break;\n            if (!m_buffer[wordEnd].isSpace())\n                ++wordEnd;\n        }\n\n        if (wordStart == wordEnd)\n            return std::nullopt;\n\n        auto size = wordEnd - wordStart;\n        QString word = std::move(m_buffer);\n        m_buffer = word.sliced(wordStart + size);\n        if (wordStart == 0)\n            word.resize(size);\n        else\n            word = word.sliced(wordStart, size);\n        return word;\n    }\n\n    bool fillBuffer()\n    {\n        for (;;) {\n            // get a run\n            while (!m_run->has_next()) {\n                // try next paragraph\n                if (!m_paragraph->has_next())\n                    return false;\n\n                m_paragraph->next();\n                m_buffer += u'\\n';\n            }\n\n   
         bool foundText = false;\n            auto &run = m_run->get_node();\n            for (auto node = run.first_child(); node; node = node.next_sibling()) {\n                std::string node_name = node.name();\n                if (node_name == \"w:t\") {\n                    const char *text = node.text().get();\n                    if (*text) {\n                        foundText = true;\n                        m_buffer += QUtf8StringView(text);\n                    }\n                } else if (node_name == \"w:br\") {\n                    m_buffer += u'\\n';\n                } else if (node_name == \"w:tab\") {\n                    m_buffer += u'\\t';\n                }\n            }\n\n            m_run->next();\n            if (foundText) return true;\n        }\n    }\n\n    duckx::Document   m_doc;\n    duckx::Paragraph *m_paragraph;\n    duckx::Run       *m_run;\n    QString           m_buffer;\n};\n\nclass TxtDocumentReader final : public DocumentReader {\npublic:\n    explicit TxtDocumentReader(DocumentInfo info)\n        : DocumentReader(std::move(info))\n        , m_file(info.file.canonicalFilePath())\n    {\n        if (!m_file.open(QIODevice::ReadOnly))\n            throw std::runtime_error(fmt::format(\"Failed to open text file: {}\", m_file.fileName()));\n\n        m_stream.setDevice(&m_file);\n        postInit();\n    }\n\nprotected:\n    std::optional<QString> advance() override\n    {\n        if (getError())\n            return std::nullopt;\n        while (!m_stream.atEnd()) {\n            QString word;\n            m_stream >> word;\n            if (getError())\n                return std::nullopt;\n            if (!word.isEmpty())\n                return word;\n        }\n        return std::nullopt;\n    }\n\n    std::optional<ChunkStreamer::Status> getError() const override\n    {\n        if (m_file.binarySeen())\n            return ChunkStreamer::Status::BINARY_SEEN;\n        if (m_file.error())\n            return 
ChunkStreamer::Status::ERROR;
        return std::nullopt;
    }

    BinaryDetectingFile m_file;
    QTextStream m_stream;
};

} // namespace

// Factory: choose a reader implementation from the document's file type.
std::unique_ptr<DocumentReader> DocumentReader::fromDocument(DocumentInfo doc)
{
    if (doc.isPdf())
        return std::make_unique<PdfDocumentReader>(std::move(doc));
    if (doc.isDocx())
        return std::make_unique<WordDocumentReader>(std::move(doc));
    return std::make_unique<TxtDocumentReader>(std::move(doc));
}

ChunkStreamer::ChunkStreamer(Database *database)
    : m_database(database) {}

ChunkStreamer::~ChunkStreamer() = default;

// Begin (or continue) streaming a document. If the key differs from the
// document currently in progress, streaming state is reset and any chunks
// already stored for this document id are purged from the database first.
void ChunkStreamer::setDocument(DocumentInfo doc, int documentId, const QString &embeddingModel)
{
    auto docKey = doc.key();
    if (!m_docKey || *m_docKey != docKey) {
        m_docKey         = docKey;
        m_reader         = DocumentReader::fromDocument(std::move(doc));
        m_documentId     = documentId;
        m_embeddingModel = embeddingModel;
        m_chunk.clear();
        m_page = 0;

        // make sure the document doesn't already have any chunks
        if (m_database->m_documentIdCache.contains(documentId)) {
            QSqlQuery q(m_database->m_db);
            if (!m_database->removeChunksByDocumentId(q, documentId))
                handleDocumentError("ERROR: Cannot remove chunks of document",
                                    documentId, m_reader->doc().file.canonicalPath(), q.lastError());
        }
    }
}

// Key of the document currently being streamed, if any.
std::optional<DocumentInfo::key_type> ChunkStreamer::currentDocKey() const
{
    return m_docKey;
}

// Abandon the in-progress document (if any).
void ChunkStreamer::reset()
{
    m_docKey.reset();
}

// Consume words from the current document and store them as fixed-size chunks
// until the document ends, a reader error occurs, or the scan timeslice expires.
ChunkStreamer::Status ChunkStreamer::step()
{
    // TODO: implement line_from/line_to
    constexpr int line_from = -1;
    constexpr int line_to = -1;
    const int folderId = m_reader->doc().folder;
    const int maxChunkSize = m_database->m_chunkSize;
    int nChunks = 0;
    int nAddedWords = 0;
    Status retval;

for (;;) {
        if (auto error = m_reader->getError()) {
            m_docKey.reset(); // done processing
            retval = *error;
            break;
        }

        // get a word, if needed
        std::optional<QString> word = QString(); // empty string to disable EOF logic
        if (m_chunk.length() < maxChunkSize + 1) {
            word = m_reader->word();
            if (m_chunk.isEmpty())
                m_page = m_reader->page(); // page number of first word

            if (word) {
                m_chunk += *word;
                m_chunk += u' ';
                m_reader->nextWord();
                m_nChunkWords++;
            }
        }

        // flush the accumulated chunk at end of document or when it is full
        if (!word || m_chunk.length() >= maxChunkSize + 1) { // +1 for trailing space
            if (!m_chunk.isEmpty()) {
                int nThisChunkWords = 0;
                auto chunk = m_chunk; // copy

                // handle overlength chunks
                if (m_chunk.length() > maxChunkSize + 1) {
                    // find the final space
                    qsizetype chunkEnd = chunk.lastIndexOf(u' ', -2);

                    qsizetype spaceSize;
                    if (chunkEnd >= 0) {
                        // slice off the last word
                        spaceSize = 1;
                        Q_ASSERT(m_nChunkWords >= 1);
                        // one word left
                        nThisChunkWords = m_nChunkWords - 1;
                        m_nChunkWords = 1;
                    } else {
                        // slice the overlong word
                        spaceSize = 0;
                        chunkEnd = maxChunkSize;
                        // partial word left, don't count it
                        nThisChunkWords = m_nChunkWords;
                        m_nChunkWords = 0;
                    }
                    // save the second part, excluding space if any
                    m_chunk = chunk.sliced(chunkEnd + spaceSize);
                    // consume the first part
                    chunk.truncate(chunkEnd);
                } else {
                    nThisChunkWords = m_nChunkWords;
                    m_nChunkWords = 0;
                    // there is no second part
                    m_chunk.clear();
                    // consume the whole chunk, excluding space
                    chunk.chop(1);
                }
                Q_ASSERT(chunk.length() <= maxChunkSize);

                // persist the chunk and queue it for embedding
                QSqlQuery q(m_database->m_db);
                int chunkId = 0;
                auto &metadata = m_reader->metadata();
                if (!m_database->addChunk(q,
                    m_documentId,
                    chunk,
                    m_reader->doc().file.fileName(), // basename
                    metadata.title,
                    metadata.author,
                    metadata.subject,
                    metadata.keywords,
                    m_page,
                    line_from,
                    line_to,
                    nThisChunkWords,
                    &chunkId
                )) {
                    qWarning() << "ERROR: Could not insert chunk into db" << q.lastError();
                }

                nAddedWords += nThisChunkWords;

                EmbeddingChunk toEmbed;
                toEmbed.model = m_embeddingModel;
                toEmbed.folder_id = folderId;
                toEmbed.chunk_id = chunkId;
                toEmbed.chunk = chunk;
                m_database->appendChunk(toEmbed);
                ++nChunks;
            }

            if (!word) {
                retval = Status::DOC_COMPLETE;
                m_docKey.reset(); // done processing
                break;
            }
        }

        if (m_database->scanQueueInterrupted()) {
            retval = Status::INTERRUPTED;
            break;
        }
    }

    if (nChunks) {
        CollectionItem item =
m_database->guiCollectionItem(folderId);

        // Set the start update if we haven't done so already
        if (item.startUpdate <= item.lastUpdate && item.currentEmbeddingsToIndex == 0)
            m_database->setStartUpdateTime(item);

        item.currentEmbeddingsToIndex += nChunks;
        item.totalEmbeddingsToIndex += nChunks;
        item.totalWords += nAddedWords;
        m_database->updateGuiForCollectionItem(item);
    }

    return retval;
}

// Buffer a chunk for embedding; flush to the LLM once a full batch accumulates.
void Database::appendChunk(const EmbeddingChunk &chunk)
{
    m_chunkList.reserve(s_batchSize);
    m_chunkList.append(chunk);
    if (m_chunkList.size() >= s_batchSize)
        sendChunkList();
}

// Hand the buffered chunks to the embedding LLM and clear the buffer.
void Database::sendChunkList()
{
    m_embLLM->generateDocEmbeddingsAsync(m_chunkList);
    m_chunkList.clear();
}

// Persist a batch of finished embeddings and update per-folder GUI statistics.
void Database::handleEmbeddingsGenerated(const QVector<EmbeddingResult> &embeddings)
{
    Q_ASSERT(!embeddings.isEmpty());

    QList<Embedding> sqlEmbeddings;
    for (const auto &e: embeddings) {
        // fromRawData avoids a copy; the view is only used within this call,
        // while 'embeddings' is still alive.
        auto data = QByteArray::fromRawData(
            reinterpret_cast<const char *>(e.embedding.data()),
            e.embedding.size() * sizeof(e.embedding.front())
        );
        sqlEmbeddings.append({e.model, e.folder_id, e.chunk_id, std::move(data)});
    }

    transaction();

    QSqlQuery q(m_db);
    QHash<EmbeddingFolder, EmbeddingStat> stats;
    if (!sqlAddEmbeddings(q, sqlEmbeddings, stats)) {
        qWarning() << "Database ERROR: failed to add embeddings:" << q.lastError();
        return rollback();
    }

    commit();

    // FIXME(jared): embedding counts are per-collectionitem, not per-folder
    for (const auto &[key, stat]: std::as_const(stats).asKeyValueRange()) {
        if (!m_collectionMap.contains(key.folder_id)) continue;
        CollectionItem item = guiCollectionItem(key.folder_id);
        Q_ASSERT(item.currentEmbeddingsToIndex >= stat.nAdded + stat.nSkipped);
        if (item.currentEmbeddingsToIndex <
stat.nAdded + stat.nSkipped) {
            qWarning() << "Database ERROR: underflow in current embeddings to index statistics";
            item.currentEmbeddingsToIndex = 0;
        } else {
            item.currentEmbeddingsToIndex -= stat.nAdded + stat.nSkipped;
        }

        Q_ASSERT(item.totalEmbeddingsToIndex >= stat.nSkipped);
        if (item.totalEmbeddingsToIndex < stat.nSkipped) {
            qWarning() << "Database ERROR: underflow in total embeddings to index statistics";
            item.totalEmbeddingsToIndex = 0;
        } else {
            item.totalEmbeddingsToIndex -= stat.nSkipped;
        }

        if (!stat.lastFile.isNull())
            item.fileCurrentlyProcessing = stat.lastFile;

        // Set the last update if we are done
        Q_ASSERT(item.startUpdate > item.lastUpdate);
        if (!item.indexing && item.currentEmbeddingsToIndex == 0)
            setLastUpdateTime(item);

        updateGuiForCollectionItem(item);
    }
}

// Surface an embedding failure on every folder that contributed a chunk.
void Database::handleErrorGenerated(const QVector<EmbeddingChunk> &chunks, const QString &error)
{
    /* FIXME(jared): errors are actually collection-specific because they are conditioned
     * on the embedding model, but this sets the error on all collections for a given
     * folder */

    QSet<int> folder_ids;
    for (const auto &c: chunks) { folder_ids << c.folder_id; }

    for (int fid: folder_ids) {
        if (!m_collectionMap.contains(fid)) continue;
        CollectionItem item = guiCollectionItem(fid);
        item.error = error;
        updateGuiForCollectionItem(item);
    }
}

// Number of documents still queued for scanning in a folder.
size_t Database::countOfDocuments(int folder_id) const
{
    if (auto it = m_docsToScan.find(folder_id); it != m_docsToScan.end())
        return it->second.size();
    return 0;
}

// Total size in bytes of the documents still queued for a folder.
size_t Database::countOfBytes(int folder_id) const
{
    if (auto it = m_docsToScan.find(folder_id); it != m_docsToScan.end()) {
        size_t totalBytes = 0;
        for (const
DocumentInfo &f : it->second)
            totalBytes += f.file.size();
        return totalBytes;
    }
    return 0;
}

// Pop the next document from the scan queue; an emptied per-folder queue is
// removed from the map entirely.
DocumentInfo Database::dequeueDocument()
{
    Q_ASSERT(!m_docsToScan.empty());
    auto firstEntry = m_docsToScan.begin();
    auto &[firstKey, queue] = *firstEntry;
    Q_ASSERT(!queue.empty());
    DocumentInfo result = std::move(queue.front());
    queue.pop_front();
    if (queue.empty())
        m_docsToScan.erase(firstEntry);
    return result;
}

// Drop a folder's pending documents from the scan queue. If the chunk streamer
// is mid-way through one of them, its state is reset as well.
void Database::removeFolderFromDocumentQueue(int folder_id)
{
    if (auto queueIt = m_docsToScan.find(folder_id); queueIt != m_docsToScan.end()) {
        if (auto key = m_chunkStreamer.currentDocKey()) {
            if (ranges::any_of(queueIt->second, [&key](auto &d) { return d.key() == key; }))
                m_chunkStreamer.reset(); // done with this document
        }
        // remove folder from queue
        m_docsToScan.erase(queueIt);
    }
}

// Add one document to its folder's queue, at the front or the back.
void Database::enqueueDocumentInternal(DocumentInfo &&info, bool prepend)
{
    auto &queue = m_docsToScan[info.folder];
    queue.insert(prepend ?
queue.begin() : queue.end(), std::move(info));
}

// Append a batch of documents to a folder's scan queue, reset the folder's
// progress counters, and kick off the scan timer.
void Database::enqueueDocuments(int folder_id, std::list<DocumentInfo> &&infos)
{
    // enqueue all documents
    auto &queue = m_docsToScan[folder_id];
    queue.splice(queue.end(), std::move(infos));

    CollectionItem item = guiCollectionItem(folder_id);
    item.currentDocsToIndex = queue.size();
    item.totalDocsToIndex = queue.size();
    const size_t bytes = countOfBytes(folder_id);
    item.currentBytesToIndex = bytes;
    item.totalBytesToIndex = bytes;
    updateGuiForCollectionItem(item);
    m_scanIntervalTimer->start();
}

// True once the current scan timeslice (100 ms) has been used up.
bool Database::scanQueueInterrupted() const
{
    return m_scanDurationTimer.elapsed() >= 100;
}

// Process queued documents inside one transaction until the timeslice expires
// or the queue drains; the timer is stopped once everything is scanned.
void Database::scanQueueBatch()
{
    transaction();

    m_scanDurationTimer.start();

    // scan for up to the maximum scan duration or until we run out of documents
    while (!m_docsToScan.empty()) {
        scanQueue();
        if (scanQueueInterrupted())
            break;
    }

    commit();

    if (m_docsToScan.empty())
        m_scanIntervalTimer->stop();
}

// Scan a single queued document: decide whether it needs (re)indexing, stream
// its chunks into the database, and update the folder's progress counters.
void Database::scanQueue()
{
    DocumentInfo info = dequeueDocument();
    const size_t countForFolder = countOfDocuments(info.folder);
    const int folder_id = info.folder;

    // Update info
    info.file.stat();

    // If the doc has since been deleted or no longer readable, then we schedule more work and return
    // leaving the cleanup for the cleanup handler
    if (!info.file.exists() || !info.file.isReadable())
        return updateFolderToIndex(folder_id, countForFolder);

    const qint64 document_time = info.file.fileTime(QFile::FileModificationTime).toMSecsSinceEpoch();
    const QString document_path = info.file.canonicalFilePath();
    const bool currentlyProcessing = info.currentlyProcessing;

    // Check and see if we already have this document
    QSqlQuery q(m_db);
    int existing_id = -1;
    qint64 existing_time = -1;
    if
(!selectDocument(q, document_path, &existing_id, &existing_time)) {\n        handleDocumentError(\"ERROR: Cannot select document\",\n            existing_id, document_path, q.lastError());\n        return updateFolderToIndex(folder_id, countForFolder);\n    }\n\n    // If we have the document, we need to compare the last modification time and if it is newer\n    // we must rescan the document, otherwise return\n    if (existing_id != -1 && !currentlyProcessing) {\n        Q_ASSERT(existing_time != -1);\n        if (document_time == existing_time) {\n            // No need to rescan, but we do have to schedule next\n            return updateFolderToIndex(folder_id, countForFolder);\n        }\n        if (!removeChunksByDocumentId(q, existing_id)) {\n            handleDocumentError(\"ERROR: Cannot remove chunks of document\",\n                existing_id, document_path, q.lastError());\n            return updateFolderToIndex(folder_id, countForFolder);\n        }\n        updateCollectionStatistics();\n    }\n\n    // Update the document_time for an existing document, or add it for the first time now\n    int document_id = existing_id;\n    if (!currentlyProcessing) {\n        if (document_id != -1) {\n            if (!updateDocument(q, document_id, document_time)) {\n                handleDocumentError(\"ERROR: Could not update document_time\",\n                    document_id, document_path, q.lastError());\n                return updateFolderToIndex(folder_id, countForFolder);\n            }\n        } else {\n            if (!addDocument(q, folder_id, document_time, document_path, &document_id)) {\n                handleDocumentError(\"ERROR: Could not add document\",\n                    document_id, document_path, q.lastError());\n                return updateFolderToIndex(folder_id, countForFolder);\n            }\n\n            CollectionItem item = guiCollectionItem(folder_id);\n            item.totalDocs += 1;\n            
updateGuiForCollectionItem(item);\n        }\n    }\n\n    // Get the embedding model for this folder\n    // FIXME(jared): there can be more than one since we allow one folder to be in multiple collections\n    QString embedding_model;\n    if (!sqlGetFolderEmbeddingModel(q, folder_id, embedding_model)) {\n        handleDocumentError(\"ERROR: Could not get embedding model\",\n            document_id, document_path, q.lastError());\n        return updateFolderToIndex(folder_id, countForFolder);\n    }\n\n    Q_ASSERT(document_id != -1);\n\n    {\n        try {\n            m_chunkStreamer.setDocument(info, document_id, embedding_model);\n        } catch (const std::runtime_error &e) {\n            qWarning() << \"LocalDocs ERROR:\" << e.what();\n            goto dequeue;\n        }\n    }\n\n    switch (m_chunkStreamer.step()) {\n    case ChunkStreamer::Status::INTERRUPTED:\n        info.currentlyProcessing = true;\n        enqueueDocumentInternal(std::move(info), /*prepend*/ true);\n        return updateFolderToIndex(folder_id, countForFolder + 1);\n    case ChunkStreamer::Status::BINARY_SEEN:\n        /* When we see a binary file, we treat it like an empty file so we know not to\n         * scan it again. All existing chunks are removed, and in-progress embeddings\n         * are ignored when they complete. 
*/\n        qInfo() << \"LocalDocs: Ignoring file with binary data:\" << document_path;\n\n        // this will also ensure in-flight embeddings are ignored\n        if (!removeChunksByDocumentId(q, existing_id))\n            handleDocumentError(\"ERROR: Cannot remove chunks of document\", existing_id, document_path, q.lastError());\n        updateCollectionStatistics();\n        break;\n    case ChunkStreamer::Status::ERROR:\n        qWarning() << \"error reading\" << document_path;\n        break;\n    case ChunkStreamer::Status::DOC_COMPLETE:\n        ;\n    }\n\ndequeue:\n    auto item = guiCollectionItem(folder_id);\n    Q_ASSERT(item.currentBytesToIndex >= info.file.size());\n    if (item.currentBytesToIndex < info.file.size()) {\n        qWarning() << \"Database ERROR: underflow in current bytes to index statistics\";\n        item.currentBytesToIndex = 0;\n    } else {\n        item.currentBytesToIndex -= info.file.size();\n    }\n    updateGuiForCollectionItem(item);\n    return updateFolderToIndex(folder_id, countForFolder);\n}\n\nvoid Database::scanDocuments(int folder_id, const QString &folder_path)\n{\n#if defined(DEBUG)\n    qDebug() << \"scanning folder for documents\" << folder_path;\n#endif\n\n    QDirIterator it(folder_path, QDir::Readable | QDir::Files | QDir::Dirs | QDir::NoDotAndDotDot,\n                    QDirIterator::Subdirectories);\n    std::list<DocumentInfo> infos;\n    while (it.hasNext()) {\n        it.next();\n        QFileInfo fileInfo = it.fileInfo();\n        if (fileInfo.isDir()) {\n            addFolderToWatch(fileInfo.canonicalFilePath());\n            continue;\n        }\n\n        if (!m_scannedFileExtensions.contains(fileInfo.suffix(), Qt::CaseInsensitive))\n            continue;\n\n        infos.push_back({ folder_id, fileInfo });\n    }\n\n    if (!infos.empty()) {\n        CollectionItem item = guiCollectionItem(folder_id);\n        item.indexing = true;\n        updateGuiForCollectionItem(item);\n        
enqueueDocuments(folder_id, std::move(infos));
    } else {
        updateFolderToIndex(folder_id, 0, false);
    }
}

// Wire up signals, open/initialize the database, then begin indexing the
// stored collections. Marks the database invalid on any setup failure.
void Database::start()
{
    connect(m_watcher, &QFileSystemWatcher::directoryChanged, this, &Database::directoryChanged);
    connect(m_embLLM, &EmbeddingLLM::embeddingsGenerated, this, &Database::handleEmbeddingsGenerated);
    connect(m_embLLM, &EmbeddingLLM::errorGenerated, this, &Database::handleErrorGenerated);
    m_scanIntervalTimer->callOnTimeout(this, &Database::scanQueueBatch);

    const QString modelPath = MySettings::globalInstance()->modelPath();
    QList<CollectionItem> oldCollections;

    if (!openLatestDb(modelPath, oldCollections)) {
        m_databaseValid = false;
    } else if (!initDb(modelPath, oldCollections)) {
        m_databaseValid = false;
    } else {
        cleanDB();
        ftsIntegrityCheck();
        QSqlQuery q(m_db);
        if (!refreshDocumentIdCache(q)) {
            m_databaseValid = false;
        } else {
            addCurrentFolders();
        }
    }

    if (!m_databaseValid)
        emit databaseValidChanged();
}

// Load all collections from the db, surface them in the GUI, resume unfinished
// embeddings, and start scanning folders that are not awaiting force-indexing.
void Database::addCurrentFolders()
{
#if defined(DEBUG)
    qDebug() << "addCurrentFolders";
#endif

    QSqlQuery q(m_db);
    QList<CollectionItem> collections;
    if (!selectAllFromCollections(q, &collections)) {
        qWarning() << "ERROR: Cannot select collections" << q.lastError();
        return;
    }

    guiCollectionListUpdated(collections);

    scheduleUncompletedEmbeddings();

    for (const auto &i : collections) {
        if (!i.forceIndexing) {
            addFolderToWatch(i.folder_path);
            scanDocuments(i.folder_id, i.folder_path);
        }
    }

    updateCollectionStatistics();
}

// Re-submit chunks that have no completed embedding yet and restore the
// per-folder embedding progress counters in the GUI.
void Database::scheduleUncompletedEmbeddings()
{
    QHash<IncompleteChunk, QStringList> chunkList;
    QSqlQuery q(m_db);
    if (!selectAllUncompletedChunks(q, chunkList)) {
        qWarning() << "ERROR: Cannot select uncompleted chunks" << q.lastError();
        return;
    }

    if (chunkList.isEmpty())
        return;

    // map of folder_id -> chunk count
    QMap<int, int> folderNChunks;
    for (auto it = chunkList.keyBegin(), end = chunkList.keyEnd(); it != end; ++it) {
        int folder_id = it->folder_id;

        if (folderNChunks.contains(folder_id)) continue;
        int total = 0;
        if (!selectCountChunks(q, folder_id, total)) {
            qWarning() << "ERROR: Cannot count total chunks" << q.lastError();
            return;
        }
        folderNChunks.insert(folder_id, total);
    }

    // map of (folder_id, collection) -> incomplete count
    QMap<QPair<int, QString>, int> itemNIncomplete;
    for (const auto &[chunk, collections]: std::as_const(chunkList).asKeyValueRange())
        for (const auto &collection: std::as_const(collections))
            itemNIncomplete[{ chunk.folder_id, collection }]++;

    for (const auto &[key, nIncomplete]: std::as_const(itemNIncomplete).asKeyValueRange()) {
        const auto &[folder_id, collection] = key;

        /* FIXME(jared): this needs to be split by collection because different
         * collections have different embedding models */
        int total = folderNChunks.value(folder_id);
        CollectionItem item = guiCollectionItem(folder_id);
        item.totalEmbeddingsToIndex = total;
        item.currentEmbeddingsToIndex = nIncomplete;
        updateGuiForCollectionItem(item);
    }

    // re-queue the incomplete chunks for embedding, one batch at a time
    for (auto it = chunkList.keyBegin(), end = chunkList.keyEnd(); it != end;) {
        QList<EmbeddingChunk> batch;
        for (; it != end && batch.size() < s_batchSize; ++it)
            batch.append({ /*model*/ it->embedding_model, /*folder_id*/ it->folder_id, /*chunk_id*/ it->chunk_id, /*chunk*/ it->text });
        Q_ASSERT(!batch.isEmpty());
        m_embLLM->generateDocEmbeddingsAsync(batch);
    }
}

void
Database::updateCollectionStatistics()
{
    // Recompute per-folder doc/word/token totals from the database and push
    // them to the GUI.
    QSqlQuery q(m_db);
    QList<CollectionItem> collections;
    if (!selectAllFromCollections(q, &collections)) {
        qWarning() << "ERROR: Cannot select collections" << q.lastError();
        return;
    }

    for (const auto &i: std::as_const(collections)) {
        int total_docs = 0;
        int total_words = 0;
        int total_tokens = 0;
        if (!selectCountStatistics(q, i.folder_id, &total_docs, &total_words, &total_tokens)) {
            qWarning() << "ERROR: could not count statistics for folder" << q.lastError();
        } else {
            CollectionItem item = guiCollectionItem(i.folder_id);
            item.totalDocs = total_docs;
            item.totalWords = total_words;
            item.totalTokens = total_tokens;
            updateGuiForCollectionItem(item);
        }
    }
}

// Look up a folder's id in the db, inserting it first if not yet known.
// Returns -1 if the path does not exist, is unreadable, or on SQL failure.
int Database::checkAndAddFolderToDB(const QString &path)
{
    QFileInfo info(path);
    if (!info.exists() || !info.isReadable()) {
        qWarning() << "ERROR: Cannot add folder that doesn't exist or not readable" << path;
        return -1;
    }

    QSqlQuery q(m_db);
    int folder_id = -1;

    // See if the folder exists in the db
    if (!selectFolder(q, path, &folder_id)) {
        qWarning() << "ERROR: Cannot select folder from path" << path << q.lastError();
        return -1;
    }

    // Add the folder
    if (folder_id == -1 && !addFolderToDB(q, path, &folder_id)) {
        qWarning() << "ERROR: Cannot add folder to db with path" << path << q.lastError();
        return -1;
    }

    Q_ASSERT(folder_id != -1);
    return folder_id;
}

// Assign an embedding model to a collection that was awaiting one, then start
// watching and scanning each of its folders.
void Database::forceIndexing(const QString &collection, const QString &embedding_model)
{
    Q_ASSERT(!embedding_model.isNull());

    QSqlQuery q(m_db);
    QList<QPair<int, QString>> folders;
    if (!selectFoldersFromCollection(q, collection, &folders)) {
        qWarning() << "ERROR: Cannot select folders from collections" << collection << q.lastError();
        return;
    }

    if (!setCollectionEmbeddingModel(q, collection, embedding_model)) {
        qWarning().nospace() << "ERROR: Cannot set embedding model for collection " << collection << ": "
                             << q.lastError();
        return;
    }

    for (const auto &folder: std::as_const(folders)) {
        CollectionItem item = guiCollectionItem(folder.first);
        item.embeddingModel = embedding_model;
        item.forceIndexing = false;
        updateGuiForCollectionItem(item);
        addFolderToWatch(folder.second);
        scanDocuments(folder.first, folder.second);
    }
}

// Drop all documents indexed under a folder path and scan it again from scratch.
void Database::forceRebuildFolder(const QString &path)
{
    QSqlQuery q(m_db);
    int folder_id;
    if (!selectFolder(q, path, &folder_id)) {
        qWarning().nospace() << "Database ERROR: Cannot select folder from path " << path << ": " << q.lastError();
        return;
    }

    Q_ASSERT(!m_docsToScan.contains(folder_id));

    transaction();

    if (!sqlRemoveDocsByFolderPath(q, path)) {
        qWarning().nospace() << "Database ERROR: Cannot remove chunks for folder " << path << ": " << q.lastError();
        return rollback();
    }

    commit();

    updateCollectionStatistics();

    // We now have zero embeddings. Document progress will be updated by scanDocuments.
    // FIXME(jared): this updates the folder, but these values should also depend on the collection
    CollectionItem item = guiCollectionItem(folder_id);
    item.currentEmbeddingsToIndex = item.totalEmbeddingsToIndex = 0;
    updateGuiForCollectionItem(item);

    scanDocuments(folder_id, path);
}

// Add a folder to a collection, creating the folder record and the collection
// (with the given embedding model) as needed.
bool Database::addFolder(const QString &collection, const QString &path, const QString &embedding_model)
{
    // add the folder, if needed
    const int folder_id = checkAndAddFolderToDB(path);
    if (folder_id == -1)
        return false;

    std::optional<CollectionItem> item;
    QSqlQuery q(m_db);
    if (!selectCollectionByName(q, collection, item)) {
        qWarning().nospace() << "Database ERROR: Cannot select collection " << collection << ": " << q.lastError();
        return false;
    }

    // add the collection, if needed
    if (!item) {
        item.emplace();
        if (!addCollection(q, collection, QDateTime() /*start_update*/, QDateTime() /*last_update*/,
            embedding_model /*embedding_model*/, *item)) {
            qWarning().nospace() << "ERROR: Cannot add collection " << collection << ": " << q.lastError();
            return false;
        }
    }

    // link the folder and the collection, if needed
    int res = addCollectionItem(q, item->collection_id, folder_id);
    if (res < 0) { // error
        qWarning().nospace() << "Database ERROR: Cannot add folder " << path << " to collection " << collection << ": "
                             << q.lastError();
        return false;
    }

    // add the new collection item to the UI
    if (res == 1) { // new item added
        item->folder_path = path;
        item->folder_id = folder_id;
        addGuiCollectionItem(item.value());

        // note: this is the existing embedding model if the collection was found
        if (!item->embeddingModel.isNull()) {
addFolderToWatch(path);\n            scanDocuments(folder_id, path);\n        }\n    }\n    return true;\n}\n\nvoid Database::removeFolder(const QString &collection, const QString &path)\n{\n#if defined(DEBUG)\n    qDebug() << \"removeFolder\" << path;\n#endif\n\n    QSqlQuery q(m_db);\n    int folder_id = -1;\n\n    // See if the folder exists in the db\n    if (!selectFolder(q, path, &folder_id)) {\n        qWarning() << \"ERROR: Cannot select folder from path\" << path << q.lastError();\n        return;\n    }\n\n    // If we don't have a folder_id in the db, then something bad has happened\n    Q_ASSERT(folder_id != -1);\n    if (folder_id == -1) {\n        qWarning() << \"ERROR: Collected folder does not exist in db\" << path;\n        m_watcher->removePath(path);\n        return;\n    }\n\n    transaction();\n\n    if (removeFolderInternal(collection, folder_id, path)) {\n        commit();\n    } else {\n        rollback();\n    }\n}\n\nbool Database::removeFolderInternal(const QString &collection, int folder_id, const QString &path)\n{\n    // Remove it from the collection\n    QSqlQuery q(m_db);\n    int nRemaining = removeCollectionFolder(q, collection, folder_id);\n    if (nRemaining == -1) {\n        qWarning().nospace() << \"Database ERROR: Cannot remove collection \" << collection << \" from folder \"\n                             << folder_id << \": \" << q.lastError();\n        return false;\n    }\n    removeGuiFolderById(collection, folder_id);\n\n    if (!sqlPruneCollections(q)) {\n        qWarning() << \"Database ERROR: Cannot prune collections:\" << q.lastError();\n        return false;\n    }\n\n    // Keep folder if it is still referenced\n    if (nRemaining)\n        return true;\n\n    // Remove the last reference to a folder\n\n    // First remove all upcoming jobs associated with this folder\n    removeFolderFromDocumentQueue(folder_id);\n\n    // Get a list of all documents associated with folder\n    QList<int> documentIds;\n    if 
(!selectDocuments(q, folder_id, &documentIds)) {\n        qWarning() << \"ERROR: Cannot select documents\" << folder_id << q.lastError();\n        return false;\n    }\n\n    // Remove all chunks and documents associated with this folder\n    for (int document_id: std::as_const(documentIds)) {\n        if (!removeChunksByDocumentId(q, document_id)) {\n            qWarning() << \"ERROR: Cannot remove chunks of document_id\" << document_id << q.lastError();\n            return false;\n        }\n\n        if (!removeDocument(q, document_id)) {\n            qWarning() << \"ERROR: Cannot remove document_id\" << document_id << q.lastError();\n            return false;\n        }\n    }\n\n    if (!removeFolderFromDB(q, folder_id)) {\n        qWarning() << \"ERROR: Cannot remove folder_id\" << folder_id << q.lastError();\n        return false;\n    }\n\n    m_collectionMap.remove(folder_id);\n    removeFolderFromWatch(path);\n    return true;\n}\n\nvoid Database::addFolderToWatch(const QString &path)\n{\n#if defined(DEBUG)\n    qDebug() << \"addFolderToWatch\" << path;\n#endif\n    // pre-check because addPath returns false for already watched paths\n    if (!m_watchedPaths.contains(path)) {\n        if (!m_watcher->addPath(path))\n            qWarning() << \"Database::addFolderToWatch: failed to watch\" << path;\n        // add unconditionally to suppress repeated warnings\n        m_watchedPaths << path;\n    }\n}\n\nvoid Database::removeFolderFromWatch(const QString &path)\n{\n#if defined(DEBUG)\n    qDebug() << \"removeFolderFromWatch\" << path;\n#endif\n    QDirIterator it(path, QDir::Readable | QDir::Dirs | QDir::NoDotAndDotDot, QDirIterator::Subdirectories);\n    QStringList children { path };\n    while (it.hasNext())\n        children.append(it.next());\n\n    m_watcher->removePaths(children);\n    m_watchedPaths -= QSet(children.begin(), children.end());\n}\n\nQList<int> Database::searchEmbeddingsHelper(const std::vector<float> &query, QSqlQuery &q, int 
nNeighbors)\n{\n    constexpr int BATCH_SIZE = 2048;\n\n    const int n_embd = query.size();\n    const us::metric_punned_t metric(n_embd, us::metric_kind_t::ip_k); // inner product\n\n    us::executor_default_t executor(std::thread::hardware_concurrency());\n    us::exact_search_t search;\n\n    QList<int> batchChunkIds;\n    QList<float> batchEmbeddings;\n    batchChunkIds.reserve(BATCH_SIZE);\n    batchEmbeddings.reserve(BATCH_SIZE * n_embd);\n\n    struct Result { int chunkId; us::distance_punned_t dist; };\n    QList<Result> results;\n\n    // The q parameter is expected to be the result of a QSqlQuery returning (chunk_id, embedding) pairs\n    while (q.at() != QSql::AfterLastRow) { // batches\n        batchChunkIds.clear();\n        batchEmbeddings.clear();\n\n        while (batchChunkIds.count() < BATCH_SIZE && q.next()) { // batch\n            batchChunkIds << q.value(0).toInt();\n            batchEmbeddings.resize(batchEmbeddings.size() + n_embd);\n            QVariant embdCol = q.value(1);\n            if (embdCol.userType() != QMetaType::QByteArray) {\n                qWarning() << \"Database ERROR: Expected embedding to be blob, got\" << embdCol.userType();\n                return {};\n            }\n            auto *embd = static_cast<const QByteArray *>(embdCol.constData());\n            const int embd_stride = n_embd * sizeof(float);\n            if (embd->size() != embd_stride) {\n                qWarning() << \"Database ERROR: Expected embedding to be\" << embd_stride << \"bytes, got\"\n                           << embd->size();\n                return {};\n            }\n            memcpy(&*(batchEmbeddings.end() - n_embd), embd->constData(), embd_stride);\n        }\n\n        int nBatch = batchChunkIds.count();\n        if (!nBatch)\n            break;\n\n        // get top-k nearest neighbors of this batch\n        int kBatch = qMin(nNeighbors, nBatch);\n        us::exact_search_results_t batchResults = search(\n            (us::byte_t const 
*)batchEmbeddings.data(), nBatch, n_embd * sizeof(float),\n            (us::byte_t const *)query.data(),           1,      n_embd * sizeof(float),\n            kBatch, metric\n        );\n\n        for (int i = 0; i < kBatch; ++i) {\n            auto offset = batchResults.at(0)[i].offset;\n            us::distance_punned_t distance = batchResults.at(0)[i].distance;\n            results.append({batchChunkIds[offset], distance});\n        }\n    }\n\n    // get top-k nearest neighbors of combined results\n    nNeighbors = qMin(nNeighbors, results.size());\n    std::partial_sort(\n        results.begin(), results.begin() + nNeighbors, results.end(),\n        [](const Result &a, const Result &b) { return a.dist < b.dist; }\n    );\n\n    QList<int> chunkIds;\n    chunkIds.reserve(nNeighbors);\n    for (int i = 0; i < nNeighbors; i++)\n        chunkIds << results[i].chunkId;\n    return chunkIds;\n}\n\nQList<int> Database::searchEmbeddings(const std::vector<float> &query, const QList<QString> &collections,\n    int nNeighbors)\n{\n    QSqlQuery q(m_db);\n    if (!q.exec(GET_COLLECTION_EMBEDDINGS_SQL.arg(collections.join(\"', '\")))) {\n        qWarning() << \"Database ERROR: Failed to exec embeddings query:\" << q.lastError();\n        return {};\n    }\n    return searchEmbeddingsHelper(query, q, nNeighbors);\n}\n\nQList<int> Database::scoreChunks(const std::vector<float> &query, const QList<int> &chunks)\n{\n    QList<QString> chunkStrings;\n    for (int id : chunks)\n        chunkStrings << QString::number(id);\n    QSqlQuery q(m_db);\n    if (!q.exec(GET_CHUNK_EMBEDDINGS_SQL.arg(chunkStrings.join(\", \")))) {\n        qWarning() << \"Database ERROR: Failed to exec embeddings query:\" << q.lastError();\n        return {};\n    }\n    return searchEmbeddingsHelper(query, q, chunks.size());\n}\n\nQList<Database::BM25Query> Database::queriesForFTS5(const QString &input)\n{\n    // Escape double quotes by adding a second double quote\n    QString escapedInput = input;\n  
  escapedInput.replace(\"\\\"\", \"\\\"\\\"\");\n\n    static QRegularExpression spaces(\"\\\\s+\");\n    QStringList oWords = escapedInput.split(spaces, Qt::SkipEmptyParts);\n\n    QList<BM25Query> queries;\n\n    // Start by trying to match the entire input\n    BM25Query e;\n    e.isExact = true;\n    e.input = oWords.join(\" \");\n    e.query = \"\\\"\" + oWords.join(\" \") + \"\\\"\";\n    e.qlength = oWords.size();\n    e.ilength = oWords.size();\n    queries << e;\n\n    // https://github.com/igorbrigadir/stopwords?tab=readme-ov-file\n    // Lucene, Solr, Elastisearch\n    static const QSet<QString> stopWords = {\n        \"a\", \"an\", \"and\", \"are\", \"as\", \"at\", \"be\", \"but\", \"by\",\n        \"for\", \"if\", \"in\", \"into\", \"is\", \"it\", \"no\", \"not\", \"of\",\n        \"on\", \"or\", \"such\", \"that\", \"the\", \"their\", \"then\", \"there\",\n        \"these\", \"they\", \"this\", \"to\", \"was\", \"will\", \"with\"\n    };\n\n    QStringList quotedWords;\n    for (const QString &w : oWords)\n        if (!stopWords.contains(w.toLower()))\n            quotedWords << \"\\\"\" + w + \"\\\"\";\n\n    BM25Query b;\n    b.input = oWords.join(\" \");\n    b.query = \"(\" + quotedWords.join(\" OR \") + \")\";\n    b.qlength = 1; // length of phrase\n    b.ilength = oWords.size();\n    b.rlength = oWords.size() - quotedWords.size();\n    queries << b;\n    return queries;\n}\n\nQList<int> Database::searchBM25(const QString &query, const QList<QString> &collections, BM25Query &bm25q, int k)\n{\n    struct SearchResult { int chunkId; float score; };\n    QList<BM25Query> bm25Queries = queriesForFTS5(query);\n\n    QSqlQuery sqlQuery(m_db);\n    sqlQuery.prepare(SELECT_CHUNKS_FTS_SQL.arg(collections.join(\"', '\"), QString::number(k)));\n\n    QList<SearchResult> results;\n    for (auto &bm25Query : std::as_const(bm25Queries)) {\n        sqlQuery.addBindValue(bm25Query.query);\n\n        if (!sqlQuery.exec()) {\n            qWarning() << \"Database 
ERROR: Failed to execute BM25 query:\" << sqlQuery.lastError();\n            return {};\n        }\n\n        if (sqlQuery.next()) {\n            // Save the query that was used to produce results\n            bm25q = bm25Query;\n            break;\n        }\n    }\n\n    if (sqlQuery.at() != QSql::AfterLastRow) {\n        do {\n            const int chunkId = sqlQuery.value(0).toInt();\n            const float score = sqlQuery.value(1).toFloat();\n            results.append({chunkId, score});\n        } while (sqlQuery.next());\n    }\n\n    k = qMin(k, results.size());\n    std::partial_sort(\n        results.begin(), results.begin() + k, results.end(),\n        [](const SearchResult &a, const SearchResult &b) { return a.score < b.score; }\n    );\n\n    QList<int> chunkIds;\n    chunkIds.reserve(k);\n    for (int i = 0; i < k; i++)\n        chunkIds << results[i].chunkId;\n    return chunkIds;\n}\n\nfloat Database::computeBM25Weight(const Database::BM25Query &bm25q)\n{\n    float bmWeight = 0.0f;\n    if (bm25q.isExact) {\n        bmWeight = 0.9f; // the highest we give\n    } else {\n        // qlength is the length of the phrases in the query by number of distinct words\n        // ilength is the length of the natural language query by number of distinct words\n        // rlength is the number of stop words removed from the natural language query to form the query\n\n        // calculate the query length weight based on the ratio of query terms to meaningful terms.\n        // this formula adjusts the weight with the empirically determined insight that BM25's\n        // effectiveness decreases as query length increases.\n        float queryLengthWeight = 1 / powf(float(bm25q.ilength - bm25q.rlength), 2);\n        queryLengthWeight = qBound(0.0f, queryLengthWeight, 1.0f);\n\n        // the weighting is bound between 1/4 and 3/4 which was determined empirically to work well\n        // with the beir nfcorpus, scifact, fiqa and trec-covid datasets along with 
our embedding\n        // model\n        bmWeight = 0.25f + queryLengthWeight * 0.50f;\n    }\n\n#if 0\n    qDebug()\n        << \"bm25q.type\"         << bm25q.type\n        << \"bm25q.qlength\"      << bm25q.qlength\n        << \"bm25q.ilength\"      << bm25q.ilength\n        << \"bm25q.rlength\"      << bm25q.rlength\n        << \"bmWeight\"           << bmWeight;\n#endif\n\n    return bmWeight;\n}\n\nQList<int> Database::reciprocalRankFusion(const std::vector<float> &query, const QList<int> &embeddingResults,\n    const QList<int> &bm25Results, const BM25Query &bm25q, int k)\n{\n    // We default to the embedding results and augment with bm25 if any\n    QList<int> results = embeddingResults;\n\n    QList<int> missingScores;\n    QHash<int, int> bm25Ranks;\n    for (int i = 0; i < bm25Results.size(); ++i) {\n        if (!results.contains(bm25Results[i]))\n            missingScores.append(bm25Results[i]);\n        bm25Ranks[bm25Results[i]] = i + 1;\n    }\n\n    if (!missingScores.isEmpty()) {\n        QList<int> scored = scoreChunks(query, missingScores);\n        results << scored;\n    }\n\n    QHash<int, int> embeddingRanks;\n    for (int i = 0; i < results.size(); ++i)\n        embeddingRanks[results[i]] = i + 1;\n\n    const float bmWeight = bm25Results.isEmpty() ? 
0 : computeBM25Weight(bm25q);\n\n    // From the paper: \"Reciprocal Rank Fusion outperforms Condorcet and individual Rank Learning Methods\"\n    // doi: 10.1145/1571941.157211\n    const int fusion_k = 60;\n\n    std::stable_sort(\n        results.begin(), results.end(),\n        [&](const int &a, const int &b) {\n            // Reciprocal Rank Fusion (RRF)\n            const int aBm25Rank = bm25Ranks.value(a, bm25Results.size() + 1);\n            const int aEmbeddingRank = embeddingRanks.value(a, embeddingResults.size() + 1);\n            Q_ASSERT(embeddingRanks.contains(a));\n\n            const int bBm25Rank = bm25Ranks.value(b, bm25Results.size() + 1);\n            const int bEmbeddingRank = embeddingRanks.value(b, embeddingResults.size() + 1);\n            Q_ASSERT(embeddingRanks.contains(b));\n\n            const float aBm25Score = 1.0f / (fusion_k + aBm25Rank);\n            const float bBm25Score = 1.0f / (fusion_k + bBm25Rank);\n            const float aEmbeddingScore = 1.0f / (fusion_k + aEmbeddingRank);\n            const float bEmbeddingScore = 1.0f / (fusion_k + bEmbeddingRank);\n            const float aWeightedScore = bmWeight * aBm25Score + (1.f - bmWeight) * aEmbeddingScore;\n            const float bWeightedScore = bmWeight * bBm25Score + (1.f - bmWeight) * bEmbeddingScore;\n\n            // Higher RRF score means better ranking, so we use greater than for sorting\n            return aWeightedScore > bWeightedScore;\n        }\n    );\n\n    k = qMin(k, results.size());\n    results.resize(k);\n    return results;\n}\n\nQList<int> Database::searchDatabase(const QString &query, const QList<QString> &collections, int k)\n{\n    std::vector<float> queryEmbd = m_embLLM->generateQueryEmbedding(query);\n    if (queryEmbd.empty()) {\n        qDebug() << \"ERROR: generating embeddings returned a null result\";\n        return { };\n    }\n\n    const QList<int> embeddingResults = searchEmbeddings(queryEmbd, collections, k);\n    BM25Query bm25q;\n    
const QList<int> bm25Results = searchBM25(query, collections, bm25q, k);\n    return reciprocalRankFusion(queryEmbd, embeddingResults, bm25Results, bm25q, k);\n}\n\nvoid Database::retrieveFromDB(const QList<QString> &collections, const QString &text, int retrievalSize,\n    QList<ResultInfo> *results)\n{\n#if defined(DEBUG)\n    qDebug() << \"retrieveFromDB\" << collections << text << retrievalSize;\n#endif\n\n    QList<int> searchResults = searchDatabase(text, collections, retrievalSize);\n    if (searchResults.isEmpty())\n        return;\n\n    QSqlQuery q(m_db);\n    if (!selectChunk(q, searchResults)) {\n        qDebug() << \"ERROR: selecting chunks:\" << q.lastError();\n        return;\n    }\n\n    QHash<int, ResultInfo> tempResults;\n    while (q.next()) {\n        const int rowid = q.value(0).toInt();\n        const QString document_path = q.value(2).toString();\n        const QString chunk_text = q.value(3).toString();\n        const QString date = QDateTime::fromMSecsSinceEpoch(q.value(1).toLongLong()).toString(\"yyyy, MMMM dd\");\n        const QString file = q.value(4).toString();\n        const QString title = q.value(5).toString();\n        const QString author = q.value(6).toString();\n        const int page = q.value(7).toInt();\n        const int from = q.value(8).toInt();\n        const int to = q.value(9).toInt();\n        const QString collectionName = q.value(10).toString();\n        ResultInfo info;\n        info.collection = collectionName;\n        info.path = document_path;\n        info.file = file;\n        info.title = title;\n        info.author = author;\n        info.date = date;\n        info.text = chunk_text;\n        info.page = page;\n        info.from = from;\n        info.to = to;\n        tempResults.insert(rowid, info);\n#if defined(DEBUG)\n        qDebug() << \"retrieve rowid:\" << rowid\n                 << \"chunk_text:\" << chunk_text;\n#endif\n    }\n\n    for (int id : searchResults)\n        if 
(tempResults.contains(id))\n            results->append(tempResults.value(id));\n}\n\nbool Database::ftsIntegrityCheck()\n{\n    QSqlQuery q(m_db);\n\n    // Returns an error executing sql if it the integrity check fails\n    // See: https://www.sqlite.org/fts5.html#the_integrity_check_command\n    const bool success = q.exec(FTS_INTEGRITY_SQL);\n    if (!success && q.lastError().nativeErrorCode() != \"267\" /*SQLITE_CORRUPT_VTAB from sqlite header*/) {\n        qWarning() << \"ERROR: Cannot prepare sql for fts integrity check\" << q.lastError();\n        return false;\n    }\n\n    if (!success && !q.exec(FTS_REBUILD_SQL)) {\n        qWarning() << \"ERROR: Cannot exec sql for fts rebuild\" << q.lastError();\n        return false;\n    }\n\n    return true;\n}\n\n// FIXME This is very slow and non-interruptible and when we close the application and we're\n// cleaning a large table this can cause the app to take forever to shut down. This would ideally be\n// interruptible and we'd continue 'cleaning' when we restart\nbool Database::cleanDB()\n{\n#if defined(DEBUG)\n    qDebug() << \"cleanDB\";\n#endif\n\n    // Scan all folders in db to make sure they still exist\n    QSqlQuery q(m_db);\n    QList<CollectionItem> collections;\n    if (!selectAllFromCollections(q, &collections)) {\n        qWarning() << \"ERROR: Cannot select collections\" << q.lastError();\n        return false;\n    }\n\n    transaction();\n\n    for (const auto &i: std::as_const(collections)) {\n        // Find the path for the folder\n        QFileInfo info(i.folder_path);\n        if (!info.exists() || !info.isReadable()) {\n#if defined(DEBUG)\n            qDebug() << \"clean db removing folder\" << i.folder_id << i.folder_path;\n#endif\n            if (!removeFolderInternal(i.collection, i.folder_id, i.folder_path)) {\n                rollback();\n                return false;\n            }\n        }\n    }\n\n    // Scan all documents in db to make sure they still exist\n    if 
(!q.prepare(SELECT_ALL_DOCUMENTS_SQL)) {\n        qWarning() << \"ERROR: Cannot prepare sql for select all documents\" << q.lastError();\n        rollback();\n        return false;\n    }\n\n    if (!q.exec()) {\n        qWarning() << \"ERROR: Cannot exec sql for select all documents\" << q.lastError();\n        rollback();\n        return false;\n    }\n\n    while (q.next()) {\n        int document_id = q.value(0).toInt();\n        QString document_path = q.value(1).toString();\n        QFileInfo info(document_path);\n        if (info.exists() && info.isReadable() && m_scannedFileExtensions.contains(info.suffix(), Qt::CaseInsensitive))\n            continue;\n\n#if defined(DEBUG)\n        qDebug() << \"clean db removing document\" << document_id << document_path;\n#endif\n\n        // Remove all chunks and documents that either don't exist or have become unreadable\n        QSqlQuery query(m_db);\n        if (!removeChunksByDocumentId(query, document_id)) {\n            qWarning() << \"ERROR: Cannot remove chunks of document_id\" << document_id << query.lastError();\n            rollback();\n            return false;\n        }\n\n        if (!removeDocument(query, document_id)) {\n            qWarning() << \"ERROR: Cannot remove document_id\" << document_id << query.lastError();\n            rollback();\n            return false;\n        }\n    }\n\n    commit();\n    return true;\n}\n\nvoid Database::changeChunkSize(int chunkSize)\n{\n    if (chunkSize == m_chunkSize)\n        return;\n\n#if defined(DEBUG)\n    qDebug() << \"changeChunkSize\" << chunkSize;\n#endif\n\n    QSqlQuery q(m_db);\n    // Scan all documents in db to make sure they still exist\n    if (!q.prepare(SELECT_ALL_DOCUMENTS_SQL)) {\n        qWarning() << \"ERROR: Cannot prepare sql for select all documents\" << q.lastError();\n        return;\n    }\n\n    if (!q.exec()) {\n        qWarning() << \"ERROR: Cannot exec sql for select all documents\" << q.lastError();\n        return;\n    }\n\n  
  transaction();\n\n    while (q.next()) {\n        int document_id = q.value(0).toInt();\n        // Remove all chunks and documents to change the chunk size\n        QSqlQuery query(m_db);\n        if (!removeChunksByDocumentId(query, document_id)) {\n            qWarning() << \"ERROR: Cannot remove chunks of document_id\" << document_id << query.lastError();\n            return rollback();\n        }\n\n        if (!removeDocument(query, document_id)) {\n            qWarning() << \"ERROR: Cannot remove document_id\" << document_id << query.lastError();\n            return rollback();\n        }\n    }\n\n    commit();\n\n    m_chunkSize = chunkSize;\n    addCurrentFolders();\n    updateCollectionStatistics();\n}\n\nvoid Database::changeFileExtensions(const QStringList &extensions)\n{\n#if defined(DEBUG)\n    qDebug() << \"changeFileExtensions\";\n#endif\n\n    m_scannedFileExtensions = extensions;\n\n    if (cleanDB())\n        updateCollectionStatistics();\n\n    QSqlQuery q(m_db);\n    QList<CollectionItem> collections;\n    if (!selectAllFromCollections(q, &collections)) {\n        qWarning() << \"ERROR: Cannot select collections\" << q.lastError();\n        return;\n    }\n\n    for (const auto &i: std::as_const(collections)) {\n        if (!i.forceIndexing)\n            scanDocuments(i.folder_id, i.folder_path);\n    }\n}\n\nvoid Database::directoryChanged(const QString &path)\n{\n#if defined(DEBUG)\n    qDebug() << \"directoryChanged\" << path;\n#endif\n\n    // search for a collection that contains this folder (we watch subdirectories)\n    int folder_id = -1;\n    QDir dir(path);\n    for (;;) {\n        QSqlQuery q(m_db);\n        if (!selectFolder(q, dir.path(), &folder_id)) {\n            qWarning() << \"ERROR: Cannot select folder from path\" << dir.path() << q.lastError();\n            return;\n        }\n        if (folder_id != -1)\n            break;\n\n        // check next parent\n        if (!dir.cdUp()) {\n            if (!dir.isRoot()) 
break; // folder removed\n            Q_ASSERT(false);\n            qWarning() << \"ERROR: Watched folder does not exist in db\" << path;\n            m_watcher->removePath(path);\n            return;\n        }\n    }\n\n    // Clean the database\n    if (cleanDB())\n        updateCollectionStatistics();\n\n    // Rescan the documents associated with the folder\n    if (folder_id != -1)\n        scanDocuments(folder_id, path);\n}\n"
  },
  {
    "path": "gpt4all-chat/src/database.h",
    "content": "#ifndef DATABASE_H\n#define DATABASE_H\n\n#include \"embllm.h\"\n\n#include <QByteArray>\n#include <QChar>\n#include <QDateTime>\n#include <QElapsedTimer>\n#include <QFileInfo>\n#include <QHash>\n#include <QLatin1String>\n#include <QList>\n#include <QObject>\n#include <QSet>\n#include <QSqlDatabase>\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n#include <QThread>\n#include <QUrl>\n#include <QVector> // IWYU pragma: keep\n#include <QtAssert>\n\n#include <atomic>\n#include <cstddef>\n#include <list>\n#include <map>\n#include <memory>\n#include <optional>\n#include <utility>\n#include <vector> // IWYU pragma: keep\n\nusing namespace Qt::Literals::StringLiterals;\n\nclass Database;\nclass DocumentReader;\nclass QFileSystemWatcher;\nclass QSqlQuery;\nclass QTextStream;\nclass QTimer;\n\n\n/* Version 0: GPT4All v2.4.3, full-text search\n * Version 1: GPT4All v2.5.3, embeddings in hsnwlib\n * Version 2: GPT4All v3.0.0, embeddings in sqlite\n * Version 3: GPT4All v3.4.0, hybrid search\n */\n\n// minimum supported version\nstatic const int LOCALDOCS_MIN_VER = 1;\n\n// FIXME: (Adam) The next time we bump the version we should add triggers to manage the fts external\n// content table as recommended in the official documentation to keep the fts index in sync\n// See: https://www.sqlite.org/fts5.html#external_content_tables\n\n// FIXME: (Adam) The fts virtual table should include the chunk_id explicitly instead of relying upon\n// the id of the two tables to be in sync\n\n// current version\nstatic const int LOCALDOCS_VERSION = 3;\n\nstruct DocumentInfo\n{\n    using key_type = std::pair<int, QString>;\n\n    int       folder;\n    QFileInfo file;\n    bool      currentlyProcessing = false;\n\n    key_type key() const { return {folder, file.canonicalFilePath()}; } // for comparison\n\n    bool isPdf () const { return !file.suffix().compare(\"pdf\"_L1,  Qt::CaseInsensitive); }\n    bool isDocx() const { return 
!file.suffix().compare(\"docx\"_L1, Qt::CaseInsensitive); }\n};\n\nstruct ResultInfo {\n    Q_GADGET\n    Q_PROPERTY(QString collection MEMBER collection)\n    Q_PROPERTY(QString path MEMBER path)\n    Q_PROPERTY(QString file MEMBER file)\n    Q_PROPERTY(QString title MEMBER title)\n    Q_PROPERTY(QString author MEMBER author)\n    Q_PROPERTY(QString date MEMBER date)\n    Q_PROPERTY(QString text MEMBER text)\n    Q_PROPERTY(int page MEMBER page)\n    Q_PROPERTY(int from MEMBER from)\n    Q_PROPERTY(int to MEMBER to)\n    Q_PROPERTY(QString fileUri READ fileUri STORED false)\n\npublic:\n    QString collection; // [Required] The name of the collection\n    QString path;       // [Required] The full path\n    QString file;       // [Required] The name of the file, but not the full path\n    QString title;      // [Optional] The title of the document\n    QString author;     // [Optional] The author of the document\n    QString date;       // [Required] The creation or the last modification date whichever is latest\n    QString text;       // [Required] The text actually used in the augmented context\n    int page = -1;      // [Optional] The page where the text was found\n    int from = -1;      // [Optional] The line number where the text begins\n    int to = -1;        // [Optional] The line number where the text ends\n\n    QString fileUri() const {\n        // QUrl reserved chars that are not UNSAFE_PATH according to glib/gconvert.c\n        static const QByteArray s_exclude = \"!$&'()*+,/:=@~\"_ba;\n\n        Q_ASSERT(!QFileInfo(path).isRelative());\n#ifdef Q_OS_WINDOWS\n        Q_ASSERT(!path.contains('\\\\')); // Qt normally uses forward slash as path separator\n#endif\n\n        auto escaped = QString::fromUtf8(QUrl::toPercentEncoding(path, s_exclude));\n        if (escaped.front() != '/')\n            escaped = '/' + escaped;\n        return u\"file://\"_s + escaped;\n    }\n\n    bool operator==(const ResultInfo &other) const {\n        return file == 
other.file &&\n               title == other.title &&\n               author == other.author &&\n               date == other.date &&\n               text == other.text &&\n               page == other.page &&\n               from == other.from &&\n               to == other.to;\n    }\n    bool operator!=(const ResultInfo &other) const {\n        return !(*this == other);\n    }\n};\n\nQ_DECLARE_METATYPE(ResultInfo)\n\nstruct CollectionItem {\n    // -- Fields persisted to database --\n\n    int collection_id = -1;\n    int folder_id = -1;\n    QString collection;\n    QString folder_path;\n    QString embeddingModel;\n\n    // -- Transient fields --\n\n    bool installed = false;\n    bool indexing = false;\n    bool forceIndexing = false;\n    QString error;\n\n    // progress\n    int currentDocsToIndex = 0;\n    int totalDocsToIndex = 0;\n    size_t currentBytesToIndex = 0;\n    size_t totalBytesToIndex = 0;\n    size_t currentEmbeddingsToIndex = 0;\n    size_t totalEmbeddingsToIndex = 0;\n\n    // statistics\n    size_t totalDocs = 0;\n    size_t totalWords = 0;\n    size_t totalTokens = 0;\n    QDateTime startUpdate;\n    QDateTime lastUpdate;\n    QString fileCurrentlyProcessing;\n};\nQ_DECLARE_METATYPE(CollectionItem)\n\nclass ChunkStreamer {\npublic:\n    enum class Status { DOC_COMPLETE, INTERRUPTED, ERROR, BINARY_SEEN };\n\n    explicit ChunkStreamer(Database *database);\n    ~ChunkStreamer();\n\n    void setDocument(DocumentInfo doc, int documentId, const QString &embeddingModel);\n    std::optional<DocumentInfo::key_type> currentDocKey() const;\n    void reset();\n\n    Status step();\n\nprivate:\n    Database                              *m_database;\n    std::optional<DocumentInfo::key_type>  m_docKey;\n    std::unique_ptr<DocumentReader>        m_reader; // may be invalid, always compare key first\n    int                                    m_documentId;\n    QString                                m_embeddingModel;\n    QString                      
          m_title;\n    QString                                m_author;\n    QString                                m_subject;\n    QString                                m_keywords;\n\n    // working state\n    QString                                m_chunk; // has a trailing space for convenience\n    int                                    m_nChunkWords = 0;\n    int                                    m_page = 0;\n};\n\nclass Database : public QObject\n{\n    Q_OBJECT\npublic:\n    Database(int chunkSize, QStringList extensions);\n    ~Database() override;\n\n    bool isValid() const { return m_databaseValid; }\n\npublic Q_SLOTS:\n    void start();\n    bool scanQueueInterrupted() const;\n    void scanQueueBatch();\n    void scanDocuments(int folder_id, const QString &folder_path);\n    void forceIndexing(const QString &collection, const QString &embedding_model);\n    void forceRebuildFolder(const QString &path);\n    bool addFolder(const QString &collection, const QString &path, const QString &embedding_model);\n    void removeFolder(const QString &collection, const QString &path);\n    void retrieveFromDB(const QList<QString> &collections, const QString &text, int retrievalSize, QList<ResultInfo> *results);\n    void changeChunkSize(int chunkSize);\n    void changeFileExtensions(const QStringList &extensions);\n\nQ_SIGNALS:\n    // Signals for the gui only\n    void requestUpdateGuiForCollectionItem(const CollectionItem &item);\n    void requestAddGuiCollectionItem(const CollectionItem &item);\n    void requestRemoveGuiFolderById(const QString &collection, int folder_id);\n    void requestGuiCollectionListUpdated(const QList<CollectionItem> &collectionList);\n    void databaseValidChanged();\n\nprivate Q_SLOTS:\n    void directoryChanged(const QString &path);\n    void addCurrentFolders();\n    void handleEmbeddingsGenerated(const QVector<EmbeddingResult> &embeddings);\n    void handleErrorGenerated(const QVector<EmbeddingChunk> &chunks, const QString 
&error);\n\nprivate:\n    void transaction();\n    void commit();\n    void rollback();\n\n    bool addChunk(QSqlQuery &q, int document_id, const QString &chunk_text, const QString &file,\n                  const QString &title, const QString &author, const QString &subject, const QString &keywords,\n                  int page, int from, int to, int words, int *chunk_id);\n    bool refreshDocumentIdCache(QSqlQuery &q);\n    bool removeChunksByDocumentId(QSqlQuery &q, int document_id);\n    bool sqlRemoveDocsByFolderPath(QSqlQuery &q, const QString &path);\n    bool hasContent();\n    // not found -> 0, exists and has content -> 1, error -> -1\n    int openDatabase(const QString &modelPath, bool create = true, int ver = LOCALDOCS_VERSION);\n    bool openLatestDb(const QString &modelPath, QList<CollectionItem> &oldCollections);\n    bool initDb(const QString &modelPath, const QList<CollectionItem> &oldCollections);\n    int checkAndAddFolderToDB(const QString &path);\n    bool removeFolderInternal(const QString &collection, int folder_id, const QString &path);\n    size_t chunkStream(QTextStream &stream, int folder_id, int document_id, const QString &embedding_model,\n        const QString &file, const QString &title, const QString &author, const QString &subject,\n        const QString &keywords, int page, int maxChunks = -1);\n    void appendChunk(const EmbeddingChunk &chunk);\n    void sendChunkList();\n    void updateFolderToIndex(int folder_id, size_t countForFolder, bool sendChunks = true);\n    size_t countOfDocuments(int folder_id) const;\n    size_t countOfBytes(int folder_id) const;\n    DocumentInfo dequeueDocument();\n    void removeFolderFromDocumentQueue(int folder_id);\n    void enqueueDocumentInternal(DocumentInfo &&info, bool prepend = false);\n    void enqueueDocuments(int folder_id, std::list<DocumentInfo> &&infos);\n    void scanQueue();\n    bool ftsIntegrityCheck();\n    bool cleanDB();\n    void addFolderToWatch(const QString &path);\n    
void removeFolderFromWatch(const QString &path);\n    static QList<int> searchEmbeddingsHelper(const std::vector<float> &query, QSqlQuery &q, int nNeighbors);\n    QList<int> searchEmbeddings(const std::vector<float> &query, const QList<QString> &collections,\n        int nNeighbors);\n    struct BM25Query {\n        QString input;\n        QString query;\n        bool isExact = false;\n        int qlength = 0;\n        int ilength = 0;\n        int rlength = 0;\n    };\n    QList<Database::BM25Query> queriesForFTS5(const QString &input);\n    QList<int> searchBM25(const QString &query, const QList<QString> &collections, BM25Query &bm25q, int k);\n    QList<int> scoreChunks(const std::vector<float> &query, const QList<int> &chunks);\n    float computeBM25Weight(const BM25Query &bm25q);\n    QList<int> reciprocalRankFusion(const std::vector<float> &query, const QList<int> &embeddingResults,\n        const QList<int> &bm25Results, const BM25Query &bm25q, int k);\n    QList<int> searchDatabase(const QString &query, const QList<QString> &collections, int k);\n\n    void setStartUpdateTime(CollectionItem &item);\n    void setLastUpdateTime(CollectionItem &item);\n\n    CollectionItem guiCollectionItem(int folder_id) const;\n    void updateGuiForCollectionItem(const CollectionItem &item);\n    void addGuiCollectionItem(const CollectionItem &item);\n    void removeGuiFolderById(const QString &collection, int folder_id);\n    void guiCollectionListUpdated(const QList<CollectionItem> &collectionList);\n    void scheduleUncompletedEmbeddings();\n    void updateCollectionStatistics();\n\nprivate:\n    QSqlDatabase m_db;\n    int m_chunkSize;\n    QStringList m_scannedFileExtensions;\n    QTimer *m_scanIntervalTimer;\n    QElapsedTimer m_scanDurationTimer;\n    std::map<int, std::list<DocumentInfo>> m_docsToScan;\n    QList<ResultInfo> m_retrieve;\n    QThread m_dbThread;\n    QFileSystemWatcher *m_watcher;\n    QSet<QString> m_watchedPaths;\n    EmbeddingLLM *m_embLLM;\n    
QVector<EmbeddingChunk> m_chunkList;\n    QHash<int, CollectionItem> m_collectionMap; // used only for tracking indexing/embedding progress\n    std::atomic<bool> m_databaseValid;\n    ChunkStreamer m_chunkStreamer;\n    QSet<int> m_documentIdCache; // cached list of documents with chunks for fast lookup\n\n    friend class ChunkStreamer;\n};\n\n#endif // DATABASE_H\n"
  },
  {
    "path": "gpt4all-chat/src/download.cpp",
    "content": "#include \"download.h\"\n\n#include \"modellist.h\"\n#include \"mysettings.h\"\n#include \"network.h\"\n\n#include <QByteArray>\n#include <QCollator>\n#include <QCoreApplication>\n#include <QDebug>\n#include <QGlobalStatic>\n#include <QGuiApplication>\n#include <QIODevice> // IWYU pragma: keep\n#include <QJsonArray>\n#include <QJsonDocument>\n#include <QJsonObject>\n#include <QJsonValue>\n#include <QKeyValueIterator>\n#include <QLocale>\n#include <QNetworkRequest>\n#include <QPair> // IWYU pragma: keep\n#include <QRegularExpression>\n#include <QRegularExpressionMatch>\n#include <QSettings>\n#include <QSslConfiguration>\n#include <QSslSocket>\n#include <QStringList> // IWYU pragma: keep\n#include <QTextStream>\n#include <QUrl>\n#include <QVariant>\n#include <QVector> // IWYU pragma: keep\n#include <Qt>\n#include <QtAssert>\n#include <QtLogging>\n#include <QtMinMax>\n\n#include <compare>\n#include <cstddef>\n#include <utility>\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nclass MyDownload: public Download { };\nQ_GLOBAL_STATIC(MyDownload, downloadInstance)\nDownload *Download::globalInstance()\n{\n    return downloadInstance();\n}\n\nDownload::Download()\n    : QObject(nullptr)\n    , m_hashAndSave(new HashAndSaveFile)\n{\n    connect(this, &Download::requestHashAndSave, m_hashAndSave,\n        &HashAndSaveFile::hashAndSave, Qt::QueuedConnection);\n    connect(m_hashAndSave, &HashAndSaveFile::hashAndSaveFinished, this,\n        &Download::handleHashAndSaveFinished, Qt::QueuedConnection);\n    connect(&m_networkManager, &QNetworkAccessManager::sslErrors, this,\n        &Download::handleSslErrors);\n    updateLatestNews();\n    updateReleaseNotes();\n    m_startTime = QDateTime::currentDateTime();\n}\n\nstd::strong_ordering Download::compareAppVersions(const QString &a, const QString &b)\n{\n    static QRegularExpression versionRegex(R\"(^(\\d+(?:\\.\\d+){0,2})(-.+)?$)\");\n\n    // When comparing versions, make sure a2 < a10.\n    QCollator 
versionCollator(QLocale(QLocale::English, QLocale::UnitedStates));\n    versionCollator.setNumericMode(true);\n\n    QRegularExpressionMatch aMatch = versionRegex.match(a);\n    QRegularExpressionMatch bMatch = versionRegex.match(b);\n\n    Q_ASSERT(aMatch.hasMatch() && bMatch.hasMatch()); // expect valid versions\n\n    // Check for an invalid version. foo < 3.0.0 -> !hasMatch < hasMatch\n    if (auto diff = aMatch.hasMatch() <=> bMatch.hasMatch(); diff != 0)\n        return diff; // invalid version compares as lower\n\n    // Compare invalid versions. fooa < foob\n    if (!aMatch.hasMatch() && !bMatch.hasMatch())\n        return versionCollator.compare(a, b) <=> 0; // lexicographic comparison\n\n    // Compare first three components. 3.0.0 < 3.0.1\n    QStringList aParts = aMatch.captured(1).split('.');\n    QStringList bParts = bMatch.captured(1).split('.');\n    for (int i = 0; i < qMax(aParts.size(), bParts.size()); i++) {\n        bool ok = false;\n        int aInt = aParts.value(i, \"0\").toInt(&ok);\n        Q_ASSERT(ok);\n        int bInt = bParts.value(i, \"0\").toInt(&ok);\n        Q_ASSERT(ok);\n        if (auto diff = aInt <=> bInt; diff != 0)\n            return diff; // version with lower component compares as lower\n    }\n\n    // Check for a pre/post-release suffix. 3.0.0-dev0 < 3.0.0-rc1 < 3.0.0 < 3.0.0-post1\n    auto getSuffixOrder = [](const QRegularExpressionMatch &match) -> int {\n        QString suffix = match.captured(2);\n        return suffix.startsWith(\"-dev\") ? 0 :\n               suffix.startsWith(\"-rc\")  ? 1 :\n               suffix.isEmpty()          ? 2 :\n               /* some other suffix */     3;\n    };\n    if (auto diff = getSuffixOrder(aMatch) <=> getSuffixOrder(bMatch); diff != 0)\n        return diff; // different suffix types\n\n    // Lexicographic comparison of suffix. 
3.0.0-rc1 < 3.0.0-rc2\n    if (aMatch.hasCaptured(2) && bMatch.hasCaptured(2)) {\n        if (auto diff = versionCollator.compare(aMatch.captured(2), bMatch.captured(2)); diff != 0)\n            return diff <=> 0;\n    }\n\n    return std::strong_ordering::equal;\n}\n\nReleaseInfo Download::releaseInfo() const\n{\n    const QString currentVersion = QCoreApplication::applicationVersion();\n    if (m_releaseMap.contains(currentVersion))\n        return m_releaseMap.value(currentVersion);\n    if (!m_releaseMap.empty())\n        return m_releaseMap.last();\n    return ReleaseInfo();\n}\n\nbool Download::hasNewerRelease() const\n{\n    const QString currentVersion = QCoreApplication::applicationVersion();\n    for (const auto &version : m_releaseMap.keys()) {\n        if (compareAppVersions(version, currentVersion) > 0)\n            return true;\n    }\n    return false;\n}\n\nbool Download::isFirstStart(bool writeVersion) const\n{\n    auto *mySettings = MySettings::globalInstance();\n\n    QSettings settings;\n    QString lastVersionStarted = settings.value(\"download/lastVersionStarted\").toString();\n    bool first = lastVersionStarted != QCoreApplication::applicationVersion();\n    if (first && writeVersion) {\n        settings.setValue(\"download/lastVersionStarted\", QCoreApplication::applicationVersion());\n        // let the user select these again\n        settings.remove(\"network/usageStatsActive\");\n        settings.remove(\"network/isActive\");\n        emit mySettings->networkUsageStatsActiveChanged();\n        emit mySettings->networkIsActiveChanged();\n    }\n\n    return first || !mySettings->isNetworkUsageStatsActiveSet() || !mySettings->isNetworkIsActiveSet();\n}\n\nvoid Download::updateReleaseNotes()\n{\n    QUrl jsonUrl(\"http://gpt4all.io/meta/release.json\");\n    QNetworkRequest request(jsonUrl);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    
request.setSslConfiguration(conf);\n    QNetworkReply *jsonReply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort);\n    connect(jsonReply, &QNetworkReply::finished, this, &Download::handleReleaseJsonDownloadFinished);\n}\n\nvoid Download::updateLatestNews()\n{\n    QUrl url(\"http://gpt4all.io/meta/latestnews.md\");\n    QNetworkRequest request(url);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    QNetworkReply *reply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort);\n    connect(reply, &QNetworkReply::finished, this, &Download::handleLatestNewsDownloadFinished);\n}\n\nvoid Download::downloadModel(const QString &modelFile)\n{\n    QFile *tempFile = new QFile(ModelList::globalInstance()->incompleteDownloadPath(modelFile));\n    bool success = tempFile->open(QIODevice::WriteOnly | QIODevice::Append);\n    qWarning() << \"Opening temp file for writing:\" << tempFile->fileName();\n    if (!success) {\n        const QString error\n            = u\"ERROR: Could not open temp file: %1 %2\"_s.arg(tempFile->fileName(), modelFile);\n        qWarning() << error;\n        clearRetry(modelFile);\n        ModelList::globalInstance()->updateDataByFilename(modelFile, {{ ModelList::DownloadErrorRole, error }});\n        return;\n    }\n    tempFile->flush();\n    size_t incomplete_size = tempFile->size();\n    if (incomplete_size > 0) {\n        bool success = tempFile->seek(incomplete_size);\n        if (!success) {\n            incomplete_size = 0;\n            success = tempFile->seek(incomplete_size);\n            Q_ASSERT(success);\n        }\n    }\n\n    if (!ModelList::globalInstance()->containsByFilename(modelFile)) {\n        qWarning() << \"ERROR: Could not find file:\" << modelFile;\n        return;\n    }\n\n    
ModelList::globalInstance()->updateDataByFilename(modelFile, {{ ModelList::DownloadingRole, true }});\n    ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFile);\n    QString url = !info.url().isEmpty() ? info.url() : \"http://gpt4all.io/models/gguf/\" + modelFile;\n    Network::globalInstance()->trackEvent(\"download_started\", { {\"model\", modelFile} });\n    QNetworkRequest request(url);\n    request.setAttribute(QNetworkRequest::User, modelFile);\n    request.setRawHeader(\"range\", u\"bytes=%1-\"_s.arg(tempFile->pos()).toUtf8());\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    QNetworkReply *modelReply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, modelReply, &QNetworkReply::abort);\n    connect(modelReply, &QNetworkReply::downloadProgress, this, &Download::handleDownloadProgress);\n    connect(modelReply, &QNetworkReply::errorOccurred, this, &Download::handleErrorOccurred);\n    connect(modelReply, &QNetworkReply::finished, this, &Download::handleModelDownloadFinished);\n    connect(modelReply, &QNetworkReply::readyRead, this, &Download::handleReadyRead);\n    m_activeDownloads.insert(modelReply, tempFile);\n}\n\nvoid Download::cancelDownload(const QString &modelFile)\n{\n    for (auto [modelReply, tempFile]: m_activeDownloads.asKeyValueRange()) {\n        QUrl url = modelReply->request().url();\n        if (url.toString().endsWith(modelFile)) {\n            Network::globalInstance()->trackEvent(\"download_canceled\", { {\"model\", modelFile} });\n\n            // Disconnect the signals\n            disconnect(modelReply, &QNetworkReply::downloadProgress, this, &Download::handleDownloadProgress);\n            disconnect(modelReply, &QNetworkReply::finished, this, &Download::handleModelDownloadFinished);\n\n            modelReply->abort(); // Abort the download\n            
modelReply->deleteLater(); // Schedule the reply for deletion\n\n            tempFile->deleteLater();\n            m_activeDownloads.remove(modelReply);\n\n            ModelList::globalInstance()->updateDataByFilename(modelFile, {{ ModelList::DownloadingRole, false }});\n            break;\n        }\n    }\n}\n\nvoid Download::installModel(const QString &modelFile, const QString &apiKey)\n{\n    Q_ASSERT(!apiKey.isEmpty());\n    if (apiKey.isEmpty())\n        return;\n\n    Network::globalInstance()->trackEvent(\"install_model\", { {\"model\", modelFile} });\n\n    QString filePath = MySettings::globalInstance()->modelPath() + modelFile;\n    QFile file(filePath);\n    if (file.open(QIODeviceBase::WriteOnly | QIODeviceBase::Text)) {\n\n        QJsonObject obj;\n        QString modelName(modelFile);\n        modelName.remove(0, 8); // strip \"gpt4all-\" prefix\n        modelName.chop(7); // strip \".rmodel\" extension\n        obj.insert(\"apiKey\", apiKey);\n        obj.insert(\"modelName\", modelName);\n        QJsonDocument doc(obj);\n\n        QTextStream stream(&file);\n        stream << doc.toJson();\n        file.close();\n        ModelList::globalInstance()->updateModelsFromDirectory();\n        emit toastMessage(tr(\"Model \\\"%1\\\" is installed successfully.\").arg(modelName));\n    }\n\n    ModelList::globalInstance()->updateDataByFilename(modelFile, {{ ModelList::InstalledRole, true }});\n}\n\nvoid Download::installCompatibleModel(const QString &modelName, const QString &apiKey, const QString &baseUrl)\n{\n    Q_ASSERT(!modelName.isEmpty());\n    if (modelName.isEmpty()) {\n        emit toastMessage(tr(\"ERROR: $MODEL_NAME is empty.\"));\n        return;\n    }\n\n    Q_ASSERT(!apiKey.isEmpty());\n    if (apiKey.isEmpty()) {\n        emit toastMessage(tr(\"ERROR: $API_KEY is empty.\"));\n        return;\n    }\n\n    QUrl apiBaseUrl(QUrl::fromUserInput(baseUrl));\n    if (!Network::isHttpUrlValid(baseUrl)) {\n        emit toastMessage(tr(\"ERROR: 
$BASE_URL is invalid.\"));\n        return;\n    }\n\n    QString modelFile(ModelList::compatibleModelFilename(baseUrl, modelName));\n    if (ModelList::globalInstance()->contains(modelFile)) {\n        emit toastMessage(tr(\"ERROR: Model \\\"%1 (%2)\\\" is conflict.\").arg(modelName, baseUrl));\n        return;\n    }\n    ModelList::globalInstance()->addModel(modelFile);\n    Network::globalInstance()->trackEvent(\"install_model\", { {\"model\", modelFile} });\n\n    QString filePath = MySettings::globalInstance()->modelPath() + modelFile;\n    QFile file(filePath);\n    if (file.open(QIODeviceBase::WriteOnly | QIODeviceBase::Text)) {\n        QJsonObject obj;\n        obj.insert(\"apiKey\", apiKey);\n        obj.insert(\"modelName\", modelName);\n        obj.insert(\"baseUrl\", apiBaseUrl.toString());\n        QJsonDocument doc(obj);\n\n        QTextStream stream(&file);\n        stream << doc.toJson();\n        file.close();\n        ModelList::globalInstance()->updateModelsFromDirectory();\n        emit toastMessage(tr(\"Model \\\"%1 (%2)\\\" is installed successfully.\").arg(modelName, baseUrl));\n    }\n\n    ModelList::globalInstance()->updateDataByFilename(modelFile, {{ ModelList::InstalledRole, true }});\n}\n\nvoid Download::removeModel(const QString &modelFile)\n{\n    const QString filePath = MySettings::globalInstance()->modelPath() + modelFile;\n    QFile incompleteFile(ModelList::globalInstance()->incompleteDownloadPath(modelFile));\n    if (incompleteFile.exists()) {\n        incompleteFile.remove();\n    }\n\n    bool shouldRemoveInstalled = false;\n    QFile file(filePath);\n    if (file.exists()) {\n        const ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFile);\n        MySettings::globalInstance()->eraseModel(info);\n        shouldRemoveInstalled = info.installed && !info.isClone() && (info.isDiscovered() || info.isCompatibleApi || info.description() == \"\" /*indicates sideloaded*/);\n        if 
(shouldRemoveInstalled)\n            ModelList::globalInstance()->removeInstalled(info);\n        Network::globalInstance()->trackEvent(\"remove_model\", { {\"model\", modelFile} });\n        file.remove();\n        emit toastMessage(tr(\"Model \\\"%1\\\" is removed.\").arg(info.name()));\n    }\n\n    if (!shouldRemoveInstalled) {\n        QVector<QPair<int, QVariant>> data {\n            { ModelList::InstalledRole, false },\n            { ModelList::BytesReceivedRole, 0 },\n            { ModelList::BytesTotalRole, 0 },\n            { ModelList::TimestampRole, 0 },\n            { ModelList::SpeedRole, QString() },\n            { ModelList::DownloadErrorRole, QString() },\n        };\n        ModelList::globalInstance()->updateDataByFilename(modelFile, data);\n    }\n}\n\nvoid Download::handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors)\n{\n    QUrl url = reply->request().url();\n    for (const auto &e : errors)\n        qWarning() << \"ERROR: Received ssl error:\" << e.errorString() << \"for\" << url;\n}\n\nvoid Download::handleReleaseJsonDownloadFinished()\n{\n    QNetworkReply *jsonReply = qobject_cast<QNetworkReply *>(sender());\n    if (!jsonReply)\n        return;\n\n    QByteArray jsonData = jsonReply->readAll();\n    jsonReply->deleteLater();\n    parseReleaseJsonFile(jsonData);\n}\n\nvoid Download::parseReleaseJsonFile(const QByteArray &jsonData)\n{\n    QJsonParseError err;\n    QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);\n    if (err.error != QJsonParseError::NoError) {\n        qWarning() << \"ERROR: Couldn't parse: \" << jsonData << err.errorString();\n        return;\n    }\n\n    QJsonArray jsonArray = document.array();\n\n    m_releaseMap.clear();\n    for (const QJsonValue &value : jsonArray) {\n        QJsonObject obj = value.toObject();\n\n        QString version = obj[\"version\"].toString();\n        // \"notes\" field intentionally has a trailing newline for compatibility\n        QString notes = 
obj[\"notes\"].toString();\n        QString contributors = obj[\"contributors\"].toString().trimmed();\n        ReleaseInfo releaseInfo;\n        releaseInfo.version = version;\n        releaseInfo.notes = notes;\n        releaseInfo.contributors = contributors;\n        m_releaseMap.insert(version, releaseInfo);\n    }\n\n    emit hasNewerReleaseChanged();\n    emit releaseInfoChanged();\n}\n\nvoid Download::handleLatestNewsDownloadFinished()\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n\n    if (reply->error() != QNetworkReply::NoError) {\n        qWarning() << \"ERROR: network error occurred attempting to download latest news:\" << reply->errorString();\n        reply->deleteLater();\n        return;\n    }\n\n    QByteArray responseData = reply->readAll();\n    m_latestNews = QString::fromUtf8(responseData);\n    reply->deleteLater();\n    emit latestNewsChanged();\n}\n\nbool Download::hasRetry(const QString &filename) const\n{\n    return m_activeRetries.contains(filename);\n}\n\nbool Download::shouldRetry(const QString &filename)\n{\n    int retries = 0;\n    if (m_activeRetries.contains(filename))\n        retries = m_activeRetries.value(filename);\n\n    ++retries;\n\n    // Allow up to ten retries for now\n    if (retries < 10) {\n        m_activeRetries.insert(filename, retries);\n        return true;\n    }\n\n    return false;\n}\n\nvoid Download::clearRetry(const QString &filename)\n{\n    m_activeRetries.remove(filename);\n}\n\nvoid Download::handleErrorOccurred(QNetworkReply::NetworkError code)\n{\n    QNetworkReply *modelReply = qobject_cast<QNetworkReply *>(sender());\n    if (!modelReply)\n        return;\n\n    // This occurs when the user explicitly cancels the download\n    if (code == QNetworkReply::OperationCanceledError)\n        return;\n\n    QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();\n    if (shouldRetry(modelFilename)) 
{\n        downloadModel(modelFilename);\n        return;\n    }\n\n    clearRetry(modelFilename);\n\n    const QString error\n        = u\"ERROR: Network error occurred attempting to download %1 code: %2 errorString %3\"_s\n            .arg(modelFilename)\n            .arg(code)\n            .arg(modelReply->errorString());\n    qWarning() << error;\n    ModelList::globalInstance()->updateDataByFilename(modelFilename, {{ ModelList::DownloadErrorRole, error }});\n    Network::globalInstance()->trackEvent(\"download_error\", {\n        {\"model\", modelFilename},\n        {\"code\", (int)code},\n        {\"error\", modelReply->errorString()},\n    });\n    cancelDownload(modelFilename);\n}\n\nvoid Download::handleDownloadProgress(qint64 bytesReceived, qint64 bytesTotal)\n{\n    QNetworkReply *modelReply = qobject_cast<QNetworkReply *>(sender());\n    if (!modelReply)\n        return;\n    QFile *tempFile = m_activeDownloads.value(modelReply);\n    if (!tempFile)\n        return;\n    QString contentRange = modelReply->rawHeader(\"content-range\");\n    if (contentRange.contains(\"/\")) {\n        QString contentTotalSize = contentRange.split(\"/\").last();\n        bytesTotal = contentTotalSize.toLongLong();\n    }\n\n    const QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();\n    const qint64 lastUpdate = ModelList::globalInstance()->dataByFilename(modelFilename, ModelList::TimestampRole).toLongLong();\n    const qint64 currentUpdate = QDateTime::currentMSecsSinceEpoch();\n    if (currentUpdate - lastUpdate < 1000)\n        return;\n\n    const qint64 lastBytesReceived = ModelList::globalInstance()->dataByFilename(modelFilename, ModelList::BytesReceivedRole).toLongLong();\n    const qint64 currentBytesReceived = tempFile->pos();\n\n    qint64 timeDifference = currentUpdate - lastUpdate;\n    qint64 bytesDifference = currentBytesReceived - lastBytesReceived;\n    qint64 speed = (bytesDifference / timeDifference) * 1000; // 
bytes per second\n    QString speedText;\n    if (speed < 1024)\n        speedText = QString::number(static_cast<double>(speed), 'f', 2) + \" B/s\";\n    else if (speed < 1024 * 1024)\n        speedText = QString::number(static_cast<double>(speed / 1024.0), 'f', 2) + \" KB/s\";\n    else\n        speedText = QString::number(static_cast<double>(speed / (1024.0 * 1024.0)), 'f', 2) + \" MB/s\";\n\n    QVector<QPair<int, QVariant>> data {\n        { ModelList::BytesReceivedRole, currentBytesReceived },\n        { ModelList::BytesTotalRole, bytesTotal },\n        { ModelList::SpeedRole, speedText },\n        { ModelList::TimestampRole, currentUpdate },\n    };\n    ModelList::globalInstance()->updateDataByFilename(modelFilename, data);\n}\n\nHashAndSaveFile::HashAndSaveFile()\n    : QObject(nullptr)\n{\n    moveToThread(&m_hashAndSaveThread);\n    m_hashAndSaveThread.setObjectName(\"hashandsave thread\");\n    m_hashAndSaveThread.start();\n}\n\nvoid HashAndSaveFile::hashAndSave(const QString &expectedHash, QCryptographicHash::Algorithm a,\n    const QString &saveFilePath, QFile *tempFile, QNetworkReply *modelReply)\n{\n    Q_ASSERT(!tempFile->isOpen());\n    QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();\n\n    // Reopen the tempFile for hashing\n    if (!tempFile->open(QIODevice::ReadOnly)) {\n        const QString error\n            = u\"ERROR: Could not open temp file for hashing: %1 %2\"_s.arg(tempFile->fileName(), modelFilename);\n        qWarning() << error;\n        emit hashAndSaveFinished(false, error, tempFile, modelReply);\n        return;\n    }\n\n    QCryptographicHash hash(a);\n    while(!tempFile->atEnd())\n        hash.addData(tempFile->read(16384));\n    if (hash.result().toHex() != expectedHash.toLatin1()) {\n        tempFile->close();\n        const QString error\n            = u\"ERROR: Download error hash did not match: %1 != %2 for %3\"_s\n                .arg(hash.result().toHex(), 
expectedHash.toLatin1(), modelFilename);\n        qWarning() << error;\n        tempFile->remove();\n        emit hashAndSaveFinished(false, error, tempFile, modelReply);\n        return;\n    }\n\n    // The file save needs the tempFile closed\n    tempFile->close();\n\n    // Attempt to *move* the verified tempfile into place - this should be atomic\n    // but will only work if the destination is on the same filesystem\n    if (tempFile->rename(saveFilePath)) {\n        emit hashAndSaveFinished(true, QString(), tempFile, modelReply);\n        ModelList::globalInstance()->updateModelsFromDirectory();\n        return;\n    }\n\n    // Reopen the tempFile for copying\n    if (!tempFile->open(QIODevice::ReadOnly)) {\n        const QString error\n            = u\"ERROR: Could not open temp file at finish: %1 %2\"_s.arg(tempFile->fileName(), modelFilename);\n        qWarning() << error;\n        emit hashAndSaveFinished(false, error, tempFile, modelReply);\n        return;\n    }\n\n    // Save the model file to disk\n    QFile file(saveFilePath);\n    if (file.open(QIODevice::WriteOnly)) {\n        QByteArray buffer;\n        while (!tempFile->atEnd()) {\n            buffer = tempFile->read(16384);\n            file.write(buffer);\n        }\n        file.close();\n        tempFile->close();\n        emit hashAndSaveFinished(true, QString(), tempFile, modelReply);\n    } else {\n        QFile::FileError error = file.error();\n        const QString errorString\n            = u\"ERROR: Could not save model to location: %1 failed with code %2\"_s.arg(saveFilePath).arg(error);\n        qWarning() << errorString;\n        tempFile->close();\n        emit hashAndSaveFinished(false, errorString, tempFile, modelReply);\n    }\n\n    ModelList::globalInstance()->updateModelsFromDirectory();\n}\n\nvoid Download::handleModelDownloadFinished()\n{\n    QNetworkReply *modelReply = qobject_cast<QNetworkReply *>(sender());\n    if (!modelReply)\n        return;\n\n    QString 
modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();\n    QFile *tempFile = m_activeDownloads.value(modelReply);\n    m_activeDownloads.remove(modelReply);\n\n    if (modelReply->error()) {\n        const QString errorString\n            = u\"ERROR: Downloading failed with code %1 \\\"%2\\\"\"_s.arg(modelReply->error()).arg(modelReply->errorString());\n        qWarning() << errorString;\n        modelReply->deleteLater();\n        tempFile->deleteLater();\n        if (!hasRetry(modelFilename)) {\n            QVector<QPair<int, QVariant>> data {\n                { ModelList::DownloadingRole, false },\n                { ModelList::DownloadErrorRole, errorString },\n            };\n            ModelList::globalInstance()->updateDataByFilename(modelFilename, data);\n        }\n        return;\n    }\n\n    clearRetry(modelFilename);\n\n    // The hash and save needs the tempFile closed\n    tempFile->close();\n\n    if (!ModelList::globalInstance()->containsByFilename(modelFilename)) {\n        qWarning() << \"ERROR: downloading no such file:\" << modelFilename;\n        modelReply->deleteLater();\n        tempFile->deleteLater();\n        return;\n    }\n\n    // Notify that we are calculating hash\n    ModelList::globalInstance()->updateDataByFilename(modelFilename, {{ ModelList::CalcHashRole, true }});\n    QByteArray hash =  ModelList::globalInstance()->modelInfoByFilename(modelFilename).hash;\n    ModelInfo::HashAlgorithm hashAlgorithm =  ModelList::globalInstance()->modelInfoByFilename(modelFilename).hashAlgorithm;\n    const QString saveFilePath = MySettings::globalInstance()->modelPath() + modelFilename;\n    emit requestHashAndSave(hash,\n        (hashAlgorithm == ModelInfo::Md5 ? 
QCryptographicHash::Md5 : QCryptographicHash::Sha256),\n        saveFilePath, tempFile, modelReply);\n}\n\nvoid Download::handleHashAndSaveFinished(bool success, const QString &error,\n        QFile *tempFile, QNetworkReply *modelReply)\n{\n    // The hash and save should send back with tempfile closed\n    Q_ASSERT(!tempFile->isOpen());\n    QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();\n    Network::globalInstance()->trackEvent(\"download_finished\", { {\"model\", modelFilename}, {\"success\", success} });\n\n    QVector<QPair<int, QVariant>> data {\n        { ModelList::CalcHashRole, false },\n        { ModelList::DownloadingRole, false },\n    };\n\n    modelReply->deleteLater();\n    tempFile->deleteLater();\n\n    if (!success) {\n        data.append({ ModelList::DownloadErrorRole, error });\n    } else {\n        data.append({ ModelList::DownloadErrorRole, QString() });\n        ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFilename);\n        if (info.isDiscovered())\n            ModelList::globalInstance()->updateDiscoveredInstalled(info);\n    }\n\n    ModelList::globalInstance()->updateDataByFilename(modelFilename, data);\n}\n\nvoid Download::handleReadyRead()\n{\n    QNetworkReply *modelReply = qobject_cast<QNetworkReply *>(sender());\n    if (!modelReply)\n        return;\n\n    QFile *tempFile = m_activeDownloads.value(modelReply);\n    QByteArray buffer;\n    while (!modelReply->atEnd()) {\n        buffer = modelReply->read(16384);\n        tempFile->write(buffer);\n    }\n    tempFile->flush();\n}\n"
  },
  {
    "path": "gpt4all-chat/src/download.h",
    "content": "#ifndef DOWNLOAD_H\n#define DOWNLOAD_H\n\n#include <QCryptographicHash>\n#include <QDateTime>\n#include <QFile>\n#include <QHash>\n#include <QList>\n#include <QMap>\n#include <QNetworkAccessManager>\n#include <QNetworkReply>\n#include <QObject>\n#include <QSslError>\n#include <QString>\n#include <QThread>\n#include <QtTypes>\n\n// IWYU pragma: no_forward_declare QFile\n// IWYU pragma: no_forward_declare QList\n// IWYU pragma: no_forward_declare QSslError\nclass QByteArray;\n\n\nstruct ReleaseInfo {\n    Q_GADGET\n    Q_PROPERTY(QString version MEMBER version)\n    Q_PROPERTY(QString notes MEMBER notes)\n    Q_PROPERTY(QString contributors MEMBER contributors)\n\npublic:\n    QString version;\n    QString notes;\n    QString contributors;\n};\n\nclass HashAndSaveFile : public QObject\n{\n    Q_OBJECT\npublic:\n    HashAndSaveFile();\n\npublic Q_SLOTS:\n    void hashAndSave(const QString &hash, QCryptographicHash::Algorithm a, const QString &saveFilePath,\n        QFile *tempFile, QNetworkReply *modelReply);\n\nQ_SIGNALS:\n    void hashAndSaveFinished(bool success, const QString &error,\n        QFile *tempFile, QNetworkReply *modelReply);\n\nprivate:\n    QThread m_hashAndSaveThread;\n};\n\nclass Download : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(bool hasNewerRelease READ hasNewerRelease NOTIFY hasNewerReleaseChanged)\n    Q_PROPERTY(ReleaseInfo releaseInfo READ releaseInfo NOTIFY releaseInfoChanged)\n    Q_PROPERTY(QString latestNews READ latestNews NOTIFY latestNewsChanged)\n\npublic:\n    static Download *globalInstance();\n\n    static std::strong_ordering compareAppVersions(const QString &a, const QString &b);\n    ReleaseInfo releaseInfo() const;\n    bool hasNewerRelease() const;\n    QString latestNews() const { return m_latestNews; }\n    Q_INVOKABLE void downloadModel(const QString &modelFile);\n    Q_INVOKABLE void cancelDownload(const QString &modelFile);\n    Q_INVOKABLE void installModel(const QString &modelFile, const QString 
&apiKey);\n    Q_INVOKABLE void installCompatibleModel(const QString &modelName, const QString &apiKey, const QString &baseUrl);\n    Q_INVOKABLE void removeModel(const QString &modelFile);\n    Q_INVOKABLE bool isFirstStart(bool writeVersion = false) const;\n\npublic Q_SLOTS:\n    void updateLatestNews();\n    void updateReleaseNotes();\n\nprivate Q_SLOTS:\n    void handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors);\n    void handleReleaseJsonDownloadFinished();\n    void handleLatestNewsDownloadFinished();\n    void handleErrorOccurred(QNetworkReply::NetworkError code);\n    void handleDownloadProgress(qint64 bytesReceived, qint64 bytesTotal);\n    void handleModelDownloadFinished();\n    void handleHashAndSaveFinished(bool success, const QString &error,\n        QFile *tempFile, QNetworkReply *modelReply);\n    void handleReadyRead();\n\nQ_SIGNALS:\n    void releaseInfoChanged();\n    void hasNewerReleaseChanged();\n    void requestHashAndSave(const QString &hash, QCryptographicHash::Algorithm a, const QString &saveFilePath,\n        QFile *tempFile, QNetworkReply *modelReply);\n    void latestNewsChanged();\n    void toastMessage(const QString &message);\n\nprivate:\n    void parseReleaseJsonFile(const QByteArray &jsonData);\n    QString incompleteDownloadPath(const QString &modelFile);\n    bool hasRetry(const QString &filename) const;\n    bool shouldRetry(const QString &filename);\n    void clearRetry(const QString &filename);\n\n    HashAndSaveFile *m_hashAndSave;\n    QMap<QString, ReleaseInfo> m_releaseMap;\n    QString m_latestNews;\n    QNetworkAccessManager m_networkManager;\n    QMap<QNetworkReply*, QFile*> m_activeDownloads;\n    QHash<QString, int> m_activeRetries;\n    QDateTime m_startTime;\n\nprivate:\n    explicit Download();\n    ~Download() {}\n    friend class MyDownload;\n};\n\n#endif // DOWNLOAD_H\n"
  },
  {
    "path": "gpt4all-chat/src/embllm.cpp",
    "content": "#include \"embllm.h\"\n\n#include \"mysettings.h\"\n\n#include <gpt4all-backend/llmodel.h>\n\n#include <QCoreApplication>\n#include <QDebug>\n#include <QFileInfo>\n#include <QGuiApplication>\n#include <QJsonArray>\n#include <QJsonDocument>\n#include <QJsonObject>\n#include <QJsonValue>\n#include <QList>\n#include <QMutexLocker> // IWYU pragma: keep\n#include <QNetworkAccessManager>\n#include <QNetworkReply>\n#include <QNetworkRequest>\n#include <QUrl>\n#include <Qt>\n#include <QtAssert>\n#include <QtLogging>\n\n#include <exception>\n#include <string>\n#include <utility>\n#include <vector>\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nstatic const QString EMBEDDING_MODEL_NAME = u\"nomic-embed-text-v1.5\"_s;\nstatic const QString LOCAL_EMBEDDING_MODEL = u\"nomic-embed-text-v1.5.f16.gguf\"_s;\n\nEmbeddingLLMWorker::EmbeddingLLMWorker()\n    : QObject(nullptr)\n    , m_networkManager(new QNetworkAccessManager(this))\n    , m_stopGenerating(false)\n{\n    moveToThread(&m_workerThread);\n    connect(this, &EmbeddingLLMWorker::requestAtlasQueryEmbedding, this, &EmbeddingLLMWorker::atlasQueryEmbeddingRequested);\n    connect(this, &EmbeddingLLMWorker::finished, &m_workerThread, &QThread::quit, Qt::DirectConnection);\n    m_workerThread.setObjectName(\"embedding\");\n    m_workerThread.start();\n}\n\nEmbeddingLLMWorker::~EmbeddingLLMWorker()\n{\n    m_stopGenerating = true;\n    m_workerThread.quit();\n    m_workerThread.wait();\n\n    if (m_model) {\n        delete m_model;\n        m_model = nullptr;\n    }\n}\n\nvoid EmbeddingLLMWorker::wait()\n{\n    m_workerThread.wait();\n}\n\nbool EmbeddingLLMWorker::loadModel()\n{\n    constexpr int n_ctx = 2048;\n\n    m_nomicAPIKey.clear();\n    m_model = nullptr;\n\n    // TODO(jared): react to setting changes without restarting\n\n    if (MySettings::globalInstance()->localDocsUseRemoteEmbed()) {\n        m_nomicAPIKey = MySettings::globalInstance()->localDocsNomicAPIKey();\n        return true;\n    
}\n\n#ifdef Q_OS_DARWIN\n    static const QString embPathFmt = u\"%1/../Resources/%2\"_s;\n#else\n    static const QString embPathFmt = u\"%1/../resources/%2\"_s;\n#endif\n\n    QString filePath = embPathFmt.arg(QCoreApplication::applicationDirPath(), LOCAL_EMBEDDING_MODEL);\n    if (!QFileInfo::exists(filePath)) {\n        qWarning() << \"embllm WARNING: Local embedding model not found\";\n        return false;\n    }\n\n    QString requestedDevice = MySettings::globalInstance()->localDocsEmbedDevice();\n    std::string backend = \"auto\";\n#ifdef Q_OS_MAC\n    if (requestedDevice == \"Auto\" || requestedDevice == \"CPU\")\n        backend = \"cpu\";\n#else\n    if (requestedDevice.startsWith(\"CUDA: \"))\n        backend = \"cuda\";\n#endif\n\n    try {\n        m_model = LLModel::Implementation::construct(filePath.toStdString(), backend, n_ctx);\n    } catch (const std::exception &e) {\n        qWarning() << \"embllm WARNING: Could not load embedding model:\" << e.what();\n        return false;\n    }\n\n    bool actualDeviceIsCPU = true;\n\n#if defined(Q_OS_MAC) && defined(__aarch64__)\n    if (m_model->implementation().buildVariant() == \"metal\")\n        actualDeviceIsCPU = false;\n#else\n    if (requestedDevice != \"CPU\") {\n        const LLModel::GPUDevice *device = nullptr;\n        std::vector<LLModel::GPUDevice> availableDevices = m_model->availableGPUDevices(0);\n        if (requestedDevice != \"Auto\") {\n            // Use the selected device\n            for (const LLModel::GPUDevice &d : availableDevices) {\n                if (QString::fromStdString(d.selectionName()) == requestedDevice) {\n                    device = &d;\n                    break;\n                }\n            }\n        }\n\n        std::string unavail_reason;\n        if (!device) {\n            // GPU not available\n        } else if (!m_model->initializeGPUDevice(device->index, &unavail_reason)) {\n            qWarning().noquote() << \"embllm WARNING: Did not use GPU:\" 
<< QString::fromStdString(unavail_reason);\n        } else {\n            actualDeviceIsCPU = false;\n        }\n    }\n#endif\n\n    bool success = m_model->loadModel(filePath.toStdString(), n_ctx, 100);\n\n    // CPU fallback\n    if (!actualDeviceIsCPU && !success) {\n        // llama_init_from_file returned nullptr\n        qWarning() << \"embllm WARNING: Did not use GPU: GPU loading failed (out of VRAM?)\";\n\n        if (backend == \"cuda\") {\n            // For CUDA, make sure we don't use the GPU at all - ngl=0 still offloads matmuls\n            try {\n                m_model = LLModel::Implementation::construct(filePath.toStdString(), \"auto\", n_ctx);\n            } catch (const std::exception &e) {\n                qWarning() << \"embllm WARNING: Could not load embedding model:\" << e.what();\n                return false;\n            }\n        }\n\n        success = m_model->loadModel(filePath.toStdString(), n_ctx, 0);\n    }\n\n    if (!success) {\n        qWarning() << \"embllm WARNING: Could not load embedding model\";\n        delete m_model;\n        m_model = nullptr;\n        return false;\n    }\n\n    if (!m_model->supportsEmbedding()) {\n        qWarning() << \"embllm WARNING: Model type does not support embeddings\";\n        delete m_model;\n        m_model = nullptr;\n        return false;\n    }\n\n    // FIXME(jared): the user may want this to take effect without having to restart\n    int n_threads = MySettings::globalInstance()->threadCount();\n    m_model->setThreadCount(n_threads);\n\n    return true;\n}\n\nstd::vector<float> EmbeddingLLMWorker::generateQueryEmbedding(const QString &text)\n{\n    {\n        QMutexLocker locker(&m_mutex);\n\n        if (!hasModel() && !loadModel()) {\n            qWarning() << \"WARNING: Could not load model for embeddings\";\n            return {};\n        }\n\n        if (!isNomic()) {\n            std::vector<float> embedding(m_model->embeddingSize());\n\n            try {\n                
m_model->embed({text.toStdString()}, embedding.data(), /*isRetrieval*/ true);\n            } catch (const std::exception &e) {\n                qWarning() << \"WARNING: LLModel::embed failed:\" << e.what();\n                return {};\n            }\n\n            return embedding;\n        }\n    }\n\n    EmbeddingLLMWorker worker;\n    emit worker.requestAtlasQueryEmbedding(text);\n    worker.wait();\n    return worker.lastResponse();\n}\n\nvoid EmbeddingLLMWorker::sendAtlasRequest(const QStringList &texts, const QString &taskType, const QVariant &userData)\n{\n    QJsonObject root;\n    root.insert(\"model\", \"nomic-embed-text-v1\");\n    root.insert(\"texts\", QJsonArray::fromStringList(texts));\n    root.insert(\"task_type\", taskType);\n\n    QJsonDocument doc(root);\n\n    QUrl nomicUrl(\"https://api-atlas.nomic.ai/v1/embedding/text\");\n    const QString authorization = u\"Bearer %1\"_s.arg(m_nomicAPIKey).trimmed();\n    QNetworkRequest request(nomicUrl);\n    request.setHeader(QNetworkRequest::ContentTypeHeader, \"application/json\");\n    request.setRawHeader(\"Authorization\", authorization.toUtf8());\n    request.setAttribute(QNetworkRequest::User, userData);\n    QNetworkReply *reply = m_networkManager->post(request, doc.toJson(QJsonDocument::Compact));\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort);\n    connect(reply, &QNetworkReply::finished, this, &EmbeddingLLMWorker::handleFinished);\n}\n\nvoid EmbeddingLLMWorker::atlasQueryEmbeddingRequested(const QString &text)\n{\n    {\n        QMutexLocker locker(&m_mutex);\n        if (!hasModel() && !loadModel()) {\n            qWarning() << \"WARNING: Could not load model for embeddings\";\n            return;\n        }\n\n        if (!isNomic()) {\n            qWarning() << \"WARNING: Request to generate sync embeddings for local model invalid\";\n            return;\n        }\n\n        Q_ASSERT(hasModel());\n    }\n\n    sendAtlasRequest({text}, 
\"search_query\");\n}\n\nvoid EmbeddingLLMWorker::docEmbeddingsRequested(const QVector<EmbeddingChunk> &chunks)\n{\n    if (m_stopGenerating)\n        return;\n\n    bool isNomic;\n    {\n        QMutexLocker locker(&m_mutex);\n        if (!hasModel() && !loadModel()) {\n            qWarning() << \"WARNING: Could not load model for embeddings\";\n            return;\n        }\n\n        isNomic = this->isNomic();\n    }\n\n    if (!isNomic) {\n        QVector<EmbeddingResult> results;\n        results.reserve(chunks.size());\n        std::vector<std::string> texts;\n        texts.reserve(chunks.size());\n        for (const auto &c: chunks) {\n            EmbeddingResult result;\n            result.model = c.model;\n            result.folder_id = c.folder_id;\n            result.chunk_id = c.chunk_id;\n            result.embedding.resize(m_model->embeddingSize());\n            results << result;\n            texts.push_back(c.chunk.toStdString());\n        }\n\n        constexpr int BATCH_SIZE = 4;\n        std::vector<float> result;\n        result.resize(chunks.size() * m_model->embeddingSize());\n        for (int j = 0; j < chunks.size(); j += BATCH_SIZE) {\n            QMutexLocker locker(&m_mutex);\n            std::vector batchTexts(texts.begin() + j, texts.begin() + std::min(j + BATCH_SIZE, int(texts.size())));\n            try {\n                m_model->embed(batchTexts, result.data() + j * m_model->embeddingSize(), /*isRetrieval*/ false);\n            } catch (const std::exception &e) {\n                qWarning() << \"WARNING: LLModel::embed failed:\" << e.what();\n                return;\n            }\n        }\n        for (int i = 0; i < chunks.size(); i++)\n            memcpy(results[i].embedding.data(), &result[i * m_model->embeddingSize()], m_model->embeddingSize() * sizeof(float));\n\n        emit embeddingsGenerated(results);\n        return;\n    };\n\n    QStringList texts;\n    for (auto &c: chunks)\n        texts.append(c.chunk);\n    
sendAtlasRequest(texts, \"search_document\", QVariant::fromValue(chunks));\n}\n\nstd::vector<float> jsonArrayToVector(const QJsonArray &jsonArray)\n{\n    std::vector<float> result;\n\n    for (const auto &innerValue: jsonArray) {\n        if (innerValue.isArray()) {\n            QJsonArray innerArray = innerValue.toArray();\n            result.reserve(result.size() + innerArray.size());\n            for (const auto &value: innerArray) {\n                result.push_back(static_cast<float>(value.toDouble()));\n            }\n        }\n    }\n\n    return result;\n}\n\nQVector<EmbeddingResult> jsonArrayToEmbeddingResults(const QVector<EmbeddingChunk>& chunks, const QJsonArray& embeddings)\n{\n    QVector<EmbeddingResult> results;\n\n    if (chunks.size() != embeddings.size()) {\n        qWarning() << \"WARNING: Size of json array result does not match input!\";\n        return results;\n    }\n\n    for (int i = 0; i < chunks.size(); ++i) {\n        const EmbeddingChunk& chunk = chunks.at(i);\n        const QJsonArray embeddingArray = embeddings.at(i).toArray();\n\n        std::vector<float> embeddingVector;\n        for (const auto &value: embeddingArray)\n            embeddingVector.push_back(static_cast<float>(value.toDouble()));\n\n        EmbeddingResult result;\n        result.model = chunk.model;\n        result.folder_id = chunk.folder_id;\n        result.chunk_id = chunk.chunk_id;\n        result.embedding = std::move(embeddingVector);\n        results.push_back(std::move(result));\n    }\n\n    return results;\n}\n\nvoid EmbeddingLLMWorker::handleFinished()\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n\n    QVariant retrievedData = reply->request().attribute(QNetworkRequest::User);\n    QVector<EmbeddingChunk> chunks;\n    if (retrievedData.isValid() && retrievedData.canConvert<QVector<EmbeddingChunk>>())\n        chunks = retrievedData.value<QVector<EmbeddingChunk>>();\n\n    QVariant 
response;\n    if (reply->error() != QNetworkReply::NoError) {\n        response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);\n        Q_ASSERT(response.isValid());\n    }\n    bool ok;\n    int code = response.toInt(&ok);\n    if (!ok || code != 200) {\n        QString errorDetails;\n        QString replyErrorString = reply->errorString().trimmed();\n        QByteArray replyContent = reply->readAll().trimmed();\n        errorDetails = u\"ERROR: Nomic Atlas responded with error code \\\"%1\\\"\"_s.arg(code);\n        if (!replyErrorString.isEmpty())\n            errorDetails += u\". Error Details: \\\"%1\\\"\"_s.arg(replyErrorString);\n        if (!replyContent.isEmpty())\n            errorDetails += u\". Response Content: \\\"%1\\\"\"_s.arg(QString::fromUtf8(replyContent));\n        qWarning() << errorDetails;\n        emit errorGenerated(chunks, errorDetails);\n        return;\n    }\n\n    QByteArray jsonData = reply->readAll();\n\n    QJsonParseError err;\n    QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);\n    if (err.error != QJsonParseError::NoError) {\n        qWarning() << \"ERROR: Couldn't parse Nomic Atlas response:\" << jsonData << err.errorString();\n        return;\n    }\n\n    const QJsonObject root = document.object();\n    const QJsonArray embeddings = root.value(\"embeddings\").toArray();\n\n    if (!chunks.isEmpty()) {\n        emit embeddingsGenerated(jsonArrayToEmbeddingResults(chunks, embeddings));\n    } else {\n        m_lastResponse = jsonArrayToVector(embeddings);\n        emit finished();\n    }\n\n    reply->deleteLater();\n}\n\nEmbeddingLLM::EmbeddingLLM()\n    : QObject(nullptr)\n    , m_embeddingWorker(new EmbeddingLLMWorker)\n{\n    connect(this, &EmbeddingLLM::requestDocEmbeddings, m_embeddingWorker,\n        &EmbeddingLLMWorker::docEmbeddingsRequested, Qt::QueuedConnection);\n    connect(m_embeddingWorker, &EmbeddingLLMWorker::embeddingsGenerated, this,\n        
&EmbeddingLLM::embeddingsGenerated, Qt::QueuedConnection);\n    connect(m_embeddingWorker, &EmbeddingLLMWorker::errorGenerated, this,\n        &EmbeddingLLM::errorGenerated, Qt::QueuedConnection);\n}\n\nEmbeddingLLM::~EmbeddingLLM()\n{\n    delete m_embeddingWorker;\n    m_embeddingWorker = nullptr;\n}\n\nQString EmbeddingLLM::model()\n{\n    return EMBEDDING_MODEL_NAME;\n}\n\n// TODO(jared): embed using all necessary embedding models given collection\nstd::vector<float> EmbeddingLLM::generateQueryEmbedding(const QString &text)\n{\n    return m_embeddingWorker->generateQueryEmbedding(text);\n}\n\nvoid EmbeddingLLM::generateDocEmbeddingsAsync(const QVector<EmbeddingChunk> &chunks)\n{\n    emit requestDocEmbeddings(chunks);\n}\n"
  },
  {
    "path": "gpt4all-chat/src/embllm.h",
    "content": "#ifndef EMBLLM_H\n#define EMBLLM_H\n\n#include <QByteArray>\n#include <QMutex>\n#include <QObject>\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n#include <QThread>\n#include <QVariant>\n#include <QVector> // IWYU pragma: keep\n\n#include <atomic>\n#include <vector>\n\nclass LLModel;\nclass QNetworkAccessManager;\n\n\nstruct EmbeddingChunk {\n    QString model; // TODO(jared): use to select model\n    int folder_id;\n    int chunk_id;\n    QString chunk;\n};\n\nQ_DECLARE_METATYPE(EmbeddingChunk)\n\nstruct EmbeddingResult {\n    QString model;\n    int folder_id;\n    int chunk_id;\n    std::vector<float> embedding;\n};\n\nclass EmbeddingLLMWorker : public QObject {\n    Q_OBJECT\npublic:\n    EmbeddingLLMWorker();\n    ~EmbeddingLLMWorker() override;\n\n    void wait();\n\n    std::vector<float> lastResponse() const { return m_lastResponse; }\n\n    bool loadModel();\n    bool isNomic() const { return !m_nomicAPIKey.isEmpty(); }\n    bool hasModel() const { return isNomic() || m_model; }\n\n    std::vector<float> generateQueryEmbedding(const QString &text);\n\npublic Q_SLOTS:\n    void atlasQueryEmbeddingRequested(const QString &text);\n    void docEmbeddingsRequested(const QVector<EmbeddingChunk> &chunks);\n\nQ_SIGNALS:\n    void requestAtlasQueryEmbedding(const QString &text);\n    void embeddingsGenerated(const QVector<EmbeddingResult> &embeddings);\n    void errorGenerated(const QVector<EmbeddingChunk> &chunks, const QString &error);\n    void finished();\n\nprivate Q_SLOTS:\n    void handleFinished();\n\nprivate:\n    void sendAtlasRequest(const QStringList &texts, const QString &taskType, const QVariant &userData = {});\n\n    QString m_nomicAPIKey;\n    QNetworkAccessManager *m_networkManager;\n    std::vector<float> m_lastResponse;\n    LLModel *m_model = nullptr;\n    std::atomic<bool> m_stopGenerating;\n    QThread m_workerThread;\n    QMutex m_mutex; // guards m_model and m_nomicAPIKey\n};\n\nclass EmbeddingLLM : public 
QObject\n{\n    Q_OBJECT\npublic:\n    EmbeddingLLM();\n    ~EmbeddingLLM() override;\n\n    static QString model();\n    bool loadModel();\n    bool hasModel() const;\n\npublic Q_SLOTS:\n    std::vector<float> generateQueryEmbedding(const QString &text); // synchronous\n    void generateDocEmbeddingsAsync(const QVector<EmbeddingChunk> &chunks);\n\nQ_SIGNALS:\n    void requestDocEmbeddings(const QVector<EmbeddingChunk> &chunks);\n    void embeddingsGenerated(const QVector<EmbeddingResult> &embeddings);\n    void errorGenerated(const QVector<EmbeddingChunk> &chunks, const QString &error);\n\nprivate:\n    EmbeddingLLMWorker *m_embeddingWorker;\n};\n\n#endif // EMBLLM_H\n"
  },
  {
    "path": "gpt4all-chat/src/jinja_helpers.cpp",
    "content": "#include \"jinja_helpers.h\"\n\n#include <QString>\n#include <QUrl>\n\n#include <ranges>\n#include <string>\n#include <utility>\n\nnamespace views  = std::views;\nusing json = nlohmann::ordered_json;\n\n\njson::object_t JinjaResultInfo::AsJson() const\n{\n    return {\n        { \"collection\", m_source->collection.toStdString() },\n        { \"path\",       m_source->path      .toStdString() },\n        { \"file\",       m_source->file      .toStdString() },\n        { \"title\",      m_source->title     .toStdString() },\n        { \"author\",     m_source->author    .toStdString() },\n        { \"date\",       m_source->date      .toStdString() },\n        { \"text\",       m_source->text      .toStdString() },\n        { \"page\",       m_source->page                     },\n        { \"file_uri\",   m_source->fileUri() .toStdString() },\n    };\n}\n\njson::object_t JinjaPromptAttachment::AsJson() const\n{\n    return {\n        { \"url\",               m_attachment->url.toString()    .toStdString() },\n        { \"file\",              m_attachment->file()            .toStdString() },\n        { \"processed_content\", m_attachment->processedContent().toStdString() },\n    };\n}\n\njson::object_t JinjaMessage::AsJson() const\n{\n    json::object_t obj;\n    {\n        json::string_t role;\n        switch (m_item->type()) {\n            using enum MessageItem::Type;\n            case System:       role = \"system\";    break;\n            case Prompt:       role = \"user\";      break;\n            case Response:     role = \"assistant\"; break;\n            case ToolResponse: role = \"tool\";      break;\n        }\n        obj.emplace_back(\"role\", std::move(role));\n    }\n    {\n        QString content;\n        if (m_version == 0 && m_item->type() == MessageItem::Type::Prompt) {\n            content = m_item->bakedPrompt();\n        } else {\n            content = m_item->content();\n        }\n        obj.emplace_back(\"content\", 
content.toStdString());\n    }\n    if (m_item->type() == MessageItem::Type::Prompt) {\n        {\n            auto sources = m_item->sources() | views::transform([](auto &r) {\n                return JinjaResultInfo(r).AsJson();\n            });\n            obj.emplace(\"sources\", json::array_t(sources.begin(), sources.end()));\n        }\n        {\n            auto attachments = m_item->promptAttachments() | views::transform([](auto &pa) {\n                return JinjaPromptAttachment(pa).AsJson();\n            });\n            obj.emplace(\"prompt_attachments\", json::array_t(attachments.begin(), attachments.end()));\n        }\n    }\n    return obj;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/jinja_helpers.h",
    "content": "#pragma once\n\n#include \"chatmodel.h\"\n#include \"database.h\"\n\n#include <nlohmann/json.hpp>\n\n#include <QtTypes> // IWYU pragma: keep\n\n// IWYU pragma: no_forward_declare MessageItem\n// IWYU pragma: no_forward_declare PromptAttachment\n// IWYU pragma: no_forward_declare ResultInfo\n\nusing json = nlohmann::ordered_json;\n\n\ntemplate <typename Derived>\nclass JinjaHelper {\npublic:\n    json::object_t AsJson() const { return static_cast<const Derived *>(this)->AsJson(); }\n};\n\nclass JinjaResultInfo : public JinjaHelper<JinjaResultInfo> {\npublic:\n    explicit JinjaResultInfo(const ResultInfo &source) noexcept\n        : m_source(&source) {}\n\n    json::object_t AsJson() const;\n\nprivate:\n    const ResultInfo *m_source;\n};\n\nclass JinjaPromptAttachment : public JinjaHelper<JinjaPromptAttachment> {\npublic:\n    explicit JinjaPromptAttachment(const PromptAttachment &attachment) noexcept\n        : m_attachment(&attachment) {}\n\n    json::object_t AsJson() const;\n\nprivate:\n    const PromptAttachment *m_attachment;\n};\n\nclass JinjaMessage : public JinjaHelper<JinjaMessage> {\npublic:\n    explicit JinjaMessage(uint version, const MessageItem &item) noexcept\n        : m_version(version), m_item(&item) {}\n\n    json::object_t AsJson() const;\n\nprivate:\n    uint               m_version;\n    const MessageItem *m_item;\n};\n"
  },
  {
    "path": "gpt4all-chat/src/jinja_replacements.cpp",
    "content": "// The map in this file is automatically generated by Jared. Do not hand-edit it.\n\n#include \"jinja_replacements.h\"\n\n#include <utility>\n\n\n// This is a list of prompt templates known to GPT4All and their associated replacements which are automatically used\n// instead when loading the chat template from GGUF. These exist for two primary reasons:\n// - HuggingFace model authors make ugly chat templates because they do not expect the end user to see them;\n// - and chat templates occasionally use features we do not support. This is less true now that we use minja.\n\n// The substitution list.\n\nconst std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS {\n    // calme-2.1-phi3.5-4b.Q6_K.gguf (reported by ThilotE on Discord), Phi-3.5-mini-instruct-Q4_0.gguf (nomic-ai/gpt4all#3345)\n    {\n        // original\n        R\"TEMPLATE({% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- for message in messages %}\n    {%- if message['role'] == 'system' and message['content'] %}\n        {{- '<|system|>\\n' + message['content'] + '<|end|>\\n' }}\n    {%- elif message['role'] == 'user' %}\n        {{- '<|user|>\\n' + message['content'] + '<|end|>\\n' }}\n    {%- elif message['role'] == 'assistant' %}\n        {{- '<|assistant|>\\n' + message['content'] + '<|end|>\\n' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|assistant|>\\n' }}\n{%- else %}\n    {{- eos_token }}\n{%- endif %})TEMPLATE\",\n    },\n    // DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf\n    
{\n        // original\n        R\"TEMPLATE({% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<｜User｜>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<｜Assistant｜><｜tool▁calls▁begin｜><｜tool▁call▁begin｜>' + tool['type'] + '<｜tool▁sep｜>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<｜tool▁call▁end｜>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<｜tool▁call▁begin｜>' + tool['type'] + '<｜tool▁sep｜>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<｜tool▁call▁end｜>'}}{{'<｜tool▁calls▁end｜><｜end▁of▁sentence｜>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<｜tool▁outputs▁end｜>' + message['content'] + '<｜end▁of▁sentence｜>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<｜tool▁outputs▁begin｜><｜tool▁output▁begin｜>' + message['content'] + '<｜tool▁output▁end｜>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<｜tool▁output▁begin｜>' + message['content'] + '<｜tool▁output▁end｜>'}}{%- endif %}{%- endif %}{%- endfor 
-%}{% if ns.is_tool %}{{'<｜tool▁outputs▁end｜>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<｜Assistant｜>'}}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- if not add_generation_prompt is defined %}\n    {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n    {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<｜User｜>' + message['content'] }}\n    {%- endif %}\n    {%- if message['role'] == 'assistant' %}\n        {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n        {{- '<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>' }}\n    {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n    {{- '<｜Assistant｜>' }}\n{%- endif %})TEMPLATE\",\n    },\n    // gemma-2-9b-it-Q4_0.gguf (nomic-ai/gpt4all#3282)\n    {\n        // original\n        R\"TEMPLATE({{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({{- bos_token }}\n{%- if messages[0]['role'] == 'system' %}\n    {{- raise_exception('System role not supported') }}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n        {{- raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n    {%- endif %}\n    {%- if 
message['role'] == 'assistant' %}\n        {%- set role = 'model' %}\n    {%- else %}\n        {%- set role = message['role'] %}\n    {%- endif %}\n    {{- '<start_of_turn>' + role + '\\n' + message['content'] | trim + '<end_of_turn>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<start_of_turn>model\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // ghost-7b-v0.9.1-Q4_0.gguf\n    {\n        // original\n        R\"TEMPLATE({% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n'  + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<|user|>\\n' + message['content'] + eos_token }}\n    {%- elif message['role'] == 'system' %}\n        {{- '<|system|>\\n' + message['content'] + eos_token }}\n    {%- elif message['role'] == 'assistant' %}\n        {{- '<|assistant|>\\n' + message['content'] + eos_token }}\n    {%- endif %}\n    {%- if loop.last and add_generation_prompt %}\n        {{- '<|assistant|>' }}\n    {%- endif %}\n{%- endfor %})TEMPLATE\",\n    },\n    // granite-3.1-3b-a800m-instruct-Q4_0.gguf, granite-3.1-8b-instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)\n    {\n        // original\n        R\"TEMPLATE({%- if messages[0]['role'] == 'system' %}{%- set system_message = messages[0]['content'] %}{%- set loop_messages = messages[1:] %}{%- else %}{%- set system_message = \"Knowledge Cutoff Date: April 2024. You are Granite, developed by IBM.\" %}{%- if tools and documents %}{%- set system_message = system_message + \" You are a helpful AI assistant with access to the following tools. 
When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request. Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}{%- elif tools %}{%- set system_message = system_message + \" You are a helpful AI assistant with access to the following tools. When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\" %}{%- elif documents %}{%- set system_message = system_message + \" Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}{%- else %}{%- set system_message = system_message + \" You are a helpful AI assistant.\" %}{%- endif %}{%- if controls and 'citations' in controls and documents %}{%- set system_message = system_message + ' In your response, use the symbols <co> and </co> to indicate when a fact comes from a document in the search result, e.g <co>0</co> for a fact from document 0. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}{%- endif %}{%- if controls and 'hallucinations' in controls and documents %}{%- set system_message = system_message + ' Finally, after the response is written, include a numbered list of sentences from the response that are potentially hallucinated and not based in the documents.' 
%}{%- endif %}{%- set loop_messages = messages %}{%- endif %}{{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|> ' }}{%- if tools %}{{- '<|start_of_role|>tools<|end_of_role|>' }}{{- tools | tojson(indent=4) }}{{- '<|end_of_text|> ' }}{%- endif %}{%- if documents %}{{- '<|start_of_role|>documents<|end_of_role|>' }}{%- for document in documents %}{{- 'Document ' + loop.index0 | string + ' ' }}{{- document['text'] }}{%- if not loop.last %}{{- ' '}}{%- endif%}{%- endfor %}{{- '<|end_of_text|> ' }}{%- endif %}{%- for message in loop_messages %}{{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }}{%- if loop.last and add_generation_prompt %}{{- '<|start_of_role|>assistant' }}{%- if controls %}{{- ' ' + controls | tojson()}}{%- endif %}{{- '<|end_of_role|>' }}{%- endif %}{%- endfor %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"Knowledge Cutoff Date: April 2024. You are Granite, developed by IBM. 
You are a helpful AI assistant.\" %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|> ' }}\n{%- for message in loop_messages %}\n    {{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }}\n    {%- if loop.last and add_generation_prompt %}\n        {{- '<|start_of_role|>assistant<|end_of_role|>' }}\n    {%- endif %}\n{%- endfor %})TEMPLATE\",\n    },\n    // Hermes-3-Llama-3.2-3B.Q4_0.gguf, mistral-7b-openorca.gguf2.Q4_0.gguf\n    {\n        // original\n        R\"TEMPLATE({% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- for message in messages %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // Llama-3.2-1B-Instruct-Q4_0.gguf, Llama-3.2-3B-Instruct-Q4_0.gguf, SummLlama3.2-3B-Q4_0.gguf (nomic-ai/gpt4all#3309)\n    {\n        // original\n        R\"TEMPLATE({{- bos_token }}\n{%- if custom_tools is defined %}\n    {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n    {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n    {%- if strftime_now is defined %}\n        {%- set date_string = strftime_now(\"%d %b %Y\") %}\n    {%- else %}\n        {%- set date_string = \"26 Jul 2024\" %}\n    {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content']|trim %}\n    {%- set messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n    {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n    {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n    {#- Extract the first user message so we can plug it in here #}\n    {%- if messages | length != 0 %}\n        {%- set first_user_message = messages[0]['content']|trim %}\n        {%- set messages = messages[1:] %}\n    {%- else %}\n        {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n    {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n    {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n    {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n    {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n    {%- elif 'tool_calls' in message %}\n        {%- if not message.tool_calls|length == 1 %}\n            {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n        {%- endif %}\n        {%- set tool_call = message.tool_calls[0].function %}\n        {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n        {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n        {{- '\"parameters\": ' }}\n        {{- tool_call.arguments | tojson }}\n        {{- \"}\" }}\n        {{- \"<|eot_id|>\" }}\n    {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n        {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n        {%- if message.content is mapping or message.content is iterable %}\n            {{- message.content | tojson }}\n        {%- else %}\n            {{- message.content }}\n        {%- endif %}\n        {{- \"<|eot_id|>\" }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] | trim %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set system_message = '' %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // Llama-3.3-70B-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3305)\n    {\n        // original\n        R\"TEMPLATE({{- bos_token }}\n{%- if custom_tools is defined %}\n    {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n    {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n    {%- set date_string = \"26 Jul 2024\" %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content']|trim %}\n    {%- set messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message + builtin tools #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if builtin_tools is defined or tools is not none %}\n    {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{%- if builtin_tools is defined %}\n    {{- \"Tools: \" + builtin_tools | reject('equalto', 'code_interpreter') | join(\", \") + \"\\n\\n\"}}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n    {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n    {#- Extract the first user message so we can plug it in here #}\n    {%- if messages | length != 0 %}\n        {%- set first_user_message = messages[0]['content']|trim %}\n        {%- set messages = messages[1:] %}\n    {%- else %}\n        {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n    {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n    {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n    {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n    {{- 'Respond in the format {\"name\": function 
name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n    {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n    {%- elif 'tool_calls' in message %}\n        {%- if not message.tool_calls|length == 1 %}\n            {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n        {%- endif %}\n        {%- set tool_call = message.tool_calls[0].function %}\n        {%- if builtin_tools is defined and tool_call.name in builtin_tools %}\n            {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n            {{- \"<|python_tag|>\" + tool_call.name + \".call(\" }}\n            {%- for arg_name, arg_val in tool_call.arguments | items %}\n                {{- arg_name + '=\"' + arg_val + '\"' }}\n                {%- if not loop.last %}\n                    {{- \", \" }}\n                {%- endif %}\n                {%- endfor %}\n            {{- \")\" }}\n        {%- else  %}\n            {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n            {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n            {{- '\"parameters\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- \"}\" }}\n        {%- endif %}\n        {%- if builtin_tools is defined %}\n            {#- This means we're in ipython mode #}\n            {{- \"<|eom_id|>\" }}\n        {%- else %}\n            {{- \"<|eot_id|>\" }}\n        {%- endif %}\n    {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n        {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n        {%- if 
message.content is mapping or message.content is iterable %}\n            {{- message.content | tojson }}\n        {%- else %}\n            {{- message.content }}\n        {%- endif %}\n        {{- \"<|eot_id|>\" }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] | trim %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set system_message = '' %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // Llama3-DiscoLeo-Instruct-8B-32k-v0.1-Q4_0.gguf (nomic-ai/gpt4all#3347)\n    {\n        // original\n        R\"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- for message 
in messages %}\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' %}\n    {%- if loop.index0 == 0 %}\n        {%- set content = bos_token + content %}\n    {%- endif %}\n    {{- content }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf\n    {\n        // original\n        R\"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n    {%- if loop.index0 == 0 %}\n        {%- set content = bos_token + content %}\n    {%- endif %}\n    {{- content }}\n{%- endfor %}\n{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }})TEMPLATE\",\n    },\n    // Meta-Llama-3-8B-Instruct.Q4_0.gguf\n    {\n        // original\n        R\"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ 
message['content'] | trim + '<|eot_id|>' %}\n    {{- content }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // Mistral-Nemo-Instruct-2407-Q4_0.gguf (nomic-ai/gpt4all#3284)\n    {\n        // original\n        R\"TEMPLATE({%- if messages[0][\"role\"] == \"system\" %}\n    {%- set system_message = messages[0][\"content\"] %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n{%- set user_messages = loop_messages | selectattr(\"role\", \"equalto\", \"user\") | list %}\n\n{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}\n{%- set ns = namespace() %}\n{%- set ns.index = 0 %}\n{%- for message in loop_messages %}\n    {%- if not (message.role == \"tool\" or message.role == \"tool_results\" or (message.tool_calls is defined and message.tool_calls is not none)) %}\n        {%- if (message[\"role\"] == \"user\") != (ns.index % 2 == 0) %}\n            {{- raise_exception(\"After the optional system message, conversation roles must alternate user/assistant/user/assistant/...\") }}\n        {%- endif %}\n        {%- set ns.index = ns.index + 1 %}\n    {%- endif %}\n{%- endfor %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n    {%- if message[\"role\"] == \"user\" %}\n        {%- if tools is not none and (message == user_messages[-1]) %}\n            {{- \"[AVAILABLE_TOOLS][\" }}\n            {%- for tool in tools %}\n                {%- set tool = tool.function %}\n                {{- '{\"type\": \"function\", \"function\": {' }}\n                {%- for key, val in tool.items() if key != \"return\" %}\n                    {%- if val is string %}\n                        {{- '\"' + key + '\": \"' + val + '\"' }}\n                    {%- else %}\n                        {{- 
'\"' + key + '\": ' + val|tojson }}\n                    {%- endif %}\n                    {%- if not loop.last %}\n                        {{- \", \" }}\n                    {%- endif %}\n                {%- endfor %}\n                {{- \"}}\" }}\n                {%- if not loop.last %}\n                    {{- \", \" }}\n                {%- else %}\n                    {{- \"]\" }}\n                {%- endif %}\n            {%- endfor %}\n            {{- \"[/AVAILABLE_TOOLS]\" }}\n            {%- endif %}\n        {%- if loop.last and system_message is defined %}\n            {{- \"[INST]\" + system_message + \"\\n\\n\" + message[\"content\"] + \"[/INST]\" }}\n        {%- else %}\n            {{- \"[INST]\" + message[\"content\"] + \"[/INST]\" }}\n        {%- endif %}\n    {%- elif (message.tool_calls is defined and message.tool_calls is not none) %}\n        {{- \"[TOOL_CALLS][\" }}\n        {%- for tool_call in message.tool_calls %}\n            {%- set out = tool_call.function|tojson %}\n            {{- out[:-1] }}\n            {%- if not tool_call.id is defined or tool_call.id|length != 9 %}\n                {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n            {%- endif %}\n            {{- ', \"id\": \"' + tool_call.id + '\"}' }}\n            {%- if not loop.last %}\n                {{- \", \" }}\n            {%- else %}\n                {{- \"]\" + eos_token }}\n            {%- endif %}\n        {%- endfor %}\n    {%- elif message[\"role\"] == \"assistant\" %}\n        {{- message[\"content\"] + eos_token}}\n    {%- elif message[\"role\"] == \"tool_results\" or message[\"role\"] == \"tool\" %}\n        {%- if message.content is defined and message.content.content is defined %}\n            {%- set content = message.content.content %}\n        {%- else %}\n            {%- set content = message.content %}\n        {%- endif %}\n        {{- '[TOOL_RESULTS]{\"content\": ' + content|string + \", \" }}\n        {%- 
if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}\n            {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n        {%- endif %}\n        {{- '\"call_id\": \"' + message.tool_call_id + '\"}[/TOOL_RESULTS]' }}\n    {%- else %}\n        {{- raise_exception(\"Only user and assistant roles are supported, with the exception of an initial optional system message!\") }}\n    {%- endif %}\n{%- endfor %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n\n{{- bos_token }}\n{%- for message in messages %}\n    {#- This block checks for alternating user/assistant messages, skipping tool calling messages #}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n        {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n    {%- endif %}\n\n    {%- if message['role'] == 'user' %}\n        {%- if loop.last and loop_start == 1 %}\n            {{- '[INST]' + system_message + '\\n\\n' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- endif %}\n    {%- elif message['role'] == 'assistant' %}\n        {{- message['content'] + eos_token }}\n    {%- else %}\n        {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n    {%- endif %}\n{%- endfor %})TEMPLATE\",\n    },\n    // Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf\n    {\n        // original\n        R\"TEMPLATE({% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %})TEMPLATE\",\n       
 // replacement\n        R\"TEMPLATE({%- for message in messages %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // occiglot-7b-de-en-instruct.Q4_0.gguf (nomic-ai/gpt4all#3283)\n    {\n        // original\n        R\"TEMPLATE({{'<s>'}}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant. Please give a long and detailed answer.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({{- bos_token }}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set loop_start = 1 %}\n    {%- set system_message = messages[0]['content'] %}\n{%- else %}\n    {%- set loop_start = 0 %}\n    {%- set system_message = 'You are a helpful assistant. Please give a long and detailed answer.' 
%}\n{%- endif %}\n{{- '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n    // OLMoE-1B-7B-0125-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)\n    {\n        // original\n        R\"TEMPLATE({{ bos_token }}{% for message in messages %}{% if message['role'] == 'system' %}{{ '<|system|>\n' + message['content'] + '\n' }}{% elif message['role'] == 'user' %}{{ '<|user|>\n' + message['content'] + '\n' }}{% elif message['role'] == 'assistant' %}{% if not loop.last %}{{ '<|assistant|>\n'  + message['content'] + eos_token + '\n' }}{% else %}{{ '<|assistant|>\n'  + message['content'] + eos_token }}{% endif %}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|assistant|>\n' }}{% endif %}{% endfor %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({{- bos_token }}\n{%- for message in messages %}\n    {%- if message['role'] == 'system' %}\n        {{- '<|system|>\\n' + message['content'] + '\\n' }}\n    {%- elif message['role'] == 'user' %}\n        {{- '<|user|>\\n' + message['content'] + '\\n' }}\n    {%- elif message['role'] == 'assistant' %}\n        {%- if not loop.last %}\n            {{- '<|assistant|>\\n' + message['content'] + eos_token + '\\n' }}\n        {%- else %}\n            {{- '<|assistant|>\\n' + message['content'] + eos_token }}\n        {%- endif %}\n    {%- endif %}\n    {%- if loop.last and add_generation_prompt %}\n        {{- '<|assistant|>\\n' }}\n    {%- endif %}\n{%- endfor %})TEMPLATE\",\n    },\n    // OLMoE-1B-7B-0924-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)\n    {\n        // original\n        R\"TEMPLATE({{ bos_token }}{% for message in messages %}\n{% if message['role'] == 'system' %}\n{{ '<|system|>\n' + 
message['content'] }}\n{% elif message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n'  + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({{- bos_token }}\n{%- for message in messages %}\n    {%- if message['role'] == 'system' %}\n        {{- '<|system|>\\n' + message['content'] }}\n    {%- elif message['role'] == 'user' %}\n        {{- '<|user|>\\n' + message['content'] }}\n    {%- elif message['role'] == 'assistant' %}\n        {{- '<|assistant|>\\n' + message['content'] + eos_token }}\n    {%- endif %}\n    {%- if loop.last and add_generation_prompt %}\n        {{- '<|assistant|>' }}\n    {%- endif %}\n{%- endfor %})TEMPLATE\",\n    },\n    // Phi-3.1-mini-128k-instruct-Q4_0.gguf (nomic-ai/gpt4all#3346)\n    {\n        // original\n        R\"TEMPLATE({% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- for message in messages %}\n    {%- if message['role'] == 'system' %}\n        {{- '<|system|>\\n' + message['content'] + '<|end|>\\n' }}\n    {%- elif message['role'] == 'user' %}\n        {{- '<|user|>\\n' + message['content'] + '<|end|>\\n' }}\n    {%- elif message['role'] == 'assistant' %}\n        {{- '<|assistant|>\\n' + message['content'] + '<|end|>\\n' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|assistant|>\\n' }}\n{%- else %}\n    {{- eos_token }}\n{%- endif %})TEMPLATE\",\n   
 },\n    // Phi-3-mini-4k-instruct.Q4_0.gguf\n    {\n        // original\n        R\"TEMPLATE({{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({{- bos_token }}\n{%- for message in messages %}\n    {{- '<|' + message['role'] + '|>\\n' + message['content'] + '<|end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|assistant|>\\n' }}\n{%- else %}\n    {{- eos_token }}\n{%- endif %})TEMPLATE\",\n    },\n    // qwen2-1_5b-instruct-q4_0.gguf (nomic-ai/gpt4all#3263), qwen2-72b-instruct-q4_0.gguf\n    {\n        // original\n        R\"TEMPLATE({% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %})TEMPLATE\",\n        // replacement\n        R\"TEMPLATE({%- for message in messages %}\n    {%- if loop.first and messages[0]['role'] != 'system' %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %})TEMPLATE\",\n    },\n};\n"
  },
  {
    "path": "gpt4all-chat/src/jinja_replacements.h",
    "content": "#pragma once\n\n#include <string_view>\n#include <unordered_map>\n\nextern const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS;\n"
  },
  {
    "path": "gpt4all-chat/src/llm.cpp",
    "content": "#include \"llm.h\"\n\n#include <gpt4all-backend/llmodel.h>\n#include <gpt4all-backend/sysinfo.h>\n\n#include <QCoreApplication>\n#include <QDebug>\n#include <QFileInfo>\n#include <QGlobalStatic>\n#include <QNetworkInformation>\n#include <QProcess>\n#include <QSettings>\n#include <QUrl>\n#include <QtLogging>\n#include <QtSystemDetection>\n\n#include <string>\n\n#ifdef GPT4ALL_OFFLINE_INSTALLER\n#   include <QDesktopServices>\n#else\n#   include \"network.h\"\n#endif\n\n#ifdef Q_OS_MAC\n#include \"macosdock.h\"\n#endif\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nclass MyLLM: public LLM { };\nQ_GLOBAL_STATIC(MyLLM, llmInstance)\nLLM *LLM::globalInstance()\n{\n    return llmInstance();\n}\n\nLLM::LLM()\n    : QObject{nullptr}\n    , m_compatHardware(LLModel::Implementation::hasSupportedCPU())\n{\n    QNetworkInformation::loadDefaultBackend();\n    auto * netinfo = QNetworkInformation::instance();\n    if (netinfo) {\n        connect(netinfo, &QNetworkInformation::reachabilityChanged,\n            this, &LLM::isNetworkOnlineChanged);\n    }\n}\n\nbool LLM::hasSettingsAccess() const\n{\n    QSettings settings;\n    settings.sync();\n    return settings.status() == QSettings::NoError;\n}\n\nbool LLM::checkForUpdates() const\n{\n#ifdef GPT4ALL_OFFLINE_INSTALLER\n#   pragma message(__FILE__ \": WARNING: offline installer build will not check for updates!\")\n    return QDesktopServices::openUrl(QUrl(\"https://github.com/nomic-ai/gpt4all/releases\"));\n#else\n    Network::globalInstance()->trackEvent(\"check_for_updates\");\n\n#if defined(Q_OS_LINUX)\n    QString tool = u\"maintenancetool\"_s;\n#elif defined(Q_OS_WINDOWS)\n    QString tool = u\"maintenancetool.exe\"_s;\n#elif defined(Q_OS_DARWIN)\n    QString tool = u\"../../../maintenancetool.app/Contents/MacOS/maintenancetool\"_s;\n#endif\n\n    QString fileName = QCoreApplication::applicationDirPath()\n        + \"/../\" + tool;\n    if (!QFileInfo::exists(fileName)) {\n        qDebug() << 
\"Couldn't find tool at\" << fileName << \"so cannot check for updates!\";\n        return false;\n    }\n\n    return QProcess::startDetached(fileName);\n#endif\n}\n\nbool LLM::directoryExists(const QString &path)\n{\n    const QUrl url(path);\n    const QString localFilePath = url.isLocalFile() ? url.toLocalFile() : path;\n    const QFileInfo info(localFilePath);\n    return info.exists() && info.isDir();\n}\n\nbool LLM::fileExists(const QString &path)\n{\n    const QUrl url(path);\n    const QString localFilePath = url.isLocalFile() ? url.toLocalFile() : path;\n    const QFileInfo info(localFilePath);\n    return info.exists() && info.isFile();\n}\n\nqint64 LLM::systemTotalRAMInGB() const\n{\n    return getSystemTotalRAMInGB();\n}\n\nQString LLM::systemTotalRAMInGBString() const\n{\n    return QString::fromStdString(getSystemTotalRAMInGBString());\n}\n\nbool LLM::isNetworkOnline() const\n{\n    auto * netinfo = QNetworkInformation::instance();\n    return !netinfo || netinfo->reachability() == QNetworkInformation::Reachability::Online;\n}\n\nvoid LLM::showDockIcon() const\n{\n#ifdef Q_OS_MAC\n    MacOSDock::showIcon();\n#else\n    qt_noop();\n#endif\n}\n\nvoid LLM::hideDockIcon() const\n{\n#ifdef Q_OS_MAC\n    MacOSDock::hideIcon();\n#else\n    qt_noop();\n#endif\n}\n"
  },
  {
    "path": "gpt4all-chat/src/llm.h",
    "content": "#ifndef LLM_H\n#define LLM_H\n\n#include <QObject>\n#include <QString>\n#include <QtTypes>\n\n\nclass LLM : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(bool isNetworkOnline READ isNetworkOnline NOTIFY isNetworkOnlineChanged)\n\npublic:\n    static LLM *globalInstance();\n\n    Q_INVOKABLE bool hasSettingsAccess() const;\n    Q_INVOKABLE bool compatHardware() const { return m_compatHardware; }\n\n    Q_INVOKABLE bool checkForUpdates() const;\n    Q_INVOKABLE static bool directoryExists(const QString &path);\n    Q_INVOKABLE static bool fileExists(const QString &path);\n    Q_INVOKABLE qint64 systemTotalRAMInGB() const;\n    Q_INVOKABLE QString systemTotalRAMInGBString() const;\n    Q_INVOKABLE bool isNetworkOnline() const;\n\n    Q_INVOKABLE void showDockIcon() const;\n    Q_INVOKABLE void hideDockIcon() const;\n\nQ_SIGNALS:\n    void isNetworkOnlineChanged();\n\nprivate:\n    bool m_compatHardware;\n\nprivate:\n    explicit LLM();\n    ~LLM() {}\n    friend class MyLLM;\n};\n\n#endif // LLM_H\n"
  },
  {
    "path": "gpt4all-chat/src/localdocs.cpp",
    "content": "#include \"localdocs.h\"\n\n#include \"database.h\"\n#include \"embllm.h\"\n#include \"mysettings.h\"\n\n#include <QCoreApplication>\n#include <QDebug>\n#include <QGlobalStatic>\n#include <QGuiApplication>\n#include <QList>\n#include <QUrl>\n#include <Qt>\n#include <QtLogging>\n\n\nclass MyLocalDocs: public LocalDocs { };\nQ_GLOBAL_STATIC(MyLocalDocs, localDocsInstance)\nLocalDocs *LocalDocs::globalInstance()\n{\n    return localDocsInstance();\n}\n\nLocalDocs::LocalDocs()\n    : QObject(nullptr)\n    , m_localDocsModel(new LocalDocsModel(this))\n    , m_database(nullptr)\n{\n    connect(MySettings::globalInstance(), &MySettings::localDocsChunkSizeChanged, this, &LocalDocs::handleChunkSizeChanged);\n    connect(MySettings::globalInstance(), &MySettings::localDocsFileExtensionsChanged, this, &LocalDocs::handleFileExtensionsChanged);\n\n    // Create the DB with the chunk size from settings\n    m_database = new Database(MySettings::globalInstance()->localDocsChunkSize(),\n                              MySettings::globalInstance()->localDocsFileExtensions());\n\n    connect(this, &LocalDocs::requestStart, m_database,\n        &Database::start, Qt::QueuedConnection);\n    connect(this, &LocalDocs::requestForceIndexing, m_database,\n        &Database::forceIndexing, Qt::QueuedConnection);\n    connect(this, &LocalDocs::forceRebuildFolder, m_database,\n        &Database::forceRebuildFolder, Qt::QueuedConnection);\n    connect(this, &LocalDocs::requestAddFolder, m_database,\n        &Database::addFolder, Qt::QueuedConnection);\n    connect(this, &LocalDocs::requestRemoveFolder, m_database,\n        &Database::removeFolder, Qt::QueuedConnection);\n    connect(this, &LocalDocs::requestChunkSizeChange, m_database,\n        &Database::changeChunkSize, Qt::QueuedConnection);\n    connect(this, &LocalDocs::requestFileExtensionsChange, m_database,\n        &Database::changeFileExtensions, Qt::QueuedConnection);\n    connect(m_database, 
&Database::databaseValidChanged,\n        this, &LocalDocs::databaseValidChanged, Qt::QueuedConnection);\n\n    // Connections for modifying the model and keeping it updated with the database\n    connect(m_database, &Database::requestUpdateGuiForCollectionItem,\n        m_localDocsModel, &LocalDocsModel::updateCollectionItem, Qt::QueuedConnection);\n    connect(m_database, &Database::requestAddGuiCollectionItem,\n        m_localDocsModel, &LocalDocsModel::addCollectionItem, Qt::QueuedConnection);\n    connect(m_database, &Database::requestRemoveGuiFolderById,\n        m_localDocsModel, &LocalDocsModel::removeFolderById, Qt::QueuedConnection);\n    connect(m_database, &Database::requestGuiCollectionListUpdated,\n        m_localDocsModel, &LocalDocsModel::collectionListUpdated, Qt::QueuedConnection);\n\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, this, &LocalDocs::aboutToQuit);\n}\n\nvoid LocalDocs::aboutToQuit()\n{\n    delete m_database;\n    m_database = nullptr;\n}\n\nvoid LocalDocs::addFolder(const QString &collection, const QString &path)\n{\n    const QUrl url(path);\n    const QString localPath = url.isLocalFile() ? 
url.toLocalFile() : path;\n\n    const QString embedding_model = EmbeddingLLM::model();\n    if (embedding_model.isEmpty()) {\n        qWarning() << \"ERROR: We have no embedding model\";\n        return;\n    }\n\n    emit requestAddFolder(collection, localPath, embedding_model);\n}\n\nvoid LocalDocs::removeFolder(const QString &collection, const QString &path)\n{\n    emit requestRemoveFolder(collection, path);\n}\n\nvoid LocalDocs::forceIndexing(const QString &collection)\n{\n    const QString embedding_model = EmbeddingLLM::model();\n    if (embedding_model.isEmpty()) {\n        qWarning() << \"ERROR: We have no embedding model\";\n        return;\n    }\n\n    emit requestForceIndexing(collection, embedding_model);\n}\n\nvoid LocalDocs::handleChunkSizeChanged()\n{\n    emit requestChunkSizeChange(MySettings::globalInstance()->localDocsChunkSize());\n}\n\nvoid LocalDocs::handleFileExtensionsChanged()\n{\n    emit requestFileExtensionsChange(MySettings::globalInstance()->localDocsFileExtensions());\n}\n"
  },
  {
    "path": "gpt4all-chat/src/localdocs.h",
    "content": "#ifndef LOCALDOCS_H\n#define LOCALDOCS_H\n\n#include \"database.h\"\n#include \"localdocsmodel.h\"\n\n#include <QObject>\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n\n// IWYU pragma: no_forward_declare LocalDocsModel\n\n\nclass LocalDocs : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(bool databaseValid READ databaseValid NOTIFY databaseValidChanged)\n    Q_PROPERTY(LocalDocsModel *localDocsModel READ localDocsModel NOTIFY localDocsModelChanged)\n\npublic:\n    static LocalDocs *globalInstance();\n\n    LocalDocsModel *localDocsModel() const { return m_localDocsModel; }\n\n    Q_INVOKABLE void addFolder(const QString &collection, const QString &path);\n    Q_INVOKABLE void removeFolder(const QString &collection, const QString &path);\n    Q_INVOKABLE void forceIndexing(const QString &collection);\n\n    Database *database() const { return m_database; }\n\n    bool databaseValid() const { return m_database->isValid(); }\n\npublic Q_SLOTS:\n    void handleChunkSizeChanged();\n    void handleFileExtensionsChanged();\n    void aboutToQuit();\n\nQ_SIGNALS:\n    void requestStart();\n    void requestForceIndexing(const QString &collection, const QString &embedding_model);\n    void forceRebuildFolder(const QString &path);\n    void requestAddFolder(const QString &collection, const QString &path, const QString &embedding_model);\n    void requestRemoveFolder(const QString &collection, const QString &path);\n    void requestChunkSizeChange(int chunkSize);\n    void requestFileExtensionsChange(const QStringList &extensions);\n    void localDocsModelChanged();\n    void databaseValidChanged();\n\nprivate:\n    LocalDocsModel *m_localDocsModel;\n    Database *m_database;\n\nprivate:\n    explicit LocalDocs();\n    friend class MyLocalDocs;\n};\n\n#endif // LOCALDOCS_H\n"
  },
  {
    "path": "gpt4all-chat/src/localdocsmodel.cpp",
    "content": "#include \"localdocsmodel.h\"\n\n#include \"localdocs.h\"\n#include \"network.h\"\n\n#include <QDateTime>\n#include <QMap>\n#include <QVector> // IWYU pragma: keep\n\n#include <utility>\n\n\nLocalDocsCollectionsModel::LocalDocsCollectionsModel(QObject *parent)\n    : QSortFilterProxyModel(parent)\n{\n    setSourceModel(LocalDocs::globalInstance()->localDocsModel());\n\n    connect(LocalDocs::globalInstance()->localDocsModel(),\n        &LocalDocsModel::updatingChanged, this, &LocalDocsCollectionsModel::maybeTriggerUpdatingCountChanged);\n    connect(this, &LocalDocsCollectionsModel::rowsInserted, this, &LocalDocsCollectionsModel::countChanged);\n    connect(this, &LocalDocsCollectionsModel::rowsRemoved, this, &LocalDocsCollectionsModel::countChanged);\n    connect(this, &LocalDocsCollectionsModel::modelReset, this, &LocalDocsCollectionsModel::countChanged);\n}\n\nbool LocalDocsCollectionsModel::filterAcceptsRow(int sourceRow,\n                                       const QModelIndex &sourceParent) const\n{\n    QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);\n    const QString collection = sourceModel()->data(index, LocalDocsModel::CollectionRole).toString();\n    return m_collections.contains(collection);\n}\n\nvoid LocalDocsCollectionsModel::setCollections(const QList<QString> &collections)\n{\n    m_collections = collections;\n    invalidateFilter();\n    maybeTriggerUpdatingCountChanged();\n}\n\nint LocalDocsCollectionsModel::updatingCount() const\n{\n    return m_updatingCount;\n}\n\nvoid LocalDocsCollectionsModel::maybeTriggerUpdatingCountChanged()\n{\n    int updatingCount = 0;\n    for (int row = 0; row < sourceModel()->rowCount(); ++row) {\n        QModelIndex index = sourceModel()->index(row, 0);\n        const QString collection = sourceModel()->data(index, LocalDocsModel::CollectionRole).toString();\n        if (!m_collections.contains(collection))\n            continue;\n        bool updating = 
sourceModel()->data(index, LocalDocsModel::UpdatingRole).toBool();\n        if (updating)\n            ++updatingCount;\n    }\n    if (updatingCount != m_updatingCount) {\n        m_updatingCount = updatingCount;\n        emit updatingCountChanged();\n    }\n}\n\nLocalDocsModel::LocalDocsModel(QObject *parent)\n    : QAbstractListModel(parent)\n{\n    connect(this, &LocalDocsModel::rowsInserted, this, &LocalDocsModel::countChanged);\n    connect(this, &LocalDocsModel::rowsRemoved, this, &LocalDocsModel::countChanged);\n    connect(this, &LocalDocsModel::modelReset, this, &LocalDocsModel::countChanged);\n}\n\nint LocalDocsModel::rowCount(const QModelIndex &parent) const\n{\n    Q_UNUSED(parent);\n    return m_collectionList.size();\n}\n\nQVariant LocalDocsModel::data(const QModelIndex &index, int role) const\n{\n    if (!index.isValid() || index.row() < 0 || index.row() >= m_collectionList.size())\n        return QVariant();\n\n    const CollectionItem item = m_collectionList.at(index.row());\n    switch (role) {\n        case CollectionRole:\n            return item.collection;\n        case FolderPathRole:\n            return item.folder_path;\n        case InstalledRole:\n            return item.installed;\n        case IndexingRole:\n            return item.indexing;\n        case ErrorRole:\n            return item.error;\n        case ForceIndexingRole:\n            return item.forceIndexing;\n        case CurrentDocsToIndexRole:\n            return item.currentDocsToIndex;\n        case TotalDocsToIndexRole:\n            return item.totalDocsToIndex;\n        case CurrentBytesToIndexRole:\n            return quint64(item.currentBytesToIndex);\n        case TotalBytesToIndexRole:\n            return quint64(item.totalBytesToIndex);\n        case CurrentEmbeddingsToIndexRole:\n            return quint64(item.currentEmbeddingsToIndex);\n        case TotalEmbeddingsToIndexRole:\n            return quint64(item.totalEmbeddingsToIndex);\n        case 
TotalDocsRole:\n            return quint64(item.totalDocs);\n        case TotalWordsRole:\n            return quint64(item.totalWords);\n        case TotalTokensRole:\n            return quint64(item.totalTokens);\n        case StartUpdateRole:\n            return item.startUpdate;\n        case LastUpdateRole:\n            return item.lastUpdate;\n        case FileCurrentlyProcessingRole:\n            return item.fileCurrentlyProcessing;\n        case EmbeddingModelRole:\n            return item.embeddingModel;\n        case UpdatingRole:\n            return item.indexing || item.currentEmbeddingsToIndex != 0;\n    }\n\n    return QVariant();\n}\n\nQHash<int, QByteArray> LocalDocsModel::roleNames() const\n{\n    QHash<int, QByteArray> roles;\n    roles[CollectionRole] = \"collection\";\n    roles[FolderPathRole] = \"folder_path\";\n    roles[InstalledRole] = \"installed\";\n    roles[IndexingRole] = \"indexing\";\n    roles[ErrorRole] = \"error\";\n    roles[ForceIndexingRole] = \"forceIndexing\";\n    roles[CurrentDocsToIndexRole] = \"currentDocsToIndex\";\n    roles[TotalDocsToIndexRole] = \"totalDocsToIndex\";\n    roles[CurrentBytesToIndexRole] = \"currentBytesToIndex\";\n    roles[TotalBytesToIndexRole] = \"totalBytesToIndex\";\n    roles[CurrentEmbeddingsToIndexRole] = \"currentEmbeddingsToIndex\";\n    roles[TotalEmbeddingsToIndexRole] = \"totalEmbeddingsToIndex\";\n    roles[TotalDocsRole] = \"totalDocs\";\n    roles[TotalWordsRole] = \"totalWords\";\n    roles[TotalTokensRole] = \"totalTokens\";\n    roles[StartUpdateRole] = \"startUpdate\";\n    roles[LastUpdateRole] = \"lastUpdate\";\n    roles[FileCurrentlyProcessingRole] = \"fileCurrentlyProcessing\";\n    roles[EmbeddingModelRole] = \"embeddingModel\";\n    roles[UpdatingRole] = \"updating\";\n    return roles;\n}\n\nvoid LocalDocsModel::updateCollectionItem(const CollectionItem &item)\n{\n    for (int i = 0; i < m_collectionList.size(); ++i) {\n        CollectionItem &stored = m_collectionList[i];\n 
       if (stored.folder_id != item.folder_id)\n            continue;\n\n        QVector<int> changed;\n        if (stored.folder_path != item.folder_path)\n            changed.append(FolderPathRole);\n        if (stored.installed != item.installed)\n            changed.append(InstalledRole);\n        if (stored.indexing != item.indexing) {\n            changed.append(IndexingRole);\n            changed.append(UpdatingRole);\n        }\n        if (stored.error != item.error)\n            changed.append(ErrorRole);\n        if (stored.forceIndexing != item.forceIndexing)\n            changed.append(ForceIndexingRole);\n        if (stored.currentDocsToIndex != item.currentDocsToIndex)\n            changed.append(CurrentDocsToIndexRole);\n        if (stored.totalDocsToIndex != item.totalDocsToIndex)\n            changed.append(TotalDocsToIndexRole);\n        if (stored.currentBytesToIndex != item.currentBytesToIndex)\n            changed.append(CurrentBytesToIndexRole);\n        if (stored.totalBytesToIndex != item.totalBytesToIndex)\n            changed.append(TotalBytesToIndexRole);\n        if (stored.currentEmbeddingsToIndex != item.currentEmbeddingsToIndex) {\n            changed.append(CurrentEmbeddingsToIndexRole);\n            changed.append(UpdatingRole);\n        }\n        if (stored.totalEmbeddingsToIndex != item.totalEmbeddingsToIndex)\n            changed.append(TotalEmbeddingsToIndexRole);\n        if (stored.totalDocs != item.totalDocs)\n            changed.append(TotalDocsRole);\n        if (stored.totalWords != item.totalWords)\n            changed.append(TotalWordsRole);\n        if (stored.totalTokens != item.totalTokens)\n            changed.append(TotalTokensRole);\n        if (stored.startUpdate != item.startUpdate)\n            changed.append(StartUpdateRole);\n        if (stored.lastUpdate != item.lastUpdate)\n            changed.append(LastUpdateRole);\n        if (stored.fileCurrentlyProcessing != item.fileCurrentlyProcessing)\n            
changed.append(FileCurrentlyProcessingRole);\n        if (stored.embeddingModel != item.embeddingModel)\n            changed.append(EmbeddingModelRole);\n\n        // preserve collection name as we ignore it for matching\n        QString collection = stored.collection;\n        stored = item;\n        stored.collection = collection;\n\n        emit dataChanged(this->index(i), this->index(i), changed);\n\n        if (changed.contains(UpdatingRole))\n            emit updatingChanged(item.collection);\n    }\n}\n\nvoid LocalDocsModel::addCollectionItem(const CollectionItem &item)\n{\n    beginInsertRows(QModelIndex(), m_collectionList.size(), m_collectionList.size());\n    m_collectionList.append(item);\n    endInsertRows();\n}\n\nvoid LocalDocsModel::removeCollectionIf(std::function<bool(CollectionItem)> const &predicate)\n{\n    for (int i = 0; i < m_collectionList.size();) {\n        if (predicate(m_collectionList.at(i))) {\n            beginRemoveRows(QModelIndex(), i, i);\n            m_collectionList.removeAt(i);\n            endRemoveRows();\n\n            Network::globalInstance()->trackEvent(\"doc_collection_remove\", {\n                {\"collection_count\", m_collectionList.count()},\n            });\n        } else {\n            ++i;\n        }\n    }\n}\n\nvoid LocalDocsModel::removeFolderById(const QString &collection, int folder_id)\n{\n    removeCollectionIf([collection, folder_id](const auto &c) {\n        return c.collection == collection && c.folder_id == folder_id;\n    });\n}\n\nvoid LocalDocsModel::removeCollectionPath(const QString &name, const QString &path)\n{\n    removeCollectionIf([&name, &path](const auto &c) { return c.collection == name && c.folder_path == path; });\n}\n\nvoid LocalDocsModel::collectionListUpdated(const QList<CollectionItem> &collectionList)\n{\n    beginResetModel();\n    m_collectionList = collectionList;\n    endResetModel();\n}\n"
  },
  {
    "path": "gpt4all-chat/src/localdocsmodel.h",
    "content": "#ifndef LOCALDOCSMODEL_H\n#define LOCALDOCSMODEL_H\n\n#include \"database.h\"\n\n#include <QAbstractListModel>\n#include <QList>\n#include <QObject> // IWYU pragma: keep\n#include <QSortFilterProxyModel>\n#include <QString>\n#include <Qt>\n\n#include <functional>\n\nclass QByteArray;\nclass QVariant;\ntemplate <typename Key, typename T> class QHash;\n\n\nclass LocalDocsCollectionsModel : public QSortFilterProxyModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\n    Q_PROPERTY(int updatingCount READ updatingCount NOTIFY updatingCountChanged)\n\npublic:\n    explicit LocalDocsCollectionsModel(QObject *parent);\n    int count() const { return rowCount(); }\n    int updatingCount() const;\n\npublic Q_SLOTS:\n    void setCollections(const QList<QString> &collections);\n\nQ_SIGNALS:\n    void countChanged();\n    void updatingCountChanged();\n\nprotected:\n    bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;\n\nprivate Q_SLOTS:\n    void maybeTriggerUpdatingCountChanged();\n\nprivate:\n    QList<QString> m_collections;\n    int m_updatingCount = 0;\n};\n\nclass LocalDocsModel : public QAbstractListModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\n\npublic:\n    enum Roles {\n        CollectionRole = Qt::UserRole + 1,\n        FolderPathRole,\n        InstalledRole,\n        IndexingRole,\n        ErrorRole,\n        ForceIndexingRole,\n        CurrentDocsToIndexRole,\n        TotalDocsToIndexRole,\n        CurrentBytesToIndexRole,\n        TotalBytesToIndexRole,\n        CurrentEmbeddingsToIndexRole,\n        TotalEmbeddingsToIndexRole,\n        TotalDocsRole,\n        TotalWordsRole,\n        TotalTokensRole,\n        StartUpdateRole,\n        LastUpdateRole,\n        FileCurrentlyProcessingRole,\n        EmbeddingModelRole,\n        UpdatingRole\n    };\n\n    explicit LocalDocsModel(QObject *parent = nullptr);\n    int rowCount(const QModelIndex & = 
QModelIndex()) const override;\n    QVariant data(const QModelIndex &index, int role) const override;\n    QHash<int, QByteArray> roleNames() const override;\n    int count() const { return rowCount(); }\n\npublic Q_SLOTS:\n    void updateCollectionItem(const CollectionItem&);\n    void addCollectionItem(const CollectionItem &item);\n    void removeFolderById(const QString &collection, int folder_id);\n    void removeCollectionPath(const QString &name, const QString &path);\n    void collectionListUpdated(const QList<CollectionItem> &collectionList);\n\nQ_SIGNALS:\n    void countChanged();\n    void updatingChanged(const QString &collection);\n\nprivate:\n    void removeCollectionIf(std::function<bool(CollectionItem)> const &predicate);\n    QList<CollectionItem> m_collectionList;\n};\n\n#endif // LOCALDOCSMODEL_H\n"
  },
  {
    "path": "gpt4all-chat/src/logger.cpp",
    "content": "#include \"logger.h\"\n\n#include <QDateTime>\n#include <QDebug>\n#include <QFlags>\n#include <QGlobalStatic>\n#include <QIODevice>\n#include <QMutexLocker> // IWYU pragma: keep\n#include <QStandardPaths>\n\n#include <cstdio>\n#include <iostream>\n#include <string>\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nclass MyLogger: public Logger { };\nQ_GLOBAL_STATIC(MyLogger, loggerInstance)\nLogger *Logger::globalInstance()\n{\n    return loggerInstance();\n}\n\nLogger::Logger()\n{\n    // Get log file dir\n    auto dir = QStandardPaths::writableLocation(QStandardPaths::AppLocalDataLocation);\n    // Remove old log file\n    QFile::remove(dir+\"/log-prev.txt\");\n    QFile::rename(dir+\"/log.txt\", dir+\"/log-prev.txt\");\n    // Open new log file\n    m_file.setFileName(dir+\"/log.txt\");\n    if (!m_file.open(QIODevice::NewOnly | QIODevice::WriteOnly | QIODevice::Text)) {\n        qWarning() << \"Failed to open log file, logging to stdout...\";\n        m_file.open(stdout, QIODevice::WriteOnly | QIODevice::Text);\n    }\n    // On success, install message handler\n    qInstallMessageHandler(Logger::messageHandler);\n}\n\nvoid Logger::messageHandler(QtMsgType type, const QMessageLogContext &, const QString &msg)\n{\n    auto logger = globalInstance();\n    // Get message type as string\n    QString typeString;\n    switch (type) {\n    case QtDebugMsg:\n        typeString = \"Debug\";\n        break;\n    case QtInfoMsg:\n        typeString = \"Info\";\n        break;\n    case QtWarningMsg:\n        typeString = \"Warning\";\n        break;\n    case QtCriticalMsg:\n        typeString = \"Critical\";\n        break;\n    case QtFatalMsg:\n        typeString = \"Fatal\";\n        break;\n    default:\n        typeString = \"???\";\n    }\n    // Get time and date\n    auto timestamp = QDateTime::currentDateTime().toString();\n\n    const std::string out = u\"[%1] (%2): %3\\n\"_s.arg(typeString, timestamp, msg).toStdString();\n\n    // Write 
message\n    QMutexLocker locker(&logger->m_mutex);\n    logger->m_file.write(out.c_str());\n    logger->m_file.flush();\n    std::cerr << out;\n    fflush(stderr);\n}\n"
  },
  {
    "path": "gpt4all-chat/src/logger.h",
    "content": "#ifndef LOGGER_H\n#define LOGGER_H\n\n#include <QFile>\n#include <QMutex>\n#include <QString>\n#include <QtLogging>\n\n\nclass Logger {\npublic:\n    explicit Logger();\n\n    static Logger *globalInstance();\n\nprivate:\n    static void messageHandler(QtMsgType type, const QMessageLogContext &context, const QString &msg);\n\nprivate:\n    QFile  m_file;\n    QMutex m_mutex;\n\n    friend class MyLogger;\n};\n\n#endif // LOGGER_H\n"
  },
  {
    "path": "gpt4all-chat/src/macosdock.h",
    "content": "#ifndef MACOSDOCK_H\n#define MACOSDOCK_H\n\nstruct MacOSDock {\nstatic void showIcon();\nstatic void hideIcon();\n};\n\n#endif // MACOSDOCK_H\n"
  },
  {
    "path": "gpt4all-chat/src/macosdock.mm",
    "content": "#include \"macosdock.h\"\n\n#include <Cocoa/Cocoa.h>\n\n\nvoid MacOSDock::showIcon()\n{\n    [[NSApplication sharedApplication] setActivationPolicy:NSApplicationActivationPolicyRegular];\n}\n\nvoid MacOSDock::hideIcon()\n{\n    [[NSApplication sharedApplication] setActivationPolicy:NSApplicationActivationPolicyProhibited];\n}\n"
  },
  {
    "path": "gpt4all-chat/src/main.cpp",
    "content": "#include \"chatlistmodel.h\"\n#include \"config.h\"\n#include \"download.h\"\n#include \"llm.h\"\n#include \"localdocs.h\"\n#include \"logger.h\"\n#include \"modellist.h\"\n#include \"mysettings.h\"\n#include \"network.h\"\n#include \"toolmodel.h\"\n\n#include <gpt4all-backend/llmodel.h>\n#include <singleapplication.h>\n\n#include <QByteArray>\n#include <QCoreApplication>\n#include <QFont>\n#include <QFontDatabase>\n#include <QList>\n#include <QObject>\n#include <QQmlApplicationEngine>\n#include <QQmlContext>\n#include <QQuickWindow>\n#include <QSettings>\n#include <QString>\n#include <QStringList>\n#include <QUrl>\n#include <QVariant>\n#include <QWindow>\n#include <Qt>\n#include <QtAssert>\n#include <QtSystemDetection>\n\n#if G4A_CONFIG(force_d3d12)\n#   include <QSGRendererInterface>\n#endif\n\n#ifndef GPT4ALL_USE_QTPDF\n#   include <fpdfview.h>\n#endif\n\n#ifdef Q_OS_LINUX\n#   include <QIcon>\n#endif\n\n#ifdef Q_OS_WINDOWS\n#   include <windows.h>\n#else\n#   include <signal.h>\n#endif\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nstatic void raiseWindow(QWindow *window)\n{\n#ifdef Q_OS_WINDOWS\n    HWND hwnd = HWND(window->winId());\n\n    // check if window is minimized to Windows task bar\n    if (IsIconic(hwnd))\n        ShowWindow(hwnd, SW_RESTORE);\n\n    SetForegroundWindow(hwnd);\n#else\n    LLM::globalInstance()->showDockIcon();\n    window->show();\n    window->raise();\n    window->requestActivate();\n#endif\n}\n\nint main(int argc, char *argv[])\n{\n#ifndef GPT4ALL_USE_QTPDF\n    FPDF_InitLibrary();\n#endif\n\n    QCoreApplication::setOrganizationName(\"nomic.ai\");\n    QCoreApplication::setOrganizationDomain(\"gpt4all.io\");\n    QCoreApplication::setApplicationName(\"GPT4All\");\n    QCoreApplication::setApplicationVersion(APP_VERSION);\n    QSettings::setDefaultFormat(QSettings::IniFormat);\n\n    Logger::globalInstance();\n\n    SingleApplication app(argc, argv, true /*allowSecondary*/);\n    if (app.isSecondary()) 
{\n#ifdef Q_OS_WINDOWS\n        AllowSetForegroundWindow(DWORD(app.primaryPid()));\n#endif\n        app.sendMessage(\"RAISE_WINDOW\");\n        return 0;\n    }\n\n#if G4A_CONFIG(force_d3d12)\n    QQuickWindow::setGraphicsApi(QSGRendererInterface::Direct3D12);\n#endif\n\n#ifdef Q_OS_LINUX\n    app.setWindowIcon(QIcon(\":/gpt4all/icons/gpt4all.svg\"));\n#endif\n\n    // set search path before constructing the MySettings instance, which relies on this\n    {\n        auto appDirPath = QCoreApplication::applicationDirPath();\n        QStringList searchPaths {\n#ifdef Q_OS_DARWIN\n            u\"%1/../Frameworks\"_s.arg(appDirPath),\n#else\n            appDirPath,\n            u\"%1/../lib\"_s.arg(appDirPath),\n#endif\n        };\n        LLModel::Implementation::setImplementationsSearchPath(searchPaths.join(u';').toStdString());\n    }\n\n    // Set the local and language translation before the qml engine has even been started. This will\n    // use the default system locale unless the user has explicitly set it to use a different one.\n    auto *mySettings = MySettings::globalInstance();\n    mySettings->setLanguageAndLocale();\n\n    QQmlApplicationEngine engine;\n\n    // Add a connection here from MySettings::languageAndLocaleChanged signal to a lambda slot where I can call\n    // engine.uiLanguage property\n    QObject::connect(mySettings, &MySettings::languageAndLocaleChanged, [&engine]() {\n        engine.setUiLanguage(MySettings::globalInstance()->languageAndLocale());\n    });\n\n    auto *modelList = ModelList::globalInstance();\n    QObject::connect(modelList, &ModelList::dataChanged, mySettings, &MySettings::onModelInfoChanged);\n\n    qmlRegisterSingletonInstance(\"mysettings\", 1, 0, \"MySettings\", mySettings);\n    qmlRegisterSingletonInstance(\"modellist\", 1, 0, \"ModelList\", modelList);\n    qmlRegisterSingletonInstance(\"chatlistmodel\", 1, 0, \"ChatListModel\", ChatListModel::globalInstance());\n    qmlRegisterSingletonInstance(\"llm\", 1, 0, 
\"LLM\", LLM::globalInstance());\n    qmlRegisterSingletonInstance(\"download\", 1, 0, \"Download\", Download::globalInstance());\n    qmlRegisterSingletonInstance(\"network\", 1, 0, \"Network\", Network::globalInstance());\n    qmlRegisterSingletonInstance(\"localdocs\", 1, 0, \"LocalDocs\", LocalDocs::globalInstance());\n    qmlRegisterSingletonInstance(\"toollist\", 1, 0, \"ToolList\", ToolModel::globalInstance());\n    qmlRegisterUncreatableMetaObject(ToolEnums::staticMetaObject, \"toolenums\", 1, 0, \"ToolEnums\", \"Error: only enums\");\n    qmlRegisterUncreatableMetaObject(MySettingsEnums::staticMetaObject, \"mysettingsenums\", 1, 0, \"MySettingsEnums\", \"Error: only enums\");\n\n    {\n        auto fixedFont = QFontDatabase::systemFont(QFontDatabase::FixedFont);\n        engine.rootContext()->setContextProperty(\"fixedFont\", fixedFont);\n    }\n\n    const QUrl url(u\"qrc:/gpt4all/main.qml\"_s);\n\n    QObject::connect(&engine, &QQmlApplicationEngine::objectCreated,\n        &app, [url](QObject *obj, const QUrl &objUrl) {\n            if (!obj && url == objUrl)\n                QCoreApplication::exit(-1);\n        }, Qt::QueuedConnection);\n    engine.load(url);\n\n    QObject *rootObject = engine.rootObjects().first();\n    QQuickWindow *windowObject = qobject_cast<QQuickWindow *>(rootObject);\n    Q_ASSERT(windowObject);\n    if (windowObject)\n        QObject::connect(&app, &SingleApplication::receivedMessage,\n                         windowObject, [windowObject] () { raiseWindow(windowObject); } );\n\n#if 0\n    QDirIterator it(\"qrc:\", QDirIterator::Subdirectories);\n    while (it.hasNext()) {\n        qDebug() << it.next();\n    }\n#endif\n\n#ifndef Q_OS_WINDOWS\n    // handle signals gracefully\n    struct sigaction sa;\n    sa.sa_handler = [](int s) { QCoreApplication::exit(s == SIGINT ? 
0 : 1); };\n    sa.sa_flags   = SA_RESETHAND;\n    sigemptyset(&sa.sa_mask);\n    sigaction(SIGINT,  &sa, nullptr);\n    sigaction(SIGTERM, &sa, nullptr);\n    sigaction(SIGHUP,  &sa, nullptr);\n#endif\n\n    int res = app.exec();\n\n    // Make sure ChatLLM threads are joined before global destructors run.\n    // Otherwise, we can get a heap-use-after-free inside of llama.cpp.\n    ChatListModel::globalInstance()->destroyChats();\n\n#ifndef GPT4ALL_USE_QTPDF\n    FPDF_DestroyLibrary();\n#endif\n\n    return res;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/modellist.cpp",
    "content": "#include \"modellist.h\"\n\n#include \"download.h\"\n#include \"jinja_replacements.h\"\n#include \"mysettings.h\"\n#include \"network.h\"\n\n#include <gpt4all-backend/llmodel.h>\n\n#include <QChar>\n#include <QCoreApplication>\n#include <QCryptographicHash>\n#include <QDebug>\n#include <QDir>\n#include <QDirIterator>\n#include <QEvent>\n#include <QEventLoop>\n#include <QFile>\n#include <QFileInfo>\n#include <QGlobalStatic>\n#include <QGuiApplication>\n#include <QIODevice>\n#include <QJsonArray>\n#include <QJsonDocument>\n#include <QJsonObject>\n#include <QJsonValue>\n#include <QNetworkRequest>\n#include <QObject>\n#include <QRegularExpression>\n#include <QSettings>\n#include <QSslConfiguration>\n#include <QSslSocket>\n#include <QStandardPaths>\n#include <QStringList> // IWYU pragma: keep\n#include <QTextStream>\n#include <QTimer>\n#include <QUrl>\n#include <QtAssert>\n#include <QtLogging>\n#include <QtPreprocessorSupport>\n\n#include <algorithm>\n#include <iterator>\n#include <optional>\n#include <string>\n#include <utility>\n\nusing namespace Qt::Literals::StringLiterals;\n\n//#define USE_LOCAL_MODELSJSON\n\n#define MODELS_JSON_VERSION \"3\"\n\n\nstatic const QStringList FILENAME_BLACKLIST { u\"gpt4all-nomic-embed-text-v1.rmodel\"_s };\n\nstatic const QString RMODEL_CHAT_TEMPLATE = uR\"(<chat>\n{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n    {%- if not message['role'] in ['user', 'assistant', 'system'] %}\n        {{- raise_exception('Unknown role: ' + message['role']) }}\n    {%- endif %}\n    {{- '<' + message['role'] + '>' }}\n    {%- if message['role'] == 'user' %}\n        {%- for source in message.sources %}\n            {%- if loop.first %}\n                {{- '### Context:\\n' }}\n            {%- endif %}\n            {{- ('Collection: ' + source.collection + '\\n'    +\n                 'Path: '       + source.path       + '\\n'    +\n                 'Excerpt: '    + source.text       + '\\n\\n') | escape 
}}\n        {%- endfor %}\n    {%- endif %}\n    {%- for attachment in message.prompt_attachments %}\n        {{- (attachment.processed_content + '\\n\\n') | escape }}\n    {%- endfor %}\n    {{- message.content | escape }}\n    {{- '</' + message['role'] + '>' }}\n{%- endfor %}\n</chat>)\"_s;\n\n\nQString ModelInfo::id() const\n{\n    return m_id;\n}\n\nvoid ModelInfo::setId(const QString &id)\n{\n    m_id = id;\n}\n\nQString ModelInfo::name() const\n{\n    return MySettings::globalInstance()->modelName(*this);\n}\n\nvoid ModelInfo::setName(const QString &name)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelName(*this, name, true /*force*/);\n    m_name = name;\n}\n\nQString ModelInfo::filename() const\n{\n    return MySettings::globalInstance()->modelFilename(*this);\n}\n\nvoid ModelInfo::setFilename(const QString &filename)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelFilename(*this, filename, true /*force*/);\n    m_filename = filename;\n}\n\nQString ModelInfo::description() const\n{\n    return MySettings::globalInstance()->modelDescription(*this);\n}\n\nvoid ModelInfo::setDescription(const QString &d)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelDescription(*this, d, true /*force*/);\n    m_description = d;\n}\n\nQString ModelInfo::url() const\n{\n    return MySettings::globalInstance()->modelUrl(*this);\n}\n\nvoid ModelInfo::setUrl(const QString &u)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelUrl(*this, u, true /*force*/);\n    m_url = u;\n}\n\nQString ModelInfo::quant() const\n{\n    return MySettings::globalInstance()->modelQuant(*this);\n}\n\nvoid ModelInfo::setQuant(const QString &q)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelQuant(*this, q, true /*force*/);\n    m_quant = q;\n}\n\nQString ModelInfo::type() const\n{\n    return MySettings::globalInstance()->modelType(*this);\n}\n\nvoid ModelInfo::setType(const QString 
&t)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelType(*this, t, true /*force*/);\n    m_type = t;\n}\n\nbool ModelInfo::isClone() const\n{\n    return MySettings::globalInstance()->modelIsClone(*this);\n}\n\nvoid ModelInfo::setIsClone(bool b)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelIsClone(*this, b, true /*force*/);\n    m_isClone = b;\n}\n\nbool ModelInfo::isDiscovered() const\n{\n    return MySettings::globalInstance()->modelIsDiscovered(*this);\n}\n\nvoid ModelInfo::setIsDiscovered(bool b)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelIsDiscovered(*this, b, true /*force*/);\n    m_isDiscovered = b;\n}\n\nint ModelInfo::likes() const\n{\n    return MySettings::globalInstance()->modelLikes(*this);\n}\n\nvoid ModelInfo::setLikes(int l)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelLikes(*this, l, true /*force*/);\n    m_likes = l;\n}\n\nint ModelInfo::downloads() const\n{\n    return MySettings::globalInstance()->modelDownloads(*this);\n}\n\nvoid ModelInfo::setDownloads(int d)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelDownloads(*this, d, true /*force*/);\n    m_downloads = d;\n}\n\nQDateTime ModelInfo::recency() const\n{\n    return MySettings::globalInstance()->modelRecency(*this);\n}\n\nvoid ModelInfo::setRecency(const QDateTime &r)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelRecency(*this, r, true /*force*/);\n    m_recency = r;\n}\n\ndouble ModelInfo::temperature() const\n{\n    return MySettings::globalInstance()->modelTemperature(*this);\n}\n\nvoid ModelInfo::setTemperature(double t)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelTemperature(*this, t, true /*force*/);\n    m_temperature = t;\n}\n\ndouble ModelInfo::topP() const\n{\n    return MySettings::globalInstance()->modelTopP(*this);\n}\n\ndouble ModelInfo::minP() const\n{\n    return 
MySettings::globalInstance()->modelMinP(*this);\n}\n\nvoid ModelInfo::setTopP(double p)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelTopP(*this, p, true /*force*/);\n    m_topP = p;\n}\n\nvoid ModelInfo::setMinP(double p)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelMinP(*this, p, true /*force*/);\n    m_minP = p;\n}\n\nint ModelInfo::topK() const\n{\n    return MySettings::globalInstance()->modelTopK(*this);\n}\n\nvoid ModelInfo::setTopK(int k)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelTopK(*this, k, true /*force*/);\n    m_topK = k;\n}\n\nint ModelInfo::maxLength() const\n{\n    return MySettings::globalInstance()->modelMaxLength(*this);\n}\n\nvoid ModelInfo::setMaxLength(int l)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelMaxLength(*this, l, true /*force*/);\n    m_maxLength = l;\n}\n\nint ModelInfo::promptBatchSize() const\n{\n    return MySettings::globalInstance()->modelPromptBatchSize(*this);\n}\n\nvoid ModelInfo::setPromptBatchSize(int s)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelPromptBatchSize(*this, s, true /*force*/);\n    m_promptBatchSize = s;\n}\n\nint ModelInfo::contextLength() const\n{\n    return MySettings::globalInstance()->modelContextLength(*this);\n}\n\nvoid ModelInfo::setContextLength(int l)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelContextLength(*this, l, true /*force*/);\n    m_contextLength = l;\n}\n\nint ModelInfo::maxContextLength() const\n{\n    if (!installed || isOnline) return -1;\n    if (m_maxContextLength != -1) return m_maxContextLength;\n    auto path = (dirpath + filename()).toStdString();\n    int n_ctx = LLModel::Implementation::maxContextLength(path);\n    if (n_ctx < 0) {\n        n_ctx = 4096; // fallback value\n    }\n    m_maxContextLength = n_ctx;\n    return m_maxContextLength;\n}\n\nint ModelInfo::gpuLayers() const\n{\n    return 
MySettings::globalInstance()->modelGpuLayers(*this);\n}\n\nvoid ModelInfo::setGpuLayers(int l)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelGpuLayers(*this, l, true /*force*/);\n    m_gpuLayers = l;\n}\n\nint ModelInfo::maxGpuLayers() const\n{\n    if (!installed || isOnline) return -1;\n    if (m_maxGpuLayers != -1) return m_maxGpuLayers;\n    auto path = (dirpath + filename()).toStdString();\n    int layers = LLModel::Implementation::layerCount(path);\n    if (layers < 0) {\n        layers = 100; // fallback value\n    }\n    m_maxGpuLayers = layers;\n    return m_maxGpuLayers;\n}\n\ndouble ModelInfo::repeatPenalty() const\n{\n    return MySettings::globalInstance()->modelRepeatPenalty(*this);\n}\n\nvoid ModelInfo::setRepeatPenalty(double p)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelRepeatPenalty(*this, p, true /*force*/);\n    m_repeatPenalty = p;\n}\n\nint ModelInfo::repeatPenaltyTokens() const\n{\n    return MySettings::globalInstance()->modelRepeatPenaltyTokens(*this);\n}\n\nvoid ModelInfo::setRepeatPenaltyTokens(int t)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelRepeatPenaltyTokens(*this, t, true /*force*/);\n    m_repeatPenaltyTokens = t;\n}\n\nQVariant ModelInfo::defaultChatTemplate() const\n{\n    auto res = m_chatTemplate.or_else([this]() -> std::optional<QString> {\n        if (!installed || isOnline)\n            return std::nullopt;\n        if (!m_modelChatTemplate) {\n            auto path = (dirpath + filename()).toUtf8();\n            auto res = LLModel::Implementation::chatTemplate(path.constData());\n            if (res) {\n                std::string ggufTmpl(std::move(*res));\n                if (ggufTmpl.size() >= 2 && ggufTmpl.end()[-2] != '\\n' && ggufTmpl.end()[-1] == '\\n')\n                    ggufTmpl.erase(ggufTmpl.end() - 1); // strip trailing newline for e.g. 
Llama-3.2-3B-Instruct\n                if (\n                    auto replacement = CHAT_TEMPLATE_SUBSTITUTIONS.find(ggufTmpl);\n                    replacement != CHAT_TEMPLATE_SUBSTITUTIONS.end()\n                ) {\n                    qWarning() << \"automatically substituting chat template for\" << filename();\n                    auto &[badTemplate, goodTemplate] = *replacement;\n                    ggufTmpl = goodTemplate;\n                }\n                m_modelChatTemplate = QString::fromStdString(ggufTmpl);\n            } else {\n                qWarning().nospace() << \"failed to get chat template for \" << filename() << \": \" << res.error().c_str();\n                m_modelChatTemplate = QString(); // do not retry\n            }\n        }\n        if (m_modelChatTemplate->isNull())\n            return std::nullopt;\n        return m_modelChatTemplate;\n    });\n\n    if (res)\n        return std::move(*res);\n    return QVariant::fromValue(nullptr);\n}\n\nauto ModelInfo::chatTemplate() const -> UpgradeableSetting\n{\n    return MySettings::globalInstance()->modelChatTemplate(*this);\n}\n\nQString ModelInfo::defaultSystemMessage() const\n{\n    return m_systemMessage;\n}\n\nauto ModelInfo::systemMessage() const -> UpgradeableSetting\n{\n    return MySettings::globalInstance()->modelSystemMessage(*this);\n}\n\nQString ModelInfo::chatNamePrompt() const\n{\n    return MySettings::globalInstance()->modelChatNamePrompt(*this);\n}\n\nvoid ModelInfo::setChatNamePrompt(const QString &p)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelChatNamePrompt(*this, p, true /*force*/);\n    m_chatNamePrompt = p;\n}\n\nQString ModelInfo::suggestedFollowUpPrompt() const\n{\n    return MySettings::globalInstance()->modelSuggestedFollowUpPrompt(*this);\n}\n\nvoid ModelInfo::setSuggestedFollowUpPrompt(const QString &p)\n{\n    if (shouldSaveMetadata()) MySettings::globalInstance()->setModelSuggestedFollowUpPrompt(*this, p, true /*force*/);\n    
m_suggestedFollowUpPrompt = p;\n}\n\n// FIXME(jared): this should not be used for model settings that have meaningful defaults, such as temperature\nbool ModelInfo::shouldSaveMetadata() const\n{\n    return installed && (isClone() || isDiscovered() || description() == \"\" /*indicates sideloaded*/);\n}\n\nQVariant ModelInfo::getField(QLatin1StringView name) const\n{\n    static const std::unordered_map<QLatin1StringView, QVariant(*)(const ModelInfo &)> s_fields = {\n        { \"filename\"_L1,                [](auto &i) -> QVariant { return i.m_filename;                } },\n        { \"description\"_L1,             [](auto &i) -> QVariant { return i.m_description;             } },\n        { \"url\"_L1,                     [](auto &i) -> QVariant { return i.m_url;                     } },\n        { \"quant\"_L1,                   [](auto &i) -> QVariant { return i.m_quant;                   } },\n        { \"type\"_L1,                    [](auto &i) -> QVariant { return i.m_type;                    } },\n        { \"isClone\"_L1,                 [](auto &i) -> QVariant { return i.m_isClone;                 } },\n        { \"isDiscovered\"_L1,            [](auto &i) -> QVariant { return i.m_isDiscovered;            } },\n        { \"likes\"_L1,                   [](auto &i) -> QVariant { return i.m_likes;                   } },\n        { \"downloads\"_L1,               [](auto &i) -> QVariant { return i.m_downloads;               } },\n        { \"recency\"_L1,                 [](auto &i) -> QVariant { return i.m_recency;                 } },\n        { \"temperature\"_L1,             [](auto &i) -> QVariant { return i.m_temperature;             } },\n        { \"topP\"_L1,                    [](auto &i) -> QVariant { return i.m_topP;                    } },\n        { \"minP\"_L1,                    [](auto &i) -> QVariant { return i.m_minP;                    } },\n        { \"topK\"_L1,                    [](auto &i) -> QVariant { return i.m_topK;               
     } },\n        { \"maxLength\"_L1,               [](auto &i) -> QVariant { return i.m_maxLength;               } },\n        { \"promptBatchSize\"_L1,         [](auto &i) -> QVariant { return i.m_promptBatchSize;         } },\n        { \"contextLength\"_L1,           [](auto &i) -> QVariant { return i.m_contextLength;           } },\n        { \"gpuLayers\"_L1,               [](auto &i) -> QVariant { return i.m_gpuLayers;               } },\n        { \"repeatPenalty\"_L1,           [](auto &i) -> QVariant { return i.m_repeatPenalty;           } },\n        { \"repeatPenaltyTokens\"_L1,     [](auto &i) -> QVariant { return i.m_repeatPenaltyTokens;     } },\n        { \"chatTemplate\"_L1,            [](auto &i) -> QVariant { return i.defaultChatTemplate();     } },\n        { \"systemMessage\"_L1,           [](auto &i) -> QVariant { return i.m_systemMessage;           } },\n        { \"chatNamePrompt\"_L1,          [](auto &i) -> QVariant { return i.m_chatNamePrompt;          } },\n        { \"suggestedFollowUpPrompt\"_L1, [](auto &i) -> QVariant { return i.m_suggestedFollowUpPrompt; } },\n    };\n    return s_fields.at(name)(*this);\n}\n\nInstalledModels::InstalledModels(QObject *parent, bool selectable)\n    : QSortFilterProxyModel(parent)\n    , m_selectable(selectable)\n{\n    connect(this, &InstalledModels::rowsInserted, this, &InstalledModels::countChanged);\n    connect(this, &InstalledModels::rowsRemoved, this, &InstalledModels::countChanged);\n    connect(this, &InstalledModels::modelReset, this, &InstalledModels::countChanged);\n}\n\nbool InstalledModels::filterAcceptsRow(int sourceRow,\n                                       const QModelIndex &sourceParent) const\n{\n    /* TODO(jared): We should list incomplete models alongside installed models on the\n     * Models page. Simply replacing isDownloading with isIncomplete here doesn't work for\n     * some reason - the models show up as something else. 
*/\n    QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);\n    bool isInstalled = sourceModel()->data(index, ModelList::InstalledRole).toBool();\n    bool isDownloading = sourceModel()->data(index, ModelList::DownloadingRole).toBool();\n    bool isEmbeddingModel = sourceModel()->data(index, ModelList::IsEmbeddingModelRole).toBool();\n    // list installed chat models\n    return (isInstalled || (!m_selectable && isDownloading)) && !isEmbeddingModel;\n}\n\nGPT4AllDownloadableModels::GPT4AllDownloadableModels(QObject *parent)\n    : QSortFilterProxyModel(parent)\n{\n    connect(this, &GPT4AllDownloadableModels::rowsInserted, this, &GPT4AllDownloadableModels::countChanged);\n    connect(this, &GPT4AllDownloadableModels::rowsRemoved, this, &GPT4AllDownloadableModels::countChanged);\n    connect(this, &GPT4AllDownloadableModels::modelReset, this, &GPT4AllDownloadableModels::countChanged);\n}\n\nvoid GPT4AllDownloadableModels::filter(const QVector<QString> &keywords)\n{\n    m_keywords = keywords;\n    invalidateFilter();\n}\n\nbool GPT4AllDownloadableModels::filterAcceptsRow(int sourceRow,\n                                          const QModelIndex &sourceParent) const\n{\n    QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);\n    const QString description = sourceModel()->data(index, ModelList::DescriptionRole).toString();\n    bool hasDescription = !description.isEmpty();\n    bool isClone = sourceModel()->data(index, ModelList::IsCloneRole).toBool();\n    bool isDiscovered = sourceModel()->data(index, ModelList::IsDiscoveredRole).toBool();\n    bool isOnline = sourceModel()->data(index, ModelList::OnlineRole).toBool();\n    bool satisfiesKeyword = m_keywords.isEmpty();\n    for (const QString &k : m_keywords)\n        satisfiesKeyword = description.contains(k) ? 
true : satisfiesKeyword;\n    return !isOnline && !isDiscovered && hasDescription && !isClone && satisfiesKeyword;\n}\n\nint GPT4AllDownloadableModels::count() const\n{\n    return rowCount();\n}\n\nHuggingFaceDownloadableModels::HuggingFaceDownloadableModels(QObject *parent)\n    : QSortFilterProxyModel(parent)\n    , m_limit(5)\n{\n    connect(this, &HuggingFaceDownloadableModels::rowsInserted, this, &HuggingFaceDownloadableModels::countChanged);\n    connect(this, &HuggingFaceDownloadableModels::rowsRemoved, this, &HuggingFaceDownloadableModels::countChanged);\n    connect(this, &HuggingFaceDownloadableModels::modelReset, this, &HuggingFaceDownloadableModels::countChanged);\n}\n\nbool HuggingFaceDownloadableModels::filterAcceptsRow(int sourceRow,\n                                          const QModelIndex &sourceParent) const\n{\n    QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);\n    bool hasDescription = !sourceModel()->data(index, ModelList::DescriptionRole).toString().isEmpty();\n    bool isClone = sourceModel()->data(index, ModelList::IsCloneRole).toBool();\n    bool isDiscovered = sourceModel()->data(index, ModelList::IsDiscoveredRole).toBool();\n    return isDiscovered && hasDescription && !isClone;\n}\n\nint HuggingFaceDownloadableModels::count() const\n{\n    return rowCount();\n}\n\nvoid HuggingFaceDownloadableModels::discoverAndFilter(const QString &discover)\n{\n    m_discoverFilter = discover;\n    ModelList *ml = qobject_cast<ModelList*>(parent());\n    ml->discoverSearch(discover);\n}\n\nclass MyModelList: public ModelList { };\nQ_GLOBAL_STATIC(MyModelList, modelListInstance)\nModelList *ModelList::globalInstance()\n{\n    return modelListInstance();\n}\n\nModelList::ModelList()\n    : QAbstractListModel(nullptr)\n    , m_installedModels(new InstalledModels(this))\n    , m_selectableModels(new InstalledModels(this, /*selectable*/ true))\n    , m_gpt4AllDownloadableModels(new GPT4AllDownloadableModels(this))\n    , 
m_huggingFaceDownloadableModels(new HuggingFaceDownloadableModels(this))\n    , m_asyncModelRequestOngoing(false)\n    , m_discoverLimit(20)\n    , m_discoverSortDirection(-1)\n    , m_discoverSort(Likes)\n    , m_discoverNumberOfResults(0)\n    , m_discoverResultsCompleted(0)\n    , m_discoverInProgress(false)\n{\n    QCoreApplication::instance()->installEventFilter(this);\n\n    m_installedModels->setSourceModel(this);\n    m_selectableModels->setSourceModel(this);\n    m_gpt4AllDownloadableModels->setSourceModel(this);\n    m_huggingFaceDownloadableModels->setSourceModel(this);\n\n    auto *mySettings = MySettings::globalInstance();\n    connect(mySettings, &MySettings::nameChanged,                this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::temperatureChanged,         this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::topPChanged,                this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::minPChanged,                this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::topKChanged,                this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::maxLengthChanged,           this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::promptBatchSizeChanged,     this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::contextLengthChanged,       this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::gpuLayersChanged,           this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::repeatPenaltyChanged,       this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::repeatPenaltyTokensChanged, this, &ModelList::updateDataForSettings     );\n    connect(mySettings, &MySettings::chatTemplateChanged,        this, 
&ModelList::maybeUpdateDataForSettings);\n    connect(mySettings, &MySettings::systemMessageChanged,       this, &ModelList::maybeUpdateDataForSettings);\n\n    connect(this, &ModelList::dataChanged, this, &ModelList::onDataChanged);\n\n    connect(&m_networkManager, &QNetworkAccessManager::sslErrors, this, &ModelList::handleSslErrors);\n\n    updateModelsFromJson();\n    updateModelsFromSettings();\n    updateModelsFromDirectory();\n\n    connect(mySettings, &MySettings::modelPathChanged, this, &ModelList::updateModelsFromDirectory);\n    connect(mySettings, &MySettings::modelPathChanged, this, &ModelList::updateModelsFromJson     );\n    connect(mySettings, &MySettings::modelPathChanged, this, &ModelList::updateModelsFromSettings );\n\n    QCoreApplication::instance()->installEventFilter(this);\n}\n\n// an easier way to listen for model info and setting changes\nvoid ModelList::onDataChanged(const QModelIndex &topLeft, const QModelIndex &bottomRight, const QList<int> &roles)\n{\n    Q_UNUSED(roles)\n    for (int row = topLeft.row(); row <= bottomRight.row(); row++) {\n        auto index = topLeft.siblingAtRow(row);\n        auto id = index.data(ModelList::IdRole).toString();\n        if (auto info = modelInfo(id); !info.id().isNull())\n            emit modelInfoChanged(info);\n    }\n}\n\nQString ModelList::compatibleModelNameHash(QUrl baseUrl, QString modelName) {\n    QCryptographicHash sha256(QCryptographicHash::Sha256);\n    sha256.addData((baseUrl.toString() + \"_\" + modelName).toUtf8());\n    return sha256.result().toHex();\n}\n\nQString ModelList::compatibleModelFilename(QUrl baseUrl, QString modelName) {\n    QString hash(compatibleModelNameHash(baseUrl, modelName));\n    return QString(u\"gpt4all-%1-capi.rmodel\"_s).arg(hash);\n}\n\nbool ModelList::eventFilter(QObject *obj, QEvent *ev)\n{\n    if (obj == QCoreApplication::instance() && ev->type() == QEvent::LanguageChange)\n        emit dataChanged(index(0, 0), index(m_models.size() - 1, 0));\n    
return false;\n}\n\nQString ModelList::incompleteDownloadPath(const QString &modelFile)\n{\n    return MySettings::globalInstance()->modelPath() + \"incomplete-\" + modelFile;\n}\n\nconst QList<ModelInfo> ModelList::selectableModelList() const\n{\n    // FIXME: This needs to be kept in sync with m_selectableModels so should probably be merged\n    QMutexLocker locker(&m_mutex);\n    QList<ModelInfo> infos;\n    for (ModelInfo *info : m_models)\n        if (info->installed && !info->isEmbeddingModel)\n            infos.append(*info);\n    return infos;\n}\n\nModelInfo ModelList::defaultModelInfo() const\n{\n    QMutexLocker locker(&m_mutex);\n\n    QSettings settings;\n\n    // The user default model can be set by the user in the settings dialog. The \"default\" user\n    // default model is \"Application default\" which signals we should use the logic here.\n    const QString userDefaultModelName = MySettings::globalInstance()->userDefaultModel();\n    const bool hasUserDefaultName = !userDefaultModelName.isEmpty() && userDefaultModelName != \"Application default\";\n\n    ModelInfo *defaultModel = nullptr;\n    for (ModelInfo *info : m_models) {\n        if (!info->installed)\n            continue;\n        defaultModel = info;\n\n        const size_t ramrequired = defaultModel->ramrequired;\n\n        // If we don't have either setting, then just use the first model that requires less than 16GB that is installed\n        if (!hasUserDefaultName && !info->isOnline && ramrequired > 0 && ramrequired < 16)\n            break;\n\n        // If we have a user specified default and match, then use it\n        if (hasUserDefaultName && (defaultModel->id() == userDefaultModelName))\n            break;\n    }\n    if (defaultModel)\n        return *defaultModel;\n    return ModelInfo();\n}\n\nbool ModelList::contains(const QString &id) const\n{\n    QMutexLocker locker(&m_mutex);\n    return m_modelMap.contains(id);\n}\n\nbool ModelList::containsByFilename(const QString 
&filename) const\n{\n    QMutexLocker locker(&m_mutex);\n    for (ModelInfo *info : m_models)\n        if (info->filename() == filename)\n            return true;\n    return false;\n}\n\nbool ModelList::lessThan(const ModelInfo* a, const ModelInfo* b, DiscoverSort s, int d)\n{\n    // Rule -1a: Discover sort\n    if (a->isDiscovered() && b->isDiscovered()) {\n        switch (s) {\n        case Default: break;\n        case Likes: return (d > 0 ? a->likes() < b->likes() : a->likes() > b->likes());\n        case Downloads: return (d > 0 ? a->downloads() < b->downloads() : a->downloads() > b->downloads());\n        case Recent: return (d > 0 ? a->recency() < b->recency() : a->recency() > b->recency());\n        }\n    }\n\n    // Rule -1: Discovered before non-discovered\n    if (a->isDiscovered() != b->isDiscovered()) {\n        return a->isDiscovered();\n    }\n\n    // Rule 0: Non-clone before clone\n    if (a->isClone() != b->isClone()) {\n        return !a->isClone();\n    }\n\n    // Rule 1: Non-empty 'order' before empty\n    if (a->order.isEmpty() != b->order.isEmpty()) {\n        return !a->order.isEmpty();\n    }\n\n    // Rule 2: Both 'order' are non-empty, sort alphanumerically\n    if (!a->order.isEmpty() && !b->order.isEmpty()) {\n        return a->order < b->order;\n    }\n\n    // Rule 3: Both 'order' are empty, sort by id\n    return a->id() < b->id();\n}\n\nvoid ModelList::addModel(const QString &id)\n{\n    const bool hasModel = contains(id);\n    Q_ASSERT(!hasModel);\n    if (hasModel) {\n        qWarning() << \"ERROR: model list already contains\" << id;\n        return;\n    }\n\n    ModelInfo *info = new ModelInfo;\n    info->setId(id);\n\n    m_mutex.lock();\n    auto s = m_discoverSort;\n    auto d = m_discoverSortDirection;\n    const auto insertPosition = std::lower_bound(m_models.begin(), m_models.end(), info,\n        [s, d](const ModelInfo* lhs, const ModelInfo* rhs) {\n            return ModelList::lessThan(lhs, rhs, s, d);\n        
});\n    const int index = std::distance(m_models.begin(), insertPosition);\n    m_mutex.unlock();\n\n    // NOTE: The begin/end rows cannot have a lock placed around them. We calculate the index ahead\n    // of time and this works because this class is designed carefully so that only one thread is\n    // responsible for insertion, deletion, and update\n\n    beginInsertRows(QModelIndex(), index, index);\n    m_mutex.lock();\n    m_models.insert(insertPosition, info);\n    m_modelMap.insert(id, info);\n    m_mutex.unlock();\n    endInsertRows();\n\n    emit selectableModelListChanged();\n}\n\nvoid ModelList::changeId(const QString &oldId, const QString &newId)\n{\n    const bool hasModel = contains(oldId);\n    Q_ASSERT(hasModel);\n    if (!hasModel) {\n        qWarning() << \"ERROR: model list does not contain\" << oldId;\n        return;\n    }\n\n    QMutexLocker locker(&m_mutex);\n    ModelInfo *info = m_modelMap.take(oldId);\n    info->setId(newId);\n    m_modelMap.insert(newId, info);\n}\n\nint ModelList::rowCount(const QModelIndex &parent) const\n{\n    Q_UNUSED(parent)\n    QMutexLocker locker(&m_mutex);\n    return m_models.size();\n}\n\nQVariant ModelList::dataInternal(const ModelInfo *info, int role) const\n{\n    switch (role) {\n        case IdRole:\n            return info->id();\n        case NameRole:\n            return info->name();\n        case FilenameRole:\n            return info->filename();\n        case DirpathRole:\n            return info->dirpath;\n        case FilesizeRole:\n            return info->filesize;\n        case HashRole:\n            return info->hash;\n        case HashAlgorithmRole:\n            return info->hashAlgorithm;\n        case CalcHashRole:\n            return info->calcHash;\n        case InstalledRole:\n            return info->installed;\n        case DefaultRole:\n            return info->isDefault;\n        case OnlineRole:\n            return info->isOnline;\n        case CompatibleApiRole:\n            
return info->isCompatibleApi;\n        case DescriptionRole:\n            return info->description();\n        case RequiresVersionRole:\n            return info->requiresVersion;\n        case VersionRemovedRole:\n            return info->versionRemoved;\n        case UrlRole:\n            return info->url();\n        case BytesReceivedRole:\n            return info->bytesReceived;\n        case BytesTotalRole:\n            return info->bytesTotal;\n        case TimestampRole:\n            return info->timestamp;\n        case SpeedRole:\n            return info->speed;\n        case DownloadingRole:\n            return info->isDownloading;\n        case IncompleteRole:\n            return info->isIncomplete;\n        case DownloadErrorRole:\n            return info->downloadError;\n        case OrderRole:\n            return info->order;\n        case RamrequiredRole:\n            return info->ramrequired;\n        case ParametersRole:\n            return info->parameters;\n        case QuantRole:\n            return info->quant();\n        case TypeRole:\n            return info->type();\n        case IsCloneRole:\n            return info->isClone();\n        case IsDiscoveredRole:\n            return info->isDiscovered();\n        case IsEmbeddingModelRole:\n            return info->isEmbeddingModel;\n        case TemperatureRole:\n            return info->temperature();\n        case TopPRole:\n            return info->topP();\n        case MinPRole:\n            return info->minP();\n        case TopKRole:\n            return info->topK();\n        case MaxLengthRole:\n            return info->maxLength();\n        case PromptBatchSizeRole:\n            return info->promptBatchSize();\n        case ContextLengthRole:\n            return info->contextLength();\n        case GpuLayersRole:\n            return info->gpuLayers();\n        case RepeatPenaltyRole:\n            return info->repeatPenalty();\n        case RepeatPenaltyTokensRole:\n            return 
info->repeatPenaltyTokens();\n        case ChatTemplateRole:\n            return QVariant::fromValue(info->chatTemplate());\n        case SystemMessageRole:\n            return QVariant::fromValue(info->systemMessage());\n        case ChatNamePromptRole:\n            return info->chatNamePrompt();\n        case SuggestedFollowUpPromptRole:\n            return info->suggestedFollowUpPrompt();\n        case LikesRole:\n            return info->likes();\n        case DownloadsRole:\n            return info->downloads();\n        case RecencyRole:\n            return info->recency();\n\n    }\n\n    return QVariant();\n}\n\nQVariant ModelList::data(const QString &id, int role) const\n{\n    QMutexLocker locker(&m_mutex);\n    ModelInfo *info = m_modelMap.value(id);\n    return dataInternal(info, role);\n}\n\nQVariant ModelList::dataByFilename(const QString &filename, int role) const\n{\n    QMutexLocker locker(&m_mutex);\n    for (ModelInfo *info : m_models)\n        if (info->filename() == filename)\n            return dataInternal(info, role);\n    return QVariant();\n}\n\nQVariant ModelList::data(const QModelIndex &index, int role) const\n{\n    QMutexLocker locker(&m_mutex);\n    if (!index.isValid() || index.row() < 0 || index.row() >= m_models.size())\n        return QVariant();\n    const ModelInfo *info = m_models.at(index.row());\n    return dataInternal(info, role);\n}\n\nvoid ModelList::updateData(const QString &id, const QVector<QPair<int, QVariant>> &data)\n{\n    // We only sort when one of the fields used by the sorting algorithm actually changes that\n    // is implicated or used by the sorting algorithm\n    bool shouldSort = false;\n    int index;\n\n    {\n        QMutexLocker locker(&m_mutex);\n        if (!m_modelMap.contains(id)) {\n            qWarning() << \"ERROR: cannot update as model map does not contain\" << id;\n            return;\n        }\n\n        ModelInfo *info = m_modelMap.value(id);\n        index = m_models.indexOf(info);\n      
  if (index == -1) {\n            qWarning() << \"ERROR: cannot update as model list does not contain\" << id;\n            return;\n        }\n\n        for (const auto &d : data) {\n            const int role = d.first;\n            const QVariant value = d.second;\n            switch (role) {\n            case IdRole:\n                {\n                    if (info->id() != value.toString()) {\n                        info->setId(value.toString());\n                        shouldSort = true;\n                    }\n                    break;\n                }\n            case NameRole:\n                info->setName(value.toString()); break;\n            case FilenameRole:\n                info->setFilename(value.toString()); break;\n            case DirpathRole:\n                info->dirpath = value.toString(); break;\n            case FilesizeRole:\n                info->filesize = value.toString(); break;\n            case HashRole:\n                info->hash = value.toByteArray(); break;\n            case HashAlgorithmRole:\n                info->hashAlgorithm = static_cast<ModelInfo::HashAlgorithm>(value.toInt()); break;\n            case CalcHashRole:\n                info->calcHash = value.toBool(); break;\n            case InstalledRole:\n                info->installed = value.toBool(); break;\n            case DefaultRole:\n                info->isDefault = value.toBool(); break;\n            case OnlineRole:\n                info->isOnline = value.toBool(); break;\n            case CompatibleApiRole:\n                info->isCompatibleApi = value.toBool(); break;\n            case DescriptionRole:\n                info->setDescription(value.toString()); break;\n            case RequiresVersionRole:\n                info->requiresVersion = value.toString(); break;\n            case VersionRemovedRole:\n                info->versionRemoved = value.toString(); break;\n            case UrlRole:\n                info->setUrl(value.toString()); 
break;\n            case BytesReceivedRole:\n                info->bytesReceived = value.toLongLong(); break;\n            case BytesTotalRole:\n                info->bytesTotal = value.toLongLong(); break;\n            case TimestampRole:\n                info->timestamp = value.toLongLong(); break;\n            case SpeedRole:\n                info->speed = value.toString(); break;\n            case DownloadingRole:\n                info->isDownloading = value.toBool(); break;\n            case IncompleteRole:\n                info->isIncomplete = value.toBool(); break;\n            case DownloadErrorRole:\n                info->downloadError = value.toString(); break;\n            case OrderRole:\n                {\n                    if (info->order != value.toString()) {\n                        info->order = value.toString();\n                        shouldSort = true;\n                    }\n                    break;\n                }\n            case RamrequiredRole:\n                info->ramrequired = value.toInt(); break;\n            case ParametersRole:\n                info->parameters = value.toString(); break;\n            case QuantRole:\n                info->setQuant(value.toString()); break;\n            case TypeRole:\n                info->setType(value.toString()); break;\n            case IsCloneRole:\n                {\n                    if (info->isClone() != value.toBool()) {\n                        info->setIsClone(value.toBool());\n                        shouldSort = true;\n                    }\n                    break;\n                }\n            case IsDiscoveredRole:\n                {\n                    if (info->isDiscovered() != value.toBool()) {\n                        info->setIsDiscovered(value.toBool());\n                        shouldSort = true;\n                    }\n                    break;\n                }\n            case IsEmbeddingModelRole:\n                info->isEmbeddingModel = 
value.toBool(); break;\n            case TemperatureRole:\n                info->setTemperature(value.toDouble()); break;\n            case TopPRole:\n                info->setTopP(value.toDouble()); break;\n            case MinPRole:\n                info->setMinP(value.toDouble()); break;\n            case TopKRole:\n                info->setTopK(value.toInt()); break;\n            case MaxLengthRole:\n                info->setMaxLength(value.toInt()); break;\n            case PromptBatchSizeRole:\n                info->setPromptBatchSize(value.toInt()); break;\n            case ContextLengthRole:\n                info->setContextLength(value.toInt()); break;\n            case GpuLayersRole:\n                info->setGpuLayers(value.toInt()); break;\n            case RepeatPenaltyRole:\n                info->setRepeatPenalty(value.toDouble()); break;\n            case RepeatPenaltyTokensRole:\n                info->setRepeatPenaltyTokens(value.toInt()); break;\n            case ChatTemplateRole:\n                info->m_chatTemplate = value.toString(); break;\n            case SystemMessageRole:\n                info->m_systemMessage = value.toString(); break;\n            case ChatNamePromptRole:\n                info->setChatNamePrompt(value.toString()); break;\n            case SuggestedFollowUpPromptRole:\n                info->setSuggestedFollowUpPrompt(value.toString()); break;\n            case LikesRole:\n                {\n                    if (info->likes() != value.toInt()) {\n                        info->setLikes(value.toInt());\n                        shouldSort = true;\n                    }\n                    break;\n                }\n            case DownloadsRole:\n                {\n                    if (info->downloads() != value.toInt()) {\n                        info->setDownloads(value.toInt());\n                        shouldSort = true;\n                    }\n                    break;\n                }\n            case 
RecencyRole:\n                {\n                    if (info->recency() != value.toDateTime()) {\n                        info->setRecency(value.toDateTime());\n                        shouldSort = true;\n                    }\n                    break;\n                }\n            }\n        }\n\n        // Extra guarantee that these always remains in sync with filesystem\n        QString modelPath = info->dirpath + info->filename();\n        const QFileInfo fileInfo(modelPath);\n        info->installed = fileInfo.exists();\n        const QFileInfo incompleteInfo(incompleteDownloadPath(info->filename()));\n        info->isIncomplete = incompleteInfo.exists();\n\n        // check installed, discovered/sideloaded models only (including clones)\n        if (!info->checkedEmbeddingModel && !info->isEmbeddingModel && info->installed\n            && (info->isDiscovered() || info->description().isEmpty()))\n        {\n            // read GGUF and decide based on model architecture\n            info->isEmbeddingModel = LLModel::Implementation::isEmbeddingModel(modelPath.toStdString());\n            info->checkedEmbeddingModel = true;\n        }\n    }\n\n    emit dataChanged(createIndex(index, 0), createIndex(index, 0));\n\n    if (shouldSort)\n        resortModel();\n\n    emit selectableModelListChanged();\n}\n\nvoid ModelList::resortModel()\n{\n    emit layoutAboutToBeChanged();\n    {\n        QMutexLocker locker(&m_mutex);\n        auto s = m_discoverSort;\n        auto d = m_discoverSortDirection;\n        std::stable_sort(m_models.begin(), m_models.end(), [s, d](const ModelInfo* lhs, const ModelInfo* rhs) {\n            return ModelList::lessThan(lhs, rhs, s, d);\n        });\n    }\n    emit layoutChanged();\n}\n\nvoid ModelList::updateDataByFilename(const QString &filename, QVector<QPair<int, QVariant>> data)\n{\n    if (data.isEmpty())\n        return; // no-op\n\n    QVector<QString> modelsById;\n    {\n        QMutexLocker locker(&m_mutex);\n        for 
(ModelInfo *info : m_models)\n            if (info->filename() == filename)\n                modelsById.append(info->id());\n    }\n\n    if (modelsById.isEmpty()) {\n        qWarning() << \"ERROR: cannot update model as list does not contain file\" << filename;\n        return;\n    }\n\n    for (const QString &id : modelsById)\n        updateData(id, data);\n}\n\nModelInfo ModelList::modelInfo(const QString &id) const\n{\n    QMutexLocker locker(&m_mutex);\n    if (!m_modelMap.contains(id))\n        return ModelInfo();\n    return *m_modelMap.value(id);\n}\n\nModelInfo ModelList::modelInfoByFilename(const QString &filename, bool allowClone) const\n{\n    QMutexLocker locker(&m_mutex);\n    for (ModelInfo *info : m_models)\n        if (info->filename() == filename && (allowClone || !info->isClone()))\n            return *info;\n    return ModelInfo();\n}\n\nbool ModelList::isUniqueName(const QString &name) const\n{\n    QMutexLocker locker(&m_mutex);\n    for (const ModelInfo *info : m_models) {\n        if(info->name() == name)\n            return false;\n    }\n    return true;\n}\n\nQString ModelList::clone(const ModelInfo &model)\n{\n    auto *mySettings = MySettings::globalInstance();\n\n    const QString id = Network::globalInstance()->generateUniqueId();\n    addModel(id);\n\n    QString tmplSetting, sysmsgSetting;\n    if (auto tmpl = model.chatTemplate().asModern()) {\n        tmplSetting = *tmpl;\n    } else {\n        qWarning(\"ModelList Warning: attempted to clone model with legacy chat template\");\n        return {};\n    }\n    if (auto msg = model.systemMessage().asModern()) {\n        sysmsgSetting = *msg;\n    } else {\n        qWarning(\"ModelList Warning: attempted to clone model with legacy system message\");\n        return {};\n    }\n\n    QVector<QPair<int, QVariant>> data {\n        { ModelList::InstalledRole, model.installed },\n        { ModelList::IsCloneRole, true },\n        { ModelList::NameRole, uniqueModelName(model) },\n        
{ ModelList::FilenameRole, model.filename() },\n        { ModelList::DirpathRole, model.dirpath },\n        { ModelList::OnlineRole, model.isOnline },\n        { ModelList::CompatibleApiRole, model.isCompatibleApi },\n        { ModelList::IsEmbeddingModelRole, model.isEmbeddingModel },\n        { ModelList::TemperatureRole, model.temperature() },\n        { ModelList::TopPRole, model.topP() },\n        { ModelList::MinPRole, model.minP() },\n        { ModelList::TopKRole, model.topK() },\n        { ModelList::MaxLengthRole, model.maxLength() },\n        { ModelList::PromptBatchSizeRole, model.promptBatchSize() },\n        { ModelList::ContextLengthRole, model.contextLength() },\n        { ModelList::GpuLayersRole, model.gpuLayers() },\n        { ModelList::RepeatPenaltyRole, model.repeatPenalty() },\n        { ModelList::RepeatPenaltyTokensRole, model.repeatPenaltyTokens() },\n        { ModelList::SystemMessageRole, model.m_systemMessage },\n        { ModelList::ChatNamePromptRole, model.chatNamePrompt() },\n        { ModelList::SuggestedFollowUpPromptRole, model.suggestedFollowUpPrompt() },\n    };\n    if (auto tmpl = model.m_chatTemplate)\n        data.emplace_back(ModelList::ChatTemplateRole, *tmpl); // copy default chat template, if known\n    updateData(id, data);\n\n    // Ensure setting overrides are copied in case the base model overrides change.\n    // This is necessary because setting these roles on ModelInfo above does not write to settings.\n    auto cloneInfo = modelInfo(id);\n    if (mySettings->isModelChatTemplateSet (model))\n        mySettings->setModelChatTemplate (cloneInfo, tmplSetting  );\n    if (mySettings->isModelSystemMessageSet(model))\n        mySettings->setModelSystemMessage(cloneInfo, sysmsgSetting);\n\n    return id;\n}\n\nvoid ModelList::removeClone(const ModelInfo &model)\n{\n    Q_ASSERT(model.isClone());\n    if (!model.isClone())\n        return;\n\n    removeInternal(model);\n}\n\nvoid ModelList::removeInstalled(const 
ModelInfo &model)\n{\n    Q_ASSERT(model.installed);\n    Q_ASSERT(!model.isClone());\n    Q_ASSERT(model.isDiscovered() || model.isCompatibleApi || model.description() == \"\" /*indicates sideloaded*/);\n    removeInternal(model);\n}\n\nint ModelList::indexByModelId(const QString &id) const\n{\n    QMutexLocker locker(&m_mutex);\n    if (auto it = m_modelMap.find(id); it != m_modelMap.cend())\n        return m_models.indexOf(*it);\n    return -1;\n}\n\nvoid ModelList::removeInternal(const ModelInfo &model)\n{\n    int indexOfModel = indexByModelId(model.id());\n    Q_ASSERT(indexOfModel != -1);\n    if (indexOfModel == -1) {\n        qWarning() << \"ERROR: model list does not contain\" << model.id();\n        return;\n    }\n\n    beginRemoveRows(QModelIndex(), indexOfModel, indexOfModel);\n    {\n        QMutexLocker locker(&m_mutex);\n        ModelInfo *info = m_models.takeAt(indexOfModel);\n        m_modelMap.remove(info->id());\n        delete info;\n    }\n    endRemoveRows();\n    emit selectableModelListChanged();\n    MySettings::globalInstance()->eraseModel(model);\n}\n\nQString ModelList::uniqueModelName(const ModelInfo &model) const\n{\n    QMutexLocker locker(&m_mutex);\n    static const QRegularExpression re(\"^(.*)~(\\\\d+)$\");\n    QRegularExpressionMatch match = re.match(model.name());\n    QString baseName;\n    if (match.hasMatch())\n        baseName = match.captured(1);\n    else\n        baseName = model.name();\n\n    int maxSuffixNumber = 0;\n    bool baseNameExists = false;\n\n    for (const ModelInfo *info : m_models) {\n        if(info->name() == baseName)\n            baseNameExists = true;\n\n        QRegularExpressionMatch match = re.match(info->name());\n        if (match.hasMatch()) {\n            QString currentBaseName = match.captured(1);\n            int currentSuffixNumber = match.captured(2).toInt();\n            if (currentBaseName == baseName && currentSuffixNumber > maxSuffixNumber)\n                maxSuffixNumber = 
currentSuffixNumber;\n        }\n    }\n\n    if (baseNameExists)\n        return baseName + \"~\" + QString::number(maxSuffixNumber + 1);\n\n    return baseName;\n}\n\nbool ModelList::modelExists(const QString &modelFilename) const\n{\n    QString appPath = QCoreApplication::applicationDirPath() + modelFilename;\n    QFileInfo infoAppPath(appPath);\n    if (infoAppPath.exists())\n        return true;\n\n    QString downloadPath = MySettings::globalInstance()->modelPath() + modelFilename;\n    QFileInfo infoLocalPath(downloadPath);\n    if (infoLocalPath.exists())\n        return true;\n    return false;\n}\n\nvoid ModelList::updateOldRemoteModels(const QString &path)\n{\n    QDirIterator it(path, QDir::Files, QDirIterator::Subdirectories);\n    while (it.hasNext()) {\n        QFileInfo info = it.nextFileInfo();\n        QString filename = it.fileName();\n        if (!filename.startsWith(\"chatgpt-\") || !filename.endsWith(\".txt\"))\n            continue;\n\n        QString apikey;\n        QString modelname(filename);\n        modelname.chop(4); // strip \".txt\" extension\n        modelname.remove(0, 8); // strip \"chatgpt-\" prefix\n        QFile file(info.filePath());\n        if (!file.open(QIODevice::ReadOnly)) {\n            qWarning().noquote() << tr(\"cannot open \\\"%1\\\": %2\").arg(file.fileName(), file.errorString());\n            continue;\n        }\n\n        {\n            QTextStream in(&file);\n            apikey = in.readAll();\n            file.close();\n        }\n\n        QFile newfile(u\"%1/gpt4all-%2.rmodel\"_s.arg(info.dir().path(), modelname));\n        if (!newfile.open(QIODevice::ReadWrite)) {\n            qWarning().noquote() << tr(\"cannot create \\\"%1\\\": %2\").arg(newfile.fileName(), file.errorString());\n            continue;\n        }\n\n        QJsonObject obj {\n            { \"apiKey\",    apikey    },\n            { \"modelName\", modelname },\n        };\n\n        QTextStream out(&newfile);\n        out << 
QJsonDocument(obj).toJson();\n        newfile.close();\n\n        file.remove();\n    }\n}\n\nvoid ModelList::processModelDirectory(const QString &path)\n{\n    QDirIterator it(path, QDir::Files, QDirIterator::Subdirectories);\n    while (it.hasNext()) {\n        QFileInfo info = it.nextFileInfo();\n\n        QString filename = it.fileName();\n        if (filename.startsWith(\"incomplete\") || FILENAME_BLACKLIST.contains(filename))\n            continue;\n        if (!filename.endsWith(\".gguf\") && !filename.endsWith(\".rmodel\"))\n            continue;\n\n        bool isOnline(filename.endsWith(\".rmodel\"));\n        bool isCompatibleApi(filename.endsWith(\"-capi.rmodel\"));\n\n        QString name;\n        QString description;\n        if (isCompatibleApi) {\n            QJsonObject obj;\n            {\n                QFile file(info.filePath());\n                if (!file.open(QIODeviceBase::ReadOnly)) {\n                    qWarning().noquote() << tr(\"cannot open \\\"%1\\\": %2\").arg(file.fileName(), file.errorString());\n                    continue;\n                }\n                QJsonDocument doc = QJsonDocument::fromJson(file.readAll());\n                obj = doc.object();\n            }\n            {\n                QString apiKey(obj[\"apiKey\"].toString());\n                QString baseUrl(obj[\"baseUrl\"].toString());\n                QString modelName(obj[\"modelName\"].toString());\n                apiKey = apiKey.length() < 10 ? 
\"*****\" : apiKey.left(5) + \"*****\";\n                name = tr(\"%1 (%2)\").arg(modelName, baseUrl);\n                description = tr(\"<strong>OpenAI-Compatible API Model</strong><br>\"\n                                 \"<ul><li>API Key: %1</li>\"\n                                 \"<li>Base URL: %2</li>\"\n                                 \"<li>Model Name: %3</li></ul>\")\n                                    .arg(apiKey, baseUrl, modelName);\n            }\n        }\n\n        QVector<QString> modelsById;\n        {\n            QMutexLocker locker(&m_mutex);\n            for (ModelInfo *info : m_models)\n                if (info->filename() == filename)\n                    modelsById.append(info->id());\n        }\n\n        if (modelsById.isEmpty()) {\n            if (!contains(filename))\n                addModel(filename);\n            modelsById.append(filename);\n        }\n\n        for (const QString &id : modelsById) {\n            QVector<QPair<int, QVariant>> data {\n                { InstalledRole, true },\n                { FilenameRole, filename },\n                { OnlineRole, isOnline },\n                { CompatibleApiRole, isCompatibleApi },\n                { DirpathRole, info.dir().absolutePath() + \"/\" },\n                { FilesizeRole, toFileSize(info.size()) },\n            };\n            if (isCompatibleApi) {\n                // The data will be saved to \"GPT4All.ini\".\n                data.append({ NameRole, name });\n                // The description is hard-coded into \"GPT4All.ini\" due to performance issue.\n                // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList.\n                data.append({ DescriptionRole, description });\n                data.append({ ChatTemplateRole, RMODEL_CHAT_TEMPLATE });\n            }\n            updateData(id, data);\n        }\n    }\n}\n\nvoid ModelList::updateModelsFromDirectory()\n{\n    const QString 
exePath = QCoreApplication::applicationDirPath() + QDir::separator();\n    const QString localPath = MySettings::globalInstance()->modelPath();\n\n    updateOldRemoteModels(exePath);\n    processModelDirectory(exePath);\n    if (localPath != exePath) {\n        updateOldRemoteModels(localPath);\n        processModelDirectory(localPath);\n    }\n}\n\nstatic QString modelsJsonFilename()\n{\n    return QStringLiteral(\"models\" MODELS_JSON_VERSION \".json\");\n}\n\nstatic std::optional<QFile> modelsJsonCacheFile()\n{\n    constexpr auto loc = QStandardPaths::CacheLocation;\n    QString modelsJsonFname = modelsJsonFilename();\n    if (auto path = QStandardPaths::locate(loc, modelsJsonFname); !path.isEmpty())\n        return std::make_optional<QFile>(path);\n    if (auto path = QStandardPaths::writableLocation(loc); !path.isEmpty())\n        return std::make_optional<QFile>(u\"%1/%2\"_s.arg(path, modelsJsonFname));\n    return std::nullopt;\n}\n\nvoid ModelList::updateModelsFromJson()\n{\n    QString modelsJsonFname = modelsJsonFilename();\n\n#if defined(USE_LOCAL_MODELSJSON)\n    QUrl jsonUrl(u\"file://%1/dev/large_language_models/gpt4all/gpt4all-chat/metadata/%2\"_s.arg(QDir::homePath(), modelsJsonFname));\n#else\n    QUrl jsonUrl(u\"http://gpt4all.io/models/%1\"_s.arg(modelsJsonFname));\n#endif\n\n    QNetworkRequest request(jsonUrl);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    QNetworkReply *jsonReply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort);\n    QEventLoop loop;\n    connect(jsonReply, &QNetworkReply::finished, &loop, &QEventLoop::quit);\n    QTimer::singleShot(1500, &loop, &QEventLoop::quit);\n    loop.exec();\n    if (jsonReply->error() == QNetworkReply::NoError && jsonReply->isFinished()) {\n        QByteArray jsonData = jsonReply->readAll();\n        
jsonReply->deleteLater();\n        parseModelsJsonFile(jsonData, true);\n    } else {\n        qWarning() << \"WARNING: Could not download models.json synchronously\";\n        updateModelsFromJsonAsync();\n\n        auto cacheFile = modelsJsonCacheFile();\n        if (!cacheFile) {\n            // no known location\n        } else if (cacheFile->open(QIODeviceBase::ReadOnly)) {\n            QByteArray jsonData = cacheFile->readAll();\n            cacheFile->close();\n            parseModelsJsonFile(jsonData, false);\n        } else if (cacheFile->exists())\n            qWarning() << \"ERROR: Couldn't read models.json cache file: \" << cacheFile->fileName();\n    }\n    delete jsonReply;\n}\n\nvoid ModelList::updateModelsFromJsonAsync()\n{\n    m_asyncModelRequestOngoing = true;\n    emit asyncModelRequestOngoingChanged();\n    QString modelsJsonFname = modelsJsonFilename();\n\n#if defined(USE_LOCAL_MODELSJSON)\n    QUrl jsonUrl(u\"file://%1/dev/large_language_models/gpt4all/gpt4all-chat/metadata/%2\"_s.arg(QDir::homePath(), modelsJsonFname));\n#else\n    QUrl jsonUrl(u\"http://gpt4all.io/models/%1\"_s.arg(modelsJsonFname));\n#endif\n\n    QNetworkRequest request(jsonUrl);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    QNetworkReply *jsonReply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort);\n    connect(jsonReply, &QNetworkReply::finished, this, &ModelList::handleModelsJsonDownloadFinished);\n    connect(jsonReply, &QNetworkReply::errorOccurred, this, &ModelList::handleModelsJsonDownloadErrorOccurred);\n}\n\nvoid ModelList::handleModelsJsonDownloadFinished()\n{\n    QNetworkReply *jsonReply = qobject_cast<QNetworkReply *>(sender());\n    if (!jsonReply) {\n        m_asyncModelRequestOngoing = false;\n        emit asyncModelRequestOngoingChanged();\n        return;\n    }\n\n    
QByteArray jsonData = jsonReply->readAll();\n    jsonReply->deleteLater();\n    parseModelsJsonFile(jsonData, true);\n    m_asyncModelRequestOngoing = false;\n    emit asyncModelRequestOngoingChanged();\n}\n\nvoid ModelList::handleModelsJsonDownloadErrorOccurred(QNetworkReply::NetworkError code)\n{\n    // TODO: Show what error occurred in the GUI\n    m_asyncModelRequestOngoing = false;\n    emit asyncModelRequestOngoingChanged();\n\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n\n    qWarning() << u\"ERROR: Modellist download failed with error code \\\"%1-%2\\\"\"_s\n                      .arg(code).arg(reply->errorString());\n}\n\nvoid ModelList::handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors)\n{\n    QUrl url = reply->request().url();\n    for (const auto &e : errors)\n        qWarning() << \"ERROR: Received ssl error:\" << e.errorString() << \"for\" << url;\n}\n\nvoid ModelList::maybeUpdateDataForSettings(const ModelInfo &info, bool fromInfo)\n{\n    // ignore updates that were *because* of a dataChanged - would cause a circular dependency\n    int idx;\n    if (!fromInfo && (idx = indexByModelId(info.id())) != -1) {\n        emit dataChanged(index(idx, 0), index(idx, 0));\n        emit selectableModelListChanged();\n    }\n}\n\nvoid ModelList::updateDataForSettings()\n{\n    emit dataChanged(index(0, 0), index(m_models.size() - 1, 0));\n    emit selectableModelListChanged();\n}\n\nvoid ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)\n{\n    QJsonParseError err;\n    QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);\n    if (err.error != QJsonParseError::NoError) {\n        qWarning() << \"ERROR: Couldn't parse: \" << jsonData << err.errorString();\n        return;\n    }\n\n    if (save) {\n        auto cacheFile = modelsJsonCacheFile();\n        if (!cacheFile) {\n            // no known location\n        } else if 
(QFileInfo(*cacheFile).dir().mkpath(u\".\"_s) && cacheFile->open(QIODeviceBase::WriteOnly)) {\n            cacheFile->write(jsonData);\n            cacheFile->close();\n        } else\n            qWarning() << \"ERROR: Couldn't write models config file: \" << cacheFile->fileName();\n    }\n\n    QJsonArray jsonArray = document.array();\n    const QString currentVersion = QCoreApplication::applicationVersion();\n\n    for (const QJsonValue &value : jsonArray) {\n        QJsonObject obj = value.toObject();\n\n        QString modelName = obj[\"name\"].toString();\n        QString modelFilename = obj[\"filename\"].toString();\n        QString modelFilesize = obj[\"filesize\"].toString();\n        QString requiresVersion = obj[\"requires\"].toString();\n        QString versionRemoved = obj[\"removedIn\"].toString();\n        QString url = obj[\"url\"].toString();\n        bool isDefault = obj.contains(\"isDefault\") && obj[\"isDefault\"] == u\"true\"_s;\n        bool disableGUI = obj.contains(\"disableGUI\") && obj[\"disableGUI\"] == u\"true\"_s;\n        QString description = obj[\"description\"].toString();\n        QString order = obj[\"order\"].toString();\n        int ramrequired = obj[\"ramrequired\"].toString().toInt();\n        QString parameters = obj[\"parameters\"].toString();\n        QString quant = obj[\"quant\"].toString();\n        QString type = obj[\"type\"].toString();\n        bool isEmbeddingModel = obj[\"embeddingModel\"].toBool();\n\n        QByteArray modelHash;\n        ModelInfo::HashAlgorithm hashAlgorithm;\n        if (auto it = obj.find(\"sha256sum\"_L1); it != obj.end()) {\n            modelHash = it->toString().toLatin1();\n            hashAlgorithm = ModelInfo::Sha256;\n        } else {\n            modelHash = obj[\"md5sum\"].toString().toLatin1();\n            hashAlgorithm = ModelInfo::Md5;\n        }\n\n        // Some models aren't supported in the GUI at all\n        if (disableGUI)\n            continue;\n\n        // If the 
current version is strictly less than required version, then skip\n        if (!requiresVersion.isEmpty() && Download::compareAppVersions(currentVersion, requiresVersion) < 0)\n            continue;\n\n        // If the version removed is less than or equal to the current version, then skip\n        if (!versionRemoved.isEmpty() && Download::compareAppVersions(versionRemoved, currentVersion) <= 0)\n            continue;\n\n        modelFilesize = ModelList::toFileSize(modelFilesize.toULongLong());\n\n        const QString id = modelName;\n        Q_ASSERT(!id.isEmpty());\n\n        if (contains(modelFilename))\n            changeId(modelFilename, id);\n\n        if (!contains(id))\n            addModel(id);\n\n        QVector<QPair<int, QVariant>> data {\n            { ModelList::NameRole, modelName },\n            { ModelList::FilenameRole, modelFilename },\n            { ModelList::FilesizeRole, modelFilesize },\n            { ModelList::HashRole, modelHash },\n            { ModelList::HashAlgorithmRole, hashAlgorithm },\n            { ModelList::DefaultRole, isDefault },\n            { ModelList::DescriptionRole, description },\n            { ModelList::RequiresVersionRole, requiresVersion },\n            { ModelList::VersionRemovedRole, versionRemoved },\n            { ModelList::UrlRole, url },\n            { ModelList::OrderRole, order },\n            { ModelList::RamrequiredRole, ramrequired },\n            { ModelList::ParametersRole, parameters },\n            { ModelList::QuantRole, quant },\n            { ModelList::TypeRole, type },\n            { ModelList::IsEmbeddingModelRole, isEmbeddingModel },\n        };\n        if (obj.contains(\"temperature\"))\n            data.append({ ModelList::TemperatureRole, obj[\"temperature\"].toDouble() });\n        if (obj.contains(\"topP\"))\n            data.append({ ModelList::TopPRole, obj[\"topP\"].toDouble() });\n        if (obj.contains(\"minP\"))\n            data.append({ ModelList::MinPRole, 
obj[\"minP\"].toDouble() });\n        if (obj.contains(\"topK\"))\n            data.append({ ModelList::TopKRole, obj[\"topK\"].toInt() });\n        if (obj.contains(\"maxLength\"))\n            data.append({ ModelList::MaxLengthRole, obj[\"maxLength\"].toInt() });\n        if (obj.contains(\"promptBatchSize\"))\n            data.append({ ModelList::PromptBatchSizeRole, obj[\"promptBatchSize\"].toInt() });\n        if (obj.contains(\"contextLength\"))\n            data.append({ ModelList::ContextLengthRole, obj[\"contextLength\"].toInt() });\n        if (obj.contains(\"gpuLayers\"))\n            data.append({ ModelList::GpuLayersRole, obj[\"gpuLayers\"].toInt() });\n        if (obj.contains(\"repeatPenalty\"))\n            data.append({ ModelList::RepeatPenaltyRole, obj[\"repeatPenalty\"].toDouble() });\n        if (obj.contains(\"repeatPenaltyTokens\"))\n            data.append({ ModelList::RepeatPenaltyTokensRole, obj[\"repeatPenaltyTokens\"].toInt() });\n        if (auto it = obj.find(\"chatTemplate\"_L1); it != obj.end())\n            data.append({ ModelList::ChatTemplateRole, it->toString() });\n        if (auto it = obj.find(\"systemMessage\"_L1); it != obj.end())\n            data.append({ ModelList::SystemMessageRole, it->toString() });\n        updateData(id, data);\n    }\n\n    const QString chatGPTDesc = tr(\"<ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send\"\n        \" your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used\"\n        \" to communicate with OpenAI</li><li>You can apply for an API key\"\n        \" <a href=\\\"https://platform.openai.com/account/api-keys\\\">here.</a></li>\");\n\n    {\n        const QString modelName = \"ChatGPT-3.5 Turbo\";\n        const QString id = modelName;\n        const QString modelFilename = \"gpt4all-gpt-3.5-turbo.rmodel\";\n        if (contains(modelFilename))\n            changeId(modelFilename, id);\n        if (!contains(id))\n            
addModel(id);\n        QVector<QPair<int, QVariant>> data {\n            { ModelList::NameRole, modelName },\n            { ModelList::FilenameRole, modelFilename },\n            { ModelList::FilesizeRole, \"minimal\" },\n            { ModelList::OnlineRole, true },\n            { ModelList::DescriptionRole,\n             tr(\"<strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br> %1\").arg(chatGPTDesc) },\n            { ModelList::RequiresVersionRole, \"2.7.4\" },\n            { ModelList::OrderRole, \"ca\" },\n            { ModelList::RamrequiredRole, 0 },\n            { ModelList::ParametersRole, \"?\" },\n            { ModelList::QuantRole, \"NA\" },\n            { ModelList::TypeRole, \"GPT\" },\n            { ModelList::UrlRole, \"https://api.openai.com/v1/chat/completions\" },\n            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },\n        };\n        updateData(id, data);\n    }\n\n    {\n        const QString chatGPT4Warn = tr(\"<br><br><i>* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. 
Contact OpenAI for more info.\");\n\n        const QString modelName = \"ChatGPT-4\";\n        const QString id = modelName;\n        const QString modelFilename = \"gpt4all-gpt-4.rmodel\";\n        if (contains(modelFilename))\n            changeId(modelFilename, id);\n        if (!contains(id))\n            addModel(id);\n        QVector<QPair<int, QVariant>> data {\n            { ModelList::NameRole, modelName },\n            { ModelList::FilenameRole, modelFilename },\n            { ModelList::FilesizeRole, \"minimal\" },\n            { ModelList::OnlineRole, true },\n            { ModelList::DescriptionRole,\n             tr(\"<strong>OpenAI's ChatGPT model GPT-4</strong><br> %1 %2\").arg(chatGPTDesc).arg(chatGPT4Warn) },\n            { ModelList::RequiresVersionRole, \"2.7.4\" },\n            { ModelList::OrderRole, \"cb\" },\n            { ModelList::RamrequiredRole, 0 },\n            { ModelList::ParametersRole, \"?\" },\n            { ModelList::QuantRole, \"NA\" },\n            { ModelList::TypeRole, \"GPT\" },\n            { ModelList::UrlRole, \"https://api.openai.com/v1/chat/completions\" },\n            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },\n        };\n        updateData(id, data);\n    }\n\n    const QString mistralDesc = tr(\"<ul><li>Requires personal Mistral API key.</li><li>WARNING: Will send\"\n                                   \" your chats to Mistral!</li><li>Your API key will be stored on disk</li><li>Will only be used\"\n                                   \" to communicate with Mistral</li><li>You can apply for an API key\"\n                                   \" <a href=\\\"https://console.mistral.ai/user/api-keys\\\">here</a>.</li>\");\n\n    {\n        const QString modelName = \"Mistral Tiny API\";\n        const QString id = modelName;\n        const QString modelFilename = \"gpt4all-mistral-tiny.rmodel\";\n        if (contains(modelFilename))\n            changeId(modelFilename, id);\n        if (!contains(id))\n       
     addModel(id);\n        QVector<QPair<int, QVariant>> data {\n            { ModelList::NameRole, modelName },\n            { ModelList::FilenameRole, modelFilename },\n            { ModelList::FilesizeRole, \"minimal\" },\n            { ModelList::OnlineRole, true },\n            { ModelList::DescriptionRole,\n             tr(\"<strong>Mistral Tiny model</strong><br> %1\").arg(mistralDesc) },\n            { ModelList::RequiresVersionRole, \"2.7.4\" },\n            { ModelList::OrderRole, \"cc\" },\n            { ModelList::RamrequiredRole, 0 },\n            { ModelList::ParametersRole, \"?\" },\n            { ModelList::QuantRole, \"NA\" },\n            { ModelList::TypeRole, \"Mistral\" },\n            { ModelList::UrlRole, \"https://api.mistral.ai/v1/chat/completions\" },\n            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },\n        };\n        updateData(id, data);\n    }\n    {\n        const QString modelName = \"Mistral Small API\";\n        const QString id = modelName;\n        const QString modelFilename = \"gpt4all-mistral-small.rmodel\";\n        if (contains(modelFilename))\n            changeId(modelFilename, id);\n        if (!contains(id))\n            addModel(id);\n        QVector<QPair<int, QVariant>> data {\n            { ModelList::NameRole, modelName },\n            { ModelList::FilenameRole, modelFilename },\n            { ModelList::FilesizeRole, \"minimal\" },\n            { ModelList::OnlineRole, true },\n            { ModelList::DescriptionRole,\n             tr(\"<strong>Mistral Small model</strong><br> %1\").arg(mistralDesc) },\n            { ModelList::RequiresVersionRole, \"2.7.4\" },\n            { ModelList::OrderRole, \"cd\" },\n            { ModelList::RamrequiredRole, 0 },\n            { ModelList::ParametersRole, \"?\" },\n            { ModelList::QuantRole, \"NA\" },\n            { ModelList::TypeRole, \"Mistral\" },\n            { ModelList::UrlRole, \"https://api.mistral.ai/v1/chat/completions\" },\n         
   { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },\n        };\n        updateData(id, data);\n    }\n\n    {\n        const QString modelName = \"Mistral Medium API\";\n        const QString id = modelName;\n        const QString modelFilename = \"gpt4all-mistral-medium.rmodel\";\n        if (contains(modelFilename))\n            changeId(modelFilename, id);\n        if (!contains(id))\n            addModel(id);\n        QVector<QPair<int, QVariant>> data {\n            { ModelList::NameRole, modelName },\n            { ModelList::FilenameRole, modelFilename },\n            { ModelList::FilesizeRole, \"minimal\" },\n            { ModelList::OnlineRole, true },\n            { ModelList::DescriptionRole,\n             tr(\"<strong>Mistral Medium model</strong><br> %1\").arg(mistralDesc) },\n            { ModelList::RequiresVersionRole, \"2.7.4\" },\n            { ModelList::OrderRole, \"ce\" },\n            { ModelList::RamrequiredRole, 0 },\n            { ModelList::ParametersRole, \"?\" },\n            { ModelList::QuantRole, \"NA\" },\n            { ModelList::TypeRole, \"Mistral\" },\n            { ModelList::UrlRole, \"https://api.mistral.ai/v1/chat/completions\" },\n            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },\n        };\n        updateData(id, data);\n    }\n\n    const QString compatibleDesc = tr(\"<ul><li>Requires personal API key and the API base URL.</li>\"\n                                      \"<li>WARNING: Will send your chats to \"\n                                      \"the OpenAI-compatible API Server you specified!</li>\"\n                                      \"<li>Your API key will be stored on disk</li><li>Will only be used\"\n                                      \" to communicate with the OpenAI-compatible API Server</li>\");\n\n    {\n        const QString modelName = \"OpenAI-compatible\";\n        const QString id = modelName;\n        if (!contains(id))\n            addModel(id);\n        QVector<QPair<int, 
QVariant>> data {\n            { ModelList::NameRole, modelName },\n            { ModelList::FilesizeRole, \"minimal\" },\n            { ModelList::OnlineRole, true },\n            { ModelList::CompatibleApiRole, true },\n            { ModelList::DescriptionRole,\n             tr(\"<strong>Connect to OpenAI-compatible API server</strong><br> %1\").arg(compatibleDesc) },\n            { ModelList::RequiresVersionRole, \"2.7.4\" },\n            { ModelList::OrderRole, \"cf\" },\n            { ModelList::RamrequiredRole, 0 },\n            { ModelList::ParametersRole, \"?\" },\n            { ModelList::QuantRole, \"NA\" },\n            { ModelList::TypeRole, \"NA\" },\n            { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },\n        };\n        updateData(id, data);\n    }\n}\n\nvoid ModelList::updateDiscoveredInstalled(const ModelInfo &info)\n{\n    QVector<QPair<int, QVariant>> data {\n        { ModelList::InstalledRole, true },\n        { ModelList::IsDiscoveredRole, true },\n        { ModelList::NameRole, info.name() },\n        { ModelList::FilenameRole, info.filename() },\n        { ModelList::DescriptionRole, info.description() },\n        { ModelList::UrlRole, info.url() },\n        { ModelList::LikesRole, info.likes() },\n        { ModelList::DownloadsRole, info.downloads() },\n        { ModelList::RecencyRole, info.recency() },\n        { ModelList::QuantRole, info.quant() },\n        { ModelList::TypeRole, info.type() },\n    };\n    updateData(info.id(), data);\n}\n\n// FIXME(jared): This should only contain fields without reasonable defaults such as name, description, and URL.\n//               For other settings, there is no authoritative value and we should load the setting lazily like we do\n//               for any other override.\nvoid ModelList::updateModelsFromSettings()\n{\n    QSettings settings;\n    QStringList groups = settings.childGroups();\n    for (const QString &g: groups) {\n        if (!g.startsWith(\"model-\"))\n            
continue;\n\n        const QString id = g.sliced(6);\n        if (contains(id))\n            continue;\n\n        // If we can't find the corresponding file, then ignore it as this reflects a stale model.\n        // The file could have been deleted manually by the user for instance or temporarily renamed.\n        QString filename;\n        {\n            auto value = settings.value(u\"%1/filename\"_s.arg(g));\n            if (!value.isValid() || !modelExists(filename = value.toString()))\n                continue;\n        }\n\n        QVector<QPair<int, QVariant>> data;\n\n        // load data from base model\n        // FIXME(jared): how does \"Restore Defaults\" work for other settings of clones which we don't do this for?\n        if (auto base = modelInfoByFilename(filename, /*allowClone*/ false); !base.id().isNull()) {\n            if (auto tmpl = base.m_chatTemplate)\n                data.append({ ModelList::ChatTemplateRole,  *tmpl });\n            if (auto msg  = base.m_systemMessage; !msg.isNull())\n                data.append({ ModelList::SystemMessageRole,  msg  });\n        }\n\n        addModel(id);\n\n        // load data from settings\n        if (settings.contains(g + \"/name\")) {\n            const QString name = settings.value(g + \"/name\").toString();\n            data.append({ ModelList::NameRole, name });\n        }\n        if (settings.contains(g + \"/filename\")) {\n            const QString filename = settings.value(g + \"/filename\").toString();\n            data.append({ ModelList::FilenameRole, filename });\n        }\n        if (settings.contains(g + \"/description\")) {\n            const QString d = settings.value(g + \"/description\").toString();\n            data.append({ ModelList::DescriptionRole, d });\n        }\n        if (settings.contains(g + \"/url\")) {\n            const QString u = settings.value(g + \"/url\").toString();\n            data.append({ ModelList::UrlRole, u });\n        }\n        if 
(settings.contains(g + \"/quant\")) {\n            const QString q = settings.value(g + \"/quant\").toString();\n            data.append({ ModelList::QuantRole, q });\n        }\n        if (settings.contains(g + \"/type\")) {\n            const QString t = settings.value(g + \"/type\").toString();\n            data.append({ ModelList::TypeRole, t });\n        }\n        if (settings.contains(g + \"/isClone\")) {\n            const bool b = settings.value(g + \"/isClone\").toBool();\n            data.append({ ModelList::IsCloneRole, b });\n        }\n        if (settings.contains(g + \"/isDiscovered\")) {\n            const bool b = settings.value(g + \"/isDiscovered\").toBool();\n            data.append({ ModelList::IsDiscoveredRole, b });\n        }\n        if (settings.contains(g + \"/likes\")) {\n            const int l = settings.value(g + \"/likes\").toInt();\n            data.append({ ModelList::LikesRole, l });\n        }\n        if (settings.contains(g + \"/downloads\")) {\n            const int d = settings.value(g + \"/downloads\").toInt();\n            data.append({ ModelList::DownloadsRole, d });\n        }\n        if (settings.contains(g + \"/recency\")) {\n            const QDateTime r = settings.value(g + \"/recency\").toDateTime();\n            data.append({ ModelList::RecencyRole, r });\n        }\n        if (settings.contains(g + \"/temperature\")) {\n            const double temperature = settings.value(g + \"/temperature\").toDouble();\n            data.append({ ModelList::TemperatureRole, temperature });\n        }\n        if (settings.contains(g + \"/topP\")) {\n            const double topP = settings.value(g + \"/topP\").toDouble();\n            data.append({ ModelList::TopPRole, topP });\n        }\n        if (settings.contains(g + \"/minP\")) {\n            const double minP = settings.value(g + \"/minP\").toDouble();\n            data.append({ ModelList::MinPRole, minP });\n        }\n        if (settings.contains(g + \"/topK\")) 
{\n            const int topK = settings.value(g + \"/topK\").toInt();\n            data.append({ ModelList::TopKRole, topK });\n        }\n        if (settings.contains(g + \"/maxLength\")) {\n            const int maxLength = settings.value(g + \"/maxLength\").toInt();\n            data.append({ ModelList::MaxLengthRole, maxLength });\n        }\n        if (settings.contains(g + \"/promptBatchSize\")) {\n            const int promptBatchSize = settings.value(g + \"/promptBatchSize\").toInt();\n            data.append({ ModelList::PromptBatchSizeRole, promptBatchSize });\n        }\n        if (settings.contains(g + \"/contextLength\")) {\n            const int contextLength = settings.value(g + \"/contextLength\").toInt();\n            data.append({ ModelList::ContextLengthRole, contextLength });\n        }\n        if (settings.contains(g + \"/gpuLayers\")) {\n            const int gpuLayers = settings.value(g + \"/gpuLayers\").toInt();\n            data.append({ ModelList::GpuLayersRole, gpuLayers });\n        }\n        if (settings.contains(g + \"/repeatPenalty\")) {\n            const double repeatPenalty = settings.value(g + \"/repeatPenalty\").toDouble();\n            data.append({ ModelList::RepeatPenaltyRole, repeatPenalty });\n        }\n        if (settings.contains(g + \"/repeatPenaltyTokens\")) {\n            const int repeatPenaltyTokens = settings.value(g + \"/repeatPenaltyTokens\").toInt();\n            data.append({ ModelList::RepeatPenaltyTokensRole, repeatPenaltyTokens });\n        }\n        if (settings.contains(g + \"/chatNamePrompt\")) {\n            const QString chatNamePrompt = settings.value(g + \"/chatNamePrompt\").toString();\n            data.append({ ModelList::ChatNamePromptRole, chatNamePrompt });\n        }\n        if (settings.contains(g + \"/suggestedFollowUpPrompt\")) {\n            const QString suggestedFollowUpPrompt = settings.value(g + \"/suggestedFollowUpPrompt\").toString();\n            data.append({ 
ModelList::SuggestedFollowUpPromptRole, suggestedFollowUpPrompt });\n        }\n        updateData(id, data);\n    }\n}\n\nint ModelList::discoverLimit() const\n{\n    return m_discoverLimit;\n}\n\nvoid ModelList::setDiscoverLimit(int limit)\n{\n    if (m_discoverLimit == limit)\n        return;\n    m_discoverLimit = limit;\n    emit discoverLimitChanged();\n}\n\nint ModelList::discoverSortDirection() const\n{\n    return m_discoverSortDirection;\n}\n\nvoid ModelList::setDiscoverSortDirection(int direction)\n{\n    if (m_discoverSortDirection == direction || (direction != 1 && direction != -1))\n        return;\n    m_discoverSortDirection = direction;\n    emit discoverSortDirectionChanged();\n    resortModel();\n}\n\nModelList::DiscoverSort ModelList::discoverSort() const\n{\n    return m_discoverSort;\n}\n\nvoid ModelList::setDiscoverSort(DiscoverSort sort)\n{\n    if (m_discoverSort == sort)\n        return;\n    m_discoverSort = sort;\n    emit discoverSortChanged();\n    resortModel();\n}\n\nvoid ModelList::clearDiscoveredModels()\n{\n    // NOTE: This could be made much more efficient\n    QList<ModelInfo> infos;\n    {\n        QMutexLocker locker(&m_mutex);\n        for (ModelInfo *info : m_models)\n            if (info->isDiscovered() && !info->installed)\n                infos.append(*info);\n    }\n    for (ModelInfo &info : infos)\n        removeInternal(info);\n}\n\nfloat ModelList::discoverProgress() const\n{\n    if (!m_discoverNumberOfResults)\n        return 0.0f;\n    return m_discoverResultsCompleted / float(m_discoverNumberOfResults);\n}\n\nbool ModelList::discoverInProgress() const\n{\n    return m_discoverInProgress;\n}\n\nvoid ModelList::discoverSearch(const QString &search)\n{\n    Q_ASSERT(!m_discoverInProgress);\n\n    clearDiscoveredModels();\n\n    m_discoverNumberOfResults = 0;\n    m_discoverResultsCompleted = 0;\n    emit discoverProgressChanged();\n\n    if (search.isEmpty()) {\n        return;\n    }\n\n    m_discoverInProgress = 
true;\n    emit discoverInProgressChanged();\n\n    static const QRegularExpression wsRegex(\"\\\\s+\");\n    QStringList searchParams = search.split(wsRegex); // split by whitespace\n    QString searchString = u\"search=%1&\"_s.arg(searchParams.join('+'));\n    QString limitString = m_discoverLimit > 0 ? u\"limit=%1&\"_s.arg(m_discoverLimit) : QString();\n\n    QString sortString;\n    switch (m_discoverSort) {\n    case Default: break;\n    case Likes:\n        sortString = \"sort=likes&\"; break;\n    case Downloads:\n        sortString = \"sort=downloads&\"; break;\n    case Recent:\n        sortString = \"sort=lastModified&\"; break;\n    }\n\n    QString directionString = !sortString.isEmpty() ? u\"direction=%1&\"_s.arg(m_discoverSortDirection) : QString();\n\n    QUrl hfUrl(u\"https://huggingface.co/api/models?filter=gguf&%1%2%3%4full=true&config=true\"_s\n               .arg(searchString, limitString, sortString, directionString));\n\n    QNetworkRequest request(hfUrl);\n    request.setHeader(QNetworkRequest::ContentTypeHeader, \"application/json\");\n    QNetworkReply *reply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort);\n    connect(reply, &QNetworkReply::finished, this, &ModelList::handleDiscoveryFinished);\n    connect(reply, &QNetworkReply::errorOccurred, this, &ModelList::handleDiscoveryErrorOccurred);\n}\n\nvoid ModelList::handleDiscoveryFinished()\n{\n    QNetworkReply *jsonReply = qobject_cast<QNetworkReply *>(sender());\n    if (!jsonReply)\n        return;\n\n    QByteArray jsonData = jsonReply->readAll();\n    parseDiscoveryJsonFile(jsonData);\n    jsonReply->deleteLater();\n}\n\nvoid ModelList::handleDiscoveryErrorOccurred(QNetworkReply::NetworkError code)\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n    qWarning() << u\"ERROR: Discovery failed with error code \\\"%1-%2\\\"\"_s\n                      
.arg(code).arg(reply->errorString()).toStdString();\n}\n\nenum QuantType {\n    Q4_0 = 0,\n    Q4_1,\n    F16,\n    F32,\n    Unknown\n};\n\nQuantType toQuantType(const QString& filename)\n{\n    QString lowerCaseFilename = filename.toLower();\n    if (lowerCaseFilename.contains(\"q4_0\")) return Q4_0;\n    if (lowerCaseFilename.contains(\"q4_1\")) return Q4_1;\n    if (lowerCaseFilename.contains(\"f16\")) return F16;\n    if (lowerCaseFilename.contains(\"f32\")) return F32;\n    return Unknown;\n}\n\nQString toQuantString(const QString& filename)\n{\n    QString lowerCaseFilename = filename.toLower();\n    if (lowerCaseFilename.contains(\"q4_0\")) return \"q4_0\";\n    if (lowerCaseFilename.contains(\"q4_1\")) return \"q4_1\";\n    if (lowerCaseFilename.contains(\"f16\")) return \"f16\";\n    if (lowerCaseFilename.contains(\"f32\")) return \"f32\";\n    return QString();\n}\n\nvoid ModelList::parseDiscoveryJsonFile(const QByteArray &jsonData)\n{\n    QJsonParseError err;\n    QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);\n    if (err.error != QJsonParseError::NoError) {\n        qWarning() << \"ERROR: Couldn't parse: \" << jsonData << err.errorString();\n        m_discoverNumberOfResults = 0;\n        m_discoverResultsCompleted = 0;\n        emit discoverProgressChanged();\n        m_discoverInProgress = false;\n        emit discoverInProgressChanged();\n        return;\n    }\n\n    QJsonArray jsonArray = document.array();\n\n    for (const QJsonValue &value : jsonArray) {\n        QJsonObject obj = value.toObject();\n        QJsonDocument jsonDocument(obj);\n        QByteArray jsonData = jsonDocument.toJson();\n\n        QString repo_id = obj[\"id\"].toString();\n        QJsonArray siblingsArray = obj[\"siblings\"].toArray();\n        QList<QPair<QuantType, QString>> filteredAndSortedFilenames;\n        for (const QJsonValue &sibling : siblingsArray) {\n\n            QJsonObject s = sibling.toObject();\n            QString filename = 
s[\"rfilename\"].toString();\n            if (!filename.endsWith(\"gguf\"))\n                continue;\n\n            QuantType quant = toQuantType(filename);\n            if (quant != Unknown)\n                filteredAndSortedFilenames.append({ quant, filename });\n        }\n\n        if (filteredAndSortedFilenames.isEmpty())\n            continue;\n\n        std::sort(filteredAndSortedFilenames.begin(), filteredAndSortedFilenames.end(),\n            [](const QPair<QuantType, QString>& a, const QPair<QuantType, QString>& b) {\n            return a.first < b.first;\n        });\n\n        QPair<QuantType, QString> file = filteredAndSortedFilenames.first();\n        QString filename = file.second;\n        ++m_discoverNumberOfResults;\n\n        QUrl url(u\"https://huggingface.co/%1/resolve/main/%2\"_s.arg(repo_id, filename));\n        QNetworkRequest request(url);\n        request.setRawHeader(\"Accept-Encoding\", \"identity\");\n        request.setAttribute(QNetworkRequest::RedirectPolicyAttribute, QNetworkRequest::ManualRedirectPolicy);\n        request.setAttribute(QNetworkRequest::User, jsonData);\n        request.setAttribute(QNetworkRequest::UserMax, filename);\n        QNetworkReply *reply = m_networkManager.head(request);\n        connect(qGuiApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort);\n        connect(reply, &QNetworkReply::finished, this, &ModelList::handleDiscoveryItemFinished);\n        connect(reply, &QNetworkReply::errorOccurred, this, &ModelList::handleDiscoveryItemErrorOccurred);\n    }\n\n    emit discoverProgressChanged();\n    if (!m_discoverNumberOfResults) {\n        m_discoverInProgress = false;\n        emit discoverInProgressChanged();\n    }\n}\n\nvoid ModelList::handleDiscoveryItemFinished()\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n\n    QVariant replyCustomData = reply->request().attribute(QNetworkRequest::User);\n    QByteArray customDataByteArray 
= replyCustomData.toByteArray();\n    QJsonDocument customJsonDocument = QJsonDocument::fromJson(customDataByteArray);\n    QJsonObject obj = customJsonDocument.object();\n\n    QString repo_id = obj[\"id\"].toString();\n    QString modelName = obj[\"modelId\"].toString();\n    QString author = obj[\"author\"].toString();\n    QDateTime lastModified = QDateTime::fromString(obj[\"lastModified\"].toString(), Qt::ISODateWithMs);\n    int likes = obj[\"likes\"].toInt();\n    int downloads = obj[\"downloads\"].toInt();\n    QJsonObject config = obj[\"config\"].toObject();\n    QString type = config[\"model_type\"].toString();\n\n    // QByteArray repoCommitHeader = reply->rawHeader(\"X-Repo-Commit\");\n    QByteArray linkedSizeHeader = reply->rawHeader(\"X-Linked-Size\");\n    QByteArray linkedEtagHeader = reply->rawHeader(\"X-Linked-Etag\");\n    // For some reason these seem to contain quotation marks ewww\n    linkedEtagHeader.replace(\"\\\"\", \"\");\n    linkedEtagHeader.replace(\"\\'\", \"\");\n    // QString locationHeader = reply->header(QNetworkRequest::LocationHeader).toString();\n\n    QString modelFilename = reply->request().attribute(QNetworkRequest::UserMax).toString();\n    QString modelFilesize = ModelList::toFileSize(QString(linkedSizeHeader).toULongLong());\n\n    QString description = tr(\"<strong>Created by %1.</strong><br><ul>\"\n                             \"<li>Published on %2.\"\n                             \"<li>This model has %3 likes.\"\n                             \"<li>This model has %4 downloads.\"\n                             \"<li>More info can be found <a href=\\\"https://huggingface.co/%5\\\">here.</a></ul>\")\n                              .arg(author)\n                              .arg(lastModified.toString(\"ddd MMMM d, yyyy\"))\n                              .arg(likes)\n                              .arg(downloads)\n                              .arg(repo_id);\n\n    const QString id = modelFilename;\n    
Q_ASSERT(!id.isEmpty());\n\n    if (contains(modelFilename))\n        changeId(modelFilename, id);\n\n    if (!contains(id))\n        addModel(id);\n\n    QVector<QPair<int, QVariant>> data {\n        { ModelList::NameRole, modelName },\n        { ModelList::FilenameRole, modelFilename },\n        { ModelList::FilesizeRole, modelFilesize },\n        { ModelList::DescriptionRole, description },\n        { ModelList::IsDiscoveredRole, true },\n        { ModelList::UrlRole, reply->request().url() },\n        { ModelList::LikesRole, likes },\n        { ModelList::DownloadsRole, downloads },\n        { ModelList::RecencyRole, lastModified },\n        { ModelList::QuantRole, toQuantString(modelFilename) },\n        { ModelList::TypeRole, type },\n        { ModelList::HashRole, linkedEtagHeader },\n        { ModelList::HashAlgorithmRole, ModelInfo::Sha256 },\n    };\n    updateData(id, data);\n\n    ++m_discoverResultsCompleted;\n    emit discoverProgressChanged();\n\n    if (discoverProgress() >= 1.0) {\n        m_discoverInProgress = false;\n        emit discoverInProgressChanged();\n    }\n\n    reply->deleteLater();\n}\n\nvoid ModelList::handleDiscoveryItemErrorOccurred(QNetworkReply::NetworkError code)\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n\n    qWarning() << u\"ERROR: Discovery item failed with error code \\\"%1-%2\\\"\"_s\n                      .arg(code).arg(reply->errorString()).toStdString();\n}\n\nQStringList ModelList::remoteModelList(const QString &apiKey, const QUrl &baseUrl)\n{\n    QStringList modelList;\n\n    // Create the request\n    QNetworkRequest request;\n    request.setUrl(baseUrl.resolved(QUrl(\"models\")));\n    request.setHeader(QNetworkRequest::ContentTypeHeader, \"application/json\");\n\n    // Add the Authorization header\n    const QString bearerToken = QString(\"Bearer %1\").arg(apiKey);\n    request.setRawHeader(\"Authorization\", bearerToken.toUtf8());\n\n    // Make 
the GET request\n    QNetworkReply *reply = m_networkManager.get(request);\n\n    // We use a local event loop to wait for the request to complete\n    QEventLoop loop;\n    connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);\n    loop.exec();\n\n    // Check for errors\n    if (reply->error() == QNetworkReply::NoError) {\n        // Parse the JSON response\n        const QByteArray responseData = reply->readAll();\n        const QJsonDocument jsonDoc = QJsonDocument::fromJson(responseData);\n\n        if (!jsonDoc.isNull() && jsonDoc.isObject()) {\n            QJsonObject rootObj = jsonDoc.object();\n            QJsonValue dataValue = rootObj.value(\"data\");\n\n            if (dataValue.isArray()) {\n                QJsonArray dataArray = dataValue.toArray();\n                for (const QJsonValue &val : dataArray) {\n                    if (val.isObject()) {\n                        QJsonObject obj = val.toObject();\n                        const QString modelId = obj.value(\"id\").toString();\n                        modelList.append(modelId);\n                    }\n                }\n            }\n        }\n    } else {\n        // Handle network error (e.g. print it to qDebug)\n        qWarning() << \"Error retrieving models:\" << reply->errorString();\n    }\n\n    // Clean up\n    reply->deleteLater();\n\n    return modelList;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/modellist.h",
    "content": "#ifndef MODELLIST_H\n#define MODELLIST_H\n\n#include <QAbstractListModel>\n#include <QByteArray>\n#include <QDateTime>\n#include <QHash>\n#include <QLatin1StringView> // IWYU pragma: keep\n#include <QList>\n#include <QMutex>\n#include <QNetworkAccessManager>\n#include <QNetworkReply>\n#include <QObject>\n#include <QPair> // IWYU pragma: keep\n#include <QQmlEngine> // IWYU pragma: keep\n#include <QSortFilterProxyModel>\n#include <QSslError>\n#include <QString>\n#include <QVariant>\n#include <QVector> // IWYU pragma: keep\n#include <Qt>\n#include <QtTypes>\n\n#include <optional>\n#include <utility>\n\n// IWYU pragma: no_forward_declare QObject\n// IWYU pragma: no_forward_declare QSslError\nclass QUrl;\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nclass UpgradeableSetting {\n    Q_GADGET\n    QML_ANONYMOUS\n\n    // NOTE: Unset implies there is neither a value nor a default\n    enum class State { Unset, Legacy, Modern };\n\n    Q_PROPERTY(bool     isSet     READ isSet   )\n    Q_PROPERTY(bool     isLegacy  READ isLegacy)\n    Q_PROPERTY(bool     isModern  READ isModern)\n    Q_PROPERTY(QVariant value READ value) // string or null\n\npublic:\n    struct legacy_tag_t { explicit legacy_tag_t() = default; };\n    static inline constexpr legacy_tag_t legacy_tag = legacy_tag_t();\n\n    UpgradeableSetting()                           : m_state(State::Unset ) {}\n    UpgradeableSetting(legacy_tag_t, QString value): m_state(State::Legacy), m_value(std::move(value)) {}\n    UpgradeableSetting(              QString value): m_state(State::Modern), m_value(std::move(value)) {}\n\n    bool     isSet   () const { return m_state != State::Unset;  }\n    bool     isLegacy() const { return m_state == State::Legacy; }\n    bool     isModern() const { return m_state == State::Modern; }\n    QVariant value   () const { return m_state == State::Unset ? 
QVariant::fromValue(nullptr) : m_value; }\n\n    friend bool operator==(const UpgradeableSetting &a, const UpgradeableSetting &b)\n    { return a.m_state == b.m_state && (a.m_state == State::Unset || a.m_value == b.m_value); }\n\n    // returns std::nullopt if there is a legacy template or it is not set\n    std::optional<QString> asModern() const\n    {\n        if (m_state == State::Modern)\n            return m_value;\n        return std::nullopt;\n    }\n\nprivate:\n    State   m_state;\n    QString m_value;\n};\n\nstruct ModelInfo {\n    Q_GADGET\n    Q_PROPERTY(QString id READ id WRITE setId)\n    Q_PROPERTY(QString name READ name WRITE setName)\n    Q_PROPERTY(QString filename READ filename WRITE setFilename)\n    Q_PROPERTY(QString dirpath MEMBER dirpath)\n    Q_PROPERTY(QString filesize MEMBER filesize)\n    Q_PROPERTY(QByteArray hash MEMBER hash)\n    Q_PROPERTY(HashAlgorithm hashAlgorithm MEMBER hashAlgorithm)\n    Q_PROPERTY(bool calcHash MEMBER calcHash)\n    Q_PROPERTY(bool installed MEMBER installed)\n    Q_PROPERTY(bool isDefault MEMBER isDefault)\n    Q_PROPERTY(bool isOnline MEMBER isOnline)\n    Q_PROPERTY(bool isCompatibleApi MEMBER isCompatibleApi)\n    Q_PROPERTY(QString description READ description WRITE setDescription)\n    Q_PROPERTY(QString requiresVersion MEMBER requiresVersion)\n    Q_PROPERTY(QString versionRemoved MEMBER versionRemoved)\n    Q_PROPERTY(QString url READ url WRITE setUrl)\n    Q_PROPERTY(qint64 bytesReceived MEMBER bytesReceived)\n    Q_PROPERTY(qint64 bytesTotal MEMBER bytesTotal)\n    Q_PROPERTY(qint64 timestamp MEMBER timestamp)\n    Q_PROPERTY(QString speed MEMBER speed)\n    Q_PROPERTY(bool isDownloading MEMBER isDownloading)\n    Q_PROPERTY(bool isIncomplete MEMBER isIncomplete)\n    Q_PROPERTY(QString downloadError MEMBER downloadError)\n    Q_PROPERTY(QString order MEMBER order)\n    Q_PROPERTY(int ramrequired MEMBER ramrequired)\n    Q_PROPERTY(QString parameters MEMBER parameters)\n    Q_PROPERTY(QString quant 
READ quant WRITE setQuant)\n    Q_PROPERTY(QString type READ type WRITE setType)\n    Q_PROPERTY(bool isClone READ isClone WRITE setIsClone)\n    Q_PROPERTY(bool isDiscovered READ isDiscovered WRITE setIsDiscovered)\n    Q_PROPERTY(bool isEmbeddingModel MEMBER isEmbeddingModel)\n    Q_PROPERTY(double temperature READ temperature WRITE setTemperature)\n    Q_PROPERTY(double topP READ topP WRITE setTopP)\n    Q_PROPERTY(double minP READ minP WRITE setMinP)\n    Q_PROPERTY(int topK READ topK WRITE setTopK)\n    Q_PROPERTY(int maxLength READ maxLength WRITE setMaxLength)\n    Q_PROPERTY(int promptBatchSize READ promptBatchSize WRITE setPromptBatchSize)\n    Q_PROPERTY(int contextLength READ contextLength WRITE setContextLength)\n    Q_PROPERTY(int maxContextLength READ maxContextLength)\n    Q_PROPERTY(int gpuLayers READ gpuLayers WRITE setGpuLayers)\n    Q_PROPERTY(int maxGpuLayers READ maxGpuLayers)\n    Q_PROPERTY(double repeatPenalty READ repeatPenalty WRITE setRepeatPenalty)\n    Q_PROPERTY(int repeatPenaltyTokens READ repeatPenaltyTokens WRITE setRepeatPenaltyTokens)\n    // user-defined chat template and system message must be written through settings because of their legacy compat\n    Q_PROPERTY(QVariant           defaultChatTemplate  READ defaultChatTemplate )\n    Q_PROPERTY(UpgradeableSetting chatTemplate         READ chatTemplate        )\n    Q_PROPERTY(QString            defaultSystemMessage READ defaultSystemMessage)\n    Q_PROPERTY(UpgradeableSetting systemMessage        READ systemMessage       )\n    Q_PROPERTY(QString chatNamePrompt READ chatNamePrompt WRITE setChatNamePrompt)\n    Q_PROPERTY(QString suggestedFollowUpPrompt READ suggestedFollowUpPrompt WRITE setSuggestedFollowUpPrompt)\n    Q_PROPERTY(int likes READ likes WRITE setLikes)\n    Q_PROPERTY(int downloads READ downloads WRITE setDownloads)\n    Q_PROPERTY(QDateTime recency READ recency WRITE setRecency)\n\npublic:\n    enum HashAlgorithm {\n        Md5,\n        Sha256\n    };\n\n    
QString id() const;\n    void setId(const QString &id);\n\n    QString name() const;\n    void setName(const QString &name);\n\n    QString filename() const;\n    void setFilename(const QString &name);\n\n    QString description() const;\n    void setDescription(const QString &d);\n\n    QString url() const;\n    void setUrl(const QString &u);\n\n    QString quant() const;\n    void setQuant(const QString &q);\n\n    QString type() const;\n    void setType(const QString &t);\n\n    bool isClone() const;\n    void setIsClone(bool b);\n\n    bool isDiscovered() const;\n    void setIsDiscovered(bool b);\n\n    int likes() const;\n    void setLikes(int l);\n\n    int downloads() const;\n    void setDownloads(int d);\n\n    QDateTime recency() const;\n    void setRecency(const QDateTime &r);\n\n    QString dirpath;\n    QString filesize;\n    QByteArray hash;\n    HashAlgorithm hashAlgorithm;\n    bool calcHash = false;\n    bool installed = false;\n    bool isDefault = false;\n    // Differences between 'isOnline' and 'isCompatibleApi' in ModelInfo:\n    // 'isOnline':\n    // - Indicates whether this is a online model.\n    // - Linked with the ModelList, fetching info from it.\n    bool isOnline = false;\n    // 'isCompatibleApi':\n    // - Indicates whether the model is using the OpenAI-compatible API which user custom.\n    // - When the property is true, 'isOnline' should also be true.\n    // - Does not link to the ModelList directly; instead, fetches info from the *-capi.rmodel file and works standalone.\n    // - Still needs to copy data from gpt4all.ini and *-capi.rmodel to the ModelList in memory while application getting started(as custom .gguf models do).\n    bool isCompatibleApi = false;\n    QString requiresVersion;\n    QString versionRemoved;\n    qint64 bytesReceived = 0;\n    qint64 bytesTotal = 0;\n    qint64 timestamp = 0;\n    QString speed;\n    bool isDownloading = false;\n    bool isIncomplete = false;\n    QString downloadError;\n    QString 
order;\n    int ramrequired = -1;\n    QString parameters;\n    bool isEmbeddingModel = false;\n    bool checkedEmbeddingModel = false;\n\n    bool operator==(const ModelInfo &other) const {\n        return  m_id == other.m_id;\n    }\n\n    double temperature() const;\n    void setTemperature(double t);\n    double topP() const;\n    void setTopP(double p);\n    double minP() const;\n    void setMinP(double p);\n    int topK() const;\n    void setTopK(int k);\n    int maxLength() const;\n    void setMaxLength(int l);\n    int promptBatchSize() const;\n    void setPromptBatchSize(int s);\n    int contextLength() const;\n    void setContextLength(int l);\n    int maxContextLength() const;\n    int gpuLayers() const;\n    void setGpuLayers(int l);\n    int maxGpuLayers() const;\n    double repeatPenalty() const;\n    void setRepeatPenalty(double p);\n    int repeatPenaltyTokens() const;\n    void setRepeatPenaltyTokens(int t);\n    QVariant defaultChatTemplate() const;\n    UpgradeableSetting chatTemplate() const;\n    QString defaultSystemMessage() const;\n    UpgradeableSetting systemMessage() const;\n    QString chatNamePrompt() const;\n    void setChatNamePrompt(const QString &p);\n    QString suggestedFollowUpPrompt() const;\n    void setSuggestedFollowUpPrompt(const QString &p);\n\n    // Some metadata must be saved to settings because it does not have a meaningful default from some other source.\n    // This is useful for fields such as name, description, and URL.\n    // It is true for any models that have not been installed from models.json.\n    bool shouldSaveMetadata() const;\n\nprivate:\n    QVariant getField(QLatin1StringView name) const;\n\n    QString m_id;\n    QString m_name;\n    QString m_filename;\n    QString m_description;\n    QString m_url;\n    QString m_quant;\n    QString m_type;\n    bool    m_isClone                 = false;\n    bool    m_isDiscovered            = false;\n    int     m_likes                   = -1;\n    int     
m_downloads               = -1;\n    QDateTime m_recency;\n    double  m_temperature             = 0.7;\n    double  m_topP                    = 0.4;\n    double  m_minP                    = 0.0;\n    int     m_topK                    = 40;\n    int     m_maxLength               = 4096;\n    int     m_promptBatchSize         = 128;\n    int     m_contextLength           = 2048;\n    mutable int m_maxContextLength    = -1;\n    int     m_gpuLayers               = 100;\n    mutable int m_maxGpuLayers        = -1;\n    double  m_repeatPenalty           = 1.18;\n    int     m_repeatPenaltyTokens     = 64;\n            std::optional<QString> m_chatTemplate;\n    mutable std::optional<QString> m_modelChatTemplate;\n    QString m_systemMessage;\n    QString m_chatNamePrompt          = \"Describe the above conversation. Your entire response must be three words or less.\";\n    QString m_suggestedFollowUpPrompt = \"Suggest three very short factual follow-up questions that have not been answered yet or cannot be found inspired by the previous conversation and excerpts.\";\n    friend class MySettings;\n    friend class ModelList;\n};\nQ_DECLARE_METATYPE(ModelInfo)\n\nclass InstalledModels : public QSortFilterProxyModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\npublic:\n    explicit InstalledModels(QObject *parent, bool selectable = false);\n    int count() const { return rowCount(); }\n\nQ_SIGNALS:\n    void countChanged();\n\nprotected:\n    bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;\n\nprivate:\n    bool m_selectable;\n};\n\nclass GPT4AllDownloadableModels : public QSortFilterProxyModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\npublic:\n    explicit GPT4AllDownloadableModels(QObject *parent);\n    int count() const;\n\n    Q_INVOKABLE void filter(const QVector<QString> &keywords);\n\nQ_SIGNALS:\n    void countChanged();\n\nprotected:\n    bool filterAcceptsRow(int 
sourceRow, const QModelIndex &sourceParent) const override;\n\nprivate:\n    QVector<QString> m_keywords;\n};\n\nclass HuggingFaceDownloadableModels : public QSortFilterProxyModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\npublic:\n    explicit HuggingFaceDownloadableModels(QObject *parent);\n    int count() const;\n\n    Q_INVOKABLE void discoverAndFilter(const QString &discover);\n\nQ_SIGNALS:\n    void countChanged();\n\nprotected:\n    bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;\n\nprivate:\n    int m_limit;\n    QString m_discoverFilter;\n};\n\nclass ModelList : public QAbstractListModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\n    Q_PROPERTY(InstalledModels* installedModels READ installedModels NOTIFY installedModelsChanged)\n    Q_PROPERTY(InstalledModels* selectableModels READ selectableModels NOTIFY selectableModelsChanged)\n    Q_PROPERTY(GPT4AllDownloadableModels* gpt4AllDownloadableModels READ gpt4AllDownloadableModels CONSTANT)\n    Q_PROPERTY(HuggingFaceDownloadableModels* huggingFaceDownloadableModels READ huggingFaceDownloadableModels CONSTANT)\n    Q_PROPERTY(QList<ModelInfo> selectableModelList READ selectableModelList NOTIFY  selectableModelListChanged)\n    Q_PROPERTY(bool asyncModelRequestOngoing READ asyncModelRequestOngoing NOTIFY asyncModelRequestOngoingChanged)\n    Q_PROPERTY(int discoverLimit READ discoverLimit WRITE setDiscoverLimit NOTIFY discoverLimitChanged)\n    Q_PROPERTY(int discoverSortDirection READ discoverSortDirection WRITE setDiscoverSortDirection NOTIFY discoverSortDirectionChanged)\n    Q_PROPERTY(DiscoverSort discoverSort READ discoverSort WRITE setDiscoverSort NOTIFY discoverSortChanged)\n    Q_PROPERTY(float discoverProgress READ discoverProgress NOTIFY discoverProgressChanged)\n    Q_PROPERTY(bool discoverInProgress READ discoverInProgress NOTIFY discoverInProgressChanged)\n\npublic:\n    static ModelList 
*globalInstance();\n\n    static QString compatibleModelNameHash(QUrl baseUrl, QString modelName);\n    static QString compatibleModelFilename(QUrl baseUrl, QString modelName);\n\n    enum DiscoverSort {\n        Default,\n        Likes,\n        Downloads,\n        Recent\n    };\n\n    enum Roles {\n        IdRole = Qt::UserRole + 1,\n        NameRole,\n        FilenameRole,\n        DirpathRole,\n        FilesizeRole,\n        HashRole,\n        HashAlgorithmRole,\n        CalcHashRole,\n        InstalledRole,\n        DefaultRole,\n        OnlineRole,\n        CompatibleApiRole,\n        DescriptionRole,\n        RequiresVersionRole,\n        VersionRemovedRole,\n        UrlRole,\n        BytesReceivedRole,\n        BytesTotalRole,\n        TimestampRole,\n        SpeedRole,\n        DownloadingRole,\n        IncompleteRole,\n        DownloadErrorRole,\n        OrderRole,\n        RamrequiredRole,\n        ParametersRole,\n        QuantRole,\n        TypeRole,\n        IsCloneRole,\n        IsDiscoveredRole,\n        IsEmbeddingModelRole,\n        TemperatureRole,\n        TopPRole,\n        TopKRole,\n        MaxLengthRole,\n        PromptBatchSizeRole,\n        ContextLengthRole,\n        GpuLayersRole,\n        RepeatPenaltyRole,\n        RepeatPenaltyTokensRole,\n        ChatTemplateRole,\n        SystemMessageRole,\n        ChatNamePromptRole,\n        SuggestedFollowUpPromptRole,\n        MinPRole,\n        LikesRole,\n        DownloadsRole,\n        RecencyRole\n    };\n\n    QHash<int, QByteArray> roleNames() const override\n    {\n        QHash<int, QByteArray> roles;\n        roles[IdRole] = \"id\";\n        roles[NameRole] = \"name\";\n        roles[FilenameRole] = \"filename\";\n        roles[DirpathRole] = \"dirpath\";\n        roles[FilesizeRole] = \"filesize\";\n        roles[HashRole] = \"hash\";\n        roles[HashAlgorithmRole] = \"hashAlgorithm\";\n        roles[CalcHashRole] = \"calcHash\";\n        roles[InstalledRole] = \"installed\";\n    
    roles[DefaultRole] = \"isDefault\";\n        roles[OnlineRole] = \"isOnline\";\n        roles[CompatibleApiRole] = \"isCompatibleApi\";\n        roles[DescriptionRole] = \"description\";\n        roles[RequiresVersionRole] = \"requiresVersion\";\n        roles[VersionRemovedRole] = \"versionRemoved\";\n        roles[UrlRole] = \"url\";\n        roles[BytesReceivedRole] = \"bytesReceived\";\n        roles[BytesTotalRole] = \"bytesTotal\";\n        roles[TimestampRole] = \"timestamp\";\n        roles[SpeedRole] = \"speed\";\n        roles[DownloadingRole] = \"isDownloading\";\n        roles[IncompleteRole] = \"isIncomplete\";\n        roles[DownloadErrorRole] = \"downloadError\";\n        roles[OrderRole] = \"order\";\n        roles[RamrequiredRole] = \"ramrequired\";\n        roles[ParametersRole] = \"parameters\";\n        roles[QuantRole] = \"quant\";\n        roles[TypeRole] = \"type\";\n        roles[IsCloneRole] = \"isClone\";\n        roles[IsDiscoveredRole] = \"isDiscovered\";\n        roles[IsEmbeddingModelRole] = \"isEmbeddingModel\";\n        roles[TemperatureRole] = \"temperature\";\n        roles[TopPRole] = \"topP\";\n        roles[MinPRole] = \"minP\";\n        roles[TopKRole] = \"topK\";\n        roles[MaxLengthRole] = \"maxLength\";\n        roles[PromptBatchSizeRole] = \"promptBatchSize\";\n        roles[ContextLengthRole] = \"contextLength\";\n        roles[GpuLayersRole] = \"gpuLayers\";\n        roles[RepeatPenaltyRole] = \"repeatPenalty\";\n        roles[RepeatPenaltyTokensRole] = \"repeatPenaltyTokens\";\n        roles[ChatTemplateRole] = \"chatTemplate\";\n        roles[SystemMessageRole] = \"systemMessage\";\n        roles[ChatNamePromptRole] = \"chatNamePrompt\";\n        roles[SuggestedFollowUpPromptRole] = \"suggestedFollowUpPrompt\";\n        roles[LikesRole] = \"likes\";\n        roles[DownloadsRole] = \"downloads\";\n        roles[RecencyRole] = \"recency\";\n        return roles;\n    }\n\n    int rowCount(const QModelIndex &parent 
= QModelIndex()) const override;\n    QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override;\n    QVariant data(const QString &id, int role) const;\n    QVariant dataByFilename(const QString &filename, int role) const;\n    void updateDataByFilename(const QString &filename, QVector<QPair<int, QVariant>> data);\n    void updateData(const QString &id, const QVector<QPair<int, QVariant>> &data);\n\n    int count() const { return m_models.size(); }\n\n    bool contains(const QString &id) const;\n    bool containsByFilename(const QString &filename) const;\n    Q_INVOKABLE ModelInfo modelInfo(const QString &id) const;\n    Q_INVOKABLE ModelInfo modelInfoByFilename(const QString &filename, bool allowClone = true) const;\n    Q_INVOKABLE bool isUniqueName(const QString &name) const;\n    Q_INVOKABLE QString clone(const ModelInfo &model);\n    Q_INVOKABLE void removeClone(const ModelInfo &model);\n    Q_INVOKABLE void removeInstalled(const ModelInfo &model);\n    ModelInfo defaultModelInfo() const;\n\n    void addModel(const QString &id);\n    void changeId(const QString &oldId, const QString &newId);\n\n    const QList<ModelInfo> selectableModelList() const;\n\n    InstalledModels *installedModels() const { return m_installedModels; }\n    InstalledModels *selectableModels() const { return m_selectableModels; }\n    GPT4AllDownloadableModels *gpt4AllDownloadableModels() const { return m_gpt4AllDownloadableModels; }\n    HuggingFaceDownloadableModels *huggingFaceDownloadableModels() const { return m_huggingFaceDownloadableModels; }\n\n    static inline QString toFileSize(quint64 sz) {\n        if (sz < 1024) {\n            return u\"%1 bytes\"_s.arg(sz);\n        } else if (sz < 1024 * 1024) {\n            return u\"%1 KB\"_s.arg(qreal(sz) / 1024, 0, 'g', 3);\n        } else if (sz < 1024 * 1024 * 1024) {\n            return u\"%1 MB\"_s.arg(qreal(sz) / (1024 * 1024), 0, 'g', 3);\n        } else {\n            return u\"%1 GB\"_s.arg(qreal(sz) / 
(1024 * 1024 * 1024), 0, 'g', 3);\n        }\n    }\n\n    QString incompleteDownloadPath(const QString &modelFile);\n    bool asyncModelRequestOngoing() const { return m_asyncModelRequestOngoing; }\n\n    void updateModelsFromDirectory();\n    void updateDiscoveredInstalled(const ModelInfo &info);\n\n    int discoverLimit() const;\n    void setDiscoverLimit(int limit);\n\n    int discoverSortDirection() const;\n    void setDiscoverSortDirection(int direction); // -1 or 1\n\n    DiscoverSort discoverSort() const;\n    void setDiscoverSort(DiscoverSort sort);\n\n    float discoverProgress() const;\n    bool discoverInProgress() const;\n\n    Q_INVOKABLE void discoverSearch(const QString &discover);\n\n    Q_INVOKABLE QStringList remoteModelList(const QString &apiKey, const QUrl &baseUrl);\n\nQ_SIGNALS:\n    void countChanged();\n    void installedModelsChanged();\n    void selectableModelsChanged();\n    void selectableModelListChanged();\n    void asyncModelRequestOngoingChanged();\n    void discoverLimitChanged();\n    void discoverSortDirectionChanged();\n    void discoverSortChanged();\n    void discoverProgressChanged();\n    void discoverInProgressChanged();\n    void modelInfoChanged(const ModelInfo &info);\n\nprotected:\n    bool eventFilter(QObject *obj, QEvent *ev) override;\n\nprivate Q_SLOTS:\n    void onDataChanged(const QModelIndex &topLeft, const QModelIndex &bottomRight, const QList<int> &roles);\n    void resortModel();\n    void updateModelsFromJson();\n    void updateModelsFromJsonAsync();\n    void updateModelsFromSettings();\n    void maybeUpdateDataForSettings(const ModelInfo &info, bool fromInfo);\n    void updateDataForSettings();\n    void handleModelsJsonDownloadFinished();\n    void handleModelsJsonDownloadErrorOccurred(QNetworkReply::NetworkError code);\n    void handleDiscoveryFinished();\n    void handleDiscoveryErrorOccurred(QNetworkReply::NetworkError code);\n    void handleDiscoveryItemFinished();\n    void 
handleDiscoveryItemErrorOccurred(QNetworkReply::NetworkError code);\n    void handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors);\n\nprivate:\n    // Return the index of the model with the given id, or -1 if not found.\n    int indexByModelId(const QString &id) const;\n\n    void removeInternal(const ModelInfo &model);\n    void clearDiscoveredModels();\n    bool modelExists(const QString &fileName) const;\n    int indexForModel(ModelInfo *model);\n    QVariant dataInternal(const ModelInfo *info, int role) const;\n    static bool lessThan(const ModelInfo* a, const ModelInfo* b, DiscoverSort s, int d);\n    void parseModelsJsonFile(const QByteArray &jsonData, bool save);\n    void parseDiscoveryJsonFile(const QByteArray &jsonData);\n    QString uniqueModelName(const ModelInfo &model) const;\n    void updateOldRemoteModels(const QString &path);\n    void processModelDirectory(const QString &path);\n\nprivate:\n    mutable QMutex m_mutex;\n    QNetworkAccessManager m_networkManager;\n    InstalledModels *m_installedModels;\n    InstalledModels *m_selectableModels;\n    GPT4AllDownloadableModels *m_gpt4AllDownloadableModels;\n    HuggingFaceDownloadableModels *m_huggingFaceDownloadableModels;\n    QList<ModelInfo*> m_models;\n    QHash<QString, ModelInfo*> m_modelMap;\n    bool m_asyncModelRequestOngoing;\n    int m_discoverLimit;\n    int m_discoverSortDirection;\n    DiscoverSort m_discoverSort;\n    int m_discoverNumberOfResults;\n    int m_discoverResultsCompleted;\n    bool m_discoverInProgress;\n\nprotected:\n    explicit ModelList();\n    ~ModelList() override { for (auto *model: std::as_const(m_models)) { delete model; } }\n    friend class MyModelList;\n};\n\n#endif // MODELLIST_H\n"
  },
  {
    "path": "gpt4all-chat/src/mysettings.cpp",
    "content": "#include \"mysettings.h\"\n\n#include \"chatllm.h\"\n#include \"modellist.h\"\n\n#include <gpt4all-backend/llmodel.h>\n\n#include <QDebug>\n#include <QDir>\n#include <QFile>\n#include <QFileInfo>\n#include <QGlobalStatic>\n#include <QGuiApplication>\n#include <QIODevice> // IWYU pragma: keep\n#include <QMap>\n#include <QMetaObject>\n#include <QStandardPaths>\n#include <QThread>\n#include <QUrl>\n#include <QtLogging>\n#include <QtAssert>\n\n#include <algorithm>\n#include <string>\n#include <thread>\n#include <vector>\n\n#if !(defined(Q_OS_MAC) && defined(__aarch64__))\n    #include <cstring>\n#endif\n\nusing namespace Qt::Literals::StringLiterals;\n\n\n// used only for settings serialization, do not translate\nstatic const QStringList suggestionModeNames { \"LocalDocsOnly\", \"On\", \"Off\" };\nstatic const QStringList chatThemeNames      { \"Light\", \"Dark\", \"LegacyDark\" };\nstatic const QStringList fontSizeNames       { \"Small\", \"Medium\", \"Large\" };\n\n// psuedo-enum\nnamespace ModelSettingsKey { namespace {\n    auto ChatTemplate   = \"chatTemplate\"_L1;\n    auto PromptTemplate = \"promptTemplate\"_L1; // legacy\n    auto SystemMessage  = \"systemMessage\"_L1;\n    auto SystemPrompt   = \"systemPrompt\"_L1;   // legacy\n} } // namespace ModelSettingsKey::(anonymous)\n\nnamespace defaults {\n\nstatic const int     threadCount             = std::min(4, (int32_t) std::thread::hardware_concurrency());\nstatic const bool    forceMetal              = false;\nstatic const bool    networkIsActive         = false;\nstatic const bool    networkUsageStatsActive = false;\nstatic const QString device                  = \"Auto\";\nstatic const QString languageAndLocale       = \"System Locale\";\n\n} // namespace defaults\n\nstatic const QVariantMap basicDefaults {\n    { \"chatTheme\",                QVariant::fromValue(ChatTheme::Light) },\n    { \"fontSize\",                 QVariant::fromValue(FontSize::Small) },\n    { \"lastVersionStarted\",    
   \"\" },\n    { \"networkPort\",              4891, },\n    { \"systemTray\",               false },\n    { \"serverChat\",               false },\n    { \"userDefaultModel\",         \"Application default\" },\n    { \"suggestionMode\",           QVariant::fromValue(SuggestionMode::LocalDocsOnly) },\n    { \"localdocs/chunkSize\",      512 },\n    { \"localdocs/retrievalSize\",  3 },\n    { \"localdocs/showReferences\", true },\n    { \"localdocs/fileExtensions\", QStringList { \"docx\", \"pdf\", \"txt\", \"md\", \"rst\" } },\n    { \"localdocs/useRemoteEmbed\", false },\n    { \"localdocs/nomicAPIKey\",    \"\" },\n    { \"localdocs/embedDevice\",    \"Auto\" },\n    { \"network/attribution\",      \"\" },\n};\n\nstatic QString defaultLocalModelsPath()\n{\n    QString localPath = QStandardPaths::writableLocation(QStandardPaths::AppLocalDataLocation)\n        + \"/\";\n    QString testWritePath = localPath + u\"test_write.txt\"_s;\n    QString canonicalLocalPath = QFileInfo(localPath).canonicalFilePath() + \"/\";\n    QDir localDir(localPath);\n    if (!localDir.exists()) {\n        if (!localDir.mkpath(localPath)) {\n            qWarning() << \"ERROR: Local download directory can't be created:\" << canonicalLocalPath;\n            return canonicalLocalPath;\n        }\n    }\n\n    if (QFileInfo::exists(testWritePath))\n        return canonicalLocalPath;\n\n    QFile testWriteFile(testWritePath);\n    if (testWriteFile.open(QIODeviceBase::ReadWrite)) {\n        testWriteFile.close();\n        return canonicalLocalPath;\n    }\n\n    qWarning() << \"ERROR: Local download path appears not writeable:\" << canonicalLocalPath;\n    return canonicalLocalPath;\n}\n\nstatic QStringList getDevices(bool skipKompute = false)\n{\n    QStringList deviceList;\n#if defined(Q_OS_MAC) && defined(__aarch64__)\n    deviceList << \"Metal\";\n#else\n    std::vector<LLModel::GPUDevice> devices = LLModel::Implementation::availableGPUDevices();\n    for (LLModel::GPUDevice &d : 
devices) {\n        if (!skipKompute || strcmp(d.backend, \"kompute\"))\n            deviceList << QString::fromStdString(d.selectionName());\n    }\n#endif\n    deviceList << \"CPU\";\n    return deviceList;\n}\n\nstatic QString getUiLanguage(const QString directory, const QString fileName)\n{\n    QTranslator translator;\n    const QString filePath = directory + QDir::separator() + fileName;\n    if (translator.load(filePath)) {\n        const QString lang = fileName.mid(fileName.indexOf('_') + 1,\n            fileName.lastIndexOf('.') - fileName.indexOf('_') - 1);\n        return lang;\n    }\n\n    qDebug() << \"ERROR: Failed to load translation file:\" << filePath;\n    return QString();\n}\n\nstatic QStringList getUiLanguages(const QString &modelPath)\n{\n    QStringList languageList;\n    static const QStringList releasedLanguages = { \"en_US\", \"it_IT\", \"zh_CN\", \"zh_TW\", \"es_MX\", \"pt_BR\", \"ro_RO\" };\n\n    // Add the language translations from model path files first which is used by translation developers\n    // to load translations in progress without having to rebuild all of GPT4All from source\n    {\n        const QDir dir(modelPath);\n        const QStringList qmFiles = dir.entryList({\"*.qm\"}, QDir::Files);\n        for (const QString &fileName : qmFiles)\n            languageList << getUiLanguage(modelPath, fileName);\n    }\n\n    // Now add the internal language translations\n    {\n        const QDir dir(\":/i18n\");\n        const QStringList qmFiles = dir.entryList({\"*.qm\"}, QDir::Files);\n        for (const QString &fileName : qmFiles) {\n            const QString lang = getUiLanguage(\":/i18n\", fileName);\n            if (!languageList.contains(lang) && releasedLanguages.contains(lang))\n                languageList.append(lang);\n        }\n    }\n    return languageList;\n}\n\nstatic QString modelSettingName(const ModelInfo &info, auto &&name)\n{\n    return u\"model-%1/%2\"_s.arg(info.id(), name);\n}\n\nclass 
MyPrivateSettings: public MySettings { };\nQ_GLOBAL_STATIC(MyPrivateSettings, settingsInstance)\nMySettings *MySettings::globalInstance()\n{\n    return settingsInstance();\n}\n\nMySettings::MySettings()\n    : QObject(nullptr)\n    , m_deviceList(getDevices())\n    , m_embeddingsDeviceList(getDevices(/*skipKompute*/ true))\n    , m_uiLanguages(getUiLanguages(modelPath()))\n{\n}\n\nQVariant MySettings::checkJinjaTemplateError(const QString &tmpl)\n{\n    if (auto err = ChatLLM::checkJinjaTemplateError(tmpl.toStdString()))\n        return QString::fromStdString(*err);\n    return QVariant::fromValue(nullptr);\n}\n\n// Unset settings come from ModelInfo. Listen for changes so we can emit our own setting-specific signals.\nvoid MySettings::onModelInfoChanged(const QModelIndex &topLeft, const QModelIndex &bottomRight, const QList<int> &roles)\n{\n    auto settingChanged = [&](const auto &info, auto role, const auto &name) {\n        return (roles.isEmpty() || roles.contains(role)) && !m_settings.contains(modelSettingName(info, name));\n    };\n\n    auto &modelList = dynamic_cast<const ModelList &>(*QObject::sender());\n    for (int row = topLeft.row(); row <= bottomRight.row(); row++) {\n        using enum ModelList::Roles;\n        using namespace ModelSettingsKey;\n        auto index = topLeft.siblingAtRow(row);\n        if (auto info = modelList.modelInfo(index.data(IdRole).toString()); !info.id().isNull()) {\n            if (settingChanged(info, ChatTemplateRole, ChatTemplate))\n                emit chatTemplateChanged(info, /*fromInfo*/ true);\n            if (settingChanged(info, SystemMessageRole, SystemMessage))\n                emit systemMessageChanged(info, /*fromInfo*/ true);\n        }\n    }\n}\n\nQVariant MySettings::getBasicSetting(const QString &name) const\n{\n    return m_settings.value(name, basicDefaults.value(name));\n}\n\nvoid MySettings::setBasicSetting(const QString &name, const QVariant &value, std::optional<QString> signal)\n{\n    if 
(getBasicSetting(name) == value)\n        return;\n\n    m_settings.setValue(name, value);\n    QMetaObject::invokeMethod(this, u\"%1Changed\"_s.arg(signal.value_or(name)).toLatin1().constData());\n}\n\nint MySettings::getEnumSetting(const QString &setting, const QStringList &valueNames) const\n{\n    int idx = valueNames.indexOf(getBasicSetting(setting).toString());\n    return idx != -1 ? idx : *reinterpret_cast<const int *>(basicDefaults.value(setting).constData());\n}\n\nvoid MySettings::restoreModelDefaults(const ModelInfo &info)\n{\n    setModelTemperature(info, info.m_temperature);\n    setModelTopP(info, info.m_topP);\n    setModelMinP(info, info.m_minP);\n    setModelTopK(info, info.m_topK);\n    setModelMaxLength(info, info.m_maxLength);\n    setModelPromptBatchSize(info, info.m_promptBatchSize);\n    setModelContextLength(info, info.m_contextLength);\n    setModelGpuLayers(info, info.m_gpuLayers);\n    setModelRepeatPenalty(info, info.m_repeatPenalty);\n    setModelRepeatPenaltyTokens(info, info.m_repeatPenaltyTokens);\n    resetModelChatTemplate (info);\n    resetModelSystemMessage(info);\n    setModelChatNamePrompt(info, info.m_chatNamePrompt);\n    setModelSuggestedFollowUpPrompt(info, info.m_suggestedFollowUpPrompt);\n}\n\nvoid MySettings::restoreApplicationDefaults()\n{\n    setChatTheme(basicDefaults.value(\"chatTheme\").value<ChatTheme>());\n    setFontSize(basicDefaults.value(\"fontSize\").value<FontSize>());\n    setDevice(defaults::device);\n    setThreadCount(defaults::threadCount);\n    setSystemTray(basicDefaults.value(\"systemTray\").toBool());\n    setServerChat(basicDefaults.value(\"serverChat\").toBool());\n    setNetworkPort(basicDefaults.value(\"networkPort\").toInt());\n    setModelPath(defaultLocalModelsPath());\n    setUserDefaultModel(basicDefaults.value(\"userDefaultModel\").toString());\n    setForceMetal(defaults::forceMetal);\n    setSuggestionMode(basicDefaults.value(\"suggestionMode\").value<SuggestionMode>());\n    
setLanguageAndLocale(defaults::languageAndLocale);\n}\n\nvoid MySettings::restoreLocalDocsDefaults()\n{\n    setLocalDocsChunkSize(basicDefaults.value(\"localdocs/chunkSize\").toInt());\n    setLocalDocsRetrievalSize(basicDefaults.value(\"localdocs/retrievalSize\").toInt());\n    setLocalDocsShowReferences(basicDefaults.value(\"localdocs/showReferences\").toBool());\n    setLocalDocsFileExtensions(basicDefaults.value(\"localdocs/fileExtensions\").toStringList());\n    setLocalDocsUseRemoteEmbed(basicDefaults.value(\"localdocs/useRemoteEmbed\").toBool());\n    setLocalDocsNomicAPIKey(basicDefaults.value(\"localdocs/nomicAPIKey\").toString());\n    setLocalDocsEmbedDevice(basicDefaults.value(\"localdocs/embedDevice\").toString());\n}\n\nvoid MySettings::eraseModel(const ModelInfo &info)\n{\n    m_settings.remove(u\"model-%1\"_s.arg(info.id()));\n}\n\nQString MySettings::modelName(const ModelInfo &info) const\n{\n    return m_settings.value(u\"model-%1/name\"_s.arg(info.id()),\n        !info.m_name.isEmpty() ? 
info.m_name : info.m_filename).toString();\n}\n\nvoid MySettings::setModelName(const ModelInfo &info, const QString &value, bool force)\n{\n    if ((modelName(info) == value || info.id().isEmpty()) && !force)\n        return;\n\n    if ((info.m_name == value || info.m_filename == value) && !info.shouldSaveMetadata())\n        m_settings.remove(u\"model-%1/name\"_s.arg(info.id()));\n    else\n        m_settings.setValue(u\"model-%1/name\"_s.arg(info.id()), value);\n    if (!force)\n        emit nameChanged(info);\n}\n\nQVariant MySettings::getModelSetting(QLatin1StringView name, const ModelInfo &info) const\n{\n    QLatin1StringView nameL1(name);\n    return m_settings.value(modelSettingName(info, nameL1), info.getField(nameL1));\n}\n\nQVariant MySettings::getModelSetting(const char *name, const ModelInfo &info) const\n{\n    return getModelSetting(QLatin1StringView(name), info);\n}\n\nvoid MySettings::setModelSetting(QLatin1StringView name, const ModelInfo &info, const QVariant &value, bool force,\n                                 bool signal)\n{\n    if (!force && (info.id().isEmpty() || getModelSetting(name, info) == value))\n        return;\n\n    QLatin1StringView nameL1(name);\n    QString settingName = modelSettingName(info, nameL1);\n    if (info.getField(nameL1) == value && !info.shouldSaveMetadata())\n        m_settings.remove(settingName);\n    else\n        m_settings.setValue(settingName, value);\n    if (signal && !force)\n        QMetaObject::invokeMethod(this, u\"%1Changed\"_s.arg(nameL1).toLatin1().constData(), Q_ARG(ModelInfo, info));\n}\n\nvoid MySettings::setModelSetting(const char *name, const ModelInfo &info, const QVariant &value, bool force,\n                                 bool signal)\n{\n    setModelSetting(QLatin1StringView(name), info, value, force, signal);\n}\n\nQString   MySettings::modelFilename               (const ModelInfo &info) const { return getModelSetting(\"filename\",                info).toString(); }\nQString   
MySettings::modelDescription            (const ModelInfo &info) const { return getModelSetting(\"description\",             info).toString(); }\nQString   MySettings::modelUrl                    (const ModelInfo &info) const { return getModelSetting(\"url\",                     info).toString(); }\nQString   MySettings::modelQuant                  (const ModelInfo &info) const { return getModelSetting(\"quant\",                   info).toString(); }\nQString   MySettings::modelType                   (const ModelInfo &info) const { return getModelSetting(\"type\",                    info).toString(); }\nbool      MySettings::modelIsClone                (const ModelInfo &info) const { return getModelSetting(\"isClone\",                 info).toBool(); }\nbool      MySettings::modelIsDiscovered           (const ModelInfo &info) const { return getModelSetting(\"isDiscovered\",            info).toBool(); }\nint       MySettings::modelLikes                  (const ModelInfo &info) const { return getModelSetting(\"likes\",                   info).toInt(); }\nint       MySettings::modelDownloads              (const ModelInfo &info) const { return getModelSetting(\"downloads\",               info).toInt(); }\nQDateTime MySettings::modelRecency                (const ModelInfo &info) const { return getModelSetting(\"recency\",                 info).toDateTime(); }\ndouble    MySettings::modelTemperature            (const ModelInfo &info) const { return getModelSetting(\"temperature\",             info).toDouble(); }\ndouble    MySettings::modelTopP                   (const ModelInfo &info) const { return getModelSetting(\"topP\",                    info).toDouble(); }\ndouble    MySettings::modelMinP                   (const ModelInfo &info) const { return getModelSetting(\"minP\",                    info).toDouble(); }\nint       MySettings::modelTopK                   (const ModelInfo &info) const { return getModelSetting(\"topK\",                    info).toInt(); }\nint   
    MySettings::modelMaxLength              (const ModelInfo &info) const { return getModelSetting(\"maxLength\",               info).toInt(); }\nint       MySettings::modelPromptBatchSize        (const ModelInfo &info) const { return getModelSetting(\"promptBatchSize\",         info).toInt(); }\nint       MySettings::modelContextLength          (const ModelInfo &info) const { return getModelSetting(\"contextLength\",           info).toInt(); }\nint       MySettings::modelGpuLayers              (const ModelInfo &info) const { return getModelSetting(\"gpuLayers\",               info).toInt(); }\ndouble    MySettings::modelRepeatPenalty          (const ModelInfo &info) const { return getModelSetting(\"repeatPenalty\",           info).toDouble(); }\nint       MySettings::modelRepeatPenaltyTokens    (const ModelInfo &info) const { return getModelSetting(\"repeatPenaltyTokens\",     info).toInt(); }\nQString   MySettings::modelChatNamePrompt         (const ModelInfo &info) const { return getModelSetting(\"chatNamePrompt\",          info).toString(); }\nQString   MySettings::modelSuggestedFollowUpPrompt(const ModelInfo &info) const { return getModelSetting(\"suggestedFollowUpPrompt\", info).toString(); }\n\nauto MySettings::getUpgradeableModelSetting(\n    const ModelInfo &info, QLatin1StringView legacyKey, QLatin1StringView newKey\n) const -> UpgradeableSetting\n{\n    if (info.id().isEmpty()) {\n        qWarning(\"%s: got null model\", Q_FUNC_INFO);\n        return {};\n    }\n\n    auto value = m_settings.value(modelSettingName(info, legacyKey));\n    if (value.isValid())\n        return { UpgradeableSetting::legacy_tag, value.toString() };\n\n    value = getModelSetting(newKey, info);\n    if (!value.isNull())\n        return value.toString();\n    return {}; // neither a default nor an override\n}\n\nbool MySettings::isUpgradeableModelSettingSet(\n    const ModelInfo &info, QLatin1StringView legacyKey, QLatin1StringView newKey\n) const\n{\n    if 
(info.id().isEmpty()) {\n        qWarning(\"%s: got null model\", Q_FUNC_INFO);\n        return false;\n    }\n\n    if (m_settings.contains(modelSettingName(info, legacyKey)))\n        return true;\n\n    // NOTE: unlike getUpgradeableSetting(), this ignores the default\n    return m_settings.contains(modelSettingName(info, newKey));\n}\n\nauto MySettings::modelChatTemplate(const ModelInfo &info) const -> UpgradeableSetting\n{\n    using namespace ModelSettingsKey;\n    return getUpgradeableModelSetting(info, PromptTemplate, ChatTemplate);\n}\n\nbool MySettings::isModelChatTemplateSet(const ModelInfo &info) const\n{\n    using namespace ModelSettingsKey;\n    return isUpgradeableModelSettingSet(info, PromptTemplate, ChatTemplate);\n}\n\nauto MySettings::modelSystemMessage(const ModelInfo &info) const -> UpgradeableSetting\n{\n    using namespace ModelSettingsKey;\n    return getUpgradeableModelSetting(info, SystemPrompt, SystemMessage);\n}\n\nbool MySettings::isModelSystemMessageSet(const ModelInfo &info) const\n{\n    using namespace ModelSettingsKey;\n    return isUpgradeableModelSettingSet(info, SystemPrompt, SystemMessage);\n}\n\nvoid MySettings::setModelFilename(const ModelInfo &info, const QString &value, bool force)\n{\n    setModelSetting(\"filename\", info, value, force, true);\n}\n\nvoid MySettings::setModelDescription(const ModelInfo &info, const QString &value, bool force)\n{\n    setModelSetting(\"description\", info, value, force, true);\n}\n\nvoid MySettings::setModelUrl(const ModelInfo &info, const QString &value, bool force)\n{\n    setModelSetting(\"url\", info, value, force);\n}\n\nvoid MySettings::setModelQuant(const ModelInfo &info, const QString &value, bool force)\n{\n    setModelSetting(\"quant\", info, value, force);\n}\n\nvoid MySettings::setModelType(const ModelInfo &info, const QString &value, bool force)\n{\n    setModelSetting(\"type\", info, value, force);\n}\n\nvoid MySettings::setModelIsClone(const ModelInfo &info, bool value, bool 
force)\n{\n    setModelSetting(\"isClone\", info, value, force);\n}\n\nvoid MySettings::setModelIsDiscovered(const ModelInfo &info, bool value, bool force)\n{\n    setModelSetting(\"isDiscovered\", info, value, force);\n}\n\nvoid MySettings::setModelLikes(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"likes\", info, value, force);\n}\n\nvoid MySettings::setModelDownloads(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"downloads\", info, value, force);\n}\n\nvoid MySettings::setModelRecency(const ModelInfo &info, const QDateTime &value, bool force)\n{\n    setModelSetting(\"recency\", info, value, force);\n}\n\nvoid MySettings::setModelTemperature(const ModelInfo &info, double value, bool force)\n{\n    setModelSetting(\"temperature\", info, value, force, true);\n}\n\nvoid MySettings::setModelTopP(const ModelInfo &info, double value, bool force)\n{\n    setModelSetting(\"topP\", info, value, force, true);\n}\n\nvoid MySettings::setModelMinP(const ModelInfo &info, double value, bool force)\n{\n    setModelSetting(\"minP\", info, value, force, true);\n}\n\nvoid MySettings::setModelTopK(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"topK\", info, value, force, true);\n}\n\nvoid MySettings::setModelMaxLength(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"maxLength\", info, value, force, true);\n}\n\nvoid MySettings::setModelPromptBatchSize(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"promptBatchSize\", info, value, force, true);\n}\n\nvoid MySettings::setModelContextLength(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"contextLength\", info, value, force, true);\n}\n\nvoid MySettings::setModelGpuLayers(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"gpuLayers\", info, value, force, true);\n}\n\nvoid MySettings::setModelRepeatPenalty(const ModelInfo &info, double value, bool force)\n{\n    
setModelSetting(\"repeatPenalty\", info, value, force, true);\n}\n\nvoid MySettings::setModelRepeatPenaltyTokens(const ModelInfo &info, int value, bool force)\n{\n    setModelSetting(\"repeatPenaltyTokens\", info, value, force, true);\n}\n\nbool MySettings::setUpgradeableModelSetting(\n    const ModelInfo &info, const QString &value, QLatin1StringView legacyKey, QLatin1StringView newKey\n) {\n    if (info.id().isEmpty()) {\n        qWarning(\"%s: got null model\", Q_FUNC_INFO);\n        return false;\n    }\n\n    auto legacyModelKey = modelSettingName(info, legacyKey);\n    auto newModelKey    = modelSettingName(info, newKey   );\n    bool changed = false;\n    if (m_settings.contains(legacyModelKey)) {\n        m_settings.remove(legacyModelKey);\n        changed = true;\n    }\n    auto oldValue = m_settings.value(newModelKey);\n    if (!oldValue.isValid() || oldValue.toString() != value) {\n        m_settings.setValue(newModelKey, value);\n        changed = true;\n    }\n    return changed;\n}\n\nbool MySettings::resetUpgradeableModelSetting(\n    const ModelInfo &info, QLatin1StringView legacyKey, QLatin1StringView newKey\n) {\n    if (info.id().isEmpty()) {\n        qWarning(\"%s: got null model\", Q_FUNC_INFO);\n        return false;\n    }\n\n    auto legacyModelKey = modelSettingName(info, legacyKey);\n    auto newModelKey    = modelSettingName(info, newKey   );\n    bool changed        = false;\n    if (m_settings.contains(legacyModelKey)) {\n        m_settings.remove(legacyModelKey);\n        changed = true;\n    }\n    if (m_settings.contains(newModelKey)) {\n        m_settings.remove(newModelKey);\n        changed = true;\n    }\n    return changed;\n}\n\nvoid MySettings::setModelChatTemplate(const ModelInfo &info, const QString &value)\n{\n    using namespace ModelSettingsKey;\n    if (setUpgradeableModelSetting(info, value, PromptTemplate, ChatTemplate))\n        emit chatTemplateChanged(info);\n}\n\nvoid MySettings::resetModelChatTemplate(const 
ModelInfo &info)\n{\n    using namespace ModelSettingsKey;\n    if (resetUpgradeableModelSetting(info, PromptTemplate, ChatTemplate))\n        emit chatTemplateChanged(info);\n}\n\nvoid MySettings::setModelSystemMessage(const ModelInfo &info, const QString &value)\n{\n    using namespace ModelSettingsKey;\n    if (setUpgradeableModelSetting(info, value, SystemPrompt, SystemMessage))\n        emit systemMessageChanged(info);\n}\n\nvoid MySettings::resetModelSystemMessage(const ModelInfo &info)\n{\n    using namespace ModelSettingsKey;\n    if (resetUpgradeableModelSetting(info, SystemPrompt, SystemMessage))\n        emit systemMessageChanged(info);\n}\n\nvoid MySettings::setModelChatNamePrompt(const ModelInfo &info, const QString &value, bool force)\n{\n    setModelSetting(\"chatNamePrompt\", info, value, force, true);\n}\n\nvoid MySettings::setModelSuggestedFollowUpPrompt(const ModelInfo &info, const QString &value, bool force)\n{\n    setModelSetting(\"suggestedFollowUpPrompt\", info, value, force, true);\n}\n\nint MySettings::threadCount() const\n{\n    int c = m_settings.value(\"threadCount\", defaults::threadCount).toInt();\n    // The old thread setting likely left many people with 0 in settings config file, which means\n    // we should reset it to the default going forward\n    if (c <= 0)\n        c = defaults::threadCount;\n    c = std::max(c, 1);\n    c = std::min(c, QThread::idealThreadCount());\n    return c;\n}\n\nvoid MySettings::setThreadCount(int value)\n{\n    if (threadCount() == value)\n        return;\n\n    value = std::max(value, 1);\n    value = std::min(value, QThread::idealThreadCount());\n    m_settings.setValue(\"threadCount\", value);\n    emit threadCountChanged();\n}\n\nbool        MySettings::systemTray() const              { return getBasicSetting(\"systemTray\"              ).toBool(); }\nbool        MySettings::serverChat() const              { return getBasicSetting(\"serverChat\"              ).toBool(); }\nint         
MySettings::networkPort() const             { return getBasicSetting(\"networkPort\"             ).toInt(); }\nQString     MySettings::userDefaultModel() const        { return getBasicSetting(\"userDefaultModel\"        ).toString(); }\nQString     MySettings::lastVersionStarted() const      { return getBasicSetting(\"lastVersionStarted\"      ).toString(); }\nint         MySettings::localDocsChunkSize() const      { return getBasicSetting(\"localdocs/chunkSize\"     ).toInt(); }\nint         MySettings::localDocsRetrievalSize() const  { return getBasicSetting(\"localdocs/retrievalSize\" ).toInt(); }\nbool        MySettings::localDocsShowReferences() const { return getBasicSetting(\"localdocs/showReferences\").toBool(); }\nQStringList MySettings::localDocsFileExtensions() const { return getBasicSetting(\"localdocs/fileExtensions\").toStringList(); }\nbool        MySettings::localDocsUseRemoteEmbed() const { return getBasicSetting(\"localdocs/useRemoteEmbed\").toBool(); }\nQString     MySettings::localDocsNomicAPIKey() const    { return getBasicSetting(\"localdocs/nomicAPIKey\"   ).toString(); }\nQString     MySettings::localDocsEmbedDevice() const    { return getBasicSetting(\"localdocs/embedDevice\"   ).toString(); }\nQString     MySettings::networkAttribution() const      { return getBasicSetting(\"network/attribution\"     ).toString(); }\n\nChatTheme      MySettings::chatTheme() const      { return ChatTheme     (getEnumSetting(\"chatTheme\", chatThemeNames)); }\nFontSize       MySettings::fontSize() const       { return FontSize      (getEnumSetting(\"fontSize\",  fontSizeNames)); }\nSuggestionMode MySettings::suggestionMode() const { return SuggestionMode(getEnumSetting(\"suggestionMode\", suggestionModeNames)); }\n\nvoid MySettings::setSystemTray(bool value)                            { setBasicSetting(\"systemTray\",               value); }\nvoid MySettings::setServerChat(bool value)                            { setBasicSetting(\"serverChat\",               
value); }\nvoid MySettings::setNetworkPort(int value)                            { setBasicSetting(\"networkPort\",              value); }\nvoid MySettings::setUserDefaultModel(const QString &value)            { setBasicSetting(\"userDefaultModel\",         value); }\nvoid MySettings::setLastVersionStarted(const QString &value)          { setBasicSetting(\"lastVersionStarted\",       value); }\nvoid MySettings::setLocalDocsChunkSize(int value)                     { setBasicSetting(\"localdocs/chunkSize\",      value, \"localDocsChunkSize\"); }\nvoid MySettings::setLocalDocsRetrievalSize(int value)                 { setBasicSetting(\"localdocs/retrievalSize\",  value, \"localDocsRetrievalSize\"); }\nvoid MySettings::setLocalDocsShowReferences(bool value)               { setBasicSetting(\"localdocs/showReferences\", value, \"localDocsShowReferences\"); }\nvoid MySettings::setLocalDocsFileExtensions(const QStringList &value) { setBasicSetting(\"localdocs/fileExtensions\", value, \"localDocsFileExtensions\"); }\nvoid MySettings::setLocalDocsUseRemoteEmbed(bool value)               { setBasicSetting(\"localdocs/useRemoteEmbed\", value, \"localDocsUseRemoteEmbed\"); }\nvoid MySettings::setLocalDocsNomicAPIKey(const QString &value)        { setBasicSetting(\"localdocs/nomicAPIKey\",    value, \"localDocsNomicAPIKey\"); }\nvoid MySettings::setLocalDocsEmbedDevice(const QString &value)        { setBasicSetting(\"localdocs/embedDevice\",    value, \"localDocsEmbedDevice\"); }\nvoid MySettings::setNetworkAttribution(const QString &value)          { setBasicSetting(\"network/attribution\",      value, \"networkAttribution\"); }\n\nvoid MySettings::setChatTheme(ChatTheme value)           { setBasicSetting(\"chatTheme\",      chatThemeNames     .value(int(value))); }\nvoid MySettings::setFontSize(FontSize value)             { setBasicSetting(\"fontSize\",       fontSizeNames      .value(int(value))); }\nvoid MySettings::setSuggestionMode(SuggestionMode value) { 
setBasicSetting(\"suggestionMode\", suggestionModeNames.value(int(value))); }\n\nQString MySettings::modelPath()\n{\n    // We have to migrate the old setting because I changed the setting key recklessly in v2.4.11\n    // which broke a lot of existing installs\n    const bool containsOldSetting = m_settings.contains(\"modelPaths\");\n    if (containsOldSetting) {\n        const bool containsNewSetting = m_settings.contains(\"modelPath\");\n        if (!containsNewSetting)\n            m_settings.setValue(\"modelPath\", m_settings.value(\"modelPaths\"));\n        m_settings.remove(\"modelPaths\");\n    }\n    return m_settings.value(\"modelPath\", defaultLocalModelsPath()).toString();\n}\n\nvoid MySettings::setModelPath(const QString &value)\n{\n    QString filePath = (value.startsWith(\"file://\") ?\n                        QUrl(value).toLocalFile() : value);\n    QString canonical = QFileInfo(filePath).canonicalFilePath() + \"/\";\n    if (modelPath() == canonical)\n        return;\n    m_settings.setValue(\"modelPath\", canonical);\n    emit modelPathChanged();\n}\n\nQString MySettings::device()\n{\n    auto value = m_settings.value(\"device\");\n    if (!value.isValid())\n        return defaults::device;\n\n    auto device = value.toString();\n    if (!device.isEmpty()) {\n        auto deviceStr = device.toStdString();\n        auto newNameStr = LLModel::GPUDevice::updateSelectionName(deviceStr);\n        if (newNameStr != deviceStr) {\n            auto newName = QString::fromStdString(newNameStr);\n            qWarning() << \"updating device name:\" << device << \"->\" << newName;\n            device = newName;\n            m_settings.setValue(\"device\", device);\n        }\n    }\n    return device;\n}\n\nvoid MySettings::setDevice(const QString &value)\n{\n    if (device() != value) {\n        m_settings.setValue(\"device\", value);\n        emit deviceChanged();\n    }\n}\n\nbool MySettings::forceMetal() const\n{\n    return m_forceMetal;\n}\n\nvoid 
MySettings::setForceMetal(bool value)\n{\n    if (m_forceMetal != value) {\n        m_forceMetal = value;\n        emit forceMetalChanged(value);\n    }\n}\n\nbool MySettings::networkIsActive() const\n{\n    return m_settings.value(\"network/isActive\", defaults::networkIsActive).toBool();\n}\n\nbool MySettings::isNetworkIsActiveSet() const\n{\n    return m_settings.value(\"network/isActive\").isValid();\n}\n\nvoid MySettings::setNetworkIsActive(bool value)\n{\n    auto cur = m_settings.value(\"network/isActive\");\n    if (!cur.isValid() || cur.toBool() != value) {\n        m_settings.setValue(\"network/isActive\", value);\n        emit networkIsActiveChanged();\n    }\n}\n\nbool MySettings::networkUsageStatsActive() const\n{\n    return m_settings.value(\"network/usageStatsActive\", defaults::networkUsageStatsActive).toBool();\n}\n\nbool MySettings::isNetworkUsageStatsActiveSet() const\n{\n    return m_settings.value(\"network/usageStatsActive\").isValid();\n}\n\nvoid MySettings::setNetworkUsageStatsActive(bool value)\n{\n    auto cur = m_settings.value(\"network/usageStatsActive\");\n    if (!cur.isValid() || cur.toBool() != value) {\n        m_settings.setValue(\"network/usageStatsActive\", value);\n        emit networkUsageStatsActiveChanged();\n    }\n}\n\nQString MySettings::languageAndLocale() const\n{\n    auto value = m_settings.value(\"languageAndLocale\");\n    if (!value.isValid())\n        return defaults::languageAndLocale;\n    return value.toString();\n}\n\nQString MySettings::filePathForLocale(const QLocale &locale)\n{\n    // Check and see if we have a translation for the chosen locale and set it if possible otherwise\n    // we return the filepath for the 'en_US' translation\n    QStringList uiLanguages = locale.uiLanguages();\n    for (int i = 0; i < uiLanguages.size(); ++i)\n        uiLanguages[i].replace('-', '_');\n\n    // Scan this directory for files named like gpt4all_%1.qm that match and if so return them first\n    // this is the model 
download directory and it can be used by translation developers who are\n    // trying to test their translations by just compiling the translation with the lrelease tool\n    // rather than having to recompile all of GPT4All\n    QString directory = modelPath();\n    for (const QString &bcp47Name : uiLanguages) {\n        QString filePath = u\"%1/gpt4all_%2.qm\"_s.arg(directory, bcp47Name);\n        QFileInfo filePathInfo(filePath);\n        if (filePathInfo.exists()) return filePath;\n    }\n\n    // Now scan the internal built-in translations\n    for (QString bcp47Name : uiLanguages) {\n        QString filePath = u\":/i18n/gpt4all_%1.qm\"_s.arg(bcp47Name);\n        QFileInfo filePathInfo(filePath);\n        if (filePathInfo.exists()) return filePath;\n    }\n    return u\":/i18n/gpt4all_en_US.qm\"_s;\n}\n\nvoid MySettings::setLanguageAndLocale(const QString &bcp47Name)\n{\n    if (!bcp47Name.isEmpty() && languageAndLocale() != bcp47Name)\n        m_settings.setValue(\"languageAndLocale\", bcp47Name);\n\n    // When the app is started this method is called with no bcp47Name given which sets the translation\n    // to either the default which is the system locale or the one explicitly set by the user previously.\n    QLocale locale;\n    const QString l = languageAndLocale();\n    if (l == \"System Locale\")\n        locale = QLocale::system();\n    else\n        locale = QLocale(l);\n\n    // If we previously installed a translator, then remove it\n    if (m_translator) {\n        if (!qGuiApp->removeTranslator(m_translator.get())) {\n            qDebug() << \"ERROR: Failed to remove the previous translator\";\n        } else {\n            m_translator.reset();\n        }\n    }\n\n    // We expect that the translator was removed and is now a nullptr\n    Q_ASSERT(!m_translator);\n\n    const QString filePath = filePathForLocale(locale);\n    if (!m_translator) {\n        // Create a new translator object on the heap\n        m_translator = 
std::make_unique<QTranslator>(this);\n        bool success = m_translator->load(filePath);\n        Q_ASSERT(success);\n        if (!success) {\n            qDebug() << \"ERROR: Failed to load translation file:\" << filePath;\n            m_translator.reset();\n        }\n\n        // If we've successfully loaded it, then try and install it\n        if (!qGuiApp->installTranslator(m_translator.get())) {\n            qDebug() << \"ERROR: Failed to install the translator:\" << filePath;\n            m_translator.reset();\n        }\n    }\n\n    // Finally, set the locale whether we have a translation or not\n    QLocale::setDefault(locale);\n    emit languageAndLocaleChanged();\n}\n"
  },
  {
    "path": "gpt4all-chat/src/mysettings.h",
    "content": "#ifndef MYSETTINGS_H\n#define MYSETTINGS_H\n\n#include \"modellist.h\" // IWYU pragma: keep\n\n#include <QDateTime>\n#include <QLatin1StringView> // IWYU pragma: keep\n#include <QList>\n#include <QModelIndex>\n#include <QObject>\n#include <QSettings>\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n#include <QTranslator>\n#include <QVariant>\n\n#include <cstdint>\n#include <memory>\n#include <optional>\n\n// IWYU pragma: no_forward_declare QModelIndex\nclass QLocale;\n\n\nnamespace MySettingsEnums {\n    Q_NAMESPACE\n\n    /* NOTE: values of these enums are used as indices for the corresponding combo boxes in\n     *       ApplicationSettings.qml, as well as the corresponding name lists in mysettings.cpp */\n\n    enum class SuggestionMode {\n        LocalDocsOnly = 0,\n        On            = 1,\n        Off           = 2,\n    };\n    Q_ENUM_NS(SuggestionMode)\n\n    enum class ChatTheme {\n        Light      = 0,\n        Dark       = 1,\n        LegacyDark = 2,\n    };\n    Q_ENUM_NS(ChatTheme)\n\n    enum class FontSize {\n        Small  = 0,\n        Medium = 1,\n        Large  = 2,\n    };\n    Q_ENUM_NS(FontSize)\n}\nusing namespace MySettingsEnums;\n\nclass MySettings : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(int threadCount READ threadCount WRITE setThreadCount NOTIFY threadCountChanged)\n    Q_PROPERTY(bool systemTray READ systemTray WRITE setSystemTray NOTIFY systemTrayChanged)\n    Q_PROPERTY(bool serverChat READ serverChat WRITE setServerChat NOTIFY serverChatChanged)\n    Q_PROPERTY(QString modelPath READ modelPath WRITE setModelPath NOTIFY modelPathChanged)\n    Q_PROPERTY(QString userDefaultModel READ userDefaultModel WRITE setUserDefaultModel NOTIFY userDefaultModelChanged)\n    Q_PROPERTY(ChatTheme chatTheme READ chatTheme WRITE setChatTheme NOTIFY chatThemeChanged)\n    Q_PROPERTY(FontSize fontSize READ fontSize WRITE setFontSize NOTIFY fontSizeChanged)\n    Q_PROPERTY(QString languageAndLocale READ 
languageAndLocale WRITE setLanguageAndLocale NOTIFY languageAndLocaleChanged)\n    Q_PROPERTY(bool forceMetal READ forceMetal WRITE setForceMetal NOTIFY forceMetalChanged)\n    Q_PROPERTY(QString lastVersionStarted READ lastVersionStarted WRITE setLastVersionStarted NOTIFY lastVersionStartedChanged)\n    Q_PROPERTY(int localDocsChunkSize READ localDocsChunkSize WRITE setLocalDocsChunkSize NOTIFY localDocsChunkSizeChanged)\n    Q_PROPERTY(int localDocsRetrievalSize READ localDocsRetrievalSize WRITE setLocalDocsRetrievalSize NOTIFY localDocsRetrievalSizeChanged)\n    Q_PROPERTY(bool localDocsShowReferences READ localDocsShowReferences WRITE setLocalDocsShowReferences NOTIFY localDocsShowReferencesChanged)\n    Q_PROPERTY(QStringList localDocsFileExtensions READ localDocsFileExtensions WRITE setLocalDocsFileExtensions NOTIFY localDocsFileExtensionsChanged)\n    Q_PROPERTY(bool localDocsUseRemoteEmbed READ localDocsUseRemoteEmbed WRITE setLocalDocsUseRemoteEmbed NOTIFY localDocsUseRemoteEmbedChanged)\n    Q_PROPERTY(QString localDocsNomicAPIKey READ localDocsNomicAPIKey WRITE setLocalDocsNomicAPIKey NOTIFY localDocsNomicAPIKeyChanged)\n    Q_PROPERTY(QString localDocsEmbedDevice READ localDocsEmbedDevice WRITE setLocalDocsEmbedDevice NOTIFY localDocsEmbedDeviceChanged)\n    Q_PROPERTY(QString networkAttribution READ networkAttribution WRITE setNetworkAttribution NOTIFY networkAttributionChanged)\n    Q_PROPERTY(bool networkIsActive READ networkIsActive WRITE setNetworkIsActive NOTIFY networkIsActiveChanged)\n    Q_PROPERTY(bool networkUsageStatsActive READ networkUsageStatsActive WRITE setNetworkUsageStatsActive NOTIFY networkUsageStatsActiveChanged)\n    Q_PROPERTY(QString device READ device WRITE setDevice NOTIFY deviceChanged)\n    Q_PROPERTY(QStringList deviceList MEMBER m_deviceList CONSTANT)\n    Q_PROPERTY(QStringList embeddingsDeviceList MEMBER m_embeddingsDeviceList CONSTANT)\n    Q_PROPERTY(int networkPort READ networkPort WRITE setNetworkPort NOTIFY 
networkPortChanged)\n    Q_PROPERTY(SuggestionMode suggestionMode READ suggestionMode WRITE setSuggestionMode NOTIFY suggestionModeChanged)\n    Q_PROPERTY(QStringList uiLanguages MEMBER m_uiLanguages CONSTANT)\n\nprivate:\n    explicit MySettings();\n    ~MySettings() override = default;\n\npublic Q_SLOTS:\n    void onModelInfoChanged(const QModelIndex &topLeft, const QModelIndex &bottomRight, const QList<int> &roles = {});\n\npublic:\n    static MySettings *globalInstance();\n\n    Q_INVOKABLE static QVariant checkJinjaTemplateError(const QString &tmpl);\n\n    // Restore methods\n    Q_INVOKABLE void restoreModelDefaults(const ModelInfo &info);\n    Q_INVOKABLE void restoreApplicationDefaults();\n    Q_INVOKABLE void restoreLocalDocsDefaults();\n\n    // Model/Character settings\n    void eraseModel(const ModelInfo &info);\n    QString modelName(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelName(const ModelInfo &info, const QString &name, bool force = false);\n    QString modelFilename(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelFilename(const ModelInfo &info, const QString &filename, bool force = false);\n\n    QString modelDescription(const ModelInfo &info) const;\n    void setModelDescription(const ModelInfo &info, const QString &value, bool force = false);\n    QString modelUrl(const ModelInfo &info) const;\n    void setModelUrl(const ModelInfo &info, const QString &value, bool force = false);\n    QString modelQuant(const ModelInfo &info) const;\n    void setModelQuant(const ModelInfo &info, const QString &value, bool force = false);\n    QString modelType(const ModelInfo &info) const;\n    void setModelType(const ModelInfo &info, const QString &value, bool force = false);\n    bool modelIsClone(const ModelInfo &info) const;\n    void setModelIsClone(const ModelInfo &info, bool value, bool force = false);\n    bool modelIsDiscovered(const ModelInfo &info) const;\n    void setModelIsDiscovered(const ModelInfo &info, bool value, 
bool force = false);\n    int modelLikes(const ModelInfo &info) const;\n    void setModelLikes(const ModelInfo &info, int value, bool force = false);\n    int modelDownloads(const ModelInfo &info) const;\n    void setModelDownloads(const ModelInfo &info, int value, bool force = false);\n    QDateTime modelRecency(const ModelInfo &info) const;\n    void setModelRecency(const ModelInfo &info, const QDateTime &value, bool force = false);\n\n    double modelTemperature(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelTemperature(const ModelInfo &info, double value, bool force = false);\n    double modelTopP(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelTopP(const ModelInfo &info, double value, bool force = false);\n    double modelMinP(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelMinP(const ModelInfo &info, double value, bool force = false);\n    int modelTopK(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelTopK(const ModelInfo &info, int value, bool force = false);\n    int modelMaxLength(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelMaxLength(const ModelInfo &info, int value, bool force = false);\n    int modelPromptBatchSize(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelPromptBatchSize(const ModelInfo &info, int value, bool force = false);\n    double modelRepeatPenalty(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelRepeatPenalty(const ModelInfo &info, double value, bool force = false);\n    int modelRepeatPenaltyTokens(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelRepeatPenaltyTokens(const ModelInfo &info, int value, bool force = false);\n    auto modelChatTemplate(const ModelInfo &info) const -> UpgradeableSetting;\n    Q_INVOKABLE bool isModelChatTemplateSet(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelChatTemplate(const ModelInfo &info, const QString &value);\n    Q_INVOKABLE void resetModelChatTemplate(const ModelInfo &info);\n    auto 
modelSystemMessage(const ModelInfo &info) const -> UpgradeableSetting;\n    Q_INVOKABLE bool isModelSystemMessageSet(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelSystemMessage(const ModelInfo &info, const QString &value);\n    Q_INVOKABLE void resetModelSystemMessage(const ModelInfo &info);\n    int modelContextLength(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelContextLength(const ModelInfo &info, int value, bool force = false);\n    int modelGpuLayers(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelGpuLayers(const ModelInfo &info, int value, bool force = false);\n    QString modelChatNamePrompt(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelChatNamePrompt(const ModelInfo &info, const QString &value, bool force = false);\n    QString modelSuggestedFollowUpPrompt(const ModelInfo &info) const;\n    Q_INVOKABLE void setModelSuggestedFollowUpPrompt(const ModelInfo &info, const QString &value, bool force = false);\n\n    // Application settings\n    int threadCount() const;\n    void setThreadCount(int value);\n    bool systemTray() const;\n    void setSystemTray(bool value);\n    bool serverChat() const;\n    void setServerChat(bool value);\n    QString modelPath();\n    void setModelPath(const QString &value);\n    QString userDefaultModel() const;\n    void setUserDefaultModel(const QString &value);\n    ChatTheme chatTheme() const;\n    void setChatTheme(ChatTheme value);\n    FontSize fontSize() const;\n    void setFontSize(FontSize value);\n    bool forceMetal() const;\n    void setForceMetal(bool value);\n    QString device();\n    void setDevice(const QString &value);\n    int32_t contextLength() const;\n    void setContextLength(int32_t value);\n    int32_t gpuLayers() const;\n    void setGpuLayers(int32_t value);\n    SuggestionMode suggestionMode() const;\n    void setSuggestionMode(SuggestionMode value);\n\n    QString languageAndLocale() const;\n    void setLanguageAndLocale(const QString &bcp47Name = 
QString()); // called on startup with QString()\n\n    // Release/Download settings\n    QString lastVersionStarted() const;\n    void setLastVersionStarted(const QString &value);\n\n    // Localdocs settings\n    int localDocsChunkSize() const;\n    void setLocalDocsChunkSize(int value);\n    int localDocsRetrievalSize() const;\n    void setLocalDocsRetrievalSize(int value);\n    bool localDocsShowReferences() const;\n    void setLocalDocsShowReferences(bool value);\n    QStringList localDocsFileExtensions() const;\n    void setLocalDocsFileExtensions(const QStringList &value);\n    bool localDocsUseRemoteEmbed() const;\n    void setLocalDocsUseRemoteEmbed(bool value);\n    QString localDocsNomicAPIKey() const;\n    void setLocalDocsNomicAPIKey(const QString &value);\n    QString localDocsEmbedDevice() const;\n    void setLocalDocsEmbedDevice(const QString &value);\n\n    // Network settings\n    QString networkAttribution() const;\n    void setNetworkAttribution(const QString &value);\n    bool networkIsActive() const;\n    Q_INVOKABLE bool isNetworkIsActiveSet() const;\n    void setNetworkIsActive(bool value);\n    bool networkUsageStatsActive() const;\n    Q_INVOKABLE bool isNetworkUsageStatsActiveSet() const;\n    void setNetworkUsageStatsActive(bool value);\n    int networkPort() const;\n    void setNetworkPort(int value);\n\nQ_SIGNALS:\n    void nameChanged(const ModelInfo &info);\n    void filenameChanged(const ModelInfo &info);\n    void descriptionChanged(const ModelInfo &info);\n    void temperatureChanged(const ModelInfo &info);\n    void topPChanged(const ModelInfo &info);\n    void minPChanged(const ModelInfo &info);\n    void topKChanged(const ModelInfo &info);\n    void maxLengthChanged(const ModelInfo &info);\n    void promptBatchSizeChanged(const ModelInfo &info);\n    void contextLengthChanged(const ModelInfo &info);\n    void gpuLayersChanged(const ModelInfo &info);\n    void repeatPenaltyChanged(const ModelInfo &info);\n    void 
repeatPenaltyTokensChanged(const ModelInfo &info);\n    void chatTemplateChanged(const ModelInfo &info, bool fromInfo = false);\n    void systemMessageChanged(const ModelInfo &info, bool fromInfo = false);\n    void chatNamePromptChanged(const ModelInfo &info);\n    void suggestedFollowUpPromptChanged(const ModelInfo &info);\n    void threadCountChanged();\n    void systemTrayChanged();\n    void serverChatChanged();\n    void modelPathChanged();\n    void userDefaultModelChanged();\n    void chatThemeChanged();\n    void fontSizeChanged();\n    void forceMetalChanged(bool);\n    void lastVersionStartedChanged();\n    void localDocsChunkSizeChanged();\n    void localDocsRetrievalSizeChanged();\n    void localDocsShowReferencesChanged();\n    void localDocsFileExtensionsChanged();\n    void localDocsUseRemoteEmbedChanged();\n    void localDocsNomicAPIKeyChanged();\n    void localDocsEmbedDeviceChanged();\n    void networkAttributionChanged();\n    void networkIsActiveChanged();\n    void networkPortChanged();\n    void networkUsageStatsActiveChanged();\n    void attemptModelLoadChanged();\n    void deviceChanged();\n    void suggestionModeChanged();\n    void languageAndLocaleChanged();\n\nprivate:\n    QVariant getBasicSetting(const QString &name) const;\n    void setBasicSetting(const QString &name, const QVariant &value, std::optional<QString> signal = std::nullopt);\n    int getEnumSetting(const QString &setting, const QStringList &valueNames) const;\n    QVariant getModelSetting(QLatin1StringView name, const ModelInfo &info) const;\n    QVariant getModelSetting(const char *name, const ModelInfo &info) const;\n    void setModelSetting(QLatin1StringView name, const ModelInfo &info, const QVariant &value, bool force,\n                         bool signal = false);\n    void setModelSetting(const char *name, const ModelInfo &info, const QVariant &value, bool force,\n                         bool signal = false);\n    auto getUpgradeableModelSetting(\n        const 
ModelInfo &info, QLatin1StringView legacyKey, QLatin1StringView newKey\n    ) const -> UpgradeableSetting;\n    bool isUpgradeableModelSettingSet(\n        const ModelInfo &info, QLatin1StringView legacyKey, QLatin1StringView newKey\n    ) const;\n    bool setUpgradeableModelSetting(\n        const ModelInfo &info, const QString &value, QLatin1StringView legacyKey, QLatin1StringView newKey\n    );\n    bool resetUpgradeableModelSetting(\n        const ModelInfo &info, QLatin1StringView legacyKey, QLatin1StringView newKey\n    );\n    QString filePathForLocale(const QLocale &locale);\n\nprivate:\n    QSettings m_settings;\n    bool m_forceMetal;\n    const QStringList m_deviceList;\n    const QStringList m_embeddingsDeviceList;\n    const QStringList m_uiLanguages;\n    std::unique_ptr<QTranslator> m_translator;\n\n    friend class MyPrivateSettings;\n};\n\n#endif // MYSETTINGS_H\n"
  },
  {
    "path": "gpt4all-chat/src/network.cpp",
    "content": "#include \"network.h\"\n\n#include \"chat.h\"\n#include \"chatlistmodel.h\"\n#include \"download.h\"\n#include \"llm.h\"\n#include \"localdocs.h\"\n#include \"localdocsmodel.h\"\n#include \"modellist.h\"\n#include \"mysettings.h\"\n\n#include <gpt4all-backend/llmodel.h>\n\n#include <QCoreApplication>\n#include <QDateTime>\n#include <QDebug>\n#include <QGlobalStatic>\n#include <QGuiApplication>\n#include <QJsonArray>\n#include <QJsonDocument>\n#include <QJsonObject>\n#include <QLibraryInfo>\n#include <QNetworkRequest>\n#include <QScreen>\n#include <QSettings>\n#include <QSize>\n#include <QSslConfiguration>\n#include <QSslSocket>\n#include <QSysInfo>\n#include <Qt>\n#include <QtLogging>\n#include <QUrl>\n#include <QUuid>\n\n#include <cmath>\n#include <cstring>\n#include <utility>\n\n#ifdef __GLIBC__\n#   include <gnu/libc-version.h>\n#endif\n\nusing namespace Qt::Literals::StringLiterals;\n\n//#define DEBUG\n\n#define STR_(x) #x\n#define STR(x) STR_(x)\n\n\nstatic const char MIXPANEL_TOKEN[] = \"ce362e568ddaee16ed243eaffb5860a2\";\n\n#ifdef __clang__\n#ifdef __apple_build_version__\nstatic const char COMPILER_NAME[] = \"Apple Clang\";\n#else\nstatic const char COMPILER_NAME[] = \"LLVM Clang\";\n#endif\nstatic const char COMPILER_VER[]  = STR(__clang_major__) \".\" STR(__clang_minor__) \".\" STR(__clang_patchlevel__);\n#elif defined(_MSC_VER)\nstatic const char COMPILER_NAME[] = \"MSVC\";\nstatic const char COMPILER_VER[]  = STR(_MSC_VER) \" (\" STR(_MSC_FULL_VER) \")\";\n#elif defined(__GNUC__)\nstatic const char COMPILER_NAME[] = \"GCC\";\nstatic const char COMPILER_VER[]  = STR(__GNUC__) \".\" STR(__GNUC_MINOR__) \".\" STR(__GNUC_PATCHLEVEL__);\n#endif\n\n\n#if defined(Q_OS_MAC)\n\n#include <sys/sysctl.h>\nstatic std::optional<QString> getSysctl(const char *name)\n{\n    char buffer[256] = \"\";\n    size_t bufferlen = sizeof(buffer);\n    if (sysctlbyname(name, &buffer, &bufferlen, NULL, 0) < 0) {\n        int err = errno;\n        
qWarning().nospace() << \"sysctlbyname(\\\"\" << name << \"\\\") failed: \" << strerror(err);\n        return std::nullopt;\n    }\n    return std::make_optional<QString>(buffer);\n}\n\nstatic QString getCPUModel() { return getSysctl(\"machdep.cpu.brand_string\").value_or(u\"(unknown)\"_s); }\n\n#elif defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)\n\n#ifndef _MSC_VER\nstatic void get_cpuid(int level, int *regs)\n{\n    asm volatile(\"cpuid\" : \"=a\" (regs[0]), \"=b\" (regs[1]), \"=c\" (regs[2]), \"=d\" (regs[3]) : \"0\" (level) : \"memory\");\n}\n#else\n#define get_cpuid(level, regs) __cpuid(regs, level)\n#endif\n\nstatic QString getCPUModel()\n{\n    int regs[12];\n\n    // EAX=80000000h: Get Highest Extended Function Implemented\n    get_cpuid(0x80000000, regs);\n    if (regs[0] < 0x80000004)\n        return \"(unknown)\";\n\n    // EAX=80000002h-80000004h: Processor Brand String\n    get_cpuid(0x80000002, regs);\n    get_cpuid(0x80000003, regs + 4);\n    get_cpuid(0x80000004, regs + 8);\n\n    char str[sizeof(regs) + 1];\n    memcpy(str, regs, sizeof(regs));\n    str[sizeof(regs)] = 0;\n\n    return QString(str).trimmed();\n}\n\n#else\n\nstatic QString getCPUModel() { return \"(non-x86)\"; }\n\n#endif\n\n\nclass MyNetwork: public Network { };\nQ_GLOBAL_STATIC(MyNetwork, networkInstance)\nNetwork *Network::globalInstance()\n{\n    return networkInstance();\n}\n\nbool Network::isHttpUrlValid(QUrl url) {\n    if (!url.isValid())\n        return false;\n    QString scheme(url.scheme());\n    if (scheme != \"http\" && scheme != \"https\")\n        return false;\n    return true;\n}\n\nNetwork::Network()\n    : QObject{nullptr}\n{\n    QSettings settings;\n    m_uniqueId = settings.value(\"uniqueId\", generateUniqueId()).toString();\n    settings.setValue(\"uniqueId\", m_uniqueId);\n    m_sessionId = generateUniqueId();\n\n    // allow sendMixpanel to be called from any thread\n    connect(this, &Network::requestMixpanel, this, 
&Network::sendMixpanel, Qt::QueuedConnection);\n\n    const auto *mySettings = MySettings::globalInstance();\n    connect(mySettings, &MySettings::networkIsActiveChanged, this, &Network::handleIsActiveChanged);\n    connect(mySettings, &MySettings::networkUsageStatsActiveChanged, this, &Network::handleUsageStatsActiveChanged);\n\n    m_hasSentOptIn  = !Download::globalInstance()->isFirstStart() &&  mySettings->networkUsageStatsActive();\n    m_hasSentOptOut = !Download::globalInstance()->isFirstStart() && !mySettings->networkUsageStatsActive();\n\n    if (mySettings->networkIsActive())\n        sendHealth();\n    connect(&m_networkManager, &QNetworkAccessManager::sslErrors, this,\n        &Network::handleSslErrors);\n}\n\n// NOTE: this won't be useful until we make it possible to change this via the settings page\nvoid Network::handleUsageStatsActiveChanged()\n{\n    if (!MySettings::globalInstance()->networkUsageStatsActive())\n        m_sendUsageStats = false;\n}\n\nvoid Network::handleIsActiveChanged()\n{\n    if (MySettings::globalInstance()->networkUsageStatsActive())\n        sendHealth();\n}\n\nQString Network::generateUniqueId() const\n{\n    return QUuid::createUuid().toString(QUuid::WithoutBraces);\n}\n\nbool Network::packageAndSendJson(const QString &ingestId, const QString &json)\n{\n    if (!MySettings::globalInstance()->networkIsActive())\n        return false;\n\n    QJsonParseError err;\n    QJsonDocument doc = QJsonDocument::fromJson(json.toUtf8(), &err);\n    if (err.error != QJsonParseError::NoError) {\n        qDebug() << \"Couldn't parse: \" << json << err.errorString();\n        return false;\n    }\n\n    auto *currentChat = ChatListModel::globalInstance()->currentChat();\n    Q_ASSERT(currentChat);\n    auto modelInfo = currentChat->modelInfo();\n\n    Q_ASSERT(doc.isObject());\n    QJsonObject object = doc.object();\n    object.insert(\"source\", \"gpt4all-chat\");\n    object.insert(\"agent_id\", modelInfo.filename());\n    
object.insert(\"submitter_id\", m_uniqueId);\n    object.insert(\"ingest_id\", ingestId);\n\n    QString attribution = MySettings::globalInstance()->networkAttribution();\n    if (!attribution.isEmpty())\n        object.insert(\"network/attribution\", attribution);\n\n    if (!modelInfo.id().isNull())\n        if (auto tmpl = modelInfo.chatTemplate().asModern())\n            object.insert(\"chat_template\"_L1, *tmpl);\n\n    QJsonDocument newDoc;\n    newDoc.setObject(object);\n\n#if defined(DEBUG)\n    printf(\"%s\\n\", qPrintable(newDoc.toJson(QJsonDocument::Indented)));\n    fflush(stdout);\n#endif\n\n    QUrl jsonUrl(\"https://api.gpt4all.io/v1/ingest/chat\");\n    QNetworkRequest request(jsonUrl);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    QByteArray body(newDoc.toJson(QJsonDocument::Compact));\n    request.setHeader(QNetworkRequest::ContentTypeHeader, \"application/json\");\n    QNetworkReply *jsonReply = m_networkManager.post(request, body);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort);\n    connect(jsonReply, &QNetworkReply::finished, this, &Network::handleJsonUploadFinished);\n    m_activeUploads.append(jsonReply);\n    return true;\n}\n\nvoid Network::handleJsonUploadFinished()\n{\n    QNetworkReply *jsonReply = qobject_cast<QNetworkReply *>(sender());\n    if (!jsonReply)\n        return;\n\n    m_activeUploads.removeAll(jsonReply);\n\n    if (jsonReply->error() != QNetworkReply::NoError) {\n        qWarning() << \"Request to\" << jsonReply->url().toString() << \"failed:\" << jsonReply->errorString();\n        jsonReply->deleteLater();\n        return;\n    }\n\n    QVariant response = jsonReply->attribute(QNetworkRequest::HttpStatusCodeAttribute);\n    Q_ASSERT(response.isValid());\n    bool ok;\n    int code = response.toInt(&ok);\n    if (!ok)\n        qWarning() << \"ERROR: ingest invalid 
response.\";\n    if (code != 200) {\n        qWarning() << \"ERROR: ingest response != 200 code:\" << code;\n        sendHealth();\n    }\n\n#if defined(DEBUG)\n    QByteArray jsonData = jsonReply->readAll();\n    QJsonParseError err;\n\n    QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);\n    if (err.error != QJsonParseError::NoError) {\n        qDebug() << \"ERROR: Couldn't parse: \" << jsonData << err.errorString();\n        return;\n    }\n\n    printf(\"%s\\n\", qPrintable(document.toJson(QJsonDocument::Indented)));\n    fflush(stdout);\n#endif\n\n    jsonReply->deleteLater();\n}\n\nvoid Network::handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors)\n{\n    QUrl url = reply->request().url();\n    for (const auto &e : errors)\n        qWarning() << \"ERROR: Received ssl error:\" << e.errorString() << \"for\" << url;\n}\n\nvoid Network::sendOptOut()\n{\n    QJsonObject properties;\n    properties.insert(\"token\", MIXPANEL_TOKEN);\n    properties.insert(\"time\", QDateTime::currentMSecsSinceEpoch());\n    properties.insert(\"distinct_id\", m_uniqueId);\n    properties.insert(\"$insert_id\", generateUniqueId());\n\n    QJsonObject event;\n    event.insert(\"event\", \"opt_out\");\n    event.insert(\"properties\", properties);\n\n    QJsonArray array;\n    array.append(event);\n\n    QJsonDocument doc;\n    doc.setArray(array);\n    emit requestMixpanel(doc.toJson(QJsonDocument::Compact));\n\n#if defined(DEBUG)\n    printf(\"%s %s\\n\", qPrintable(\"opt_out\"), qPrintable(doc.toJson(QJsonDocument::Indented)));\n    fflush(stdout);\n#endif\n}\n\nvoid Network::sendStartup()\n{\n    const auto *mySettings = MySettings::globalInstance();\n    Q_ASSERT(mySettings->isNetworkUsageStatsActiveSet());\n    if (!mySettings->networkUsageStatsActive()) {\n        // send a single opt-out per session after the user has made their selections,\n        // unless this is a normal start (same version) and the user was already opted out\n        if 
(!m_hasSentOptOut) {\n            sendOptOut();\n            m_hasSentOptOut = true;\n        }\n        return;\n    }\n\n    // only chance to enable usage stats is at the start of a new session\n    m_sendUsageStats = true;\n\n    const auto *display = QGuiApplication::primaryScreen();\n    trackEvent(\"startup\", {\n        // Build info\n        { \"build_compiler\",     COMPILER_NAME                                                         },\n        { \"build_compiler_ver\", COMPILER_VER                                                          },\n        { \"build_abi\",          QSysInfo::buildAbi()                                                  },\n        { \"build_cpu_arch\",     QSysInfo::buildCpuArchitecture()                                      },\n#ifdef __GLIBC__\n        { \"build_glibc_ver\",    QStringLiteral(STR(__GLIBC__) \".\" STR(__GLIBC_MINOR__))               },\n#endif\n        { \"qt_version\",         QLibraryInfo::version().toString()                                    },\n        { \"qt_debug\" ,          QLibraryInfo::isDebugBuild()                                          },\n        { \"qt_shared\",          QLibraryInfo::isSharedBuild()                                         },\n        // System info\n        { \"runtime_cpu_arch\",   QSysInfo::currentCpuArchitecture()                                    },\n#ifdef __GLIBC__\n        { \"runtime_glibc_ver\",  gnu_get_libc_version()                                                },\n#endif\n        { \"sys_kernel_type\",    QSysInfo::kernelType()                                                },\n        { \"sys_kernel_ver\",     QSysInfo::kernelVersion()                                             },\n        { \"sys_product_type\",   QSysInfo::productType()                                               },\n        { \"sys_product_ver\",    QSysInfo::productVersion()                                            },\n#ifdef Q_OS_MAC\n        { \"sys_hw_model\",       
getSysctl(\"hw.model\").value_or(u\"(unknown)\"_s)                        },\n#endif\n        { \"$screen_dpi\",        std::round(display->physicalDotsPerInch())                            },\n        { \"display\",            u\"%1x%2\"_s.arg(display->size().width()).arg(display->size().height()) },\n        { \"ram\",                LLM::globalInstance()->systemTotalRAMInGB()                           },\n        { \"cpu\",                getCPUModel()                                                         },\n        { \"cpu_supports_avx2\",  LLModel::Implementation::cpuSupportsAVX2()                            },\n        // Datalake status\n        { \"datalake_active\",    mySettings->networkIsActive()                                         },\n    });\n    sendIpify();\n\n    // mirror opt-out logic so the ratio can be used to infer totals\n    if (!m_hasSentOptIn) {\n        trackEvent(\"opt_in\");\n        m_hasSentOptIn = true;\n    }\n}\n\nvoid Network::trackChatEvent(const QString &ev, QVariantMap props)\n{\n    auto *curChat = ChatListModel::globalInstance()->currentChat();\n    Q_ASSERT(curChat);\n    if (!props.contains(\"model\"))\n        props.insert(\"model\", curChat->modelInfo().filename());\n    props.insert(\"device_backend\", curChat->deviceBackend());\n    props.insert(\"actualDevice\", curChat->device());\n    props.insert(\"doc_collections_enabled\", curChat->collectionList().count());\n    props.insert(\"doc_collections_total\", LocalDocs::globalInstance()->localDocsModel()->rowCount());\n    props.insert(\"datalake_active\", MySettings::globalInstance()->networkIsActive());\n    props.insert(\"using_server\", curChat->isServer());\n    trackEvent(ev, props);\n}\n\nvoid Network::trackEvent(const QString &ev, const QVariantMap &props)\n{\n    if (!m_sendUsageStats)\n        return;\n\n    QJsonObject properties;\n\n    properties.insert(\"token\", MIXPANEL_TOKEN);\n    if (!props.contains(\"time\"))\n        properties.insert(\"time\", 
QDateTime::currentMSecsSinceEpoch());\n    properties.insert(\"distinct_id\", m_uniqueId); // effectively a device ID\n    properties.insert(\"$insert_id\", generateUniqueId());\n\n    if (!m_ipify.isEmpty())\n        properties.insert(\"ip\", m_ipify);\n\n    properties.insert(\"$os\", QSysInfo::prettyProductName());\n    properties.insert(\"session_id\", m_sessionId);\n    properties.insert(\"name\", QCoreApplication::applicationName() + \" v\" + QCoreApplication::applicationVersion());\n\n    for (const auto &[key, value]: props.asKeyValueRange())\n        properties.insert(key, QJsonValue::fromVariant(value));\n\n    QJsonObject event;\n    event.insert(\"event\", ev);\n    event.insert(\"properties\", properties);\n\n    QJsonArray array;\n    array.append(event);\n\n    QJsonDocument doc;\n    doc.setArray(array);\n    emit requestMixpanel(doc.toJson(QJsonDocument::Compact));\n\n#if defined(DEBUG)\n    printf(\"%s %s\\n\", qPrintable(ev), qPrintable(doc.toJson(QJsonDocument::Indented)));\n    fflush(stdout);\n#endif\n}\n\nvoid Network::sendIpify()\n{\n    if (!m_sendUsageStats || !m_ipify.isEmpty())\n        return;\n\n    QUrl ipifyUrl(\"https://api.ipify.org\");\n    QNetworkRequest request(ipifyUrl);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    QNetworkReply *reply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort);\n    connect(reply, &QNetworkReply::finished, this, &Network::handleIpifyFinished);\n}\n\nvoid Network::sendMixpanel(const QByteArray &json)\n{\n    QUrl trackUrl(\"https://api.mixpanel.com/track\");\n    QNetworkRequest request(trackUrl);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    request.setHeader(QNetworkRequest::ContentTypeHeader, \"application/json\");\n  
  QNetworkReply *trackReply = m_networkManager.post(request, json);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, trackReply, &QNetworkReply::abort);\n    connect(trackReply, &QNetworkReply::finished, this, &Network::handleMixpanelFinished);\n}\n\nvoid Network::handleIpifyFinished()\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n    if (reply->error() != QNetworkReply::NoError) {\n        qWarning() << \"Request to\" << reply->url().toString() << \"failed:\" << reply->errorString();\n        reply->deleteLater();\n        return;\n    }\n\n    QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);\n    Q_ASSERT(response.isValid());\n    bool ok;\n    int code = response.toInt(&ok);\n    if (!ok)\n        qWarning() << \"ERROR: ipify invalid response.\";\n    if (code != 200)\n        qWarning() << \"ERROR: ipify response != 200 code:\" << code;\n    m_ipify = reply->readAll();\n#if defined(DEBUG)\n    printf(\"ipify finished %s\\n\", qPrintable(m_ipify));\n    fflush(stdout);\n#endif\n    reply->deleteLater();\n\n    trackEvent(\"ipify_complete\");\n}\n\nvoid Network::handleMixpanelFinished()\n{\n    QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());\n    if (!reply)\n        return;\n    if (reply->error() != QNetworkReply::NoError) {\n        qWarning() << \"Request to\" << reply->url().toString() << \"failed:\" << reply->errorString();\n        reply->deleteLater();\n        return;\n    }\n\n    QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);\n    Q_ASSERT(response.isValid());\n    bool ok;\n    int code = response.toInt(&ok);\n    if (!ok)\n        qWarning() << \"ERROR: track invalid response.\";\n    if (code != 200)\n        qWarning() << \"ERROR: track response != 200 code:\" << code;\n#if defined(DEBUG)\n    printf(\"mixpanel finished %s\\n\", qPrintable(reply->readAll()));\n    fflush(stdout);\n#endif\n    
reply->deleteLater();\n}\n\nbool Network::sendConversation(const QString &ingestId, const QString &conversation)\n{\n    return packageAndSendJson(ingestId, conversation);\n}\n\nvoid Network::sendHealth()\n{\n    QUrl healthUrl(\"https://api.gpt4all.io/v1/health\");\n    QNetworkRequest request(healthUrl);\n    QSslConfiguration conf = request.sslConfiguration();\n    conf.setPeerVerifyMode(QSslSocket::VerifyNone);\n    request.setSslConfiguration(conf);\n    QNetworkReply *healthReply = m_networkManager.get(request);\n    connect(qGuiApp, &QCoreApplication::aboutToQuit, healthReply, &QNetworkReply::abort);\n    connect(healthReply, &QNetworkReply::finished, this, &Network::handleHealthFinished);\n}\n\nvoid Network::handleHealthFinished()\n{\n    QNetworkReply *healthReply = qobject_cast<QNetworkReply *>(sender());\n    if (!healthReply)\n        return;\n    if (healthReply->error() != QNetworkReply::NoError) {\n        qWarning() << \"Request to\" << healthReply->url().toString() << \"failed:\" << healthReply->errorString();\n        healthReply->deleteLater();\n        return;\n    }\n\n    QVariant response = healthReply->attribute(QNetworkRequest::HttpStatusCodeAttribute);\n    Q_ASSERT(response.isValid());\n    bool ok;\n    int code = response.toInt(&ok);\n    if (!ok)\n        qWarning() << \"ERROR: health invalid response.\";\n    if (code != 200) {\n        qWarning() << \"ERROR: health response != 200 code:\" << code;\n        emit healthCheckFailed(code);\n        MySettings::globalInstance()->setNetworkIsActive(false);\n    }\n    healthReply->deleteLater();\n}\n"
  },
  {
    "path": "gpt4all-chat/src/network.h",
    "content": "#ifndef NETWORK_H\n#define NETWORK_H\n\n#include <QByteArray>\n#include <QJsonValue>\n#include <QList>\n#include <QMap>\n#include <QNetworkAccessManager>\n#include <QNetworkReply>\n#include <QObject>\n#include <QSslError>\n#include <QString>\n#include <QVariant>\n#include <QVariantMap> // IWYU pragma: keep\n#include <QVector> // IWYU pragma: keep\n\n// IWYU pragma: no_forward_declare QByteArray\n// IWYU pragma: no_forward_declare QNetworkReply\n// IWYU pragma: no_forward_declare QSslError\nclass QUrl;\n\n\nstruct KeyValue {\n    QString key;\n    QJsonValue value;\n};\n\nclass Network : public QObject\n{\n    Q_OBJECT\npublic:\n    static Network *globalInstance();\n    static bool isHttpUrlValid(const QUrl url);\n\n    Q_INVOKABLE QString generateUniqueId() const;\n    Q_INVOKABLE bool sendConversation(const QString &ingestId, const QString &conversation);\n    Q_INVOKABLE void trackChatEvent(const QString &event, QVariantMap props = QVariantMap());\n    Q_INVOKABLE void trackEvent(const QString &event, const QVariantMap &props = QVariantMap());\n\nQ_SIGNALS:\n    void healthCheckFailed(int code);\n    void requestMixpanel(const QByteArray &json, bool isOptOut = false);\n\npublic Q_SLOTS:\n    void sendStartup();\n\nprivate Q_SLOTS:\n    void handleIpifyFinished();\n    void handleHealthFinished();\n    void handleJsonUploadFinished();\n    void handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors);\n    void handleMixpanelFinished();\n    void handleIsActiveChanged();\n    void handleUsageStatsActiveChanged();\n    void sendMixpanel(const QByteArray &json);\n\nprivate:\n    void sendOptOut();\n    void sendHealth();\n    void sendIpify();\n    bool packageAndSendJson(const QString &ingestId, const QString &json);\n\nprivate:\n    bool m_sendUsageStats = false;\n    bool m_hasSentOptIn;\n    bool m_hasSentOptOut;\n    QString m_ipify;\n    QString m_uniqueId;\n    QString m_sessionId;\n    QNetworkAccessManager m_networkManager;\n   
 QVector<QNetworkReply*> m_activeUploads;\n\nprivate:\n    explicit Network();\n    ~Network() {}\n    friend class MyNetwork;\n};\n\n#endif // NETWORK_H\n"
  },
  {
    "path": "gpt4all-chat/src/server.cpp",
    "content": "#include \"server.h\"\n\n#include \"chat.h\"\n#include \"chatmodel.h\"\n#include \"modellist.h\"\n#include \"mysettings.h\"\n#include \"utils.h\" // IWYU pragma: keep\n\n#include <fmt/format.h>\n#include <gpt4all-backend/llmodel.h>\n\n#include <QByteArray>\n#include <QCborArray>\n#include <QCborMap>\n#include <QCborValue>\n#include <QDateTime>\n#include <QDebug>\n#include <QHostAddress>\n#include <QHttpHeaders>\n#include <QHttpServer>\n#include <QHttpServerRequest>\n#include <QHttpServerResponder>\n#include <QJsonArray>\n#include <QJsonDocument>\n#include <QJsonObject>\n#include <QJsonValue>\n#include <QLatin1StringView>\n#include <QPair> // IWYU pragma: keep\n#include <QTcpServer>\n#include <QVariant>\n#include <Qt>\n#include <QtAssert>\n#include <QtCborCommon>\n#include <QtLogging>\n#include <QtMinMax>\n#include <QtPreprocessorSupport>\n#include <QtTypes>\n\n#include <cstdint>\n#include <exception>\n#include <iostream>\n#include <optional>\n#include <span>\n#include <stdexcept>\n#include <string>\n#include <string_view>\n#include <unordered_map>\n#include <utility>\n#include <variant>\n#include <vector>\n\nusing namespace std::string_literals;\nusing namespace Qt::Literals::StringLiterals;\n\n//#define DEBUG\n\n\nnamespace {\n\nclass InvalidRequestError: public std::invalid_argument {\n    using std::invalid_argument::invalid_argument;\n\npublic:\n    QHttpServerResponse asResponse() const\n    {\n        QJsonObject error {\n            { \"message\", what(),                     },\n            { \"type\",    u\"invalid_request_error\"_s, },\n            { \"param\",   QJsonValue::Null            },\n            { \"code\",    QJsonValue::Null            },\n        };\n        return { QJsonObject {{ \"error\", error }},\n                 QHttpServerResponder::StatusCode::BadRequest };\n    }\n\nprivate:\n    Q_DISABLE_COPY_MOVE(InvalidRequestError)\n};\n\n} // namespace\n\nstatic inline QJsonObject modelToJson(const ModelInfo &info)\n{\n    
QJsonObject model;\n    model.insert(\"id\", info.name());\n    model.insert(\"object\", \"model\");\n    model.insert(\"created\", 0);\n    model.insert(\"owned_by\", \"humanity\");\n    model.insert(\"root\", info.name());\n    model.insert(\"parent\", QJsonValue::Null);\n\n    QJsonArray permissions;\n    QJsonObject permissionObj;\n    permissionObj.insert(\"id\", \"placeholder\");\n    permissionObj.insert(\"object\", \"model_permission\");\n    permissionObj.insert(\"created\", 0);\n    permissionObj.insert(\"allow_create_engine\", false);\n    permissionObj.insert(\"allow_sampling\", false);\n    permissionObj.insert(\"allow_logprobs\", false);\n    permissionObj.insert(\"allow_search_indices\", false);\n    permissionObj.insert(\"allow_view\", true);\n    permissionObj.insert(\"allow_fine_tuning\", false);\n    permissionObj.insert(\"organization\", \"*\");\n    permissionObj.insert(\"group\", QJsonValue::Null);\n    permissionObj.insert(\"is_blocking\", false);\n    permissions.append(permissionObj);\n    model.insert(\"permissions\", permissions);\n    return model;\n}\n\nstatic inline QJsonObject resultToJson(const ResultInfo &info)\n{\n    QJsonObject result;\n    result.insert(\"file\", info.file);\n    result.insert(\"title\", info.title);\n    result.insert(\"author\", info.author);\n    result.insert(\"date\", info.date);\n    result.insert(\"text\", info.text);\n    result.insert(\"page\", info.page);\n    result.insert(\"from\", info.from);\n    result.insert(\"to\", info.to);\n    return result;\n}\n\nclass BaseCompletionRequest {\npublic:\n    QString model; // required\n    // NB: some parameters are not supported yet\n    int32_t max_tokens = 16;\n    qint64 n = 1;\n    float temperature = 1.f;\n    float top_p = 1.f;\n    float min_p = 0.f;\n\n    BaseCompletionRequest() = default;\n    virtual ~BaseCompletionRequest() = default;\n\n    virtual BaseCompletionRequest &parse(QCborMap request)\n    {\n        parseImpl(request);\n        if 
(!request.isEmpty())\n            throw InvalidRequestError(fmt::format(\n                \"Unrecognized request argument supplied: {}\", request.keys().constFirst().toString()\n            ));\n        return *this;\n    }\n\nprotected:\n    virtual void parseImpl(QCborMap &request)\n    {\n        using enum Type;\n\n        auto reqValue = [&request](auto &&...args) { return takeValue(request, args...); };\n        QCborValue value;\n\n        this->model = reqValue(\"model\", String, /*required*/ true).toString();\n\n        value = reqValue(\"frequency_penalty\", Number, false, /*min*/ -2, /*max*/ 2);\n        if (value.isDouble() || value.toInteger() != 0)\n            throw InvalidRequestError(\"'frequency_penalty' is not supported\");\n\n        value = reqValue(\"max_tokens\", Integer, false, /*min*/ 1);\n        if (!value.isNull())\n            this->max_tokens = int32_t(qMin(value.toInteger(), INT32_MAX));\n\n        value = reqValue(\"n\", Integer, false, /*min*/ 1);\n        if (!value.isNull())\n            this->n = value.toInteger();\n\n        value = reqValue(\"presence_penalty\", Number);\n        if (value.isDouble() || value.toInteger() != 0)\n            throw InvalidRequestError(\"'presence_penalty' is not supported\");\n\n        value = reqValue(\"seed\", Integer);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'seed' is not supported\");\n\n        value = reqValue(\"stop\");\n        if (!value.isNull())\n            throw InvalidRequestError(\"'stop' is not supported\");\n\n        value = reqValue(\"stream\", Boolean);\n        if (value.isTrue())\n            throw InvalidRequestError(\"'stream' is not supported\");\n\n        value = reqValue(\"stream_options\", Object);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'stream_options' is not supported\");\n\n        value = reqValue(\"temperature\", Number, false, /*min*/ 0, /*max*/ 2);\n        if (!value.isNull())\n            
this->temperature = float(value.toDouble());\n\n        value = reqValue(\"top_p\", Number, false, /*min*/ 0, /*max*/ 1);\n        if (!value.isNull())\n            this->top_p = float(value.toDouble());\n\n        value = reqValue(\"min_p\", Number, false, /*min*/ 0, /*max*/ 1);\n        if (!value.isNull())\n            this->min_p = float(value.toDouble());\n\n        reqValue(\"user\", String); // validate but don't use\n    }\n\n    enum class Type : uint8_t {\n        Boolean,\n        Integer,\n        Number,\n        String,\n        Array,\n        Object,\n    };\n\n    static const std::unordered_map<Type, const char *> s_typeNames;\n\n    static bool typeMatches(const QCborValue &value, Type type) noexcept {\n        using enum Type;\n        switch (type) {\n            case Boolean: return value.isBool();\n            case Integer: return value.isInteger();\n            case Number:  return value.isInteger() || value.isDouble();\n            case String:  return value.isString();\n            case Array:   return value.isArray();\n            case Object:  return value.isMap();\n        }\n        Q_UNREACHABLE();\n    }\n\n    static QCborValue takeValue(\n        QCborMap &obj, const char *key, std::optional<Type> type = {}, bool required = false,\n        std::optional<qint64> min = {}, std::optional<qint64> max = {}\n    ) {\n        auto value = obj.take(QLatin1StringView(key));\n        if (value.isUndefined())\n            value = QCborValue(QCborSimpleType::Null);\n        if (required && value.isNull())\n            throw InvalidRequestError(fmt::format(\"you must provide a {} parameter\", key));\n        if (type && !value.isNull() && !typeMatches(value, *type))\n            throw InvalidRequestError(fmt::format(\"'{}' is not of type '{}' - '{}'\",\n                                                  value.toVariant(), s_typeNames.at(*type), key));\n        if (!value.isNull()) {\n            double num = value.toDouble();\n            if 
(min && num < double(*min))\n                throw InvalidRequestError(fmt::format(\"{} is less than the minimum of {} - '{}'\", num, *min, key));\n            if (max && num > double(*max))\n                throw InvalidRequestError(fmt::format(\"{} is greater than the maximum of {} - '{}'\", num, *max, key));\n        }\n        return value;\n    }\n\nprivate:\n    Q_DISABLE_COPY_MOVE(BaseCompletionRequest)\n};\n\nclass CompletionRequest : public BaseCompletionRequest {\npublic:\n    QString prompt; // required\n    // some parameters are not supported yet - these ones are\n    bool echo = false;\n\n    CompletionRequest &parse(QCborMap request) override\n    {\n        BaseCompletionRequest::parse(std::move(request));\n        return *this;\n    }\n\nprotected:\n    void parseImpl(QCborMap &request) override\n    {\n        using enum Type;\n\n        auto reqValue = [&request](auto &&...args) { return takeValue(request, args...); };\n        QCborValue value;\n\n        BaseCompletionRequest::parseImpl(request);\n\n        this->prompt = reqValue(\"prompt\", String, /*required*/ true).toString();\n\n        value = reqValue(\"best_of\", Integer);\n        {\n            qint64 bof = value.toInteger(1);\n            if (this->n > bof)\n                throw InvalidRequestError(fmt::format(\n                    \"You requested that the server return more choices than it will generate (HINT: you must set 'n' \"\n                    \"(currently {}) to be at most 'best_of' (currently {}), or omit either parameter if you don't \"\n                    \"specifically want to use them.)\",\n                    this->n, bof\n                ));\n            if (bof > this->n)\n                throw InvalidRequestError(\"'best_of' is not supported\");\n        }\n\n        value = reqValue(\"echo\", Boolean);\n        if (value.isBool())\n            this->echo = value.toBool();\n\n        // we don't bother deeply typechecking unsupported subobjects for now\n        
value = reqValue(\"logit_bias\", Object);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'logit_bias' is not supported\");\n\n        value = reqValue(\"logprobs\", Integer, false, /*min*/ 0);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'logprobs' is not supported\");\n\n        value = reqValue(\"suffix\", String);\n        if (!value.isNull() && !value.toString().isEmpty())\n            throw InvalidRequestError(\"'suffix' is not supported\");\n    }\n};\n\nconst std::unordered_map<BaseCompletionRequest::Type, const char *> BaseCompletionRequest::s_typeNames = {\n    { BaseCompletionRequest::Type::Boolean, \"boolean\" },\n    { BaseCompletionRequest::Type::Integer, \"integer\" },\n    { BaseCompletionRequest::Type::Number,  \"number\"  },\n    { BaseCompletionRequest::Type::String,  \"string\"  },\n    { BaseCompletionRequest::Type::Array,   \"array\"   },\n    { BaseCompletionRequest::Type::Object,  \"object\"  },\n};\n\nclass ChatRequest : public BaseCompletionRequest {\npublic:\n    struct Message {\n        enum class Role { System, User, Assistant };\n        Role    role;\n        QString content;\n    };\n\n    QList<Message> messages; // required\n\n    ChatRequest &parse(QCborMap request) override\n    {\n        BaseCompletionRequest::parse(std::move(request));\n        return *this;\n    }\n\nprotected:\n    void parseImpl(QCborMap &request) override\n    {\n        using enum Type;\n\n        auto reqValue = [&request](auto &&...args) { return takeValue(request, args...); };\n        QCborValue value;\n\n        BaseCompletionRequest::parseImpl(request);\n\n        value = reqValue(\"messages\", std::nullopt, /*required*/ true);\n        if (!value.isArray() || value.toArray().isEmpty())\n            throw InvalidRequestError(fmt::format(\n                \"Invalid type for 'messages': expected a non-empty array of objects, but got '{}' instead.\",\n                value.toVariant()\n            
));\n\n        this->messages.clear();\n        {\n            QCborArray arr = value.toArray();\n            for (qsizetype i = 0; i < arr.size(); i++) {\n                const auto &elem = arr[i];\n                if (!elem.isMap())\n                    throw InvalidRequestError(fmt::format(\n                        \"Invalid type for 'messages[{}]': expected an object, but got '{}' instead.\",\n                        i, elem.toVariant()\n                    ));\n                QCborMap msg = elem.toMap();\n                Message res;\n                QString role = takeValue(msg, \"role\", String, /*required*/ true).toString();\n                if (role == u\"system\"_s) {\n                    res.role = Message::Role::System;\n                } else if (role == u\"user\"_s) {\n                    res.role = Message::Role::User;\n                } else if (role == u\"assistant\"_s) {\n                    res.role = Message::Role::Assistant;\n                } else {\n                    throw InvalidRequestError(fmt::format(\n                        \"Invalid 'messages[{}].role': expected one of 'system', 'assistant', or 'user', but got '{}'\"\n                        \" instead.\",\n                        i, role.toStdString()\n                    ));\n                }\n                res.content = takeValue(msg, \"content\", String, /*required*/ true).toString();\n                this->messages.append(res);\n\n                if (!msg.isEmpty())\n                    throw InvalidRequestError(fmt::format(\n                        \"Invalid 'messages[{}]': unrecognized key: '{}'\", i, msg.keys().constFirst().toString()\n                    ));\n            }\n        }\n\n        // we don't bother deeply typechecking unsupported subobjects for now\n        value = reqValue(\"logit_bias\", Object);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'logit_bias' is not supported\");\n\n        value = reqValue(\"logprobs\", Boolean);\n   
     if (value.isTrue())\n            throw InvalidRequestError(\"'logprobs' is not supported\");\n\n        value = reqValue(\"top_logprobs\", Integer, false, /*min*/ 0);\n        if (!value.isNull())\n            throw InvalidRequestError(\"The 'top_logprobs' parameter is only allowed when 'logprobs' is enabled.\");\n\n        value = reqValue(\"response_format\", Object);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'response_format' is not supported\");\n\n        reqValue(\"service_tier\", String); // validate but don't use\n\n        value = reqValue(\"tools\", Array);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'tools' is not supported\");\n\n        value = reqValue(\"tool_choice\");\n        if (!value.isNull())\n            throw InvalidRequestError(\"'tool_choice' is not supported\");\n\n        // validate but don't use\n        reqValue(\"parallel_tool_calls\", Boolean);\n\n        value = reqValue(\"function_call\");\n        if (!value.isNull())\n            throw InvalidRequestError(\"'function_call' is not supported\");\n\n        value = reqValue(\"functions\", Array);\n        if (!value.isNull())\n            throw InvalidRequestError(\"'functions' is not supported\");\n    }\n};\n\ntemplate <typename T>\nT &parseRequest(T &request, QJsonObject &&obj)\n{\n    // lossless conversion to CBOR exposes more type information\n    return request.parse(QCborMap::fromJsonObject(obj));\n}\n\nServer::Server(Chat *chat)\n    : ChatLLM(chat, true /*isServer*/)\n    , m_chat(chat)\n{\n    connect(this, &Server::threadStarted, this, &Server::start);\n    connect(this, &Server::databaseResultsChanged, this, &Server::handleDatabaseResultsChanged);\n    connect(chat, &Chat::collectionListChanged, this, &Server::handleCollectionListChanged, Qt::QueuedConnection);\n}\n\nstatic QJsonObject requestFromJson(const QByteArray &request)\n{\n    QJsonParseError err;\n    const QJsonDocument document = 
QJsonDocument::fromJson(request, &err);\n    if (err.error || !document.isObject())\n        throw InvalidRequestError(fmt::format(\n            \"error parsing request JSON: {}\",\n            err.error ? err.errorString().toStdString() : \"not an object\"s\n        ));\n    return document.object();\n}\n\nvoid Server::start()\n{\n    m_server = std::make_unique<QHttpServer>(this);\n    auto *tcpServer = new QTcpServer(m_server.get());\n\n    auto port = MySettings::globalInstance()->networkPort();\n    if (!tcpServer->listen(QHostAddress::LocalHost, port)) {\n        qWarning() << \"Server ERROR: Failed to listen on port\" << port;\n        return;\n    }\n    if (!m_server->bind(tcpServer)) {\n        qWarning() << \"Server ERROR: Failed to bind HTTP server to socket\" << port;\n        return;\n    }\n\n    m_server->route(\"/v1/models\", QHttpServerRequest::Method::Get,\n        [](const QHttpServerRequest &) {\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n\n            const QList<ModelInfo> modelList = ModelList::globalInstance()->selectableModelList();\n            QJsonObject root;\n            root.insert(\"object\", \"list\");\n            QJsonArray data;\n            for (const ModelInfo &info : modelList) {\n                Q_ASSERT(info.installed);\n                if (!info.installed)\n                    continue;\n                data.append(modelToJson(info));\n            }\n            root.insert(\"data\", data);\n            return QHttpServerResponse(root);\n        }\n    );\n\n    m_server->route(\"/v1/models/<arg>\", QHttpServerRequest::Method::Get,\n        [](const QString &model, const QHttpServerRequest &) {\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n\n            const QList<ModelInfo> modelList = 
ModelList::globalInstance()->selectableModelList();\n            QJsonObject object;\n            for (const ModelInfo &info : modelList) {\n                Q_ASSERT(info.installed);\n                if (!info.installed)\n                    continue;\n\n                if (model == info.name()) {\n                    object = modelToJson(info);\n                    break;\n                }\n            }\n            return QHttpServerResponse(object);\n        }\n    );\n\n    m_server->route(\"/v1/completions\", QHttpServerRequest::Method::Post,\n        [this](const QHttpServerRequest &request) {\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n\n            try {\n                auto reqObj = requestFromJson(request.body());\n#if defined(DEBUG)\n                qDebug().noquote() << \"/v1/completions request\" << QJsonDocument(reqObj).toJson(QJsonDocument::Indented);\n#endif\n                CompletionRequest req;\n                parseRequest(req, std::move(reqObj));\n                auto [resp, respObj] = handleCompletionRequest(req);\n#if defined(DEBUG)\n                if (respObj)\n                    qDebug().noquote() << \"/v1/completions reply\" << QJsonDocument(*respObj).toJson(QJsonDocument::Indented);\n#endif\n                return std::move(resp);\n            } catch (const InvalidRequestError &e) {\n                return e.asResponse();\n            }\n        }\n    );\n\n    m_server->route(\"/v1/chat/completions\", QHttpServerRequest::Method::Post,\n        [this](const QHttpServerRequest &request) {\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n\n            try {\n                auto reqObj = requestFromJson(request.body());\n#if defined(DEBUG)\n                qDebug().noquote() << \"/v1/chat/completions request\" << 
QJsonDocument(reqObj).toJson(QJsonDocument::Indented);\n#endif\n                ChatRequest req;\n                parseRequest(req, std::move(reqObj));\n                auto [resp, respObj] = handleChatRequest(req);\n                (void)respObj;\n#if defined(DEBUG)\n                if (respObj)\n                    qDebug().noquote() << \"/v1/chat/completions reply\" << QJsonDocument(*respObj).toJson(QJsonDocument::Indented);\n#endif\n                return std::move(resp);\n            } catch (const InvalidRequestError &e) {\n                return e.asResponse();\n            }\n        }\n    );\n\n    // Respond with code 405 to wrong HTTP methods:\n    m_server->route(\"/v1/models\",  QHttpServerRequest::Method::Post,\n        [] {\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n            return QHttpServerResponse(\n                QJsonDocument::fromJson(\"{\\\"error\\\": {\\\"message\\\": \\\"Not allowed to POST on /v1/models.\"\n                    \" (HINT: Perhaps you meant to use a different HTTP method?)\\\",\"\n                    \" \\\"type\\\": \\\"invalid_request_error\\\", \\\"param\\\": null, \\\"code\\\": null}}\").object(),\n                QHttpServerResponder::StatusCode::MethodNotAllowed);\n        }\n    );\n\n    m_server->route(\"/v1/models/<arg>\", QHttpServerRequest::Method::Post,\n        [](const QString &model) {\n            (void)model;\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n            return QHttpServerResponse(\n                QJsonDocument::fromJson(\"{\\\"error\\\": {\\\"message\\\": \\\"Not allowed to POST on /v1/models/*.\"\n                    \" (HINT: Perhaps you meant to use a different HTTP method?)\\\",\"\n                    \" \\\"type\\\": \\\"invalid_request_error\\\", \\\"param\\\": 
null, \\\"code\\\": null}}\").object(),\n                QHttpServerResponder::StatusCode::MethodNotAllowed);\n        }\n    );\n\n    m_server->route(\"/v1/completions\", QHttpServerRequest::Method::Get,\n        [] {\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n            return QHttpServerResponse(\n                QJsonDocument::fromJson(\"{\\\"error\\\": {\\\"message\\\": \\\"Only POST requests are accepted.\\\",\"\n                    \" \\\"type\\\": \\\"invalid_request_error\\\", \\\"param\\\": null, \\\"code\\\": \\\"method_not_supported\\\"}}\").object(),\n                QHttpServerResponder::StatusCode::MethodNotAllowed);\n        }\n    );\n\n    m_server->route(\"/v1/chat/completions\", QHttpServerRequest::Method::Get,\n        [] {\n            if (!MySettings::globalInstance()->serverChat())\n                return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);\n            return QHttpServerResponse(\n                QJsonDocument::fromJson(\"{\\\"error\\\": {\\\"message\\\": \\\"Only POST requests are accepted.\\\",\"\n                    \" \\\"type\\\": \\\"invalid_request_error\\\", \\\"param\\\": null, \\\"code\\\": \\\"method_not_supported\\\"}}\").object(),\n                QHttpServerResponder::StatusCode::MethodNotAllowed);\n        }\n    );\n\n    m_server->addAfterRequestHandler(this, [](const QHttpServerRequest &req, QHttpServerResponse &resp) {\n        Q_UNUSED(req);\n        auto headers = resp.headers();\n        headers.append(\"Access-Control-Allow-Origin\"_L1, \"*\"_L1);\n        resp.setHeaders(std::move(headers));\n    });\n\n    connect(this, &Server::requestResetResponseState, m_chat, &Chat::resetResponseState, Qt::BlockingQueuedConnection);\n}\n\nstatic auto makeError(auto &&...args) -> std::pair<QHttpServerResponse, std::optional<QJsonObject>>\n{\n    return {QHttpServerResponse(args...), 
std::nullopt};\n}\n\nauto Server::handleCompletionRequest(const CompletionRequest &request)\n    -> std::pair<QHttpServerResponse, std::optional<QJsonObject>>\n{\n    Q_ASSERT(m_chatModel);\n\n    auto *mySettings = MySettings::globalInstance();\n\n    ModelInfo modelInfo = ModelList::globalInstance()->defaultModelInfo();\n    const QList<ModelInfo> modelList = ModelList::globalInstance()->selectableModelList();\n    for (const ModelInfo &info : modelList) {\n        Q_ASSERT(info.installed);\n        if (!info.installed)\n            continue;\n        if (request.model == info.name() || request.model == info.filename()) {\n            modelInfo = info;\n            break;\n        }\n    }\n\n    // load the new model if necessary\n    setShouldBeLoaded(true);\n\n    if (modelInfo.filename().isEmpty()) {\n        std::cerr << \"ERROR: couldn't load default model \" << request.model.toStdString() << std::endl;\n        return makeError(QHttpServerResponder::StatusCode::InternalServerError);\n    }\n\n    emit requestResetResponseState(); // blocks\n    qsizetype prevMsgIndex = m_chatModel->count() - 1;\n    if (prevMsgIndex >= 0)\n        m_chatModel->updateCurrentResponse(prevMsgIndex, false);\n\n    // NB: this resets the context, regardless of whether this model is already loaded\n    if (!loadModel(modelInfo)) {\n        std::cerr << \"ERROR: couldn't load model \" << modelInfo.name().toStdString() << std::endl;\n        return makeError(QHttpServerResponder::StatusCode::InternalServerError);\n    }\n\n    // add prompt/response items to GUI\n    m_chatModel->appendPrompt(request.prompt);\n    m_chatModel->appendResponse();\n\n    // FIXME(jared): taking parameters from the UI inhibits reproducibility of results\n    LLModel::PromptContext promptCtx {\n        .n_predict      = request.max_tokens,\n        .top_k          = mySettings->modelTopK(modelInfo),\n        .top_p          = request.top_p,\n        .min_p          = request.min_p,\n        .temp       
    = request.temperature,\n        .n_batch        = mySettings->modelPromptBatchSize(modelInfo),\n        .repeat_penalty = float(mySettings->modelRepeatPenalty(modelInfo)),\n        .repeat_last_n  = mySettings->modelRepeatPenaltyTokens(modelInfo),\n    };\n\n    auto promptUtf8 = request.prompt.toUtf8();\n    int promptTokens = 0;\n    int responseTokens = 0;\n    QStringList responses;\n    for (int i = 0; i < request.n; ++i) {\n        PromptResult result;\n        try {\n            result = promptInternal(std::string_view(promptUtf8.cbegin(), promptUtf8.cend()),\n                                    promptCtx,\n                                    /*usedLocalDocs*/ false);\n        } catch (const std::exception &e) {\n            m_chatModel->setResponseValue(e.what());\n            m_chatModel->setError();\n            emit responseStopped(0);\n            return makeError(QHttpServerResponder::StatusCode::InternalServerError);\n        }\n        QString resp = QString::fromUtf8(result.response);\n        if (request.echo)\n            resp = request.prompt + resp;\n        responses << resp;\n        if (i == 0)\n            promptTokens = result.promptTokens;\n        responseTokens += result.responseTokens;\n    }\n\n    QJsonObject responseObject {\n        { \"id\",      \"placeholder\"                      },\n        { \"object\",  \"text_completion\"                  },\n        { \"created\", QDateTime::currentSecsSinceEpoch() },\n        { \"model\",   modelInfo.name()                   },\n    };\n\n    QJsonArray choices;\n    for (qsizetype i = 0; auto &resp : std::as_const(responses)) {\n        choices << QJsonObject {\n            { \"text\",          resp                                                     },\n            { \"index\",         i++                                                      },\n            { \"logprobs\",      QJsonValue::Null                                         },\n            { \"finish_reason\", 
responseTokens == request.max_tokens ? \"length\" : \"stop\" },\n        };\n    }\n\n    responseObject.insert(\"choices\", choices);\n    responseObject.insert(\"usage\", QJsonObject {\n        { \"prompt_tokens\",     promptTokens                  },\n        { \"completion_tokens\", responseTokens                },\n        { \"total_tokens\",      promptTokens + responseTokens },\n    });\n\n    return {QHttpServerResponse(responseObject), responseObject};\n}\n\nauto Server::handleChatRequest(const ChatRequest &request)\n    -> std::pair<QHttpServerResponse, std::optional<QJsonObject>>\n{\n    auto *mySettings = MySettings::globalInstance();\n\n    ModelInfo modelInfo = ModelList::globalInstance()->defaultModelInfo();\n    const QList<ModelInfo> modelList = ModelList::globalInstance()->selectableModelList();\n    for (const ModelInfo &info : modelList) {\n        Q_ASSERT(info.installed);\n        if (!info.installed)\n            continue;\n        if (request.model == info.name() || request.model == info.filename()) {\n            modelInfo = info;\n            break;\n        }\n    }\n\n    // load the new model if necessary\n    setShouldBeLoaded(true);\n\n    if (modelInfo.filename().isEmpty()) {\n        std::cerr << \"ERROR: couldn't load default model \" << request.model.toStdString() << std::endl;\n        return makeError(QHttpServerResponder::StatusCode::InternalServerError);\n    }\n\n    emit requestResetResponseState(); // blocks\n\n    // NB: this resets the context, regardless of whether this model is already loaded\n    if (!loadModel(modelInfo)) {\n        std::cerr << \"ERROR: couldn't load model \" << modelInfo.name().toStdString() << std::endl;\n        return makeError(QHttpServerResponder::StatusCode::InternalServerError);\n    }\n\n    m_chatModel->updateCurrentResponse(m_chatModel->count() - 1, false);\n\n    Q_ASSERT(!request.messages.isEmpty());\n\n    // adds prompt/response items to GUI\n    std::vector<MessageInput> messages;\n   
 for (auto &message : request.messages) {\n        using enum ChatRequest::Message::Role;\n        switch (message.role) {\n            case System:    messages.push_back({ MessageInput::Type::System,   message.content }); break;\n            case User:      messages.push_back({ MessageInput::Type::Prompt,   message.content }); break;\n            case Assistant: messages.push_back({ MessageInput::Type::Response, message.content }); break;\n        }\n    }\n    auto startOffset = m_chatModel->appendResponseWithHistory(messages);\n\n    // FIXME(jared): taking parameters from the UI inhibits reproducibility of results\n    LLModel::PromptContext promptCtx {\n        .n_predict      = request.max_tokens,\n        .top_k          = mySettings->modelTopK(modelInfo),\n        .top_p          = request.top_p,\n        .min_p          = request.min_p,\n        .temp           = request.temperature,\n        .n_batch        = mySettings->modelPromptBatchSize(modelInfo),\n        .repeat_penalty = float(mySettings->modelRepeatPenalty(modelInfo)),\n        .repeat_last_n  = mySettings->modelRepeatPenaltyTokens(modelInfo),\n    };\n\n    int promptTokens   = 0;\n    int responseTokens = 0;\n    QList<QPair<QString, QList<ResultInfo>>> responses;\n    for (int i = 0; i < request.n; ++i) {\n        ChatPromptResult result;\n        try {\n            result = promptInternalChat(m_collections, promptCtx, startOffset);\n        } catch (const std::exception &e) {\n            m_chatModel->setResponseValue(e.what());\n            m_chatModel->setError();\n            emit responseStopped(0);\n            return makeError(QHttpServerResponder::StatusCode::InternalServerError);\n        }\n        responses.emplace_back(result.response, result.databaseResults);\n        if (i == 0)\n            promptTokens = result.promptTokens;\n        responseTokens += result.responseTokens;\n    }\n\n    QJsonObject responseObject {\n        { \"id\",      \"placeholder\"                      
},\n        { \"object\",  \"chat.completion\"                  },\n        { \"created\", QDateTime::currentSecsSinceEpoch() },\n        { \"model\",   modelInfo.name()                   },\n    };\n\n    QJsonArray choices;\n    {\n        int index = 0;\n        for (const auto &r : responses) {\n            QString result = r.first;\n            QList<ResultInfo> infos = r.second;\n            QJsonObject message {\n                { \"role\",    \"assistant\" },\n                { \"content\", result      },\n            };\n            QJsonObject choice {\n                { \"index\",         index++                                                  },\n                { \"message\",       message                                                  },\n                { \"finish_reason\", responseTokens == request.max_tokens ? \"length\" : \"stop\" },\n                { \"logprobs\",      QJsonValue::Null                                         },\n            };\n            if (MySettings::globalInstance()->localDocsShowReferences()) {\n                QJsonArray references;\n                for (const auto &ref : infos)\n                    references.append(resultToJson(ref));\n                choice.insert(\"references\", references.isEmpty() ? QJsonValue::Null : QJsonValue(references));\n            }\n            choices.append(choice);\n        }\n    }\n\n    responseObject.insert(\"choices\", choices);\n    responseObject.insert(\"usage\", QJsonObject {\n        { \"prompt_tokens\",     promptTokens                  },\n        { \"completion_tokens\", responseTokens                },\n        { \"total_tokens\",      promptTokens + responseTokens },\n    });\n\n    return {QHttpServerResponse(responseObject), responseObject};\n}\n"
  },
  {
    "path": "gpt4all-chat/src/server.h",
    "content": "#ifndef SERVER_H\n#define SERVER_H\n\n#include \"chatllm.h\"\n#include \"database.h\"\n\n#include <QHttpServer>\n#include <QHttpServerResponse>\n#include <QJsonObject>\n#include <QList>\n#include <QObject> // IWYU pragma: keep\n#include <QString>\n\n#include <memory>\n#include <optional>\n#include <utility>\n\nclass Chat;\nclass ChatRequest;\nclass CompletionRequest;\n\n\nclass Server : public ChatLLM\n{\n    Q_OBJECT\n\npublic:\n    explicit Server(Chat *chat);\n    ~Server() override = default;\n\npublic Q_SLOTS:\n    void start();\n\nQ_SIGNALS:\n    void requestResetResponseState();\n\nprivate:\n    auto handleCompletionRequest(const CompletionRequest &request) -> std::pair<QHttpServerResponse, std::optional<QJsonObject>>;\n    auto handleChatRequest(const ChatRequest &request) -> std::pair<QHttpServerResponse, std::optional<QJsonObject>>;\n\nprivate Q_SLOTS:\n    void handleDatabaseResultsChanged(const QList<ResultInfo> &results) { m_databaseResults = results; }\n    void handleCollectionListChanged(const QList<QString> &collectionList) { m_collections = collectionList; }\n\nprivate:\n    Chat *m_chat;\n    std::unique_ptr<QHttpServer> m_server;\n    QList<ResultInfo> m_databaseResults;\n    QList<QString> m_collections;\n};\n\n#endif // SERVER_H\n"
  },
  {
    "path": "gpt4all-chat/src/tool.cpp",
    "content": "#include \"tool.h\"\n\n#include <QDataStream>\n#include <QtTypes>\n\n#include <string>\n\nusing json = nlohmann::ordered_json;\n\n\njson::object_t Tool::jinjaValue() const\n{\n    json::array_t paramList;\n    const QList<ToolParamInfo> p = parameters();\n    for (auto &info : p) {\n        std::string typeStr;\n        switch (info.type) {\n        using enum ToolEnums::ParamType;\n        case String:   typeStr = \"string\"; break;\n        case Number:   typeStr = \"number\"; break;\n        case Integer:  typeStr = \"integer\"; break;\n        case Object:   typeStr = \"object\"; break;\n        case Array:    typeStr = \"array\"; break;\n        case Boolean:  typeStr = \"boolean\"; break;\n        case Null:     typeStr = \"null\"; break;\n        }\n        paramList.emplace_back(json::initializer_list_t {\n            { \"name\",        info.name.toStdString()        },\n            { \"type\",        typeStr                        },\n            { \"description\", info.description.toStdString() },\n            { \"required\",    info.required                  },\n        });\n    }\n\n    return {\n        { \"name\",           name().toStdString()           },\n        { \"description\",    description().toStdString()    },\n        { \"function\",       function().toStdString()       },\n        { \"parameters\",     paramList                      },\n        { \"symbolicFormat\", symbolicFormat().toStdString() },\n        { \"examplePrompt\",  examplePrompt().toStdString()  },\n        { \"exampleCall\",    exampleCall().toStdString()    },\n        { \"exampleReply\",   exampleReply().toStdString()   },\n    };\n}\n\nvoid ToolCallInfo::serialize(QDataStream &stream, int version)\n{\n    stream << name;\n    stream << params.size();\n    for (auto param : params) {\n        stream << param.name;\n        stream << param.type;\n        stream << param.value;\n    }\n    stream << result;\n    stream << error;\n    stream << 
errorString;\n}\n\nbool ToolCallInfo::deserialize(QDataStream &stream, int version)\n{\n    stream >> name;\n    qsizetype count;\n    stream >> count;\n    for (int i = 0; i < count; ++i) {\n        ToolParam p;\n        stream >> p.name;\n        stream >> p.type;\n        stream >> p.value;\n        params.append(p);\n    }\n    stream >> result;\n    stream >> error;\n    stream >> errorString;\n    return true;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/tool.h",
    "content": "#ifndef TOOL_H\n#define TOOL_H\n\n#include <nlohmann/json.hpp>\n\n#include <QList>\n#include <QObject>\n#include <QString>\n#include <QVariant>\n#include <QtGlobal>\n\nclass QDataStream;\n\nusing json = nlohmann::ordered_json;\n\n\nnamespace ToolEnums\n{\n    Q_NAMESPACE\n    enum class Error\n    {\n        NoError = 0,\n        TimeoutError = 2,\n        UnknownError = 499,\n    };\n    Q_ENUM_NS(Error)\n\n    enum class ParamType { String, Number, Integer, Object, Array, Boolean, Null }; // json schema types\n    Q_ENUM_NS(ParamType)\n\n    enum class ParseState {\n        None,\n        InTagChoice,\n        InStart,\n        Partial,\n        Complete,\n    };\n    Q_ENUM_NS(ParseState)\n}\n\nstruct ToolParamInfo\n{\n    QString name;\n    ToolEnums::ParamType type;\n    QString description;\n    bool required;\n};\nQ_DECLARE_METATYPE(ToolParamInfo)\n\nstruct ToolParam\n{\n    QString name;\n    ToolEnums::ParamType type;\n    QVariant value;\n    bool operator==(const ToolParam& other) const\n    {\n        return name == other.name && type == other.type && value == other.value;\n    }\n};\nQ_DECLARE_METATYPE(ToolParam)\n\nstruct ToolCallInfo\n{\n    QString name;\n    QList<ToolParam> params;\n    QString result;\n    ToolEnums::Error error = ToolEnums::Error::NoError;\n    QString errorString;\n\n    void serialize(QDataStream &stream, int version);\n    bool deserialize(QDataStream &stream, int version);\n\n    bool operator==(const ToolCallInfo& other) const\n    {\n        return name == other.name && result == other.result && params == other.params\n            && error == other.error && errorString == other.errorString;\n    }\n};\nQ_DECLARE_METATYPE(ToolCallInfo)\n\nclass Tool : public QObject\n{\n    Q_OBJECT\n    Q_PROPERTY(QString name READ name CONSTANT)\n    Q_PROPERTY(QString description READ description CONSTANT)\n    Q_PROPERTY(QString function READ function CONSTANT)\n    Q_PROPERTY(QList<ToolParamInfo> parameters READ 
parameters CONSTANT)\n    Q_PROPERTY(QString symbolicFormat READ symbolicFormat CONSTANT)\n    Q_PROPERTY(QString examplePrompt READ examplePrompt CONSTANT)\n    Q_PROPERTY(QString exampleCall READ exampleCall CONSTANT)\n    Q_PROPERTY(QString exampleReply READ exampleReply CONSTANT)\n\npublic:\n    Tool() : QObject(nullptr) {}\n    virtual ~Tool() {}\n\n    virtual void run(const QList<ToolParam> &params) = 0;\n    virtual bool interrupt() = 0;\n\n    // Tools should set these if they encounter errors. For instance, a tool depending upon the network\n    // might set these error variables if the network is not available.\n    virtual ToolEnums::Error error() const { return ToolEnums::Error::NoError; }\n    virtual QString errorString() const { return QString(); }\n\n    // [Required] Human readable name of the tool.\n    virtual QString name() const = 0;\n\n    // [Required] Human readable description of what the tool does. Use this tool to: {{description}}\n    virtual QString description() const = 0;\n\n    // [Required] Must be unique. Name of the function to invoke. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n    virtual QString function() const = 0;\n\n    // [Optional] List describing the tool's parameters. An empty list specifies no parameters.\n    virtual QList<ToolParamInfo> parameters() const { return {}; }\n\n    // [Optional] The symbolic format of the toolcall.\n    virtual QString symbolicFormat() const { return QString(); }\n\n    // [Optional] A human generated example of a prompt that could result in this tool being called.\n    virtual QString examplePrompt() const { return QString(); }\n\n    // [Optional] An example of this tool call that pairs with the example query. 
It should be the\n    // complete string that the model must generate.\n    virtual QString exampleCall() const { return QString(); }\n\n    // [Optional] An example of the reply the model might generate given the result of the tool call.\n    virtual QString exampleReply() const { return QString(); }\n\n    bool operator==(const Tool &other) const { return function() == other.function(); }\n\n    json::object_t jinjaValue() const;\n\nQ_SIGNALS:\n    void runComplete(const ToolCallInfo &info);\n};\n\n#endif // TOOL_H\n"
  },
  {
    "path": "gpt4all-chat/src/toolcallparser.cpp",
    "content": "#include \"toolcallparser.h\"\n\n#include \"tool.h\"\n\n#include <QChar>\n#include <QSet>\n#include <QtAssert>\n#include <QtTypes>\n\n#include <stdexcept>\n\n\nToolCallParser::ToolCallParser()\n    : ToolCallParser(ToolCallConstants::AllTagNames)\n    {}\n\nToolCallParser::ToolCallParser(const QStringList &tagNames)\n{\n    QSet<QChar> firstChars;\n    for (auto &name : tagNames) {\n        if (name.isEmpty())\n            throw std::invalid_argument(\"ToolCallParser(): tag names must not be empty\");\n        if (firstChars.contains(name.at(0)))\n            throw std::invalid_argument(\"ToolCallParser(): tag names must not share any prefix\");\n        firstChars << name.at(0);\n        m_possibleStartTags << makeStartTag(name).toUtf8();\n        m_possibleEndTags   << makeEndTag  (name).toUtf8();\n    }\n    reset();\n}\n\nvoid ToolCallParser::reset()\n{\n    // Resets the search state, but not the buffer or global state\n    resetSearchState();\n\n    // These are global states maintained between update calls\n    m_buffers.clear();\n    m_buffers << QByteArray();\n}\n\nvoid ToolCallParser::resetSearchState()\n{\n    m_expected = {'<'};\n    m_expectedIndex = 0;\n    m_state = ToolEnums::ParseState::None;\n\n    m_toolCall.clear();\n    m_startTagBuffer.clear();\n    m_endTagBuffer.clear();\n\n    m_currentTagIndex = -1;\n    m_startIndex = -1;\n    m_endIndex = -1;\n}\n\nbool ToolCallParser::isExpected(char c) const\n{\n    return m_expected.isEmpty() || m_expected.contains(c);\n}\n\nvoid ToolCallParser::setExpected(const QList<QByteArray> &tags)\n{\n    m_expected.clear();\n    for (const auto &tag : tags) {\n        Q_ASSERT(tag.size() > m_expectedIndex);\n        m_expected << tag.at(m_expectedIndex);\n    }\n}\n\nQByteArray ToolCallParser::startTag() const\n{\n    if (m_currentTagIndex < 0)\n        return {};\n    return m_possibleStartTags.at(m_currentTagIndex);\n}\n\nQByteArray ToolCallParser::endTag() const\n{\n    if (m_currentTagIndex 
< 0)\n        return {};\n    return m_possibleEndTags.at(m_currentTagIndex);\n}\n\nQByteArray &ToolCallParser::currentBuffer()\n{\n    return m_buffers.last();\n}\n\n// This method is called with an arbitrary string and a current state. This method should take the\n// current state into account and then parse through the update character by character to arrive at\n// the new state.\nvoid ToolCallParser::update(const QByteArray &update)\n{\n    currentBuffer().append(update);\n\n    for (qsizetype i = currentBuffer().size() - update.size(); i < currentBuffer().size(); ++i) {\n        const char c = currentBuffer()[i];\n        const bool foundMatch = isExpected(c);\n        if (!foundMatch) {\n            resetSearchState();\n            continue;\n        }\n\n        switch (m_state) {\n        case ToolEnums::ParseState::None:\n            {\n                m_expectedIndex = 1;\n                setExpected(m_possibleStartTags);\n                m_state = ToolEnums::ParseState::InTagChoice;\n                m_startIndex = i;\n                break;\n            }\n        case ToolEnums::ParseState::InTagChoice:\n            {\n                for (int i = 0; i < m_possibleStartTags.size(); ++i) {\n                    const auto &tag = m_possibleStartTags.at(i);\n                    if (c == tag.at(1)) m_currentTagIndex = i;\n                }\n                if (m_currentTagIndex >= 0) {\n                    m_expectedIndex = 2;\n                    setExpected({m_possibleStartTags.at(m_currentTagIndex)});\n                    m_state = ToolEnums::ParseState::InStart;\n                } else\n                    resetSearchState();\n                break;\n            }\n        case ToolEnums::ParseState::InStart:\n            {\n                m_startTagBuffer.append(c);\n\n                const auto startTag = this->startTag();\n                Q_ASSERT(!startTag.isEmpty());\n                if (m_expectedIndex == startTag.size() - 1) {\n                   
 m_expectedIndex = 0;\n                    setExpected({});\n                    m_state = ToolEnums::ParseState::Partial;\n                } else {\n                    ++m_expectedIndex;\n                    Q_ASSERT(m_currentTagIndex >= 0);\n                    setExpected({startTag});\n                }\n                break;\n            }\n        case ToolEnums::ParseState::Partial:\n            {\n                Q_ASSERT(m_currentTagIndex >= 0);\n                const auto endTag = this->endTag();\n                Q_ASSERT(!endTag.isEmpty());\n                m_toolCall.append(c);\n                m_endTagBuffer.append(c);\n                if (m_endTagBuffer.size() > endTag.size())\n                    m_endTagBuffer.remove(0, 1);\n                if (m_endTagBuffer == endTag) {\n                    m_endIndex = i + 1;\n                    m_toolCall.chop(endTag.size());\n                    m_state = ToolEnums::ParseState::Complete;\n                    m_endTagBuffer.clear();\n                }\n                break;\n            }\n        case ToolEnums::ParseState::Complete:\n            {\n                // Already complete, do nothing further\n                break;\n            }\n        }\n    }\n}\n\nbool ToolCallParser::splitIfPossible()\n{\n    // The first split happens when we're in a partial state\n    if (m_buffers.size() < 2 && m_state == ToolEnums::ParseState::Partial) {\n        Q_ASSERT(m_startIndex >= 0);\n        const auto beforeToolCall = currentBuffer().left(m_startIndex);\n        const auto toolCall       = currentBuffer().mid (m_startIndex);\n        m_buffers = { beforeToolCall, toolCall };\n        return true;\n    }\n\n    // The second split happens when we're in the complete state\n    if (m_buffers.size() < 3 && m_state == ToolEnums::ParseState::Complete) {\n        Q_ASSERT(m_endIndex >= 0);\n        const auto &beforeToolCall = m_buffers.first();\n        const auto toolCall        = 
currentBuffer().left(m_endIndex);\n        const auto afterToolCall   = currentBuffer().mid (m_endIndex);\n        m_buffers = { beforeToolCall, toolCall, afterToolCall };\n        return true;\n    }\n\n    return false;\n}\n\nQStringList ToolCallParser::buffers() const\n{\n    QStringList result;\n    result.reserve(m_buffers.size());\n    for (const auto &buffer : m_buffers)\n        result << QString::fromUtf8(buffer);\n    return result;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/toolcallparser.h",
    "content": "#ifndef TOOLCALLPARSER_H\n#define TOOLCALLPARSER_H\n\n#include <QByteArray>\n#include <QList>\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n\nnamespace ToolEnums { enum class ParseState; }\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nclass ToolCallParser\n{\npublic:\n    ToolCallParser();\n    ToolCallParser(const QStringList &tagNames);\n\n    void reset();\n    void update(const QByteArray &update);\n    QString toolCall() const { return QString::fromUtf8(m_toolCall); }\n    int startIndex() const { return m_startIndex; }\n    ToolEnums::ParseState state() const { return m_state; }\n    QByteArray startTag() const;\n    QByteArray endTag() const;\n\n    bool splitIfPossible();\n    QStringList buffers() const;\n    int numberOfBuffers() const { return m_buffers.size(); }\n\n    static QString makeStartTag(const QString &name) { return u\"<%1>\"_s .arg(name); }\n    static QString makeEndTag  (const QString &name) { return u\"</%1>\"_s.arg(name); }\n\nprivate:\n    QByteArray &currentBuffer();\n    void resetSearchState();\n    bool isExpected(char c) const;\n    void setExpected(const QList<QByteArray> &tags);\n\n    QList<QByteArray> m_possibleStartTags;\n    QList<QByteArray> m_possibleEndTags;\n    QByteArray m_startTagBuffer;\n    QByteArray m_endTagBuffer;\n    int m_currentTagIndex;\n\n    QList<char> m_expected;\n    int m_expectedIndex;\n    ToolEnums::ParseState m_state;\n    QList<QByteArray> m_buffers;\n    QByteArray m_toolCall;\n    int m_startIndex;\n    int m_endIndex;\n};\n\nnamespace ToolCallConstants\n{\n    // NB: the parsing code assumes the first char of the various tags differ\n\n    inline const QString CodeInterpreterFunction = u\"javascript_interpret\"_s;\n    inline const QString CodeInterpreterStartTag = ToolCallParser::makeStartTag(CodeInterpreterFunction);\n    inline const QString CodeInterpreterEndTag   = ToolCallParser::makeEndTag  (CodeInterpreterFunction);\n    inline const QString 
CodeInterpreterPrefix   = u\"%1\\n```javascript\\n\"_s.arg(CodeInterpreterStartTag);\n    inline const QString CodeInterpreterSuffix   = u\"```\\n%1\"_s            .arg(CodeInterpreterEndTag  );\n\n    inline const QString ThinkTagName  = u\"think\"_s;\n    inline const QString ThinkStartTag = ToolCallParser::makeStartTag(ThinkTagName);\n    inline const QString ThinkEndTag   = ToolCallParser::makeEndTag  (ThinkTagName);\n\n    inline const QStringList AllTagNames { CodeInterpreterFunction, ThinkTagName };\n}\n\n#endif // TOOLCALLPARSER_H\n"
  },
  {
    "path": "gpt4all-chat/src/toolmodel.cpp",
    "content": "#include \"toolmodel.h\"\n\n#include \"codeinterpreter.h\"\n\n#include <QCoreApplication>\n#include <QEvent>\n#include <QGlobalStatic>\n\n\nclass MyToolModel: public ToolModel { };\nQ_GLOBAL_STATIC(MyToolModel, toolModelInstance)\nToolModel *ToolModel::globalInstance()\n{\n    return toolModelInstance();\n}\n\nToolModel::ToolModel()\n    : QAbstractListModel(nullptr)\n{\n    QCoreApplication::instance()->installEventFilter(this);\n\n    Tool* codeInterpreter = new CodeInterpreter;\n    m_tools.append(codeInterpreter);\n    m_toolMap.insert(codeInterpreter->function(), codeInterpreter);\n}\n\nbool ToolModel::eventFilter(QObject *obj, QEvent *ev)\n{\n    if (obj == QCoreApplication::instance() && ev->type() == QEvent::LanguageChange)\n        emit dataChanged(index(0, 0), index(m_tools.size() - 1, 0));\n    return false;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/toolmodel.h",
    "content": "#ifndef TOOLMODEL_H\n#define TOOLMODEL_H\n\n#include \"tool.h\"\n\n#include <QAbstractListModel>\n#include <QByteArray>\n#include <QHash>\n#include <QList>\n#include <QString>\n#include <QVariant>\n#include <QtPreprocessorSupport>\n\n\nclass ToolModel : public QAbstractListModel\n{\n    Q_OBJECT\n    Q_PROPERTY(int count READ count NOTIFY countChanged)\n\npublic:\n    static ToolModel *globalInstance();\n\n    enum Roles {\n        NameRole = Qt::UserRole + 1,\n        DescriptionRole,\n        FunctionRole,\n        ParametersRole,\n        SymbolicFormatRole,\n        ExamplePromptRole,\n        ExampleCallRole,\n        ExampleReplyRole,\n    };\n\n    int rowCount(const QModelIndex &parent = QModelIndex()) const override\n    {\n        Q_UNUSED(parent)\n        return m_tools.size();\n    }\n\n    QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override\n    {\n        if (!index.isValid() || index.row() < 0 || index.row() >= m_tools.size())\n            return QVariant();\n\n        const Tool *item = m_tools.at(index.row());\n        switch (role) {\n            case NameRole:\n                return item->name();\n            case DescriptionRole:\n                return item->description();\n            case FunctionRole:\n                return item->function();\n            case ParametersRole:\n                return QVariant::fromValue(item->parameters());\n            case SymbolicFormatRole:\n                return item->symbolicFormat();\n            case ExamplePromptRole:\n                return item->examplePrompt();\n            case ExampleCallRole:\n                return item->exampleCall();\n            case ExampleReplyRole:\n                return item->exampleReply();\n        }\n\n        return QVariant();\n    }\n\n    QHash<int, QByteArray> roleNames() const override\n    {\n        QHash<int, QByteArray> roles;\n        roles[NameRole] = \"name\";\n        roles[DescriptionRole] = 
\"description\";\n        roles[FunctionRole] = \"function\";\n        roles[ParametersRole] = \"parameters\";\n        roles[SymbolicFormatRole] = \"symbolicFormat\";\n        roles[ExamplePromptRole] = \"examplePrompt\";\n        roles[ExampleCallRole] = \"exampleCall\";\n        roles[ExampleReplyRole] = \"exampleReply\";\n        return roles;\n    }\n\n    Q_INVOKABLE Tool* get(int index) const\n    {\n        if (index < 0 || index >= m_tools.size()) return nullptr;\n        return m_tools.at(index);\n    }\n\n    Q_INVOKABLE Tool *get(const QString &id) const\n    {\n        if (!m_toolMap.contains(id)) return nullptr;\n        return m_toolMap.value(id);\n    }\n\n    int count() const { return m_tools.size(); }\n\nQ_SIGNALS:\n    void countChanged();\n    void valueChanged(int index, const QString &value);\n\nprotected:\n    bool eventFilter(QObject *obj, QEvent *ev) override;\n\nprivate:\n    explicit ToolModel();\n    ~ToolModel() {}\n    friend class MyToolModel;\n    QList<Tool*> m_tools;\n    QHash<QString, Tool*> m_toolMap;\n};\n\n#endif // TOOLMODEL_H\n"
  },
  {
    "path": "gpt4all-chat/src/utils.h",
    "content": "#pragma once\n\n#include <fmt/base.h>\n#include <fmt/format.h>\n\n#include <QByteArray>\n#include <QJsonValue>\n#include <QLatin1StringView> // IWYU pragma: keep\n#include <QString>\n#include <QStringView>\n#include <QUtf8StringView>\n#include <QVariant>\n\n#include <initializer_list>\n#include <string_view>\n#include <utility> // IWYU pragma: keep\n\n// IWYU pragma: no_forward_declare QJsonValue\nclass QJsonObject;\n\n\n// fmtlib formatters for QString and QVariant\n\n#define MAKE_FORMATTER(type, conversion)                                        \\\n    template <>                                                                 \\\n    struct fmt::formatter<type, char>: fmt::formatter<std::string_view, char> { \\\n        template <typename FmtContext>                                          \\\n        FmtContext::iterator format(const type &value, FmtContext &ctx) const   \\\n        {                                                                       \\\n            auto valueUtf8 = (conversion);                                      \\\n            std::string_view view(valueUtf8.cbegin(), valueUtf8.cend());        \\\n            return formatter<std::string_view, char>::format(view, ctx);        \\\n        }                                                                       \\\n    }\n\nMAKE_FORMATTER(QUtf8StringView, value                    );\nMAKE_FORMATTER(QStringView,     value.toUtf8()           );\nMAKE_FORMATTER(QString,         value.toUtf8()           );\nMAKE_FORMATTER(QVariant,        value.toString().toUtf8());\n\n// alternative to QJsonObject's initializer_list constructor that accepts Latin-1 strings\nQJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args);\n\n#include \"utils.inl\" // IWYU pragma: export\n"
  },
  {
    "path": "gpt4all-chat/src/utils.inl",
    "content": "#include <QJsonObject>\n\n\ninline QJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args)\n{\n    QJsonObject obj;\n    for (auto &arg : args)\n        obj.insert(arg.first, arg.second);\n    return obj;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/xlsxtomd.cpp",
    "content": "#include \"xlsxtomd.h\"\n\n#include <xlsxabstractsheet.h>\n#include <xlsxcell.h>\n#include <xlsxcellrange.h>\n#include <xlsxdocument.h>\n#include <xlsxformat.h>\n#include <xlsxworksheet.h>\n\n#include <QChar>\n#include <QDateTime>\n#include <QDebug>\n#include <QLatin1StringView>\n#include <QList>\n#include <QRegularExpression>\n#include <QString>\n#include <QStringList> // IWYU pragma: keep\n#include <QStringView>\n#include <QVariant>\n#include <QtLogging>\n\n#include <memory>\n\nusing namespace Qt::Literals::StringLiterals;\n\n\nstatic QString formatCellText(const QXlsx::Cell *cell)\n{\n    if (!cell) return QString();\n\n    QVariant value = cell->value();\n    QXlsx::Format format = cell->format();\n    QString cellText;\n\n    // Determine the cell type based on format\n    if (cell->isDateTime()) {\n        // Handle DateTime\n        QDateTime dateTime = cell->dateTime().toDateTime();\n        cellText = dateTime.isValid() ? dateTime.toString(QStringView(u\"yyyy-MM-dd\")) : value.toString();\n    } else {\n        cellText = value.toString();\n    }\n\n    if (cellText.isEmpty())\n        return QString();\n\n    // Escape special characters\n    static QRegularExpression special(\n        QStringLiteral(\n            R\"(()([\\\\`*_[\\]<>()!|])|)\"    // special characters\n            R\"(^(\\s*)(#+(?:\\s|$))|)\"      // headings\n            R\"(^(\\s*[0-9])(\\.(?:\\s|$))|)\" // ordered lists (\"1. 
a\")\n            R\"(^(\\s*)([+-](?:\\s|$)))\"     // unordered lists (\"- a\")\n        ),\n        QRegularExpression::MultilineOption\n    );\n    cellText.replace(special, uR\"(\\1\\\\2)\"_s);\n    cellText.replace(u'&', \"&amp;\"_L1);\n    cellText.replace(u'<', \"&lt;\"_L1);\n    cellText.replace(u'>', \"&gt;\"_L1);\n\n    // Apply Markdown formatting based on font styles\n    if (format.fontUnderline())\n        cellText = u\"_%1_\"_s.arg(cellText);\n    if (format.fontBold())\n        cellText = u\"**%1**\"_s.arg(cellText);\n    if (format.fontItalic())\n        cellText = u\"*%1*\"_s.arg(cellText);\n    if (format.fontStrikeOut())\n        cellText = u\"~~%1~~\"_s.arg(cellText);\n\n    return cellText;\n}\n\nstatic QString getCellValue(QXlsx::Worksheet *sheet, int row, int col)\n{\n    if (!sheet)\n        return QString();\n\n    // Attempt to retrieve the cell directly\n    std::shared_ptr<QXlsx::Cell> cell = sheet->cellAt(row, col);\n\n    // If the cell is part of a merged range and not directly available\n    if (!cell) {\n        for (const QXlsx::CellRange &range : sheet->mergedCells()) {\n            if (row >= range.firstRow() && row <= range.lastRow() &&\n                col >= range.firstColumn() && col <= range.lastColumn()) {\n                cell = sheet->cellAt(range.firstRow(), range.firstColumn());\n                break;\n            }\n        }\n    }\n\n    // Format and return the cell text if available\n    if (cell)\n        return formatCellText(cell.get());\n\n    // Return empty string if cell is not found\n    return QString();\n}\n\nQString XLSXToMD::toMarkdown(QIODevice *xlsxDevice)\n{\n    // Load the Excel document\n    QXlsx::Document xlsx(xlsxDevice);\n    if (!xlsx.load()) {\n        qCritical() << \"Failed to load the Excel from device\";\n        return QString();\n    }\n\n    QString markdown;\n\n    // Retrieve all sheet names\n    QStringList sheetNames = xlsx.sheetNames();\n    if (sheetNames.isEmpty()) {\n        
qWarning() << \"No sheets found in the Excel document.\";\n        return QString();\n    }\n\n    // Iterate through each worksheet by name\n    for (const QString &sheetName : sheetNames) {\n        QXlsx::Worksheet *sheet = dynamic_cast<QXlsx::Worksheet *>(xlsx.sheet(sheetName));\n        if (!sheet) {\n            qWarning() << \"Failed to load sheet:\" << sheetName;\n            continue;\n        }\n\n        markdown += u\"### %1\\n\\n\"_s.arg(sheetName);\n\n        // Determine the used range\n        QXlsx::CellRange range = sheet->dimension();\n        int firstRow = range.firstRow();\n        int lastRow = range.lastRow();\n        int firstCol = range.firstColumn();\n        int lastCol = range.lastColumn();\n\n        if (firstRow > lastRow || firstCol > lastCol) {\n            qWarning() << \"Sheet\" << sheetName << \"is empty.\";\n            markdown += QStringView(u\"*No data available.*\\n\\n\");\n            continue;\n        }\n\n        auto appendRow = [&markdown](auto &list) { markdown += u\"|%1|\\n\"_s.arg(list.join(u'|')); };\n\n        // Empty header\n        static QString header(u' ');\n        static QString separator(u'-');\n        QStringList headers;\n        QStringList separators;\n        for (int col = firstCol; col <= lastCol; ++col) {\n            headers << header;\n            separators << separator;\n        }\n        appendRow(headers);\n        appendRow(separators);\n\n        // Iterate through data rows\n        for (int row = firstRow; row <= lastRow; ++row) {\n            QStringList rowData;\n            for (int col = firstCol; col <= lastCol; ++col) {\n                QString cellText = getCellValue(sheet, row, col);\n                rowData << (cellText.isEmpty() ? u\" \"_s : cellText);\n            }\n            appendRow(rowData);\n        }\n\n        markdown += u'\\n'; // Add an empty line between sheets\n    }\n    return markdown;\n}\n"
  },
  {
    "path": "gpt4all-chat/src/xlsxtomd.h",
    "content": "#ifndef XLSXTOMD_H\n#define XLSXTOMD_H\n\nclass QIODevice;\nclass QString;\n\n\nclass XLSXToMD\n{\npublic:\n    static QString toMarkdown(QIODevice *xlsxDevice);\n};\n\n#endif // XLSXTOMD_H\n"
  },
  {
    "path": "gpt4all-chat/system_requirements.md",
    "content": "Below are the recommended and minimum system requirements for GPT4All.\n\n### **Recommended System Requirements**\n| **Component** | **PC (Windows/Linux)**                                | **Apple**                  |\n|---------------|-------------------------------------------------------|----------------------------|\n| **CPU**       | Ryzen 5 3600 or Intel Core i7-10700, or better        | M2 Pro                     |\n| **RAM**       | 16GB                                                  | 16GB                       |\n| **GPU**       | NVIDIA GTX 1080 Ti/RTX 2080 or better, with 8GB+ VRAM | M2 Pro (integrated GPU)    |\n| **OS**        | At least Windows 10 or Ubuntu 24.04 LTS               | macOS Sonoma 14.5 or newer |\n\n### **Minimum System Requirements**\n| **Component** | **PC (Windows/Linux)**                                          | **Apple**           |\n|---------------|-----------------------------------------------------------------|---------------------|\n| **CPU**       | Intel Core: i3-2100, Pentium: 7505, Celeron: 6305; AMD: FX-4100 | M1                  |\n| **RAM**       | 16GB (8GB for 3B LLMs)                                          | 16GB                |\n| **GPU**       | Anything Direct3D 11/12 or OpenGL 2.1 capable                   | M1 (integrated GPU) |\n| **OS**        | Windows 10, Ubuntu 22.04 LTS, or other compatible Linux         | macOS Monterey 12.6 |\n\nNote that Windows and Linux PCs with ARM CPUs are not currently supported.\n"
  },
  {
    "path": "gpt4all-chat/test-requirements.txt",
    "content": "pytest~=8.3\nrequests~=2.32\n"
  },
  {
    "path": "gpt4all-chat/tests/CMakeLists.txt",
    "content": "include(FetchContent)\n\nfind_package(Python3 3.12 REQUIRED COMPONENTS Interpreter)\n\n# Google test download and setup\nFetchContent_Declare(\n    googletest\n    URL https://github.com/google/googletest/archive/refs/tags/v1.15.2.zip\n)\nFetchContent_MakeAvailable(googletest)\n\nconfigure_file(python/config.py.in \"${CMAKE_CURRENT_SOURCE_DIR}/python/config.py\")\n\nadd_test(NAME ChatPythonTests\n    COMMAND ${Python3_EXECUTABLE} -m pytest --color=yes \"${CMAKE_CURRENT_SOURCE_DIR}/python\"\n)\nset_tests_properties(ChatPythonTests PROPERTIES\n    ENVIRONMENT \"CHAT_EXECUTABLE=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/chat;TEST_MODEL_PATH=${TEST_MODEL_PATH}\"\n    TIMEOUT 60\n)\n\nadd_executable(gpt4all_tests\n    cpp/test_main.cpp\n    cpp/basic_test.cpp\n)\n\ntarget_link_libraries(gpt4all_tests PRIVATE gtest gtest_main)\n\ninclude(GoogleTest)\ngtest_discover_tests(gpt4all_tests)\n"
  },
  {
    "path": "gpt4all-chat/tests/cpp/basic_test.cpp",
    "content": "#include <gtest/gtest.h>\n\nTEST(BasicTest, TestInitialization) {\n    EXPECT_TRUE(true);\n}\n"
  },
  {
    "path": "gpt4all-chat/tests/cpp/test_main.cpp",
    "content": "#include <gtest/gtest.h>\n\nint main(int argc, char **argv) {\n    ::testing::InitGoogleTest(&argc, argv);\n    return RUN_ALL_TESTS();\n}\n"
  },
  {
    "path": "gpt4all-chat/tests/python/__init__.py",
    "content": ""
  },
  {
    "path": "gpt4all-chat/tests/python/config.py.in",
    "content": "APP_VERSION = '@APP_VERSION@'\n"
  },
  {
    "path": "gpt4all-chat/tests/python/test_server_api.py",
    "content": "import os\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom subprocess import CalledProcessError\nfrom typing import Any, Iterator\n\nimport pytest\nimport requests\nfrom urllib3 import Retry\n\nfrom . import config\n\n\nclass Requestor:\n    def __init__(self) -> None:\n        self.session = requests.Session()\n        self.http_adapter = self.session.adapters['http://']\n\n    def get(self, path: str, *, raise_for_status: bool = True, wait: bool = False) -> Any:\n        return self._request('GET', path, raise_for_status=raise_for_status, wait=wait)\n\n    def post(self, path: str, data: dict[str, Any] | None, *, raise_for_status: bool = True, wait: bool = False) -> Any:\n        return self._request('POST', path, data, raise_for_status=raise_for_status, wait=wait)\n\n    def _request(\n        self, method: str, path: str, data: dict[str, Any] | None = None, *, raise_for_status: bool, wait: bool,\n    ) -> Any:\n        if wait:\n            retry = Retry(total=None, connect=10, read=False, status=0, other=0, backoff_factor=.01)\n        else:\n            retry = Retry(total=False)\n        self.http_adapter.max_retries = retry  # type: ignore[attr-defined]\n\n        resp = self.session.request(method, f'http://localhost:4891/v1/{path}', json=data)\n        if raise_for_status:\n            resp.raise_for_status()\n            return resp.json()\n\n        try:\n            json_data = resp.json()\n        except ValueError:\n            json_data = None\n        return resp.status_code, json_data\n\n\nrequest = Requestor()\n\n\ndef create_chat_server_config(tmpdir: Path, model_copied: bool = False) -> dict[str, str]:\n    xdg_confdir = tmpdir / 'config'\n    app_confdir = xdg_confdir / 'nomic.ai'\n    app_confdir.mkdir(parents=True)\n    with open(app_confdir / 'GPT4All.ini', 'w') as conf:\n        
conf.write(textwrap.dedent(f\"\"\"\\\n            [General]\n            serverChat=true\n\n            [download]\n            lastVersionStarted={config.APP_VERSION}\n\n            [network]\n            isActive=false\n            usageStatsActive=false\n        \"\"\"))\n\n    if model_copied:\n        app_data_dir = tmpdir / 'share' / 'nomic.ai' / 'GPT4All'\n        app_data_dir.mkdir(parents=True)\n        local_env_file_path = Path(os.environ['TEST_MODEL_PATH'])\n        shutil.copy(local_env_file_path, app_data_dir / local_env_file_path.name)\n\n    return dict(\n        os.environ,\n        XDG_CACHE_HOME=str(tmpdir / 'cache'),\n        XDG_DATA_HOME=str(tmpdir / 'share'),\n        XDG_CONFIG_HOME=str(xdg_confdir),\n        APPIMAGE=str(tmpdir),  # hack to bypass SingleApplication\n    )\n\n\n@contextmanager\ndef prepare_chat_server(model_copied: bool = False) -> Iterator[dict[str, str]]:\n    if os.name != 'posix' or sys.platform == 'darwin':\n        pytest.skip('Need non-Apple Unix to use alternate config path')\n\n    with tempfile.TemporaryDirectory(prefix='gpt4all-test') as td:\n        tmpdir = Path(td)\n        config = create_chat_server_config(tmpdir, model_copied=model_copied)\n        yield config\n\n\ndef start_chat_server(config: dict[str, str]) -> Iterator[None]:\n    chat_executable = Path(os.environ['CHAT_EXECUTABLE']).absolute()\n    with subprocess.Popen(chat_executable, env=config) as process:\n        try:\n            yield\n        except:\n            process.kill()\n            raise\n        process.send_signal(signal.SIGINT)\n        if retcode := process.wait():\n            raise CalledProcessError(retcode, process.args)\n\n\n@pytest.fixture\ndef chat_server() -> Iterator[None]:\n    with prepare_chat_server(model_copied=False) as config:\n        yield from start_chat_server(config)\n\n\n@pytest.fixture\ndef chat_server_with_model() -> Iterator[None]:\n    with prepare_chat_server(model_copied=True) as config:\n        yield 
from start_chat_server(config)\n\n\ndef test_with_models_empty(chat_server: None) -> None:\n    # non-sense endpoint\n    status_code, response = request.get('foobarbaz', wait=True, raise_for_status=False)\n    assert status_code == 404\n    assert response is None\n\n    # empty model list\n    response = request.get('models')\n    assert response == {'object': 'list', 'data': []}\n\n    # empty model info\n    response = request.get('models/foo')\n    assert response == {}\n\n    # POST for model list\n    status_code, response = request.post('models', data=None, raise_for_status=False)\n    assert status_code == 405\n    assert response == {'error': {\n        'code': None,\n        'message': 'Not allowed to POST on /v1/models. (HINT: Perhaps you meant to use a different HTTP method?)',\n        'param': None,\n        'type': 'invalid_request_error',\n    }}\n\n    # POST for model info\n    status_code, response = request.post('models/foo', data=None, raise_for_status=False)\n    assert status_code == 405\n    assert response == {'error': {\n        'code': None,\n        'message': 'Not allowed to POST on /v1/models/*. 
(HINT: Perhaps you meant to use a different HTTP method?)',\n        'param': None,\n        'type': 'invalid_request_error',\n    }}\n\n    # GET for completions\n    status_code, response = request.get('completions', raise_for_status=False)\n    assert status_code == 405\n    assert response == {'error': {\n        'code': 'method_not_supported',\n        'message': 'Only POST requests are accepted.',\n        'param': None,\n        'type': 'invalid_request_error',\n    }}\n\n    # GET for chat completions\n    status_code, response = request.get('chat/completions', raise_for_status=False)\n    assert status_code == 405\n    assert response == {'error': {\n        'code': 'method_not_supported',\n        'message': 'Only POST requests are accepted.',\n        'param': None,\n        'type': 'invalid_request_error',\n    }}\n\n\nEXPECTED_MODEL_INFO = {\n    'created': 0,\n    'id': 'Llama 3.2 1B Instruct',\n    'object': 'model',\n    'owned_by': 'humanity',\n    'parent': None,\n    'permissions': [\n        {\n            'allow_create_engine': False,\n            'allow_fine_tuning': False,\n            'allow_logprobs': False,\n            'allow_sampling': False,\n            'allow_search_indices': False,\n            'allow_view': True,\n            'created': 0,\n            'group': None,\n            'id': 'placeholder',\n            'is_blocking': False,\n            'object': 'model_permission',\n            'organization': '*',\n        },\n    ],\n    'root': 'Llama 3.2 1B Instruct',\n}\n\nEXPECTED_COMPLETIONS_RESPONSE = {\n    'choices': [\n        {\n            'finish_reason': 'length',\n            'index': 0,\n            'logprobs': None,\n            'text': ' jumps over the lazy dog.\\n',\n        },\n    ],\n    'id': 'placeholder',\n    'model': 'Llama 3.2 1B Instruct',\n    'object': 'text_completion',\n    'usage': {\n        'completion_tokens': 6,\n        'prompt_tokens': 5,\n        'total_tokens': 11,\n    },\n}\n\n\ndef 
test_with_models(chat_server_with_model: None) -> None:\n    response = request.get('models', wait=True)\n    assert response == {\n        'data': [EXPECTED_MODEL_INFO],\n        'object': 'list',\n    }\n\n    # Test the specific model endpoint\n    response = request.get('models/Llama 3.2 1B Instruct')\n    assert response == EXPECTED_MODEL_INFO\n\n    # Test the completions endpoint\n    status_code, response = request.post('completions', data=None, raise_for_status=False)\n    assert status_code == 400\n    assert response == {'error': {\n        'code': None,\n        'message': 'error parsing request JSON: illegal value',\n        'param': None,\n        'type': 'invalid_request_error',\n    }}\n\n    data = dict(\n        model       = 'Llama 3.2 1B Instruct',\n        prompt      = 'The quick brown fox',\n        temperature = 0,\n        max_tokens  = 6,\n    )\n    response = request.post('completions', data=data)\n    del response['created']  # Remove the dynamic field for comparison\n    assert response == EXPECTED_COMPLETIONS_RESPONSE\n\n\ndef test_with_models_temperature(chat_server_with_model: None) -> None:\n    \"\"\"Fixed by nomic-ai/gpt4all#3202.\"\"\"\n    data = {\n        'model': 'Llama 3.2 1B Instruct',\n        'prompt': 'The quick brown fox',\n        'temperature': 0.5,\n    }\n\n    request.post('completions', data=data, wait=True, raise_for_status=True)\n"
  },
  {
    "path": "gpt4all-chat/translations/gpt4all_en_US.ts",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.1\" language=\"en_US\">\n<context>\n    <name>AddCollectionView</name>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"45\"/>\n        <source>← Existing Collections</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"68\"/>\n        <source>Add Document Collection</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"78\"/>\n        <source>Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"99\"/>\n        <source>Name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"114\"/>\n        <source>Collection name...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"116\"/>\n        <source>Name of the collection to add (Required)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"132\"/>\n        <source>Folder</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"149\"/>\n        <source>Folder path...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" 
line=\"152\"/>\n        <source>Folder path to documents (Required)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"164\"/>\n        <source>Browse</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"177\"/>\n        <source>Create Collection</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>AddGPT4AllModelView</name>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"31\"/>\n        <source>These models have been specifically configured for use in GPT4All. The first few models on the list are known to work the best, but you should only attempt to use models that will fit in your available memory.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"45\"/>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"55\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"343\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"56\"/>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"65\"/>\n        <source>All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddGPT4AllModelView.qml\" line=\"85\"/>\n        <source>Reasoning</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"142\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"143\"/>\n        <source>Model file to be downloaded</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"166\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"167\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"208\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"220\"/>\n        <source>Remove</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"227\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"240\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"246\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"259\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"265\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"303\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"304\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"314\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"315\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"332\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"336\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"344\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"364\"/>\n        <source>File size</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"386\"/>\n        <source>RAM required</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <source>%1 GB</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"413\"/>\n        <source>?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"408\"/>\n        <source>Parameters</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"430\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"452\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>AddHFModelView</name>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"32\"/>\n        <source>Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these will work. 
Many will require additional configuration before they can be used.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"52\"/>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"55\"/>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"61\"/>\n        <source>Searching · %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"131\"/>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"132\"/>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"151\"/>\n        <source>Default</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"152\"/>\n        <source>Likes</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"153\"/>\n        <source>Downloads</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"154\"/>\n        <source>Recent</source>\n   
     <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"162\"/>\n        <source>Sort by: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"176\"/>\n        <source>Asc</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"177\"/>\n        <source>Desc</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"190\"/>\n        <source>Sort dir: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"212\"/>\n        <source>None</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"234\"/>\n        <source>Limit: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"297\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"298\"/>\n        <source>Model file to be downloaded</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"321\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"322\"/>\n        <source>File description</source>\n        
<translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"363\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"375\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"382\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"396\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"430\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"431\"/>\n        <source>Install online model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"441\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        
<translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"447\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"460\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"466\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"504\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"505\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"515\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"516\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"533\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n       
 <location filename=\"../qml/AddHFModelView.qml\" line=\"537\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"567\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"588\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"609\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"544\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"545\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"558\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"564\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"579\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"585\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"600\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" 
line=\"606\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"628\"/>\n        <source>File size</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"650\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"672\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>AddModelView</name>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"55\"/>\n        <source>← Existing Models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"75\"/>\n        <source>Explore Models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"86\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"93\"/>\n        <source>Remote Providers</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"100\"/>\n        <source>HuggingFace</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>AddRemoteModelView</name>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"31\"/>\n        <source>Various remote model providers that use network resources for 
inference.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"55\"/>\n        <source>Groq</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"57\"/>\n        <source>Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"79\"/>\n        <source>OpenAI</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"81\"/>\n        <source>OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"96\"/>\n        <source>Mistral</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"98\"/>\n        <source>Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. 
Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"141\"/>\n        <source>Custom</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"143\"/>\n        <source>The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ApplicationSettings</name>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"16\"/>\n        <source>Application</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"25\"/>\n        <source>Network dialog</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"26\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"48\"/>\n        <source>Error dialog</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"72\"/>\n        <source>Application 
Settings</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"85\"/>\n        <source>General</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"97\"/>\n        <source>Theme</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"98\"/>\n        <source>The application color scheme.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"113\"/>\n        <source>Dark</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"112\"/>\n        <source>Light</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"39\"/>\n        <source>ERROR: Update system could not find the MaintenanceTool used to check for updates!&lt;br/&gt;&lt;br/&gt;Did you install this application using the online installer? 
If so, the MaintenanceTool executable should be located one directory above where this application resides on your filesystem.&lt;br/&gt;&lt;br/&gt;If you can&apos;t start it manually, then I&apos;m afraid you&apos;ll have to reinstall.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"114\"/>\n        <source>LegacyDark</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"136\"/>\n        <source>Font Size</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"137\"/>\n        <source>The size of text in the application.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"151\"/>\n        <source>Small</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"152\"/>\n        <source>Medium</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"153\"/>\n        <source>Large</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"176\"/>\n        <source>Language and Locale</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"177\"/>\n        <source>The language and locale you wish to use.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"196\"/>\n        <source>System Locale</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"223\"/>\n        <source>Device</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"224\"/>\n        <source>The compute device used for text generation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"242\"/>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"297\"/>\n        <source>Application default</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"275\"/>\n        <source>Default Model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"276\"/>\n        <source>The preferred model for new chats. 
Also used as the local server fallback.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"339\"/>\n        <source>Suggestion Mode</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"340\"/>\n        <source>Generate suggested follow-up questions at the end of responses.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"353\"/>\n        <source>When chatting with LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"354\"/>\n        <source>Whenever possible</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"355\"/>\n        <source>Never</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"368\"/>\n        <source>Download Path</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"369\"/>\n        <source>Where to store local models and the LocalDocs database.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"401\"/>\n        <source>Browse</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"402\"/>\n        <source>Choose where to save 
model files</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"413\"/>\n        <source>Enable Datalake</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"414\"/>\n        <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"447\"/>\n        <source>Advanced</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"459\"/>\n        <source>CPU Threads</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"460\"/>\n        <source>The number of CPU threads used for inference and embedding.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"491\"/>\n        <source>Enable System Tray</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"492\"/>\n        <source>The application will minimize to the system tray when the window is closed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"508\"/>\n        <source>Enable Local API Server</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" 
line=\"509\"/>\n        <source>Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"525\"/>\n        <source>API Server Port</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"526\"/>\n        <source>The port to use for the local server. Requires restart.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"578\"/>\n        <source>Check For Updates</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"579\"/>\n        <source>Manually check for an update to GPT4All.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"588\"/>\n        <source>Updates</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>Chat</name>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"33\"/>\n        <location filename=\"../src/chat.h\" line=\"84\"/>\n        <source>New Chat</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"46\"/>\n        <source>Server Chat</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatAPIWorker</name>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"263\"/>\n        <source>ERROR: Network error occurred while connecting to the API 
server</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"276\"/>\n        <source>ChatAPIWorker::handleFinished got HTTP Error %1 %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatCollapsibleItem</name>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"37\"/>\n        <source>Analysis encountered error</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Thinking</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Analyzing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"41\"/>\n        <source>Thought for %1 %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>seconds</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"44\"/>\n        <source>Analyzed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatDrawer</name>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"37\"/>\n        <source>Drawer</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"38\"/>\n        <source>Main navigation drawer</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"49\"/>\n        <source>＋ New Chat</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"50\"/>\n        <source>Create a new chat</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"199\"/>\n        <source>Select the current chat or edit the chat when in edit mode</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"216\"/>\n        <source>Edit chat name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"229\"/>\n        <source>Save chat name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"246\"/>\n        <source>Delete chat</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"283\"/>\n        <source>Confirm chat deletion</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"305\"/>\n        <source>Cancel chat deletion</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"317\"/>\n        <source>List of 
chats</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"318\"/>\n        <source>List of chats in the drawer dialog</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatItemView</name>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"83\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"84\"/>\n        <source>You</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"107\"/>\n        <source>response stopped ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"108\"/>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"109\"/>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"110\"/>\n        <source>processing ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"111\"/>\n        <source>generating response ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"112\"/>\n        <source>generating questions ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"113\"/>\n        <source>generating toolcall ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"545\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/ChatItemView.qml\" line=\"283\"/>\n        <source>%n Source(s)</source>\n        <translation>\n            <numerusform>%n Source</numerusform>\n            <numerusform>%n Sources</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"430\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"460\"/>\n        <source>Edit this message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"461\"/>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"472\"/>\n        <source>All following messages will be permanently erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"471\"/>\n        <source>Redo this response?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"495\"/>\n        <source>Cannot edit chat without a loaded model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"497\"/>\n        <source>Cannot edit chat while the model is generating.</source>\n   
     <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"506\"/>\n        <source>Edit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"517\"/>\n        <source>Cannot redo response without a loaded model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"519\"/>\n        <source>Cannot redo response while the model is generating.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"528\"/>\n        <source>Redo</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"565\"/>\n        <source>Like response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"594\"/>\n        <source>Dislike response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"657\"/>\n        <source>Suggested follow-ups</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatLLM</name>\n    <message>\n        <location filename=\"../src/chatllm.cpp\" line=\"1047\"/>\n        <source>Your message was too long and could not be processed (%1 &gt; %2). 
Please try again with something shorter.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatListModel</name>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"94\"/>\n        <source>TODAY</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"96\"/>\n        <source>THIS WEEK</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"98\"/>\n        <source>THIS MONTH</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"100\"/>\n        <source>LAST SIX MONTHS</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"102\"/>\n        <source>THIS YEAR</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"104\"/>\n        <source>LAST YEAR</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatTextItem</name>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"67\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"73\"/>\n        <source>Copy Message</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Disable markdown</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n 
       <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Enable markdown</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatView</name>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;Warning&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"99\"/>\n        <source>Conversation copied to clipboard.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"106\"/>\n        <source>Code copied to clipboard.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"113\"/>\n        <source>The entire chat will be erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"248\"/>\n        <source>Chat panel</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"249\"/>\n        <source>Chat panel with options</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"356\"/>\n        <source>Reload the currently loaded model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"370\"/>\n        <source>Eject the currently loaded model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" 
line=\"382\"/>\n        <source>No model installed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"384\"/>\n        <source>Model loading error.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"386\"/>\n        <source>Waiting for model...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"388\"/>\n        <source>Switching context...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"390\"/>\n        <source>Choose a model...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"392\"/>\n        <source>Not found: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"480\"/>\n        <source>The top item is the current model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"566\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"584\"/>\n        <source>Add documents</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"585\"/>\n        <source>add collections of documents to the chat</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"756\"/>\n        <source>Load the default model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"757\"/>\n        <source>Loads the default model which can be changed in settings</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"768\"/>\n        <source>No Model Installed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"777\"/>\n        <source>GPT4All requires that you install at least one\nmodel to get started</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"789\"/>\n        <source>Install a Model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"794\"/>\n        <source>Shows the add model view</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"819\"/>\n        <source>Conversation with the model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"820\"/>\n        <source>prompt / response pairs from the conversation</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1095\"/>\n        <source>Legacy prompt template needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1099\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1102\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1105\"/>\n        <source>Legacy system prompt needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1293\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"924\"/>\n        <source>Erase and reset chat session</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"942\"/>\n        <source>Copy chat session to clipboard</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1216\"/>\n        <source>Add media</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1217\"/>\n        <source>Adds media to the prompt</source>\n        <translation type=\"unfinished\"></translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1351\"/>\n        <source>Stop generating</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1352\"/>\n        <source>Stop the current response generation</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1396\"/>\n        <source>Attach</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1398\"/>\n        <source>Single File</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1007\"/>\n        <source>Reloads the model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"66\"/>\n        <source>&lt;h3&gt;Encountered an error loading model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a compatible format and type&lt;li&gt;Check the model file is complete in the download folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If you&apos;ve sideloaded the model ensure the file is not corrupt by checking md5sum&lt;li&gt;Read more about what models are supported in our &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the gui&lt;li&gt;Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"92\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"112\"/>\n        <source>Erase conversation?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"93\"/>\n        <source>Changing the model will erase the current conversation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"394\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"1005\"/>\n        <source>Reload · %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"396\"/>\n        <source>Loading · %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"732\"/>\n        <source>Load · %1 (default) →</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Send a message...</source>\n        
<translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Load a model to continue...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1246\"/>\n        <source>Send messages/prompts to the model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1287\"/>\n        <source>Cut</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1299\"/>\n        <source>Paste</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1303\"/>\n        <source>Select All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1374\"/>\n        <source>Send message</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1375\"/>\n        <source>Sends the message/prompt contained in textfield to the model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>CodeInterpreter</name>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"79\"/>\n        <source>Code Interpreter</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"80\"/>\n        <source>compute javascript code using console.log as output</source>\n        <translation type=\"unfinished\"></translation>\n    
</message>\n</context>\n<context>\n    <name>CollectionsDrawer</name>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"70\"/>\n        <source>Warning: searching collections while indexing can return incomplete results</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n file</numerusform>\n            <numerusform>%n files</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n word</numerusform>\n            <numerusform>%n words</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"103\"/>\n        <source>Updating</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"128\"/>\n        <source>＋ Add Docs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"137\"/>\n        <source>Select a collection to make it available to the chat model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ConfirmationDialog</name>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"42\"/>\n        <source>OK</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"49\"/>\n        <source>Cancel</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>Download</name>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"278\"/>\n        <source>Model &quot;%1&quot; is installed successfully.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"288\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"294\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"300\"/>\n        <source>ERROR: $BASE_URL is invalid.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"306\"/>\n        <source>ERROR: Model &quot;%1 (%2)&quot; is conflict.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"325\"/>\n        <source>Model &quot;%1 (%2)&quot; is installed successfully.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"349\"/>\n        <source>Model &quot;%1&quot; is removed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>HomeView</name>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"49\"/>\n        <source>Welcome to GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"56\"/>\n        <source>The privacy-first 
LLM chat application</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"66\"/>\n        <source>Start chatting</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"81\"/>\n        <source>Start Chatting</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"82\"/>\n        <source>Chat with any LLM</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"92\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"93\"/>\n        <source>Chat with your local files</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"103\"/>\n        <source>Find Models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"104\"/>\n        <source>Explore and download models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"190\"/>\n        <source>Latest news</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"191\"/>\n        <source>Latest news from GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"222\"/>\n        <source>Release 
Notes</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"228\"/>\n        <source>Documentation</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"234\"/>\n        <source>Discord</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"240\"/>\n        <source>X (Twitter)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"246\"/>\n        <source>Github</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"257\"/>\n        <source>nomic.ai</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"282\"/>\n        <source>Subscribe to Newsletter</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsSettings</name>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"19\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"29\"/>\n        <source>LocalDocs Settings</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"38\"/>\n        <source>Indexing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" 
line=\"51\"/>\n        <source>Allowed File Extensions</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"52\"/>\n        <source>Comma-separated list. LocalDocs will only attempt to process files with these extensions.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"100\"/>\n        <source>Embedding</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"112\"/>\n        <source>Use Nomic Embed API</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"113\"/>\n        <source>Embed documents using the fast Nomic API instead of a private local model. Requires restart.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"130\"/>\n        <source>Nomic API Key</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"131\"/>\n        <source>API key to use for Nomic Embed. Get one from the Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API keys page&lt;/a&gt;. 
Requires restart.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"165\"/>\n        <source>Embeddings Device</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"166\"/>\n        <source>The compute device used for embeddings. Requires restart.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"176\"/>\n        <source>Application default</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"211\"/>\n        <source>Display</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"224\"/>\n        <source>Show Sources</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"225\"/>\n        <source>Display the sources used for each response.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"242\"/>\n        <source>Advanced</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"258\"/>\n        <source>Warning: Advanced usage only.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"259\"/>\n        <source>Values too large may cause localdocs failure, extremely slow responses 
or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model&apos;s context window. More info &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;here&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"267\"/>\n        <source>Document snippet size (characters)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"268\"/>\n        <source>Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"293\"/>\n        <source>Max document snippets per prompt</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"294\"/>\n        <source>Max best N matches of retrieved document snippets to add to the context for prompt. 
Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsView</name>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"52\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"58\"/>\n        <source>Chat with your local files</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"71\"/>\n        <source>＋ Add Collection</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;ERROR: The LocalDocs database cannot be accessed or is not valid.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Note: You will need to restart after trying any of the following suggested fixes.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Make sure that the folder set as &lt;b&gt;Download Path&lt;/b&gt; exists on the file system.&lt;/li&gt;&lt;li&gt;Check ownership as well as read and write permissions of the &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;If there is a &lt;b&gt;localdocs_v2.db&lt;/b&gt; file, check its ownership and read/write permissions, too.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;If the problem persists and there are any &apos;localdocs_v*.db&apos; files present, as a last resort you can&lt;br&gt;try backing them up and removing them. 
You will have to recreate your collections, however.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"109\"/>\n        <source>No Collections Installed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"118\"/>\n        <source>Install a collection of local documents to get started using this feature</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"129\"/>\n        <source>＋ Add Doc Collection</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"134\"/>\n        <source>Shows the add model view</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"231\"/>\n        <source>Indexing progressBar</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"232\"/>\n        <source>Shows the progress made in the indexing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"257\"/>\n        <source>ERROR</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"261\"/>\n        <source>INDEXING</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"265\"/>\n        <source>EMBEDDING</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"268\"/>\n        <source>REQUIRES UPDATE</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"271\"/>\n        <source>READY</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"273\"/>\n        <source>INSTALLING</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"300\"/>\n        <source>Indexing in progress</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"303\"/>\n        <source>Embedding in progress</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"306\"/>\n        <source>This collection requires an update after version change</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"309\"/>\n        <source>Automatically reindexes upon changes to the folder</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"311\"/>\n        <source>Installation in progress</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"325\"/>\n        <source>%</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message numerus=\"yes\">\n        <location 
filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n file</numerusform>\n            <numerusform>%n files</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n word</numerusform>\n            <numerusform>%n words</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"408\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"420\"/>\n        <source>Rebuild</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"423\"/>\n        <source>Reindex this folder from scratch. This is slow and usually not needed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"430\"/>\n        <source>Update</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"433\"/>\n        <source>Update the collection to the new version. 
This is a slow operation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ModelList</name>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1716\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal OpenAI API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to OpenAI!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with OpenAI&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;here.&lt;/a&gt;&lt;/li&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1735\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1764\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1796\"/>\n        <source>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1822\"/>\n        <source>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1849\"/>\n        <source>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../src/modellist.cpp\" line=\"1749\"/>\n        <source>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1344\"/>\n        <location filename=\"../src/modellist.cpp\" line=\"1395\"/>\n        <source>cannot open &quot;%1&quot;: %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1356\"/>\n        <source>cannot create &quot;%1&quot;: %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1406\"/>\n        <source>%1 (%2)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1407\"/>\n        <source>&lt;strong&gt;OpenAI-Compatible API Model&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API Key: %1&lt;/li&gt;&lt;li&gt;Base URL: %2&lt;/li&gt;&lt;li&gt;Model Name: %3&lt;/li&gt;&lt;/ul&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1777\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../src/modellist.cpp\" line=\"1862\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal API key and the API base URL.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to the OpenAI-compatible API Server you specified!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with the OpenAI-compatible API Server&lt;/li&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1879\"/>\n        <source>&lt;strong&gt;Connect to OpenAI-compatible API server&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"2303\"/>\n        <source>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ModelSettings</name>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"14\"/>\n        <source>Model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <source>%1 system message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n    
    <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>The system message will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>removed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>reset to the default</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>%1 chat template?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>The chat template will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>erased</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"57\"/>\n        <source>Model Settings</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"108\"/>\n        <source>Clone</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"118\"/>\n        <source>Remove</source>\n        
<translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"132\"/>\n        <source>Name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"165\"/>\n        <source>Model File</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"190\"/>\n        <source>System Message</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"191\"/>\n        <source>A message to set the context or guide the behavior of the model. Leave blank for none. NOTE: Since GPT4All 3.5, this should not contain control tokens.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"218\"/>\n        <source>System message is not &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;plain text&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"288\"/>\n        <source>Chat Template</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"289\"/>\n        <source>This Jinja template turns the chat into input for the model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"371\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n       
 <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"375\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"379\"/>\n        <source>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Syntax error&lt;/a&gt;: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"383\"/>\n        <source>Chat template is not in &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja format&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"409\"/>\n        <source>Chat Name Prompt</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"410\"/>\n        <source>Prompt used to automatically generate chat names.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"452\"/>\n        <source>Suggested FollowUp Prompt</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"453\"/>\n        <source>Prompt used to generate suggested follow-up questions.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"506\"/>\n        
<source>Context Length</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"507\"/>\n        <source>Number of input and output tokens the model sees.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"528\"/>\n        <source>Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"566\"/>\n        <source>Temperature</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"567\"/>\n        <source>Randomness of model output. Higher -&gt; more variation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"578\"/>\n        <source>Temperature increases the chances of choosing less likely tokens.\nNOTE: Higher temperature gives more creative but less predictable outputs.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"612\"/>\n        <source>Top-P</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"613\"/>\n        <source>Nucleus Sampling factor. 
Lower -&gt; more predictable.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"623\"/>\n        <source>Only the most likely tokens up to a total probability of top_p can be chosen.\nNOTE: Prevents choosing highly unlikely tokens.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"657\"/>\n        <source>Min-P</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"658\"/>\n        <source>Minimum token probability. Higher -&gt; more predictable.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"668\"/>\n        <source>Sets the minimum relative probability for a token to be considered.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"704\"/>\n        <source>Top-K</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"705\"/>\n        <source>Size of selection pool for tokens.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"716\"/>\n        <source>Only the top K most likely tokens will be chosen from.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"751\"/>\n        <source>Max Length</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ModelSettings.qml\" line=\"752\"/>\n        <source>Maximum response length, in tokens.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"797\"/>\n        <source>Prompt Batch Size</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"798\"/>\n        <source>The batch size used for prompt processing.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"809\"/>\n        <source>Amount of prompt tokens to process at once.\nNOTE: Higher values can speed up reading prompts but will use more RAM.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"844\"/>\n        <source>Repeat Penalty</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"845\"/>\n        <source>Repetition penalty factor. 
Set to 1 to disable.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"889\"/>\n        <source>Repeat Penalty Tokens</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"890\"/>\n        <source>Number of previous tokens used for penalty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"935\"/>\n        <source>GPU Layers</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"936\"/>\n        <source>Number of model layers to load into VRAM.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"947\"/>\n        <source>How many model layers to load into VRAM. 
Decrease this if GPT4All runs out of VRAM while loading this model.\nLower values increase CPU load and RAM usage, and make inference slower.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ModelsView</name>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"40\"/>\n        <source>No Models Installed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"49\"/>\n        <source>Install a model to get started using GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"60\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"102\"/>\n        <source>＋ Add Model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"65\"/>\n        <source>Shows the add model view</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"83\"/>\n        <source>Installed Models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"89\"/>\n        <source>Locally installed chat models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"147\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"148\"/>\n        <source>Model file to be downloaded</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"170\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"171\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"204\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"216\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"223\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"237\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"271\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"272\"/>\n        <source>Install online model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"282\"/>\n        
<source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"301\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"399\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"420\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"426\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"441\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"447\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>%1 GB</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"288\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"307\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"345\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"346\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"356\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"357\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"374\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"378\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"408\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"429\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"450\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ModelsView.qml\" line=\"385\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"386\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"405\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"469\"/>\n        <source>File size</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"491\"/>\n        <source>RAM required</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"513\"/>\n        <source>Parameters</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"535\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"557\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>MyFancyLink</name>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"42\"/>\n        <source>Fancy link</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"43\"/>\n        <source>A stylized link</source>\n        <translation type=\"unfinished\"></translation>\n    
</message>\n</context>\n<context>\n    <name>MyFileDialog</name>\n    <message>\n        <location filename=\"../qml/MyFileDialog.qml\" line=\"7\"/>\n        <source>Please choose a file</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>MyFolderDialog</name>\n    <message>\n        <location filename=\"../qml/MyFolderDialog.qml\" line=\"7\"/>\n        <source>Please choose a directory</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsLabel</name>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsTab</name>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"24\"/>\n        <source>Restore defaults?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"25\"/>\n        <source>This page of settings will be reset to the defaults.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"69\"/>\n        <source>Restore Defaults</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"73\"/>\n        <source>Restores settings dialog to a default state</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>NetworkDialog</name>\n    <message>\n        <location 
filename=\"../qml/NetworkDialog.qml\" line=\"39\"/>\n        <source>Contribute data to the GPT4All Opensource Datalake.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"55\"/>\n        <source>By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"70\"/>\n        <source>Terms for opt-in</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"71\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"79\"/>\n        <source>Please provide a name for attribution (optional)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"81\"/>\n        <source>Attribution (optional)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"82\"/>\n        <source>Provide attribution</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"95\"/>\n        <source>Enable</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"96\"/>\n        <source>Enable opt-in</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"100\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"101\"/>\n      
  <source>Cancel opt-in</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>NewVersionDialog</name>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"34\"/>\n        <source>New version is available</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"46\"/>\n        <source>Update</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"48\"/>\n        <source>Update to new version</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>PopupDialog</name>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"38\"/>\n        <source>Reveals a shortlived help balloon</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"48\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"49\"/>\n        <source>Displayed when the popup is showing busy</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>RemoteModelCard</name>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"92\"/>\n        <source>API Key</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"104\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/RemoteModelCard.qml\" line=\"117\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"120\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"127\"/>\n        <source>Base Url</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"138\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"144\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"152\"/>\n        <source>Model Name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"163\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"169\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"179\"/>\n        <source>Models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"199\"/>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"217\"/>\n    
    <source>Install</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"218\"/>\n        <source>Install remote model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>SettingsView</name>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"22\"/>\n        <location filename=\"../qml/SettingsView.qml\" line=\"61\"/>\n        <source>Settings</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"23\"/>\n        <source>Contains various application settings</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"29\"/>\n        <source>Application</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"32\"/>\n        <source>Model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"35\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>StartupDialog</name>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"50\"/>\n        <source>Welcome!</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"67\"/>\n        <source>### Release Notes\n%1&lt;br/&gt;\n### Contributors\n%2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" 
line=\"71\"/>\n        <source>Release notes</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"72\"/>\n        <source>Release notes for this version</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"87\"/>\n        <source>### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should; however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"106\"/>\n        <source>Terms for opt-in</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"107\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"118\"/>\n        <source>Opt-in to anonymous usage analytics used to improve GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"124\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"150\"/>\n        <source>Opt-in for anonymous usage statistics</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"147\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"262\"/>\n        <source>Yes</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"151\"/>\n        <source>Allow opt-in for anonymous usage statistics</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"189\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"304\"/>\n        <source>No</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/StartupDialog.qml\" line=\"192\"/>\n        <source>Opt-out for anonymous usage statistics</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"193\"/>\n        <source>Allow opt-out for anonymous usage statistics</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"232\"/>\n        <source>Opt-in to anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"238\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"265\"/>\n        <source>Opt-in for network</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"239\"/>\n        <source>Allow opt-in for network</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"266\"/>\n        <source>Allow opt-in anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"307\"/>\n        <source>Opt-out for network</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"308\"/>\n        <source>Allow opt-out anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ThumbsDownDialog</name>\n    <message>\n        <location 
filename=\"../qml/ThumbsDownDialog.qml\" line=\"39\"/>\n        <source>Please edit the text below to provide a better response. (optional)</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"54\"/>\n        <source>Please provide a better response...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"64\"/>\n        <source>Submit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"65\"/>\n        <source>Submits the user&apos;s response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"69\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"70\"/>\n        <source>Closes the response dialog</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>main</name>\n    <message>\n        <location filename=\"../main.qml\" line=\"149\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Incompatible hardware detected.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. 
The only solution at this time is to upgrade your hardware to a more modern CPU.&lt;br&gt;&lt;br&gt;See here for more information: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"24\"/>\n        <source>GPT4All v%1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"47\"/>\n        <source>Restore</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"51\"/>\n        <source>Quit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"165\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Inability to access settings file.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. 
Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"193\"/>\n        <source>Connection to datalake failed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"204\"/>\n        <source>Saving chats.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"215\"/>\n        <source>Network dialog</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"216\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"278\"/>\n        <source>Home view</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"279\"/>\n        <source>Home view of application</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"287\"/>\n        <source>Home</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"313\"/>\n        <source>Chat view</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"314\"/>\n        <source>Chat view to interact with models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" 
line=\"322\"/>\n        <source>Chats</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"347\"/>\n        <location filename=\"../main.qml\" line=\"356\"/>\n        <source>Models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"348\"/>\n        <source>Models view for installed models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"381\"/>\n        <location filename=\"../main.qml\" line=\"390\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"382\"/>\n        <source>LocalDocs view to configure and use local docs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"415\"/>\n        <location filename=\"../main.qml\" line=\"424\"/>\n        <source>Settings</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"416\"/>\n        <source>Settings view for application configuration</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"469\"/>\n        <source>The datalake is enabled</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"471\"/>\n        <source>Using a network model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"473\"/>\n        <source>Server mode is 
enabled</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"684\"/>\n        <source>Installed models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"685\"/>\n        <source>View of installed models</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n</TS>\n"
  },
  {
    "path": "gpt4all-chat/translations/gpt4all_es_MX.ts",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.1\" language=\"es_MX\">\n<context>\n    <name>AddCollectionView</name>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"45\"/>\n        <source>← Existing Collections</source>\n        <translation>← Colecciones existentes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"68\"/>\n        <source>Add Document Collection</source>\n        <translation>Agregar colección de documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"78\"/>\n        <source>Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.</source>\n        <translation>Agregue una carpeta que contenga archivos de texto plano, PDFs o Markdown. Configure extensiones adicionales en Configuración.</translation>\n    </message>\n    <message>\n        <source>Please choose a directory</source>\n        <translation type=\"vanished\">Por favor, elija un directorio</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"99\"/>\n        <source>Name</source>\n        <translation>Nombre</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"114\"/>\n        <source>Collection name...</source>\n        <translation>Nombre de la colección...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"116\"/>\n        <source>Name of the collection to add (Required)</source>\n        <translation>Nombre de la colección a agregar (Requerido)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"132\"/>\n        <source>Folder</source>\n        <translation>Carpeta</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"149\"/>\n        <source>Folder path...</source>\n        <translation>Ruta de la carpeta...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"152\"/>\n        <source>Folder path to documents (Required)</source>\n        <translation>Ruta de la carpeta de documentos (Requerido)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"164\"/>\n        <source>Browse</source>\n        <translation>Explorar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"177\"/>\n        <source>Create Collection</source>\n        <translation>Crear colección</translation>\n    </message>\n</context>\n<context>\n    <name>AddGPT4AllModelView</name>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"31\"/>\n        <source>These models have been specifically configured for use in GPT4All. 
The first few models on the list are known to work the best, but you should only attempt to use models that will fit in your available memory.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"45\"/>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"unfinished\">Error de red: no se pudo recuperar %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"55\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"343\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\">Indicador de ocupado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"56\"/>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"unfinished\">Se muestra cuando la solicitud de modelos está en curso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"65\"/>\n        <source>All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"85\"/>\n        <source>Reasoning</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"142\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\">Archivo del modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"143\"/>\n        <source>Model file to be downloaded</source>\n        <translation type=\"unfinished\">Archivo del modelo a descargar</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddGPT4AllModelView.qml\" line=\"166\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\">Descripción</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"167\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\">Descripción del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\">Reanudar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\">Descargar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"208\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\">Detener/reiniciar/iniciar la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"220\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\">Eliminar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"227\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\">Eliminar modelo del sistema de archivos</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"obsolete\">Instalar</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation 
type=\"obsolete\">Instalar modelo en línea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"240\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"246\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\">Describe un error que ocurrió durante la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"259\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"265\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\">Error por hardware incompatible</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"303\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\">Barra de progreso de descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"304\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\">Muestra el progreso realizado en la descarga</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddGPT4AllModelView.qml\" line=\"314\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\">Velocidad de descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"315\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\">Velocidad de descarga en bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"332\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\">Calculando...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"336\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\">Si se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"344\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\">Se muestra cuando se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"obsolete\">ingrese $API_KEY</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"obsolete\">ingrese $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"obsolete\">ERROR: $MODEL_NAME está vacío.</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"obsolete\">ingrese $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"364\"/>\n   
     <source>File size</source>\n        <translation type=\"unfinished\">Tamaño del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"386\"/>\n        <source>RAM required</source>\n        <translation type=\"unfinished\">RAM requerida</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <source>%1 GB</source>\n        <translation type=\"unfinished\">%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"413\"/>\n        <source>?</source>\n        <translation type=\"unfinished\">?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"408\"/>\n        <source>Parameters</source>\n        <translation type=\"unfinished\">Parámetros</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"430\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\">Cuantificación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"452\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\">Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddHFModelView</name>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"32\"/>\n        <source>Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these will work. 
Many will require additional configuration before they can be used.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"52\"/>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"unfinished\">Descubre y descarga modelos mediante búsqueda por palabras clave...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"55\"/>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"unfinished\">Campo de texto para descubrir y filtrar modelos descargables</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"61\"/>\n        <source>Searching · %1</source>\n        <translation type=\"unfinished\">Buscando · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"131\"/>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"unfinished\">Iniciar descubrimiento y filtrado de modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"132\"/>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"unfinished\">Activa el descubrimiento y filtrado de modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"151\"/>\n        <source>Default</source>\n        <translation type=\"unfinished\">Predeterminado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"152\"/>\n        <source>Likes</source>\n        <translation type=\"unfinished\">Me gusta</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddHFModelView.qml\" line=\"153\"/>\n        <source>Downloads</source>\n        <translation type=\"unfinished\">Descargas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"154\"/>\n        <source>Recent</source>\n        <translation type=\"unfinished\">Reciente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"162\"/>\n        <source>Sort by: %1</source>\n        <translation type=\"unfinished\">Ordenar por: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"176\"/>\n        <source>Asc</source>\n        <translation type=\"unfinished\">Asc</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"177\"/>\n        <source>Desc</source>\n        <translation type=\"unfinished\">Desc</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"190\"/>\n        <source>Sort dir: %1</source>\n        <translation type=\"unfinished\">Dirección de ordenamiento: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"212\"/>\n        <source>None</source>\n        <translation type=\"unfinished\">Ninguno</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"234\"/>\n        <source>Limit: %1</source>\n        <translation type=\"unfinished\">Límite: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"297\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\">Archivo del modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"298\"/>\n        <source>Model file to be downloaded</source>\n        <translation 
type=\"unfinished\">Archivo del modelo a descargar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"321\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\">Descripción</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"322\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\">Descripción del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\">Reanudar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\">Descargar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"363\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\">Detener/reiniciar/iniciar la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"375\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\">Eliminar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"382\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\">Eliminar modelo del sistema de archivos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"396\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" 
line=\"430\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\">Instalar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"431\"/>\n        <source>Install online model</source>\n        <translation type=\"unfinished\">Instalar modelo en línea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"441\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"447\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\">Describe un error que ocurrió durante la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"460\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"466\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\">Error por hardware incompatible</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"504\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\">Barra de progreso de descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"505\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\">Muestra el progreso realizado en la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"515\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\">Velocidad de descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"516\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\">Velocidad de descarga en bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"533\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\">Calculando...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"537\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"567\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"588\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"609\"/>\n        
<source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\">Si se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"544\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\">Indicador de ocupado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"545\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\">Se muestra cuando se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"558\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"564\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">ingrese $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"579\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"585\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\">ingrese $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"600\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\">ERROR: $MODEL_NAME está vacío.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"606\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">ingrese 
$MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"628\"/>\n        <source>File size</source>\n        <translation type=\"unfinished\">Tamaño del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"650\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\">Cuantificación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"672\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\">Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddModelView</name>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"55\"/>\n        <source>← Existing Models</source>\n        <translation>← Modelos existentes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"75\"/>\n        <source>Explore Models</source>\n        <translation>Explorar modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"86\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\">GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"93\"/>\n        <source>Remote Providers</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"100\"/>\n        <source>HuggingFace</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"vanished\">Descubre y descarga modelos mediante búsqueda por palabras clave...</translation>\n    </message>\n    <message>\n        <source>Text field for discovering and 
filtering downloadable models</source>\n        <translation type=\"vanished\">Campo de texto para descubrir y filtrar modelos descargables</translation>\n    </message>\n    <message>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"vanished\">Iniciar descubrimiento y filtrado de modelos</translation>\n    </message>\n    <message>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"vanished\">Activa el descubrimiento y filtrado de modelos</translation>\n    </message>\n    <message>\n        <source>Default</source>\n        <translation type=\"vanished\">Predeterminado</translation>\n    </message>\n    <message>\n        <source>Likes</source>\n        <translation type=\"vanished\">Me gusta</translation>\n    </message>\n    <message>\n        <source>Downloads</source>\n        <translation type=\"vanished\">Descargas</translation>\n    </message>\n    <message>\n        <source>Recent</source>\n        <translation type=\"vanished\">Reciente</translation>\n    </message>\n    <message>\n        <source>Asc</source>\n        <translation type=\"vanished\">Asc</translation>\n    </message>\n    <message>\n        <source>Desc</source>\n        <translation type=\"vanished\">Desc</translation>\n    </message>\n    <message>\n        <source>None</source>\n        <translation type=\"vanished\">Ninguno</translation>\n    </message>\n    <message>\n        <source>Searching · %1</source>\n        <translation type=\"vanished\">Buscando · %1</translation>\n    </message>\n    <message>\n        <source>Sort by: %1</source>\n        <translation type=\"vanished\">Ordenar por: %1</translation>\n    </message>\n    <message>\n        <source>Sort dir: %1</source>\n        <translation type=\"vanished\">Dirección de ordenamiento: %1</translation>\n    </message>\n    <message>\n        <source>Limit: %1</source>\n        <translation type=\"vanished\">Límite: %1</translation>\n  
  </message>\n    <message>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"vanished\">Error de red: no se pudo recuperar %1</translation>\n    </message>\n    <message>\n        <source>Busy indicator</source>\n        <translation type=\"vanished\">Indicador de ocupado</translation>\n    </message>\n    <message>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"vanished\">Se muestra cuando la solicitud de modelos está en curso</translation>\n    </message>\n    <message>\n        <source>Model file</source>\n        <translation type=\"vanished\">Archivo del modelo</translation>\n    </message>\n    <message>\n        <source>Model file to be downloaded</source>\n        <translation type=\"vanished\">Archivo del modelo a descargar</translation>\n    </message>\n    <message>\n        <source>Description</source>\n        <translation type=\"vanished\">Descripción</translation>\n    </message>\n    <message>\n        <source>File description</source>\n        <translation type=\"vanished\">Descripción del archivo</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">Cancelar</translation>\n    </message>\n    <message>\n        <source>Resume</source>\n        <translation type=\"vanished\">Reanudar</translation>\n    </message>\n    <message>\n        <source>Download</source>\n        <translation type=\"vanished\">Descargar</translation>\n    </message>\n    <message>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"vanished\">Detener/reiniciar/iniciar la descarga</translation>\n    </message>\n    <message>\n        <source>Remove</source>\n        <translation type=\"vanished\">Eliminar</translation>\n    </message>\n    <message>\n        <source>Remove model from filesystem</source>\n        <translation type=\"vanished\">Eliminar modelo del sistema de 
archivos</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"vanished\">Instalar</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">Instalar modelo en línea</translation>\n    </message>\n    <message>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"vanished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"vanished\">&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;ADVERTENCIA: No recomendado para tu hardware. 
El modelo requiere más memoria (%1 GB) de la que tu sistema tiene disponible (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <source>%1 GB</source>\n        <translation type=\"vanished\">%1 GB</translation>\n    </message>\n    <message>\n        <source>?</source>\n        <translation type=\"vanished\">?</translation>\n    </message>\n    <message>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"vanished\">Describe un error que ocurrió durante la descarga</translation>\n    </message>\n    <message>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"vanished\">Error por hardware incompatible</translation>\n    </message>\n    <message>\n        <source>Download progressBar</source>\n        <translation type=\"vanished\">Barra de progreso de descarga</translation>\n    </message>\n    <message>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"vanished\">Muestra el progreso realizado en la descarga</translation>\n    </message>\n    <message>\n        <source>Download speed</source>\n        <translation type=\"vanished\">Velocidad de descarga</translation>\n    </message>\n    <message>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"vanished\">Velocidad de descarga en bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <source>Calculating...</source>\n        <translation type=\"vanished\">Calculando...</translation>\n    </message>\n    <message>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"vanished\">Si se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"vanished\">Se muestra cuando se está 
calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">ingrese $API_KEY</translation>\n    </message>\n    <message>\n        <source>File size</source>\n        <translation type=\"vanished\">Tamaño del archivo</translation>\n    </message>\n    <message>\n        <source>RAM required</source>\n        <translation type=\"vanished\">RAM requerida</translation>\n    </message>\n    <message>\n        <source>Parameters</source>\n        <translation type=\"vanished\">Parámetros</translation>\n    </message>\n    <message>\n        <source>Quant</source>\n        <translation type=\"vanished\">Cuantificación</translation>\n    </message>\n    <message>\n        <source>Type</source>\n        <translation type=\"vanished\">Tipo</translation>\n    </message>\n    <message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"vanished\">ERROR: $API_KEY está vacío.</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"vanished\">ERROR: $BASE_URL está vacío.</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"vanished\">ingrese $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"vanished\">ERROR: $MODEL_NAME está vacío.</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"vanished\">ingrese $MODEL_NAME</translation>\n    </message>\n</context>\n<context>\n    <name>AddRemoteModelView</name>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"31\"/>\n        <source>Various remote model providers that use network resources for inference.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"55\"/>\n        <source>Groq</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"57\"/>\n        <source>Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"79\"/>\n        <source>OpenAI</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"81\"/>\n        <source>OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"96\"/>\n        <source>Mistral</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"98\"/>\n        <source>Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. 
Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"141\"/>\n        <source>Custom</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"143\"/>\n        <source>The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ApplicationSettings</name>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"16\"/>\n        <source>Application</source>\n        <translation>Aplicación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"25\"/>\n        <source>Network dialog</source>\n        <translation>Diálogo de red</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"26\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>optar por compartir comentarios/conversaciones</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"48\"/>\n        <source>Error dialog</source>\n        <translation>Diálogo de error</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"72\"/>\n        <source>Application 
Settings</source>\n        <translation>Configuración de la aplicación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"85\"/>\n        <source>General</source>\n        <translation>General</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"97\"/>\n        <source>Theme</source>\n        <translation>Tema</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"98\"/>\n        <source>The application color scheme.</source>\n        <translation>El esquema de colores de la aplicación.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"113\"/>\n        <source>Dark</source>\n        <translation>Oscuro</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"112\"/>\n        <source>Light</source>\n        <translation>Claro</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"39\"/>\n        <source>ERROR: Update system could not find the MaintenanceTool used to check for updates!&lt;br/&gt;&lt;br/&gt;Did you install this application using the online installer? If so, the MaintenanceTool executable should be located one directory above where this application resides on your filesystem.&lt;br/&gt;&lt;br/&gt;If you can&apos;t start it manually, then I&apos;m afraid you&apos;ll have to reinstall.</source>\n        <translation>ERROR: El sistema de actualización no pudo encontrar la Herramienta de Mantenimiento utilizada para buscar actualizaciones.&lt;br&gt;&lt;br&gt;¿Instaló esta aplicación utilizando el instalador en línea? 
Si es así, el ejecutable de la Herramienta de Mantenimiento debería estar ubicado un directorio por encima de donde reside esta aplicación en su sistema de archivos.&lt;br&gt;&lt;br&gt;Si no puede iniciarlo manualmente, me temo que tendrá que reinstalar la aplicación.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"114\"/>\n        <source>LegacyDark</source>\n        <translation>Oscuro legado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"136\"/>\n        <source>Font Size</source>\n        <translation>Tamaño de fuente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"137\"/>\n        <source>The size of text in the application.</source>\n        <translation>El tamaño del texto en la aplicación.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"223\"/>\n        <source>Device</source>\n        <translation>Dispositivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"151\"/>\n        <source>Small</source>\n        <translation>Pequeño</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"152\"/>\n        <source>Medium</source>\n        <translation>Mediano</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"153\"/>\n        <source>Large</source>\n        <translation>Grande</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"176\"/>\n        <source>Language and Locale</source>\n        <translation>Idioma y configuración regional</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" 
line=\"177\"/>\n        <source>The language and locale you wish to use.</source>\n        <translation>El idioma y la configuración regional que deseas usar.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"275\"/>\n        <source>Default Model</source>\n        <translation>Modelo predeterminado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"276\"/>\n        <source>The preferred model for new chats. Also used as the local server fallback.</source>\n        <translation>El modelo preferido para nuevos chats. También se utiliza como respaldo del servidor local.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"339\"/>\n        <source>Suggestion Mode</source>\n        <translation>Modo de sugerencia</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"340\"/>\n        <source>Generate suggested follow-up questions at the end of responses.</source>\n        <translation>Generar preguntas de seguimiento sugeridas al final de las respuestas.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"353\"/>\n        <source>When chatting with LocalDocs</source>\n        <translation>Al chatear con LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"354\"/>\n        <source>Whenever possible</source>\n        <translation>Siempre que sea posible</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"355\"/>\n        <source>Never</source>\n        <translation>Nunca</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"368\"/>\n        <source>Download 
Path</source>\n        <translation>Ruta de descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"369\"/>\n        <source>Where to store local models and the LocalDocs database.</source>\n        <translation>Dónde almacenar los modelos locales y la base de datos de LocalDocs.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"401\"/>\n        <source>Browse</source>\n        <translation>Explorar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"402\"/>\n        <source>Choose where to save model files</source>\n        <translation>Elegir dónde guardar los archivos del modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"413\"/>\n        <source>Enable Datalake</source>\n        <translation>Habilitar Datalake</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"414\"/>\n        <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>\n        <translation>Enviar chats y comentarios al Datalake de código abierto de GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"447\"/>\n        <source>Advanced</source>\n        <translation>Avanzado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"459\"/>\n        <source>CPU Threads</source>\n        <translation>Hilos de CPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"460\"/>\n        <source>The number of CPU threads used for inference and embedding.</source>\n        <translation>El número de hilos de CPU utilizados para inferencia e incrustación.</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"491\"/>\n        <source>Enable System Tray</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"492\"/>\n        <source>The application will minimize to the system tray when the window is closed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>Save Chat Context</source>\n        <translation type=\"vanished\">Guardar contexto del chat</translation>\n    </message>\n    <message>\n        <source>Save the chat model&apos;s state to disk for faster loading. WARNING: Uses ~2GB per chat.</source>\n        <translation type=\"vanished\">Guardar el estado del modelo de chat en el disco para una carga más rápida. ADVERTENCIA: Usa ~2GB por chat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"508\"/>\n        <source>Enable Local API Server</source>\n        <translation>Habilitar el servidor API local</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"509\"/>\n        <source>Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage.</source>\n        <translation>Exponer un servidor compatible con OpenAI a localhost. ADVERTENCIA: Resulta en un mayor uso de recursos.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"525\"/>\n        <source>API Server Port</source>\n        <translation>Puerto del servidor API</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"526\"/>\n        <source>The port to use for the local server. 
Requires restart.</source>\n        <translation>El puerto a utilizar para el servidor local. Requiere reinicio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"578\"/>\n        <source>Check For Updates</source>\n        <translation>Buscar actualizaciones</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"579\"/>\n        <source>Manually check for an update to GPT4All.</source>\n        <translation>Buscar manualmente una actualización para GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"588\"/>\n        <source>Updates</source>\n        <translation>Actualizaciones</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"196\"/>\n        <source>System Locale</source>\n        <translation>Regional del sistema</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"224\"/>\n        <source>The compute device used for text generation.</source>\n        <translation>El dispositivo de cómputo utilizado para la generación de texto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"242\"/>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"297\"/>\n        <source>Application default</source>\n        <translation>Predeterminado de la aplicación</translation>\n    </message>\n</context>\n<context>\n    <name>Chat</name>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"33\"/>\n        <location filename=\"../src/chat.h\" line=\"84\"/>\n        <source>New Chat</source>\n        <translation>Nuevo chat</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"46\"/>\n        <source>Server Chat</source>\n  
      <translation>Chat del servidor</translation>\n    </message>\n</context>\n<context>\n    <name>ChatAPIWorker</name>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"263\"/>\n        <source>ERROR: Network error occurred while connecting to the API server</source>\n        <translation>ERROR: Ocurrió un error de red al conectar con el servidor API</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"276\"/>\n        <source>ChatAPIWorker::handleFinished got HTTP Error %1 %2</source>\n        <translation>ChatAPIWorker::handleFinished obtuvo Error HTTP %1 %2</translation>\n    </message>\n</context>\n<context>\n    <name>ChatCollapsibleItem</name>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"37\"/>\n        <source>Analysis encountered error</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Thinking</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Analyzing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"41\"/>\n        <source>Thought for %1 %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>seconds</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatCollapsibleItem.qml\" line=\"44\"/>\n        <source>Analyzed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatDrawer</name>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"37\"/>\n        <source>Drawer</source>\n        <translation>Cajón</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"38\"/>\n        <source>Main navigation drawer</source>\n        <translation>Cajón de navegación principal</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"49\"/>\n        <source>＋ New Chat</source>\n        <translation>＋ Nuevo chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"50\"/>\n        <source>Create a new chat</source>\n        <translation>Crear un nuevo chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"199\"/>\n        <source>Select the current chat or edit the chat when in edit mode</source>\n        <translation>Seleccionar el chat actual o editar el chat cuando esté en modo de edición</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"216\"/>\n        <source>Edit chat name</source>\n        <translation>Editar nombre del chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"229\"/>\n        <source>Save chat name</source>\n        <translation>Guardar nombre del chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"246\"/>\n        <source>Delete chat</source>\n        <translation>Eliminar chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"283\"/>\n        <source>Confirm chat 
deletion</source>\n        <translation>Confirmar eliminación del chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"305\"/>\n        <source>Cancel chat deletion</source>\n        <translation>Cancelar eliminación del chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"317\"/>\n        <source>List of chats</source>\n        <translation>Lista de chats</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"318\"/>\n        <source>List of chats in the drawer dialog</source>\n        <translation>Lista de chats en el diálogo del cajón</translation>\n    </message>\n</context>\n<context>\n    <name>ChatItemView</name>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"83\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\">GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"84\"/>\n        <source>You</source>\n        <translation type=\"unfinished\">Tú</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"107\"/>\n        <source>response stopped ...</source>\n        <translation type=\"unfinished\">respuesta detenida ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"108\"/>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"unfinished\">recuperando documentos locales: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"109\"/>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"unfinished\">buscando en documentos locales: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" 
line=\"110\"/>\n        <source>processing ...</source>\n        <translation type=\"unfinished\">procesando ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"111\"/>\n        <source>generating response ...</source>\n        <translation type=\"unfinished\">generando respuesta ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"112\"/>\n        <source>generating questions ...</source>\n        <translation type=\"unfinished\">generando preguntas ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"113\"/>\n        <source>generating toolcall ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"545\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">Copiar</translation>\n    </message>\n    <message>\n        <source>Copy Message</source>\n        <translation type=\"obsolete\">Copiar mensaje</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"obsolete\">Desactivar markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"obsolete\">Activar markdown</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/ChatItemView.qml\" line=\"283\"/>\n        <source>%n Source(s)</source>\n        <translation type=\"unfinished\">\n            <numerusform>%n Fuente</numerusform>\n            <numerusform>%n Fuentes</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"430\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatItemView.qml\" line=\"460\"/>\n        <source>Edit this message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"461\"/>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"472\"/>\n        <source>All following messages will be permanently erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"471\"/>\n        <source>Redo this response?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"495\"/>\n        <source>Cannot edit chat without a loaded model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"497\"/>\n        <source>Cannot edit chat while the model is generating.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"506\"/>\n        <source>Edit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"517\"/>\n        <source>Cannot redo response without a loaded model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"519\"/>\n        <source>Cannot redo response while the model is generating.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"528\"/>\n        <source>Redo</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"565\"/>\n        <source>Like response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"594\"/>\n        <source>Dislike response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"657\"/>\n        <source>Suggested follow-ups</source>\n        <translation type=\"unfinished\">Seguimientos sugeridos</translation>\n    </message>\n</context>\n<context>\n    <name>ChatLLM</name>\n    <message>\n        <location filename=\"../src/chatllm.cpp\" line=\"1047\"/>\n        <source>Your message was too long and could not be processed (%1 &gt; %2). Please try again with something shorter.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatListModel</name>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"94\"/>\n        <source>TODAY</source>\n        <translation>HOY</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"96\"/>\n        <source>THIS WEEK</source>\n        <translation>ESTA SEMANA</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"98\"/>\n        <source>THIS MONTH</source>\n        <translation>ESTE MES</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"100\"/>\n        <source>LAST SIX MONTHS</source>\n        <translation>ÚLTIMOS SEIS MESES</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"102\"/>\n        <source>THIS YEAR</source>\n        <translation>ESTE AÑO</translation>\n    </message>\n    <message>\n        <location 
filename=\"../src/chatlistmodel.h\" line=\"104\"/>\n        <source>LAST YEAR</source>\n        <translation>AÑO PASADO</translation>\n    </message>\n</context>\n<context>\n    <name>ChatTextItem</name>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"67\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">Copiar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"73\"/>\n        <source>Copy Message</source>\n        <translation type=\"unfinished\">Copiar mensaje</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Disable markdown</source>\n        <translation type=\"unfinished\">Desactivar markdown</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Enable markdown</source>\n        <translation type=\"unfinished\">Activar markdown</translation>\n    </message>\n</context>\n<context>\n    <name>ChatView</name>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;Warning&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</source>\n        <translation>&lt;h3&gt;Advertencia&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</translation>\n    </message>\n    <message>\n        <source>Switch model dialog</source>\n        <translation type=\"vanished\">Diálogo para cambiar de modelo</translation>\n    </message>\n    <message>\n        <source>Warn the user if they switch models, then context will be erased</source>\n        <translation type=\"vanished\">Advertir al usuario si cambia de modelo, entonces se borrará el contexto</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"99\"/>\n        <source>Conversation copied to clipboard.</source>\n        <translation>Conversación copiada al portapapeles.</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"106\"/>\n        <source>Code copied to clipboard.</source>\n        <translation>Código copiado al portapapeles.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"113\"/>\n        <source>The entire chat will be erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"248\"/>\n        <source>Chat panel</source>\n        <translation>Panel de chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"249\"/>\n        <source>Chat panel with options</source>\n        <translation>Panel de chat con opciones</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"356\"/>\n        <source>Reload the currently loaded model</source>\n        <translation>Recargar el modelo actualmente cargado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"370\"/>\n        <source>Eject the currently loaded model</source>\n        <translation>Expulsar el modelo actualmente cargado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"382\"/>\n        <source>No model installed.</source>\n        <translation>No hay modelo instalado.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"384\"/>\n        <source>Model loading error.</source>\n        <translation>Error al cargar el modelo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"386\"/>\n        <source>Waiting for model...</source>\n        <translation>Esperando al modelo...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" 
line=\"388\"/>\n        <source>Switching context...</source>\n        <translation>Cambiando contexto...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"390\"/>\n        <source>Choose a model...</source>\n        <translation>Elige un modelo...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"392\"/>\n        <source>Not found: %1</source>\n        <translation>No encontrado: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"480\"/>\n        <source>The top item is the current model</source>\n        <translation>El elemento superior es el modelo actual</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"566\"/>\n        <source>LocalDocs</source>\n        <translation>DocumentosLocales</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"584\"/>\n        <source>Add documents</source>\n        <translation>Agregar documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"585\"/>\n        <source>add collections of documents to the chat</source>\n        <translation>agregar colecciones de documentos al chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"756\"/>\n        <source>Load the default model</source>\n        <translation>Cargar el modelo predeterminado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"757\"/>\n        <source>Loads the default model which can be changed in settings</source>\n        <translation>Carga el modelo predeterminado que se puede cambiar en la configuración</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"768\"/>\n        <source>No Model 
Installed</source>\n        <translation>No hay modelo instalado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1095\"/>\n        <source>Legacy prompt template needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1099\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1102\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1105\"/>\n        <source>Legacy system prompt needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"66\"/>\n        <source>&lt;h3&gt;Encountered an error loading model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a compatible format and type&lt;li&gt;Check the model file is complete in the download folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If you&apos;ve sideloaded the model ensure the file is not corrupt by checking md5sum&lt;li&gt;Read more about what models are supported in our &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the gui&lt;li&gt;Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation>&lt;h3&gt;Se encontró un error al cargar el modelo:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Los fallos en la carga de modelos pueden ocurrir por varias razones, pero las causas más comunes incluyen un formato de archivo incorrecto, una descarga incompleta o corrupta, un tipo de archivo equivocado, RAM del sistema insuficiente o un tipo de modelo incompatible. 
Aquí hay algunas sugerencias para resolver el problema:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Asegúrate de que el archivo del modelo tenga un formato y tipo compatibles&lt;li&gt;Verifica que el archivo del modelo esté completo en la carpeta de descargas&lt;li&gt;Puedes encontrar la carpeta de descargas en el diálogo de configuración&lt;li&gt;Si has cargado el modelo manualmente, asegúrate de que el archivo no esté corrupto verificando el md5sum&lt;li&gt;Lee más sobre qué modelos son compatibles en nuestra &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentación&lt;/a&gt; para la interfaz gráfica&lt;li&gt;Visita nuestro &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canal de discord&lt;/a&gt; para obtener ayuda</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"92\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"112\"/>\n        <source>Erase conversation?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"93\"/>\n        <source>Changing the model will erase the current conversation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"789\"/>\n        <source>Install a Model</source>\n        <translation>Instalar un modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"794\"/>\n        <source>Shows the add model view</source>\n        <translation>Muestra la vista de agregar modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"819\"/>\n        <source>Conversation with the model</source>\n        <translation>Conversación con el modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"820\"/>\n        <source>prompt / 
response pairs from the conversation</source>\n        <translation>pares de pregunta / respuesta de la conversación</translation>\n    </message>\n    <message>\n        <source>GPT4All</source>\n        <translation type=\"vanished\">GPT4All</translation>\n    </message>\n    <message>\n        <source>You</source>\n        <translation type=\"vanished\">Tú</translation>\n    </message>\n    <message>\n        <source>response stopped ...</source>\n        <translation type=\"vanished\">respuesta detenida ...</translation>\n    </message>\n    <message>\n        <source>processing ...</source>\n        <translation type=\"vanished\">procesando ...</translation>\n    </message>\n    <message>\n        <source>generating response ...</source>\n        <translation type=\"vanished\">generando respuesta ...</translation>\n    </message>\n    <message>\n        <source>generating questions ...</source>\n        <translation type=\"vanished\">generando preguntas ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1293\"/>\n        <source>Copy</source>\n        <translation>Copiar</translation>\n    </message>\n    <message>\n        <source>Copy Message</source>\n        <translation type=\"vanished\">Copiar mensaje</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"vanished\">Desactivar markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"vanished\">Activar markdown</translation>\n    </message>\n    <message>\n        <source>Thumbs up</source>\n        <translation type=\"vanished\">Me gusta</translation>\n    </message>\n    <message>\n        <source>Gives a thumbs up to the response</source>\n        <translation type=\"vanished\">Da un me gusta a la respuesta</translation>\n    </message>\n    <message>\n        <source>Thumbs down</source>\n        <translation 
type=\"vanished\">No me gusta</translation>\n    </message>\n    <message>\n        <source>Opens thumbs down dialog</source>\n        <translation type=\"vanished\">Abre el diálogo de no me gusta</translation>\n    </message>\n    <message>\n        <source>Suggested follow-ups</source>\n        <translation type=\"vanished\">Seguimientos sugeridos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"924\"/>\n        <source>Erase and reset chat session</source>\n        <translation>Borrar y reiniciar sesión de chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"942\"/>\n        <source>Copy chat session to clipboard</source>\n        <translation>Copiar sesión de chat al portapapeles</translation>\n    </message>\n    <message>\n        <source>Redo last chat response</source>\n        <translation type=\"vanished\">Rehacer última respuesta del chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1216\"/>\n        <source>Add media</source>\n        <translation>Agregar medios</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1217\"/>\n        <source>Adds media to the prompt</source>\n        <translation>Agrega medios al mensaje</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1351\"/>\n        <source>Stop generating</source>\n        <translation>Detener generación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1352\"/>\n        <source>Stop the current response generation</source>\n        <translation>Detener la generación de la respuesta actual</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1396\"/>\n        <source>Attach</source>\n        
<translation>Adjuntar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1398\"/>\n        <source>Single File</source>\n        <translation>Un solo archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1007\"/>\n        <source>Reloads the model</source>\n        <translation>Recarga el modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"394\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"1005\"/>\n        <source>Reload · %1</source>\n        <translation>Recargar · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"396\"/>\n        <source>Loading · %1</source>\n        <translation>Cargando · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"732\"/>\n        <source>Load · %1 (default) →</source>\n        <translation>Cargar · %1 (predeterminado) →</translation>\n    </message>\n    <message>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"vanished\">recuperando documentos locales: %1 ...</translation>\n    </message>\n    <message>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"vanished\">buscando en documentos locales: %1 ...</translation>\n    </message>\n    <message numerus=\"yes\">\n        <source>%n Source(s)</source>\n        <translation type=\"vanished\">\n            <numerusform>%n Fuente</numerusform>\n            <numerusform>%n Fuentes</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Send a message...</source>\n        <translation>Enviar un mensaje...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        
<source>Load a model to continue...</source>\n        <translation>Carga un modelo para continuar...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1246\"/>\n        <source>Send messages/prompts to the model</source>\n        <translation>Enviar mensajes/indicaciones al modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1287\"/>\n        <source>Cut</source>\n        <translation>Cortar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1299\"/>\n        <source>Paste</source>\n        <translation>Pegar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1303\"/>\n        <source>Select All</source>\n        <translation>Seleccionar todo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1374\"/>\n        <source>Send message</source>\n        <translation>Enviar mensaje</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1375\"/>\n        <source>Sends the message/prompt contained in textfield to the model</source>\n        <translation>Envía el mensaje/indicación contenido en el campo de texto al modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"777\"/>\n        <source>GPT4All requires that you install at least one\nmodel to get started</source>\n        <translation>GPT4All requiere que instale al menos un\nmodelo para comenzar</translation>\n    </message>\n    <message>\n        <source>restoring from text ...</source>\n        <translation type=\"vanished\">restaurando desde texto ...</translation>\n    </message>\n</context>\n<context>\n    <name>CodeInterpreter</name>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"79\"/>\n       
 <source>Code Interpreter</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"80\"/>\n        <source>compute javascript code using console.log as output</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>CollectionsDrawer</name>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"70\"/>\n        <source>Warning: searching collections while indexing can return incomplete results</source>\n        <translation>Advertencia: buscar en colecciones mientras se indexan puede devolver resultados incompletos</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n archivo</numerusform>\n            <numerusform>%n archivos</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n palabra</numerusform>\n            <numerusform>%n palabras</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"103\"/>\n        <source>Updating</source>\n        <translation>Actualizando</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"128\"/>\n        <source>＋ Add Docs</source>\n        <translation>＋ Agregar documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"137\"/>\n        <source>Select a collection to make it available to the chat model.</source>\n        <translation>Seleccione una colección para hacerla disponible 
al modelo de chat.</translation>\n    </message>\n</context>\n<context>\n    <name>ConfirmationDialog</name>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"42\"/>\n        <source>OK</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"49\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">Cancelar</translation>\n    </message>\n</context>\n<context>\n    <name>Download</name>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"278\"/>\n        <source>Model &quot;%1&quot; is installed successfully.</source>\n        <translation>El modelo &quot;%1&quot; se ha instalado correctamente.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"288\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERROR: $MODEL_NAME está vacío.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"294\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERROR: $API_KEY está vacía.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"300\"/>\n        <source>ERROR: $BASE_URL is invalid.</source>\n        <translation>ERROR: $BASE_URL no es válida.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"306\"/>\n        <source>ERROR: Model &quot;%1 (%2)&quot; is conflict.</source>\n        <translation>ERROR: El modelo &quot;%1 (%2)&quot; está en conflicto.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"325\"/>\n        <source>Model &quot;%1 (%2)&quot; is installed successfully.</source>\n        <translation>El modelo &quot;%1 (%2)&quot; se ha instalado correctamente.</translation>\n    
</message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"349\"/>\n        <source>Model &quot;%1&quot; is removed.</source>\n        <translation>El modelo &quot;%1&quot; ha sido eliminado.</translation>\n    </message>\n</context>\n<context>\n    <name>HomeView</name>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"49\"/>\n        <source>Welcome to GPT4All</source>\n        <translation>Bienvenido a GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"56\"/>\n        <source>The privacy-first LLM chat application</source>\n        <translation>La aplicación de chat LLM que prioriza la privacidad</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"66\"/>\n        <source>Start chatting</source>\n        <translation>Comenzar a chatear</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"81\"/>\n        <source>Start Chatting</source>\n        <translation>Iniciar chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"82\"/>\n        <source>Chat with any LLM</source>\n        <translation>Chatear con cualquier LLM</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"92\"/>\n        <source>LocalDocs</source>\n        <translation>DocumentosLocales</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"93\"/>\n        <source>Chat with your local files</source>\n        <translation>Chatear con tus archivos locales</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"103\"/>\n        <source>Find Models</source>\n        <translation>Buscar modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" 
line=\"104\"/>\n        <source>Explore and download models</source>\n        <translation>Explorar y descargar modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"190\"/>\n        <source>Latest news</source>\n        <translation>Últimas noticias</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"191\"/>\n        <source>Latest news from GPT4All</source>\n        <translation>Últimas noticias de GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"222\"/>\n        <source>Release Notes</source>\n        <translation>Notas de la versión</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"228\"/>\n        <source>Documentation</source>\n        <translation>Documentación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"234\"/>\n        <source>Discord</source>\n        <translation>Discord</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"240\"/>\n        <source>X (Twitter)</source>\n        <translation>X (Twitter)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"246\"/>\n        <source>Github</source>\n        <translation>Github</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"257\"/>\n        <source>nomic.ai</source>\n        <translation>nomic.ai</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"282\"/>\n        <source>Subscribe to Newsletter</source>\n        <translation>Suscribirse al boletín</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsSettings</name>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" 
line=\"19\"/>\n        <source>LocalDocs</source>\n        <translation>DocumentosLocales</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"29\"/>\n        <source>LocalDocs Settings</source>\n        <translation>Configuración de DocumentosLocales</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"38\"/>\n        <source>Indexing</source>\n        <translation>Indexación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"51\"/>\n        <source>Allowed File Extensions</source>\n        <translation>Extensiones de archivo permitidas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"100\"/>\n        <source>Embedding</source>\n        <translation>Incrustación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"112\"/>\n        <source>Use Nomic Embed API</source>\n        <translation>Usar API de incrustación Nomic</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"130\"/>\n        <source>Nomic API Key</source>\n        <translation>Clave API de Nomic</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"131\"/>\n        <source>API key to use for Nomic Embed. Get one from the Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API keys page&lt;/a&gt;. Requires restart.</source>\n        <translation>Clave API para usar con Nomic Embed. Obtén una en la &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;página de claves API&lt;/a&gt; de Atlas. 
Requiere reinicio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"165\"/>\n        <source>Embeddings Device</source>\n        <translation>Dispositivo de incrustaciones</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"166\"/>\n        <source>The compute device used for embeddings. Requires restart.</source>\n        <translation>El dispositivo de cómputo utilizado para las incrustaciones. Requiere reinicio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"52\"/>\n        <source>Comma-separated list. LocalDocs will only attempt to process files with these extensions.</source>\n        <translation>Lista separada por comas. LocalDocs solo intentará procesar archivos con estas extensiones.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"113\"/>\n        <source>Embed documents using the fast Nomic API instead of a private local model. Requires restart.</source>\n        <translation>Incrustar documentos usando la API rápida de Nomic en lugar de un modelo local privado. 
Requiere reinicio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"176\"/>\n        <source>Application default</source>\n        <translation>Predeterminado de la aplicación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"211\"/>\n        <source>Display</source>\n        <translation>Visualización</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"224\"/>\n        <source>Show Sources</source>\n        <translation>Mostrar fuentes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"225\"/>\n        <source>Display the sources used for each response.</source>\n        <translation>Mostrar las fuentes utilizadas para cada respuesta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"242\"/>\n        <source>Advanced</source>\n        <translation>Avanzado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"258\"/>\n        <source>Warning: Advanced usage only.</source>\n        <translation>Advertencia: Solo para uso avanzado.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"259\"/>\n        <source>Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model&apos;s context window. More info &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;here&lt;/a&gt;.</source>\n        <translation>Valores demasiado grandes pueden causar fallos en localdocs, respuestas extremadamente lentas o falta de respuesta. 
En términos generales, los {N caracteres x N fragmentos} se añaden a la ventana de contexto del modelo. Más información &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;aquí&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"268\"/>\n        <source>Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Número de caracteres por fragmento de documento. Números más grandes aumentan la probabilidad de respuestas verídicas, pero también resultan en una generación más lenta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"294\"/>\n        <source>Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Máximo de N mejores coincidencias de fragmentos de documentos recuperados para añadir al contexto del prompt. 
Números más grandes aumentan la probabilidad de respuestas verídicas, pero también resultan en una generación más lenta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"267\"/>\n        <source>Document snippet size (characters)</source>\n        <translation>Tamaño del fragmento de documento (caracteres)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"293\"/>\n        <source>Max document snippets per prompt</source>\n        <translation>Máximo de fragmentos de documento por indicación</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsView</name>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"52\"/>\n        <source>LocalDocs</source>\n        <translation>DocumentosLocales</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"58\"/>\n        <source>Chat with your local files</source>\n        <translation>Chatea con tus archivos locales</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"71\"/>\n        <source>＋ Add Collection</source>\n        <translation>＋ Agregar colección</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"109\"/>\n        <source>No Collections Installed</source>\n        <translation>No hay colecciones instaladas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"118\"/>\n        <source>Install a collection of local documents to get started using this feature</source>\n        <translation>Instala una colección de documentos locales para comenzar a usar esta función</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"129\"/>\n        <source>＋ Add Doc Collection</source>\n    
    <translation>＋ Agregar colección de documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"134\"/>\n        <source>Shows the add model view</source>\n        <translation>Muestra la vista de agregar modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"231\"/>\n        <source>Indexing progressBar</source>\n        <translation>Barra de progreso de indexación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"232\"/>\n        <source>Shows the progress made in the indexing</source>\n        <translation>Muestra el progreso realizado en la indexación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"257\"/>\n        <source>ERROR</source>\n        <translation>ERROR</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"261\"/>\n        <source>INDEXING</source>\n        <translation>INDEXANDO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"265\"/>\n        <source>EMBEDDING</source>\n        <translation>INCRUSTANDO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"268\"/>\n        <source>REQUIRES UPDATE</source>\n        <translation>REQUIERE ACTUALIZACIÓN</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"271\"/>\n        <source>READY</source>\n        <translation>LISTO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"273\"/>\n        <source>INSTALLING</source>\n        <translation>INSTALANDO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"300\"/>\n        
<source>Indexing in progress</source>\n        <translation>Indexación en progreso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"303\"/>\n        <source>Embedding in progress</source>\n        <translation>Incrustación en progreso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"306\"/>\n        <source>This collection requires an update after version change</source>\n        <translation>Esta colección requiere una actualización después del cambio de versión</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"309\"/>\n        <source>Automatically reindexes upon changes to the folder</source>\n        <translation>Reindexación automática al cambiar la carpeta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"311\"/>\n        <source>Installation in progress</source>\n        <translation>Instalación en progreso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"325\"/>\n        <source>%</source>\n        <translation>%</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n archivo</numerusform>\n            <numerusform>%n archivos</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n palabra</numerusform>\n            <numerusform>%n palabras</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"408\"/>\n        <source>Remove</source>\n  
      <translation>Eliminar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"420\"/>\n        <source>Rebuild</source>\n        <translation>Reconstruir</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"423\"/>\n        <source>Reindex this folder from scratch. This is slow and usually not needed.</source>\n        <translation>Reindexar esta carpeta desde cero. Esto es lento y generalmente no es necesario.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"430\"/>\n        <source>Update</source>\n        <translation>Actualizar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"433\"/>\n        <source>Update the collection to the new version. This is a slow operation.</source>\n        <translation>Actualizar la colección a la nueva versión. Esta es una operación lenta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;ERROR: The LocalDocs database cannot be accessed or is not valid.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Note: You will need to restart after trying any of the following suggested fixes.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Make sure that the folder set as &lt;b&gt;Download Path&lt;/b&gt; exists on the file system.&lt;/li&gt;&lt;li&gt;Check ownership as well as read and write permissions of the &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;If there is a &lt;b&gt;localdocs_v2.db&lt;/b&gt; file, check its ownership and read/write permissions, too.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;If the problem persists and there are any &apos;localdocs_v*.db&apos; files present, as a last resort you can&lt;br&gt;try backing them up and removing them. 
You will have to recreate your collections, however.</source>\n        <translation>&lt;h3&gt;ERROR: No se puede acceder a la base de datos LocalDocs o no es válida.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Nota: Necesitará reiniciar después de intentar cualquiera de las siguientes soluciones sugeridas.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Asegúrese de que la carpeta establecida como &lt;b&gt;Ruta de Descarga&lt;/b&gt; exista en el sistema de archivos.&lt;/li&gt;&lt;li&gt;Verifique la propiedad y los permisos de lectura y escritura de la &lt;b&gt;Ruta de Descarga&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;Si hay un archivo &lt;b&gt;localdocs_v2.db&lt;/b&gt;, verifique también su propiedad y permisos de lectura/escritura.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;Si el problema persiste y hay archivos &apos;localdocs_v*.db&apos; presentes, como último recurso puede&lt;br&gt;intentar hacer una copia de seguridad y eliminarlos. Sin embargo, tendrá que recrear sus colecciones.</translation>\n    </message>\n</context>\n<context>\n    <name>ModelList</name>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1716\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal OpenAI API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to OpenAI!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with OpenAI&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;here.&lt;/a&gt;&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Requiere clave API personal de OpenAI.&lt;/li&gt;&lt;li&gt;ADVERTENCIA: ¡Enviará sus chats a OpenAI!&lt;/li&gt;&lt;li&gt;Su clave API se almacenará en el disco&lt;/li&gt;&lt;li&gt;Solo se usará para comunicarse con OpenAI&lt;/li&gt;&lt;li&gt;Puede solicitar una clave API &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;aquí.&lt;/a&gt;&lt;/li&gt;</translation>\n    </message>\n    <message>\n        
<location filename=\"../src/modellist.cpp\" line=\"1735\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo ChatGPT GPT-3.5 Turbo de OpenAI&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1749\"/>\n        <source>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info.</source>\n        <translation>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Aunque pagues a OpenAI por ChatGPT-4, esto no garantiza el acceso a la clave API. Contacta a OpenAI para más información.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1764\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</source>\n        <translation>&lt;strong&gt;Modelo ChatGPT GPT-4 de OpenAI&lt;/strong&gt;&lt;br&gt; %1 %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1777\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Requiere una clave API personal de Mistral.&lt;/li&gt;&lt;li&gt;ADVERTENCIA: ¡Enviará tus chats a Mistral!&lt;/li&gt;&lt;li&gt;Tu clave API se almacenará en el disco&lt;/li&gt;&lt;li&gt;Solo se usará para comunicarse con Mistral&lt;/li&gt;&lt;li&gt;Puedes solicitar una clave API &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;aquí&lt;/a&gt;.&lt;/li&gt;</translation>\n    
</message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1796\"/>\n        <source>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo Mistral Tiny&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1822\"/>\n        <source>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo Mistral Small&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1849\"/>\n        <source>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo Mistral Medium&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"2303\"/>\n        <source>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Creado por %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Publicado el %2.&lt;li&gt;Este modelo tiene %3 me gusta.&lt;li&gt;Este modelo tiene %4 descargas.&lt;li&gt;Más información puede encontrarse &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;aquí.&lt;/a&gt;&lt;/ul&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1406\"/>\n        <source>%1 (%2)</source>\n        <translation>%1 (%2)</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1344\"/>\n        <location filename=\"../src/modellist.cpp\" line=\"1395\"/>\n        <source>cannot open &quot;%1&quot;: %2</source>\n        
<translation>no se puede abrir &quot;%1&quot;: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1356\"/>\n        <source>cannot create &quot;%1&quot;: %2</source>\n        <translation>no se puede crear &quot;%1&quot;: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1407\"/>\n        <source>&lt;strong&gt;OpenAI-Compatible API Model&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API Key: %1&lt;/li&gt;&lt;li&gt;Base URL: %2&lt;/li&gt;&lt;li&gt;Model Name: %3&lt;/li&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Modelo de API compatible con OpenAI&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Clave API: %1&lt;/li&gt;&lt;li&gt;URL base: %2&lt;/li&gt;&lt;li&gt;Nombre del modelo: %3&lt;/li&gt;&lt;/ul&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1862\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal API key and the API base URL.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to the OpenAI-compatible API Server you specified!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with the OpenAI-compatible API Server&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Requiere una clave API personal y la URL base de la API.&lt;/li&gt;&lt;li&gt;ADVERTENCIA: ¡Enviará sus chats al servidor de API compatible con OpenAI que especificó!&lt;/li&gt;&lt;li&gt;Su clave API se almacenará en el disco&lt;/li&gt;&lt;li&gt;Solo se utilizará para comunicarse con el servidor de API compatible con OpenAI&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1879\"/>\n        <source>&lt;strong&gt;Connect to OpenAI-compatible API server&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Conectar al servidor de API compatible con 
OpenAI&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n</context>\n<context>\n    <name>ModelSettings</name>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"14\"/>\n        <source>Model</source>\n        <translation>Modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <source>%1 system message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>The system message will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>removed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>reset to the default</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>%1 chat template?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" 
line=\"31\"/>\n        <source>The chat template will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>erased</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"57\"/>\n        <source>Model Settings</source>\n        <translation>Configuración del modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"108\"/>\n        <source>Clone</source>\n        <translation>Clonar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"118\"/>\n        <source>Remove</source>\n        <translation>Eliminar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"132\"/>\n        <source>Name</source>\n        <translation>Nombre</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"165\"/>\n        <source>Model File</source>\n        <translation>Archivo del modelo</translation>\n    </message>\n    <message>\n        <source>System Prompt</source>\n        <translation type=\"vanished\">Indicación del sistema</translation>\n    </message>\n    <message>\n        <source>Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens.</source>\n        <translation type=\"vanished\">Prefijado al inicio de cada conversación. 
Debe contener los tokens de encuadre apropiados.</translation>\n    </message>\n    <message>\n        <source>Prompt Template</source>\n        <translation type=\"vanished\">Plantilla de indicación</translation>\n    </message>\n    <message>\n        <source>The template that wraps every prompt.</source>\n        <translation type=\"vanished\">La plantilla que envuelve cada indicación.</translation>\n    </message>\n    <message>\n        <source>Must contain the string &quot;%1&quot; to be replaced with the user&apos;s input.</source>\n        <translation type=\"vanished\">Debe contener la cadena &quot;%1&quot; para ser reemplazada con la entrada del usuario.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"409\"/>\n        <source>Chat Name Prompt</source>\n        <translation>Indicación para el nombre del chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"410\"/>\n        <source>Prompt used to automatically generate chat names.</source>\n        <translation>Indicación utilizada para generar automáticamente nombres de chat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"452\"/>\n        <source>Suggested FollowUp Prompt</source>\n        <translation>Indicación de seguimiento sugerida</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"453\"/>\n        <source>Prompt used to generate suggested follow-up questions.</source>\n        <translation>Indicación utilizada para generar preguntas de seguimiento sugeridas.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"506\"/>\n        <source>Context Length</source>\n        <translation>Longitud del contexto</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" 
line=\"507\"/>\n        <source>Number of input and output tokens the model sees.</source>\n        <translation>Número de tokens de entrada y salida que el modelo ve.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"566\"/>\n        <source>Temperature</source>\n        <translation>Temperatura</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"567\"/>\n        <source>Randomness of model output. Higher -&gt; more variation.</source>\n        <translation>Aleatoriedad de la salida del modelo. Mayor -&gt; más variación.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"578\"/>\n        <source>Temperature increases the chances of choosing less likely tokens.\nNOTE: Higher temperature gives more creative but less predictable outputs.</source>\n        <translation>La temperatura aumenta las probabilidades de elegir tokens menos probables.\nNOTA: Una temperatura más alta da resultados más creativos pero menos predecibles.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"612\"/>\n        <source>Top-P</source>\n        <translation>Top-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"613\"/>\n        <source>Nucleus Sampling factor. Lower -&gt; more predictable.</source>\n        <translation>Factor de muestreo de núcleo. 
Menor -&gt; más predecible.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"623\"/>\n        <source>Only the most likely tokens up to a total probability of top_p can be chosen.\nNOTE: Prevents choosing highly unlikely tokens.</source>\n        <translation>Solo se pueden elegir los tokens más probables hasta una probabilidad total de top_p.\nNOTA: Evita elegir tokens altamente improbables.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"657\"/>\n        <source>Min-P</source>\n        <translation>Min-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"658\"/>\n        <source>Minimum token probability. Higher -&gt; more predictable.</source>\n        <translation>Probabilidad mínima del token. Mayor -&gt; más predecible.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"668\"/>\n        <source>Sets the minimum relative probability for a token to be considered.</source>\n        <translation>Establece la probabilidad relativa mínima para que un token sea considerado.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"704\"/>\n        <source>Top-K</source>\n        <translation>Top-K</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"705\"/>\n        <source>Size of selection pool for tokens.</source>\n        <translation>Tamaño del grupo de selección para tokens.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"716\"/>\n        <source>Only the top K most likely tokens will be chosen from.</source>\n        <translation>Solo se elegirán los K tokens más probables.</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ModelSettings.qml\" line=\"751\"/>\n        <source>Max Length</source>\n        <translation>Longitud máxima</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"752\"/>\n        <source>Maximum response length, in tokens.</source>\n        <translation>Longitud máxima de respuesta, en tokens.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"797\"/>\n        <source>Prompt Batch Size</source>\n        <translation>Tamaño del lote de indicaciones</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"798\"/>\n        <source>The batch size used for prompt processing.</source>\n        <translation>El tamaño del lote utilizado para el procesamiento de indicaciones.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"809\"/>\n        <source>Amount of prompt tokens to process at once.\nNOTE: Higher values can speed up reading prompts but will use more RAM.</source>\n        <translation>Cantidad de tokens de prompt a procesar de una vez.\nNOTA: Valores más altos pueden acelerar la lectura de prompts, pero usarán más RAM.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"844\"/>\n        <source>Repeat Penalty</source>\n        <translation>Penalización por repetición</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"845\"/>\n        <source>Repetition penalty factor. Set to 1 to disable.</source>\n        <translation>Factor de penalización por repetición. 
Establecer a 1 para desactivar.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"889\"/>\n        <source>Repeat Penalty Tokens</source>\n        <translation>Tokens de penalización por repetición</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"890\"/>\n        <source>Number of previous tokens used for penalty.</source>\n        <translation>Número de tokens anteriores utilizados para la penalización.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"935\"/>\n        <source>GPU Layers</source>\n        <translation>Capas de GPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"936\"/>\n        <source>Number of model layers to load into VRAM.</source>\n        <translation>Número de capas del modelo a cargar en la VRAM.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"528\"/>\n        <source>Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Máximo de tokens combinados de pregunta/respuesta antes de que se pierda información.\nUsar más contexto del que el modelo fue entrenado producirá resultados deficientes.\nNOTA: No surtirá efecto hasta que recargue el modelo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"190\"/>\n        <source>System Message</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"191\"/>\n        <source>A message to set the context or guide the behavior of the model. Leave blank for none. 
NOTE: Since GPT4All 3.5, this should not contain control tokens.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"218\"/>\n        <source>System message is not &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;plain text&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"288\"/>\n        <source>Chat Template</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"289\"/>\n        <source>This Jinja template turns the chat into input for the model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"371\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"375\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"379\"/>\n        <source>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Syntax error&lt;/a&gt;: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"383\"/>\n        <source>Chat template is not in &lt;a 
href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja format&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"947\"/>\n        <source>How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model.\nLower values increase CPU load and RAM usage, and make inference slower.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Cuántas capas del modelo cargar en la VRAM. Disminuya esto si GPT4All se queda sin VRAM al cargar este modelo.\nValores más bajos aumentan la carga de la CPU y el uso de RAM, y hacen que la inferencia sea más lenta.\nNOTA: No surte efecto hasta que recargue el modelo.</translation>\n    </message>\n</context>\n<context>\n    <name>ModelsView</name>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"40\"/>\n        <source>No Models Installed</source>\n        <translation>No hay modelos instalados</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"49\"/>\n        <source>Install a model to get started using GPT4All</source>\n        <translation>Instala un modelo para empezar a usar GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"60\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"102\"/>\n        <source>＋ Add Model</source>\n        <translation>＋ Agregar modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"65\"/>\n        <source>Shows the add model view</source>\n        <translation>Muestra la vista de agregar modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"83\"/>\n        <source>Installed Models</source>\n        
<translation>Modelos instalados</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"89\"/>\n        <source>Locally installed chat models</source>\n        <translation>Modelos de chat instalados localmente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"147\"/>\n        <source>Model file</source>\n        <translation>Archivo del modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"148\"/>\n        <source>Model file to be downloaded</source>\n        <translation>Archivo del modelo a descargar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"170\"/>\n        <source>Description</source>\n        <translation>Descripción</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"171\"/>\n        <source>File description</source>\n        <translation>Descripción del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Cancel</source>\n        <translation>Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Resume</source>\n        <translation>Reanudar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"204\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Detener/reiniciar/iniciar la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"216\"/>\n        <source>Remove</source>\n        <translation>Eliminar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"223\"/>\n        <source>Remove model from filesystem</source>\n 
       <translation>Eliminar modelo del sistema de archivos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"237\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"271\"/>\n        <source>Install</source>\n        <translation>Instalar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"272\"/>\n        <source>Install online model</source>\n        <translation>Instalar modelo en línea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"282\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"301\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;ADVERTENCIA: No recomendado para su hardware. 
El modelo requiere más memoria (%1 GB) de la que su sistema tiene disponible (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>?</source>\n        <translation>?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"288\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Describe un error que ocurrió durante la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"307\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Error por hardware incompatible</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"345\"/>\n        <source>Download progressBar</source>\n        <translation>Barra de progreso de descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"346\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Muestra el progreso realizado en la descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"356\"/>\n        <source>Download speed</source>\n        <translation>Velocidad de descarga</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"357\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>Velocidad de descarga en bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"374\"/>\n       
 <source>Calculating...</source>\n        <translation>Calculando...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"378\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"408\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"429\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"450\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Si se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"385\"/>\n        <source>Busy indicator</source>\n        <translation>Indicador de ocupado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"386\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>Se muestra cuando se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"405\"/>\n        <source>enter $API_KEY</source>\n        <translation>ingrese $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"469\"/>\n        <source>File size</source>\n        <translation>Tamaño del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"491\"/>\n        <source>RAM required</source>\n        <translation>RAM requerida</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"513\"/>\n        <source>Parameters</source>\n        <translation>Parámetros</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"535\"/>\n        <source>Quant</source>\n        <translation>Cuantificación</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ModelsView.qml\" line=\"557\"/>\n        <source>Type</source>\n        <translation>Tipo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"399\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERROR: $API_KEY está vacía.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"420\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>ERROR: $BASE_URL está vacía.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"426\"/>\n        <source>enter $BASE_URL</source>\n        <translation>ingrese $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"441\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERROR: $MODEL_NAME está vacío.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"447\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>ingrese $MODEL_NAME</translation>\n    </message>\n</context>\n<context>\n    <name>MyFancyLink</name>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"42\"/>\n        <source>Fancy link</source>\n        <translation>Enlace elegante</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"43\"/>\n        <source>A stylized link</source>\n        <translation>Un enlace estilizado</translation>\n    </message>\n</context>\n<context>\n    <name>MyFileDialog</name>\n    <message>\n        <location filename=\"../qml/MyFileDialog.qml\" line=\"7\"/>\n        <source>Please choose a file</source>\n        <translation>Por favor elige un archivo</translation>\n    </message>\n</context>\n<context>\n    <name>MyFolderDialog</name>\n    <message>\n        <location 
filename=\"../qml/MyFolderDialog.qml\" line=\"7\"/>\n        <source>Please choose a directory</source>\n        <translation>Por favor, elija un directorio</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsLabel</name>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsStack</name>\n    <message>\n        <source>Please choose a directory</source>\n        <translation type=\"vanished\">Por favor, elija un directorio</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsTab</name>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"24\"/>\n        <source>Restore defaults?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"25\"/>\n        <source>This page of settings will be reset to the defaults.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"69\"/>\n        <source>Restore Defaults</source>\n        <translation>Restaurar valores predeterminados</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"73\"/>\n        <source>Restores settings dialog to a default state</source>\n        <translation>Restaura el diálogo de configuración a su estado predeterminado</translation>\n    </message>\n</context>\n<context>\n    <name>NetworkDialog</name>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"39\"/>\n        
<source>Contribute data to the GPT4All Opensource Datalake.</source>\n        <translation>Contribuir datos al Datalake de código abierto de GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"55\"/>\n        <source>By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!</source>\n        <translation>Al habilitar esta función, podrás participar en el proceso democrático de entrenar un modelo de lenguaje grande contribuyendo con datos para futuras mejoras del modelo.\n\nCuando un modelo GPT4All te responda y hayas aceptado participar, tu conversación se enviará al Datalake de Código Abierto de GPT4All. Además, podrás indicar si te gusta o no su respuesta. Si no te gusta una respuesta, puedes sugerir una alternativa. Estos datos se recopilarán y agregarán en el Datalake de GPT4All.\n\nNOTA: Al activar esta función, estarás enviando tus datos al Datalake de Código Abierto de GPT4All. 
No debes esperar privacidad en el chat cuando esta función esté habilitada. Sin embargo, puedes esperar una atribución opcional si lo deseas. Tus datos de chat estarán disponibles abiertamente para que cualquiera los descargue y serán utilizados por Nomic AI para mejorar futuros modelos de GPT4All. Nomic AI conservará toda la información de atribución adjunta a tus datos y se te acreditará como contribuyente en cualquier lanzamiento de modelo GPT4All que utilice tus datos.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"70\"/>\n        <source>Terms for opt-in</source>\n        <translation>Términos para optar por participar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"71\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Describe lo que sucederá cuando opte por participar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"79\"/>\n        <source>Please provide a name for attribution (optional)</source>\n        <translation>Por favor, proporcione un nombre para la atribución (opcional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"81\"/>\n        <source>Attribution (optional)</source>\n        <translation>Atribución (opcional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"82\"/>\n        <source>Provide attribution</source>\n        <translation>Proporcionar atribución</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"95\"/>\n        <source>Enable</source>\n        <translation>Habilitar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"96\"/>\n        <source>Enable opt-in</source>\n        
<translation>Habilitar participación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"100\"/>\n        <source>Cancel</source>\n        <translation>Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"101\"/>\n        <source>Cancel opt-in</source>\n        <translation>Cancelar participación</translation>\n    </message>\n</context>\n<context>\n    <name>NewVersionDialog</name>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"34\"/>\n        <source>New version is available</source>\n        <translation>Nueva versión disponible</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"46\"/>\n        <source>Update</source>\n        <translation>Actualizar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"48\"/>\n        <source>Update to new version</source>\n        <translation>Actualizar a nueva versión</translation>\n    </message>\n</context>\n<context>\n    <name>PopupDialog</name>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"38\"/>\n        <source>Reveals a shortlived help balloon</source>\n        <translation>Muestra un globo de ayuda de corta duración</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"48\"/>\n        <source>Busy indicator</source>\n        <translation>Indicador de ocupado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"49\"/>\n        <source>Displayed when the popup is showing busy</source>\n        <translation>Se muestra cuando la ventana emergente está ocupada</translation>\n    </message>\n</context>\n<context>\n    <name>RemoteModelCard</name>\n    <message>\n        <location 
filename=\"../qml/RemoteModelCard.qml\" line=\"92\"/>\n        <source>API Key</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"104\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"117\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">ingrese $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"120\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\">Si se está calculando el hash del archivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"127\"/>\n        <source>Base Url</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"138\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"144\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\">ingrese $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"152\"/>\n        <source>Model Name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"163\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\">ERROR: $MODEL_NAME está vacío.</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/RemoteModelCard.qml\" line=\"169\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">ingrese $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"179\"/>\n        <source>Models</source>\n        <translation type=\"unfinished\">Modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"199\"/>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"217\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\">Instalar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"218\"/>\n        <source>Install remote model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>SettingsView</name>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"22\"/>\n        <location filename=\"../qml/SettingsView.qml\" line=\"61\"/>\n        <source>Settings</source>\n        <translation>Configuración</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"23\"/>\n        <source>Contains various application settings</source>\n        <translation>Contiene varias configuraciones de la aplicación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"29\"/>\n        <source>Application</source>\n        <translation>Aplicación</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"32\"/>\n        <source>Model</source>\n        <translation>Modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"35\"/>\n        <source>LocalDocs</source>\n        <translation>DocumentosLocales</translation>\n    
</message>\n</context>\n<context>\n    <name>StartupDialog</name>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"50\"/>\n        <source>Welcome!</source>\n        <translation>¡Bienvenido!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"67\"/>\n        <source>### Release Notes\n%1&lt;br/&gt;\n### Contributors\n%2</source>\n        <translation>### Notas de la versión\n%1&lt;br/&gt;\n### Colaboradores\n%2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"71\"/>\n        <source>Release notes</source>\n        <translation>Notas de la versión</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"72\"/>\n        <source>Release notes for this version</source>\n        <translation>Notas de la versión para esta versión</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"106\"/>\n        <source>Terms for opt-in</source>\n        <translation>Términos para aceptar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"107\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Describe lo que sucederá cuando acepte</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"118\"/>\n        <source>Opt-in to anonymous usage analytics used to improve GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"124\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"150\"/>\n        <source>Opt-in for anonymous usage statistics</source>\n        <translation>Aceptar estadísticas de uso anónimas</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/StartupDialog.qml\" line=\"147\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"262\"/>\n        <source>Yes</source>\n        <translation>Sí</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"151\"/>\n        <source>Allow opt-in for anonymous usage statistics</source>\n        <translation>Permitir aceptación de estadísticas de uso anónimas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"189\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"304\"/>\n        <source>No</source>\n        <translation>No</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"192\"/>\n        <source>Opt-out for anonymous usage statistics</source>\n        <translation>Rechazar estadísticas de uso anónimas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"193\"/>\n        <source>Allow opt-out for anonymous usage statistics</source>\n        <translation>Permitir rechazo de estadísticas de uso anónimas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"232\"/>\n        <source>Opt-in to anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"238\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"265\"/>\n        <source>Opt-in for network</source>\n        <translation>Aceptar para la red</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"239\"/>\n        <source>Allow opt-in for network</source>\n        <translation>Permitir aceptación para la red</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/StartupDialog.qml\" line=\"266\"/>\n        <source>Allow opt-in anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Permitir compartir anónimamente los chats con el Datalake de GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"307\"/>\n        <source>Opt-out for network</source>\n        <translation>Rechazar para la red</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"308\"/>\n        <source>Allow opt-out anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Permitir rechazar el compartir anónimo de chats con el Datalake de GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"87\"/>\n        <source>### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should; however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!</source>\n        <translation>### Consentimiento para análisis de uso anónimo y lago de datos\nAl habilitar estas funciones, podrá participar en el proceso democrático de entrenar un\nmodelo de lenguaje grande contribuyendo con datos para futuras mejoras del modelo.\n\nCuando un modelo GPT4All le responda y usted haya dado su consentimiento, su conversación se enviará al\nLago de Datos de Código Abierto de GPT4All. Además, puede indicar si le gusta o no su respuesta. Si no le gusta una respuesta,\npuede sugerir una respuesta alternativa. Estos datos se recopilarán y agregarán en el Lago de Datos de GPT4All.\n\nNOTA: Al activar esta función, estará enviando sus datos al Lago de Datos de Código Abierto de GPT4All.\nNo debe esperar privacidad en el chat cuando esta función esté habilitada. Sin embargo, puede\nesperar una atribución opcional si lo desea. Sus datos de chat estarán disponibles abiertamente para que cualquiera\nlos descargue y serán utilizados por Nomic AI para mejorar futuros modelos de GPT4All. Nomic AI conservará toda\nla información de atribución adjunta a sus datos y se le acreditará como contribuyente en cualquier\nlanzamiento de modelo GPT4All que utilice sus datos.</translation>\n    </message>\n</context>\n<context>\n    <name>SwitchModelDialog</name>\n    <message>\n        <source>&lt;b&gt;Warning:&lt;/b&gt; changing the model will erase the current conversation. Do you wish to continue?</source>\n        <translation type=\"vanished\">&lt;b&gt;Advertencia:&lt;/b&gt; cambiar el modelo borrará la conversación actual. 
¿Deseas continuar?</translation>\n    </message>\n    <message>\n        <source>Continue</source>\n        <translation type=\"vanished\">Continuar</translation>\n    </message>\n    <message>\n        <source>Continue with model loading</source>\n        <translation type=\"vanished\">Continuar con la carga del modelo</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">Cancelar</translation>\n    </message>\n</context>\n<context>\n    <name>ThumbsDownDialog</name>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"39\"/>\n        <source>Please edit the text below to provide a better response. (optional)</source>\n        <translation>Por favor, edite el texto a continuación para proporcionar una mejor respuesta. (opcional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"54\"/>\n        <source>Please provide a better response...</source>\n        <translation>Por favor, proporcione una mejor respuesta...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"64\"/>\n        <source>Submit</source>\n        <translation>Enviar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"65\"/>\n        <source>Submits the user&apos;s response</source>\n        <translation>Envía la respuesta del usuario</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"69\"/>\n        <source>Cancel</source>\n        <translation>Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"70\"/>\n        <source>Closes the response dialog</source>\n        <translation>Cierra el diálogo de respuesta</translation>\n    </message>\n</context>\n<context>\n    <name>main</name>\n    
<message>\n        <location filename=\"../main.qml\" line=\"24\"/>\n        <source>GPT4All v%1</source>\n        <translation>GPT4All v%1</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"47\"/>\n        <source>Restore</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"51\"/>\n        <source>Quit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"149\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Incompatible hardware detected.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.&lt;br&gt;&lt;br&gt;See here for more information: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</source>\n        <translation>&lt;h3&gt;Se encontró un error al iniciar:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Se detectó hardware incompatible.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Desafortunadamente, tu CPU no cumple con los requisitos mínimos para ejecutar este programa. En particular, no soporta instrucciones AVX, las cuales este programa requiere para ejecutar con éxito un modelo de lenguaje grande moderno. 
La única solución en este momento es actualizar tu hardware a una CPU más moderna.&lt;br&gt;&lt;br&gt;Consulta aquí para más información: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"165\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Inability to access settings file.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help.</source>\n        <translation>&lt;h3&gt;Se encontró un error al iniciar:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;No se puede acceder al archivo de configuración.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Desafortunadamente, algo está impidiendo que el programa acceda al archivo de configuración. Esto podría ser causado por permisos incorrectos en el directorio de configuración local de la aplicación donde se encuentra el archivo de configuración. 
Visita nuestro &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canal de Discord&lt;/a&gt; para obtener ayuda.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"193\"/>\n        <source>Connection to datalake failed.</source>\n        <translation>La conexión al datalake falló.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"204\"/>\n        <source>Saving chats.</source>\n        <translation>Guardando chats.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"215\"/>\n        <source>Network dialog</source>\n        <translation>Diálogo de red</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"216\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>optar por compartir comentarios/conversaciones</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"278\"/>\n        <source>Home view</source>\n        <translation>Vista de inicio</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"279\"/>\n        <source>Home view of application</source>\n        <translation>Vista de inicio de la aplicación</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"287\"/>\n        <source>Home</source>\n        <translation>Inicio</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"313\"/>\n        <source>Chat view</source>\n        <translation>Vista de chat</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"314\"/>\n        <source>Chat view to interact with models</source>\n        <translation>Vista de chat para interactuar con modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"322\"/>\n   
     <source>Chats</source>\n        <translation>Chats</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"347\"/>\n        <location filename=\"../main.qml\" line=\"356\"/>\n        <source>Models</source>\n        <translation>Modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"348\"/>\n        <source>Models view for installed models</source>\n        <translation>Vista de modelos para modelos instalados</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"381\"/>\n        <location filename=\"../main.qml\" line=\"390\"/>\n        <source>LocalDocs</source>\n        <translation>Docs\nLocales</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"382\"/>\n        <source>LocalDocs view to configure and use local docs</source>\n        <translation>Vista de DocumentosLocales para configurar y usar documentos locales</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"415\"/>\n        <location filename=\"../main.qml\" line=\"424\"/>\n        <source>Settings</source>\n        <translation>Config.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"416\"/>\n        <source>Settings view for application configuration</source>\n        <translation>Vista de configuración para la configuración de la aplicación</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"469\"/>\n        <source>The datalake is enabled</source>\n        <translation>El datalake está habilitado</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"471\"/>\n        <source>Using a network model</source>\n        <translation>Usando un modelo de red</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" 
line=\"473\"/>\n        <source>Server mode is enabled</source>\n        <translation>El modo servidor está habilitado</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"684\"/>\n        <source>Installed models</source>\n        <translation>Modelos instalados</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"685\"/>\n        <source>View of installed models</source>\n        <translation>Vista de modelos instalados</translation>\n    </message>\n</context>\n</TS>\n"
  },
  {
    "path": "gpt4all-chat/translations/gpt4all_it_IT.ts",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.1\" language=\"it_IT\">\n<context>\n    <name>AddCollectionView</name>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"45\"/>\n        <source>← Existing Collections</source>\n        <translation>← Raccolte esistenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"68\"/>\n        <source>Add Document Collection</source>\n        <translation>Aggiungi raccolta documenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"78\"/>\n        <source>Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.</source>\n        <translation>Aggiungi una cartella contenente file di testo semplice, PDF o Markdown. Configura estensioni aggiuntive in Settaggi.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"99\"/>\n        <source>Name</source>\n        <translation>Nome</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"114\"/>\n        <source>Collection name...</source>\n        <translation>Nome della raccolta...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"116\"/>\n        <source>Name of the collection to add (Required)</source>\n        <translation>Nome della raccolta da aggiungere (Obbligatorio)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"132\"/>\n        <source>Folder</source>\n        <translation>Cartella</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"149\"/>\n        <source>Folder path...</source>\n        <translation>Percorso 
cartella...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"152\"/>\n        <source>Folder path to documents (Required)</source>\n        <translation>Percorso della cartella dei documenti (richiesto)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"164\"/>\n        <source>Browse</source>\n        <translation>Esplora</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"177\"/>\n        <source>Create Collection</source>\n        <translation>Crea raccolta</translation>\n    </message>\n</context>\n<context>\n    <name>AddGPT4AllModelView</name>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"31\"/>\n        <source>These models have been specifically configured for use in GPT4All. The first few models on the list are known to work the best, but you should only attempt to use models that will fit in your available memory.</source>\n        <translation>Questi modelli sono stati specificamente configurati per l&apos;uso in GPT4All. 
I primi modelli dell&apos;elenco sono noti per funzionare meglio, ma dovresti utilizzare solo modelli che possano rientrare nella memoria disponibile.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"45\"/>\n        <source>Network error: could not retrieve %1</source>\n        <translation>Errore di rete: impossibile recuperare %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"55\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"343\"/>\n        <source>Busy indicator</source>\n        <translation>Indicatore di occupato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"56\"/>\n        <source>Displayed when the models request is ongoing</source>\n        <translation>Visualizzato quando la richiesta dei modelli è in corso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"65\"/>\n        <source>All</source>\n        <translation>Tutti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"85\"/>\n        <source>Reasoning</source>\n        <translation>Ragionamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"142\"/>\n        <source>Model file</source>\n        <translation>File del modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"143\"/>\n        <source>Model file to be downloaded</source>\n        <translation>File del modello da scaricare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"166\"/>\n        <source>Description</source>\n        <translation>Descrizione</translation>\n    </message>\n    <message>\n 
       <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"167\"/>\n        <source>File description</source>\n        <translation>Descrizione del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Cancel</source>\n        <translation>Annulla</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Resume</source>\n        <translation>Riprendi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Download</source>\n        <translation>Scarica</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"208\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Arresta/riavvia/avvia il download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"220\"/>\n        <source>Remove</source>\n        <translation>Rimuovi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"227\"/>\n        <source>Remove model from filesystem</source>\n        <translation>Rimuovi il modello dal sistema dei file</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"vanished\">Installa</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">Installa il modello online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"240\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font 
size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Errore&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"246\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Descrive un errore che si è verificato durante lo scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"259\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;AVVISO: non consigliato per il tuo hardware. Il modello richiede più memoria (%1 GB) di quella disponibile nel sistema (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"265\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Errore per hardware incompatibile</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"303\"/>\n        <source>Download progressBar</source>\n        <translation>Barra di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"304\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Mostra lo stato di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"314\"/>\n        <source>Download speed</source>\n        <translation>Velocità di scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" 
line=\"315\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>Velocità di scaricamento in byte/kilobyte/megabyte al secondo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"332\"/>\n        <source>Calculating...</source>\n        <translation>Calcolo in corso...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"336\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Se viene calcolato l&apos;hash del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"344\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>Visualizzato durante il calcolo dell&apos;hash del file</translation>\n    </message>\n    <message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"vanished\">ERRORE: $API_KEY è vuoto.</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">Inserire $API_KEY</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"vanished\">ERRORE: $BASE_URL non è valido.</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"vanished\">inserisci $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"vanished\">ERRORE: $MODEL_NAME è vuoto.</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"vanished\">inserisci $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"364\"/>\n        
<source>File size</source>\n        <translation>Dimensione del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"386\"/>\n        <source>RAM required</source>\n        <translation>RAM richiesta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"413\"/>\n        <source>?</source>\n        <translation>?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"408\"/>\n        <source>Parameters</source>\n        <translation>Parametri</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"430\"/>\n        <source>Quant</source>\n        <translation>Quant</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"452\"/>\n        <source>Type</source>\n        <translation>Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddHFModelView</name>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"32\"/>\n        <source>Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these will work. Many will require additional configuration before they can be used.</source>\n        <translation>Usa la ricerca per trovare e scaricare modelli da HuggingFace. NON C&apos;È ALCUNA GARANZIA che funzioneranno. 
Molti richiederanno configurazioni aggiuntive prima di poter essere utilizzati.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"52\"/>\n        <source>Discover and download models by keyword search...</source>\n        <translation>Scopri e scarica i modelli tramite ricerca per parole chiave...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"55\"/>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation>Campo di testo per scoprire e filtrare i modelli scaricabili</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"61\"/>\n        <source>Searching · %1</source>\n        <translation>Ricerca · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"131\"/>\n        <source>Initiate model discovery and filtering</source>\n        <translation>Avvia rilevamento e filtraggio dei modelli</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"132\"/>\n        <source>Triggers discovery and filtering of models</source>\n        <translation>Attiva la scoperta e il filtraggio dei modelli</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"151\"/>\n        <source>Default</source>\n        <translation>Predefinito</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"152\"/>\n        <source>Likes</source>\n        <translation>Mi piace</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"153\"/>\n        <source>Downloads</source>\n        <translation>Scaricamenti</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddHFModelView.qml\" line=\"154\"/>\n        <source>Recent</source>\n        <translation>Recenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"162\"/>\n        <source>Sort by: %1</source>\n        <translation>Ordina per: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"176\"/>\n        <source>Asc</source>\n        <translation>Asc</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"177\"/>\n        <source>Desc</source>\n        <translation>Disc</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"190\"/>\n        <source>Sort dir: %1</source>\n        <translation>Direzione ordinamento: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"212\"/>\n        <source>None</source>\n        <translation>Niente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"234\"/>\n        <source>Limit: %1</source>\n        <translation>Limite: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"297\"/>\n        <source>Model file</source>\n        <translation>File del modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"298\"/>\n        <source>Model file to be downloaded</source>\n        <translation>File del modello da scaricare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"321\"/>\n        <source>Description</source>\n        <translation>Descrizione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"322\"/>\n        <source>File 
description</source>\n        <translation>Descrizione del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Cancel</source>\n        <translation>Annulla</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Resume</source>\n        <translation>Riprendi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Download</source>\n        <translation>Scarica</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"363\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Arresta/riavvia/avvia il download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"375\"/>\n        <source>Remove</source>\n        <translation>Rimuovi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"382\"/>\n        <source>Remove model from filesystem</source>\n        <translation>Rimuovi il modello dal sistema dei file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"396\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"430\"/>\n        <source>Install</source>\n        <translation>Installa</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"431\"/>\n        <source>Install online model</source>\n        <translation>Installa il modello online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"441\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n     
   <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Errore&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"447\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Descrive un errore che si è verificato durante lo scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"460\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;AVVISO: non consigliato per il tuo hardware. Il modello richiede più memoria (%1 GB) di quella disponibile nel sistema (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"466\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Errore per hardware incompatibile</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"504\"/>\n        <source>Download progressBar</source>\n        <translation>Barra di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"505\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Mostra lo stato di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"515\"/>\n        <source>Download speed</source>\n        <translation>Velocità di scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" 
line=\"516\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>Velocità di scaricamento in byte/kilobyte/megabyte al secondo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"533\"/>\n        <source>Calculating...</source>\n        <translation>Calcolo in corso...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"537\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"567\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"588\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"609\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Se viene calcolato l&apos;hash del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"544\"/>\n        <source>Busy indicator</source>\n        <translation>Indicatore di occupato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"545\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>Visualizzato durante il calcolo dell&apos;hash del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"558\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERRORE: $API_KEY è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"564\"/>\n        <source>enter $API_KEY</source>\n        <translation>inserisci $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"579\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>ERRORE: $BASE_URL è vuoto.</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"585\"/>\n        <source>enter $BASE_URL</source>\n        <translation>inserisci $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"600\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERRORE: $MODEL_NAME è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"606\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>inserisci $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"628\"/>\n        <source>File size</source>\n        <translation>Dimensione del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"650\"/>\n        <source>Quant</source>\n        <translation>Quant</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"672\"/>\n        <source>Type</source>\n        <translation>Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddModelView</name>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"55\"/>\n        <source>← Existing Models</source>\n        <translation>← Modelli esistenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"75\"/>\n        <source>Explore Models</source>\n        <translation>Esplora modelli</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"86\"/>\n        <source>GPT4All</source>\n        <translation>GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"93\"/>\n        <source>Remote Providers</source>\n        <translation>Fornitori Remoti</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"100\"/>\n        <source>HuggingFace</source>\n        <translation>HuggingFace</translation>\n    </message>\n    <message>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"vanished\">Scopri e scarica i modelli tramite ricerca per parole chiave...</translation>\n    </message>\n    <message>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"vanished\">Campo di testo per scoprire e filtrare i modelli scaricabili</translation>\n    </message>\n    <message>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"vanished\">Avvia rilevamento e filtraggio dei modelli</translation>\n    </message>\n    <message>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"vanished\">Attiva la scoperta e il filtraggio dei modelli</translation>\n    </message>\n    <message>\n        <source>Default</source>\n        <translation type=\"vanished\">Predefinito</translation>\n    </message>\n    <message>\n        <source>Likes</source>\n        <translation type=\"vanished\">Mi piace</translation>\n    </message>\n    <message>\n        <source>Downloads</source>\n        <translation type=\"vanished\">Scaricamenti</translation>\n    </message>\n    <message>\n        <source>Recent</source>\n        <translation type=\"vanished\">Recenti</translation>\n    </message>\n    <message>\n        <source>Asc</source>\n        <translation type=\"vanished\">Asc</translation>\n    </message>\n    <message>\n        <source>Desc</source>\n        <translation type=\"vanished\">Disc</translation>\n    </message>\n    <message>\n        <source>None</source>\n        <translation type=\"vanished\">Niente</translation>\n    </message>\n    <message>\n        <source>Searching · %1</source>\n        
<translation type=\"vanished\">Ricerca · %1</translation>\n    </message>\n    <message>\n        <source>Sort by: %1</source>\n        <translation type=\"vanished\">Ordina per: %1</translation>\n    </message>\n    <message>\n        <source>Sort dir: %1</source>\n        <translation type=\"vanished\">Direzione ordinamento: %1</translation>\n    </message>\n    <message>\n        <source>Limit: %1</source>\n        <translation type=\"vanished\">Limite: %1</translation>\n    </message>\n    <message>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"vanished\">Errore di rete: impossibile recuperare %1</translation>\n    </message>\n    <message>\n        <source>Busy indicator</source>\n        <translation type=\"vanished\">Indicatore di occupato</translation>\n    </message>\n    <message>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"vanished\">Visualizzato quando la richiesta dei modelli è in corso</translation>\n    </message>\n    <message>\n        <source>Model file</source>\n        <translation type=\"vanished\">File del modello</translation>\n    </message>\n    <message>\n        <source>Model file to be downloaded</source>\n        <translation type=\"vanished\">File del modello da scaricare</translation>\n    </message>\n    <message>\n        <source>Description</source>\n        <translation type=\"vanished\">Descrizione</translation>\n    </message>\n    <message>\n        <source>File description</source>\n        <translation type=\"vanished\">Descrizione del file</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">Annulla</translation>\n    </message>\n    <message>\n        <source>Resume</source>\n        <translation type=\"vanished\">Riprendi</translation>\n    </message>\n    <message>\n        <source>Download</source>\n        <translation 
type=\"vanished\">Scarica</translation>\n    </message>\n    <message>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"vanished\">Arresta/riavvia/avvia il download</translation>\n    </message>\n    <message>\n        <source>Remove</source>\n        <translation type=\"vanished\">Rimuovi</translation>\n    </message>\n    <message>\n        <source>Remove model from filesystem</source>\n        <translation type=\"vanished\">Rimuovi il modello dal sistema dei file</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"vanished\">Installa</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">Installa il modello online</translation>\n    </message>\n    <message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"vanished\">ERRORE: $API_KEY è vuoto.</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"vanished\">ERRORE: $BASE_URL non è valido.</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"vanished\">inserisci $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"vanished\">ERRORE: $MODEL_NAME è vuoto.</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"vanished\">inserisci $MODEL_NAME</translation>\n    </message>\n    <message>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"vanished\">Descrive un errore che si è verificato durante lo scaricamento</translation>\n    </message>\n    <message>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a 
href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"vanished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Errore&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"vanished\">Errore per hardware incompatibile</translation>\n    </message>\n    <message>\n        <source>Download progressBar</source>\n        <translation type=\"vanished\">Barra di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"vanished\">Mostra lo stato di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <source>Download speed</source>\n        <translation type=\"vanished\">Velocità di scaricamento</translation>\n    </message>\n    <message>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"vanished\">Velocità di scaricamento in byte/kilobyte/megabyte al secondo</translation>\n    </message>\n    <message>\n        <source>Calculating...</source>\n        <translation type=\"vanished\">Calcolo in corso...</translation>\n    </message>\n    <message>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"vanished\">Se viene calcolato l&apos;hash del file</translation>\n    </message>\n    <message>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"vanished\">Visualizzato durante il calcolo dell&apos;hash del file</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">Inserire $API_KEY</translation>\n    </message>\n    <message>\n        <source>File size</source>\n        <translation 
type=\"vanished\">Dimensione del file</translation>\n    </message>\n    <message>\n        <source>RAM required</source>\n        <translation type=\"vanished\">RAM richiesta</translation>\n    </message>\n    <message>\n        <source>Parameters</source>\n        <translation type=\"vanished\">Parametri</translation>\n    </message>\n    <message>\n        <source>Quant</source>\n        <translation type=\"vanished\">Quant</translation>\n    </message>\n    <message>\n        <source>Type</source>\n        <translation type=\"vanished\">Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddRemoteModelView</name>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"31\"/>\n        <source>Various remote model providers that use network resources for inference.</source>\n        <translation>Vari fornitori di modelli remoti che utilizzano risorse di rete per l'inferenza.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"55\"/>\n        <source>Groq</source>\n        <translation>Groq</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"57\"/>\n        <source>Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</source>\n        <translation>Groq offre un motore di inferenza AI ad alte prestazioni progettato per una latenza ridotta ed elaborazione efficiente. 
Ottimizzata per applicazioni in tempo reale, la tecnologia di Groq è ideale per utenti che necessitano di risposte rapide da modelli linguistici di grandi dimensioni aperti e altri carichi di lavoro AI.&lt;br&gt;&lt;br&gt;Ottieni la tua chiave API: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"79\"/>\n        <source>OpenAI</source>\n        <translation>OpenAI</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"81\"/>\n        <source>OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</source>\n        <translation>OpenAI fornisce accesso a modelli AI avanzati, tra cui GPT-4, supportando un&#x27;ampia gamma di applicazioni, dall&#x27;AI conversazionale alla generazione di contenuti e al completamento del codice.&lt;br&gt;&lt;br&gt;Ottieni la tua chiave API: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"96\"/>\n        <source>Mistral</source>\n        <translation>Mistral</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"98\"/>\n        <source>Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. 
Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</source>\n        <translation>Mistral AI è specializzata in modelli linguistici open-weight efficienti, ottimizzati per diverse attività di elaborazione del linguaggio naturale. I loro modelli sono progettati per flessibilità e prestazioni, rendendoli una solida opzione per applicazioni che richiedono soluzioni AI scalabili.&lt;br&gt;&lt;br&gt;Ottieni la tua chiave API: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"141\"/>\n        <source>Custom</source>\n        <translation>Personalizzato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"143\"/>\n        <source>The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.</source>\n        <translation>L&#x27;opzione fornitore personalizzato consente agli utenti di connettere i propri modelli AI compatibili con OpenAI o servizi di inferenza di terze parti. 
Questa funzione è utile per organizzazioni con modelli proprietari o per chi utilizza fornitori AI di nicchia non elencati qui.</translation>\n    </message>\n</context>\n<context>\n    <name>ApplicationSettings</name>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"16\"/>\n        <source>Application</source>\n        <translation>Applicazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"25\"/>\n        <source>Network dialog</source>\n        <translation>Dialogo di rete</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"26\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>aderisci per condividere feedback/conversazioni</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"48\"/>\n        <source>Error dialog</source>\n        <translation>Dialogo d&apos;errore</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"72\"/>\n        <source>Application Settings</source>\n        <translation>Settaggi applicazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"85\"/>\n        <source>General</source>\n        <translation>Generale</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"97\"/>\n        <source>Theme</source>\n        <translation>Tema</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"98\"/>\n        <source>The application color scheme.</source>\n        <translation>La combinazione di colori dell&apos;applicazione.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" 
line=\"113\"/>\n        <source>Dark</source>\n        <translation>Scuro</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"112\"/>\n        <source>Light</source>\n        <translation>Chiaro</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"39\"/>\n        <source>ERROR: Update system could not find the MaintenanceTool used to check for updates!&lt;br/&gt;&lt;br/&gt;Did you install this application using the online installer? If so, the MaintenanceTool executable should be located one directory above where this application resides on your filesystem.&lt;br/&gt;&lt;br/&gt;If you can&apos;t start it manually, then I&apos;m afraid you&apos;ll have to reinstall.</source>\n        <translation>ERRORE: il sistema di aggiornamento non è riuscito a trovare MaintenanceTool utilizzato per verificare la presenza di aggiornamenti!&lt;br/&gt;&lt;br/&gt;Hai installato questa applicazione tramite l&apos;installer online? 
In tal caso, l&apos;eseguibile MaintenanceTool dovrebbe trovarsi una directory sopra quella in cui risiede questa applicazione sul tuo file system.&lt;br/&gt;&lt;br/&gt;Se non riesci ad avviarlo manualmente, temo che dovrai reinstallarlo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"114\"/>\n        <source>LegacyDark</source>\n        <translation>Scuro Legacy</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"136\"/>\n        <source>Font Size</source>\n        <translation>Dimensioni del Font</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"137\"/>\n        <source>The size of text in the application.</source>\n        <translation>La dimensione del testo nell&apos;applicazione.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"151\"/>\n        <source>Small</source>\n        <translation>Piccolo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"152\"/>\n        <source>Medium</source>\n        <translation>Medio</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"153\"/>\n        <source>Large</source>\n        <translation>Grande</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"176\"/>\n        <source>Language and Locale</source>\n        <translation>Lingua e settaggi locali</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"177\"/>\n        <source>The language and locale you wish to use.</source>\n        <translation>La lingua e i settaggi locali che vuoi utilizzare.</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ApplicationSettings.qml\" line=\"196\"/>\n        <source>System Locale</source>\n        <translation>Settaggi locali del sistema</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"223\"/>\n        <source>Device</source>\n        <translation>Dispositivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"224\"/>\n        <source>The compute device used for text generation.</source>\n        <translation>Il dispositivo di calcolo utilizzato per la generazione del testo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"242\"/>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"297\"/>\n        <source>Application default</source>\n        <translation>Applicazione predefinita</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"275\"/>\n        <source>Default Model</source>\n        <translation>Modello predefinito</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"276\"/>\n        <source>The preferred model for new chats. Also used as the local server fallback.</source>\n        <translation>Il modello preferito per le nuove chat. 
Utilizzato anche come ripiego del server locale.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"339\"/>\n        <source>Suggestion Mode</source>\n        <translation>Modalità suggerimento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"340\"/>\n        <source>Generate suggested follow-up questions at the end of responses.</source>\n        <translation>Genera le domande di approfondimento suggerite alla fine delle risposte.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"353\"/>\n        <source>When chatting with LocalDocs</source>\n        <translation>Quando chatti con LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"354\"/>\n        <source>Whenever possible</source>\n        <translation>Quando possibile</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"355\"/>\n        <source>Never</source>\n        <translation>Mai</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"368\"/>\n        <source>Download Path</source>\n        <translation>Percorso di scarico</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"369\"/>\n        <source>Where to store local models and the LocalDocs database.</source>\n        <translation>Dove archiviare i modelli locali e il database LocalDocs.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"401\"/>\n        <source>Browse</source>\n        <translation>Esplora</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"402\"/>\n        
<source>Choose where to save model files</source>\n        <translation>Scegli dove salvare i file del modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"413\"/>\n        <source>Enable Datalake</source>\n        <translation>Abilita Datalake</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"414\"/>\n        <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>\n        <translation>Invia chat e commenti al Datalake Open Source GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"447\"/>\n        <source>Advanced</source>\n        <translation>Avanzate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"459\"/>\n        <source>CPU Threads</source>\n        <translatorcomment>Thread della CPU</translatorcomment>\n        <translation>Thread CPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"460\"/>\n        <source>The number of CPU threads used for inference and embedding.</source>\n        <translation>Il numero di thread della CPU utilizzati per l&apos;inferenza e l&apos;incorporamento.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"491\"/>\n        <source>Enable System Tray</source>\n        <translation>Abilita la barra delle applicazioni</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"492\"/>\n        <source>The application will minimize to the system tray when the window is closed.</source>\n        <translation>Quando la finestra viene chiusa, l&apos;applicazione verrà ridotta a icona nella barra delle applicazioni.</translation>\n    </message>\n    <message>\n       
 <location filename=\"../qml/ApplicationSettings.qml\" line=\"508\"/>\n        <source>Enable Local API Server</source>\n        <translation>Abilita il server API locale</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"509\"/>\n        <source>Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage.</source>\n        <translation>Esporre un server compatibile con OpenAI a localhost. ATTENZIONE: comporta un maggiore utilizzo delle risorse.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"525\"/>\n        <source>API Server Port</source>\n        <translation>Porta del server API</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"526\"/>\n        <source>The port to use for the local server. Requires restart.</source>\n        <translation>La porta da utilizzare per il server locale. 
Richiede il riavvio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"578\"/>\n        <source>Check For Updates</source>\n        <translation>Controlla gli aggiornamenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"579\"/>\n        <source>Manually check for an update to GPT4All.</source>\n        <translation>Verifica manualmente l&apos;aggiornamento di GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"588\"/>\n        <source>Updates</source>\n        <translation>Aggiornamenti</translation>\n    </message>\n</context>\n<context>\n    <name>Chat</name>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"33\"/>\n        <location filename=\"../src/chat.h\" line=\"84\"/>\n        <source>New Chat</source>\n        <translation>Nuova Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"46\"/>\n        <source>Server Chat</source>\n        <translation>Chat del server</translation>\n    </message>\n</context>\n<context>\n    <name>ChatAPIWorker</name>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"263\"/>\n        <source>ERROR: Network error occurred while connecting to the API server</source>\n        <translation>ERRORE: si è verificato un errore di rete durante la connessione al server API</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"276\"/>\n        <source>ChatAPIWorker::handleFinished got HTTP Error %1 %2</source>\n        <translation>ChatAPIWorker::handleFinished ha ricevuto l&apos;errore HTTP %1 %2</translation>\n    </message>\n</context>\n<context>\n    <name>ChatCollapsibleItem</name>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"37\"/>\n        
<source>Analysis encountered error</source>\n        <translation>Errore durante l'analisi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Thinking</source>\n        <translation>Elaborazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Analyzing</source>\n        <translation>Analisi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"41\"/>\n        <source>Thought for %1 %2</source>\n        <translation>Elaborato per %1 %2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>second</source>\n        <translation>secondo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>seconds</source>\n        <translation>secondi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"44\"/>\n        <source>Analyzed</source>\n        <translation>Analisi completata</translation>\n    </message>\n</context>\n<context>\n    <name>ChatDrawer</name>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"37\"/>\n        <source>Drawer</source>\n        <translation>Cassetto</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"38\"/>\n        <source>Main navigation drawer</source>\n        <translation>Cassetto di navigazione principale</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"49\"/>\n        <source>＋ New Chat</source>\n        <translation>＋ Nuova Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"50\"/>\n     
   <source>Create a new chat</source>\n        <translation>Crea una nuova chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"199\"/>\n        <source>Select the current chat or edit the chat when in edit mode</source>\n        <translation>Seleziona la chat corrente o modifica la chat in modalità modifica</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"216\"/>\n        <source>Edit chat name</source>\n        <translation>Modifica il nome della chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"229\"/>\n        <source>Save chat name</source>\n        <translation>Salva il nome della chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"246\"/>\n        <source>Delete chat</source>\n        <translation>Elimina chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"283\"/>\n        <source>Confirm chat deletion</source>\n        <translation>Conferma l&apos;eliminazione della chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"305\"/>\n        <source>Cancel chat deletion</source>\n        <translation>Annulla l&apos;eliminazione della chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"317\"/>\n        <source>List of chats</source>\n        <translation>Elenco delle chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"318\"/>\n        <source>List of chats in the drawer dialog</source>\n        <translation>Elenco delle chat nella finestra di dialogo del cassetto</translation>\n    </message>\n</context>\n<context>\n    <name>ChatItemView</name>\n    <message>\n        <location 
filename=\"../qml/ChatItemView.qml\" line=\"83\"/>\n        <source>GPT4All</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"84\"/>\n        <source>You</source>\n        <translation>Tu</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"107\"/>\n        <source>response stopped ...</source>\n        <translation>risposta interrotta ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"108\"/>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation>recupero documenti locali: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"109\"/>\n        <source>searching localdocs: %1 ...</source>\n        <translation>ricerca in documenti locali: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"110\"/>\n        <source>processing ...</source>\n        <translation>elaborazione ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"111\"/>\n        <source>generating response ...</source>\n        <translation>generazione risposta ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"112\"/>\n        <source>generating questions ...</source>\n        <translation>generazione domande ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"113\"/>\n        <source>generating toolcall ...</source>\n        <translation>generazione chiamata strumento ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"545\"/>\n        <source>Copy</source>\n        <translation>Copia</translation>\n    </message>\n    
<message>\n        <source>Copy Message</source>\n        <translation type=\"vanished\">Copia messaggio</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"vanished\">Disabilita Markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"vanished\">Abilita Markdown</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/ChatItemView.qml\" line=\"283\"/>\n        <source>%n Source(s)</source>\n        <translation>\n            <numerusform>%n Fonte</numerusform>\n            <numerusform>%n Fonti</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"430\"/>\n        <source>LocalDocs</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"460\"/>\n        <source>Edit this message?</source>\n        <translation>Vuoi modificare questo messaggio?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"461\"/>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"472\"/>\n        <source>All following messages will be permanently erased.</source>\n        <translation>Tutti i messaggi successivi verranno cancellati definitivamente.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"471\"/>\n        <source>Redo this response?</source>\n        <translation>Ripetere questa risposta?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"495\"/>\n        <source>Cannot edit chat without a loaded model.</source>\n        <translation>Non è possibile modificare la chat senza un modello caricato.</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatItemView.qml\" line=\"497\"/>\n        <source>Cannot edit chat while the model is generating.</source>\n        <translation>Impossibile modificare la chat mentre il modello è in fase di generazione.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"506\"/>\n        <source>Edit</source>\n        <translation>Modifica</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"517\"/>\n        <source>Cannot redo response without a loaded model.</source>\n        <translation>Non è possibile ripetere la risposta senza un modello caricato.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"519\"/>\n        <source>Cannot redo response while the model is generating.</source>\n        <translation>Impossibile ripetere la risposta mentre il modello è in fase di generazione.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"528\"/>\n        <source>Redo</source>\n        <translation>Ripeti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"565\"/>\n        <source>Like response</source>\n        <translation>Mi piace la risposta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"594\"/>\n        <source>Dislike response</source>\n        <translation>Non mi piace la risposta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"657\"/>\n        <source>Suggested follow-ups</source>\n        <translation>Approfondimenti suggeriti</translation>\n    </message>\n</context>\n<context>\n    <name>ChatLLM</name>\n    <message>\n        <location filename=\"../src/chatllm.cpp\" line=\"1047\"/>\n        <source>Your message was too long and could not be processed (%1 &gt; 
%2). Please try again with something shorter.</source>\n        <translation>Il messaggio era troppo lungo e non è stato possibile elaborarlo (%1 &gt; %2). Riprova con un messaggio più breve.</translation>\n    </message>\n</context>\n<context>\n    <name>ChatListModel</name>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"94\"/>\n        <source>TODAY</source>\n        <translation>OGGI</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"96\"/>\n        <source>THIS WEEK</source>\n        <translation>QUESTA SETTIMANA</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"98\"/>\n        <source>THIS MONTH</source>\n        <translation>QUESTO MESE</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"100\"/>\n        <source>LAST SIX MONTHS</source>\n        <translation>ULTIMI SEI MESI</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"102\"/>\n        <source>THIS YEAR</source>\n        <translation>QUEST&apos;ANNO</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"104\"/>\n        <source>LAST YEAR</source>\n        <translation>L&apos;ANNO SCORSO</translation>\n    </message>\n</context>\n<context>\n    <name>ChatTextItem</name>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"67\"/>\n        <source>Copy</source>\n        <translation>Copia</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"73\"/>\n        <source>Copy Message</source>\n        <translation>Copia messaggio</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Disable markdown</source>\n        <translation>Disabilita 
Markdown</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Enable markdown</source>\n        <translation>Abilita Markdown</translation>\n    </message>\n</context>\n<context>\n    <name>ChatView</name>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;Warning&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</source>\n        <translation>&lt;h3&gt;Avviso&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"99\"/>\n        <source>Conversation copied to clipboard.</source>\n        <translation>Conversazione copiata negli appunti.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"106\"/>\n        <source>Code copied to clipboard.</source>\n        <translation>Codice copiato negli appunti.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"113\"/>\n        <source>The entire chat will be erased.</source>\n        <translation>L&apos;intera chat verrà cancellata.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"248\"/>\n        <source>Chat panel</source>\n        <translation>Pannello chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"249\"/>\n        <source>Chat panel with options</source>\n        <translation>Pannello chat con opzioni</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"356\"/>\n        <source>Reload the currently loaded model</source>\n        <translation>Ricarica il modello attualmente caricato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"370\"/>\n        <source>Eject the currently loaded model</source>\n        
<translation>Espelli il modello attualmente caricato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"382\"/>\n        <source>No model installed.</source>\n        <translation>Nessun modello installato.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"384\"/>\n        <source>Model loading error.</source>\n        <translation>Errore di caricamento del modello.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"386\"/>\n        <source>Waiting for model...</source>\n        <translation>In attesa del modello...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"388\"/>\n        <source>Switching context...</source>\n        <translation>Cambio contesto...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"390\"/>\n        <source>Choose a model...</source>\n        <translation>Scegli un modello...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"392\"/>\n        <source>Not found: %1</source>\n        <translation>Non trovato: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"480\"/>\n        <source>The top item is the current model</source>\n        <translation>L&apos;elemento in alto è il modello attuale</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"566\"/>\n        <source>LocalDocs</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"584\"/>\n        <source>Add documents</source>\n        <translation>Aggiungi documenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"585\"/>\n        
<source>add collections of documents to the chat</source>\n        <translation>aggiungi raccolte di documenti alla chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"756\"/>\n        <source>Load the default model</source>\n        <translation>Carica il modello predefinito</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"757\"/>\n        <source>Loads the default model which can be changed in settings</source>\n        <translation>Carica il modello predefinito che può essere modificato nei settaggi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"768\"/>\n        <source>No Model Installed</source>\n        <translation>Nessun modello installato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"777\"/>\n        <source>GPT4All requires that you install at least one\nmodel to get started</source>\n        <translation>GPT4All richiede l&apos;installazione di almeno un\nmodello per iniziare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"789\"/>\n        <source>Install a Model</source>\n        <translation>Installa un modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"794\"/>\n        <source>Shows the add model view</source>\n        <translation>Mostra la vista aggiungi modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"819\"/>\n        <source>Conversation with the model</source>\n        <translation>Conversazione con il modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"820\"/>\n        <source>prompt / response pairs from the conversation</source>\n        <translation>coppie prompt/risposta dalla 
conversazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1095\"/>\n        <source>Legacy prompt template needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation>Il modello di prompt precedente deve essere &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;aggiornato&lt;/a&gt; nei Settaggi.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1099\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation>Nessun &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;modello di chat&lt;/a&gt; configurato.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1102\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation>Il &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;modello di chat&lt;/a&gt; non può essere vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1105\"/>\n        <source>Legacy system prompt needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation>Il prompt del sistema precedente deve essere &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;aggiornato&lt;/a&gt; nei Settaggi.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1293\"/>\n        <source>Copy</source>\n        
<translation>Copia</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"924\"/>\n        <source>Erase and reset chat session</source>\n        <translation>Cancella e ripristina la sessione di chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"942\"/>\n        <source>Copy chat session to clipboard</source>\n        <translation>Copia la sessione di chat negli appunti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1216\"/>\n        <source>Add media</source>\n        <translation>Aggiungi contenuti multimediali</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1217\"/>\n        <source>Adds media to the prompt</source>\n        <translation>Aggiunge contenuti multimediali al prompt</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1351\"/>\n        <source>Stop generating</source>\n        <translation>Interrompi la generazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1352\"/>\n        <source>Stop the current response generation</source>\n        <translation>Arresta la generazione della risposta corrente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1396\"/>\n        <source>Attach</source>\n        <translation>Allega</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1398\"/>\n        <source>Single File</source>\n        <translation>File singolo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1007\"/>\n        <source>Reloads the model</source>\n        <translation>Ricarica il modello</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"66\"/>\n        <source>&lt;h3&gt;Encountered an error loading model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a compatible format and type&lt;li&gt;Check the model file is complete in the download folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If you&apos;ve sideloaded the model ensure the file is not corrupt by checking md5sum&lt;li&gt;Read more about what models are supported in our &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the gui&lt;li&gt;Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation>&lt;h3&gt;Si è verificato un errore durante il caricamento del modello:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Gli errori di caricamento del modello possono verificarsi per diversi motivi, ma le cause più comuni includono un formato di file non valido, un download incompleto o danneggiato, il tipo di file sbagliato, RAM di sistema insufficiente o un tipo di modello incompatibile. 
Ecco alcuni suggerimenti per risolvere il problema:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Assicurati che il file del modello abbia un formato e un tipo compatibili&lt;li&gt;Verifica che il file del modello sia completo nella cartella di download&lt;li&gt;Puoi trovare la cartella di download nella finestra di dialogo dei settaggi&lt;li&gt;Se hai scaricato manualmente il modello, assicurati che il file non sia danneggiato controllando md5sum&lt;li&gt;Leggi ulteriori informazioni su quali modelli sono supportati nella nostra &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentazione&lt;/a&gt; per la GUI&lt;li&gt;Consulta il nostro &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canale Discord&lt;/a&gt; per assistenza</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"92\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"112\"/>\n        <source>Erase conversation?</source>\n        <translation>Cancellare la conversazione?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"93\"/>\n        <source>Changing the model will erase the current conversation.</source>\n        <translation>La modifica del modello cancellerà la conversazione corrente.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"394\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"1005\"/>\n        <source>Reload · %1</source>\n        <translation>Ricarica · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"396\"/>\n        <source>Loading · %1</source>\n        <translation>Caricamento · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"732\"/>\n        <source>Load · %1 (default) →</source>\n        <translation>Carica · %1 (predefinito) →</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Send a message...</source>\n        <translation>Manda un messaggio...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Load a model to continue...</source>\n        <translation>Carica un modello per continuare...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1246\"/>\n        <source>Send messages/prompts to the model</source>\n        <translation>Invia messaggi/prompt al modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1287\"/>\n        <source>Cut</source>\n        <translation>Taglia</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1299\"/>\n        <source>Paste</source>\n        <translation>Incolla</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1303\"/>\n        <source>Select All</source>\n        <translation>Seleziona tutto</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1374\"/>\n        <source>Send message</source>\n        <translation>Invia messaggio</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1375\"/>\n        <source>Sends the message/prompt contained in textfield to the model</source>\n        <translation>Invia il messaggio/prompt contenuto nel campo di testo al modello</translation>\n    </message>\n</context>\n<context>\n    <name>CodeInterpreter</name>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"79\"/>\n        <source>Code Interpreter</source>\n        <translation>Interprete di codice</translation>\n    </message>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"80\"/>\n        
<source>compute javascript code using console.log as output</source>\n        <translation>Esegue codice JavaScript utilizzando console.log come output</translation>\n    </message>\n</context>\n<context>\n    <name>CollectionsDrawer</name>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"70\"/>\n        <source>Warning: searching collections while indexing can return incomplete results</source>\n        <translation>Avviso: la ricerca nelle raccolte durante l&apos;indicizzazione può restituire risultati incompleti</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n file</numerusform>\n            <numerusform>%n file</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n parola</numerusform>\n            <numerusform>%n parole</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"103\"/>\n        <source>Updating</source>\n        <translation>In aggiornamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"128\"/>\n        <source>＋ Add Docs</source>\n        <translation>＋ Aggiungi documenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"137\"/>\n        <source>Select a collection to make it available to the chat model.</source>\n        <translation>Seleziona una raccolta per renderla disponibile al modello in chat.</translation>\n    </message>\n</context>\n<context>\n    <name>ConfirmationDialog</name>\n    <message>\n        <location 
filename=\"../qml/ConfirmationDialog.qml\" line=\"42\"/>\n        <source>OK</source>\n        <translation>OK</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"49\"/>\n        <source>Cancel</source>\n        <translation>Annulla</translation>\n    </message>\n</context>\n<context>\n    <name>Download</name>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"278\"/>\n        <source>Model &quot;%1&quot; is installed successfully.</source>\n        <translation>Il modello &quot;%1&quot; è stato installato correttamente.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"288\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERRORE: $MODEL_NAME è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"294\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERRORE: $API_KEY è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"300\"/>\n        <source>ERROR: $BASE_URL is invalid.</source>\n        <translation>ERRORE: $BASE_URL non è valido.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"306\"/>\n        <source>ERROR: Model &quot;%1 (%2)&quot; is conflict.</source>\n        <translation>ERRORE: il modello &quot;%1 (%2)&quot; è in conflitto.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"325\"/>\n        <source>Model &quot;%1 (%2)&quot; is installed successfully.</source>\n        <translation>Il modello &quot;%1 (%2)&quot; è stato installato correttamente.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"349\"/>\n        <source>Model &quot;%1&quot; is removed.</source>\n        <translation>Il 
modello &quot;%1&quot; è stato rimosso.</translation>\n    </message>\n</context>\n<context>\n    <name>HomeView</name>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"49\"/>\n        <source>Welcome to GPT4All</source>\n        <translation>Benvenuto in GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"56\"/>\n        <source>The privacy-first LLM chat application</source>\n        <translation>L&apos;applicazione di chat LLM che mette al primo posto la privacy</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"66\"/>\n        <source>Start chatting</source>\n        <translation>Inizia a chattare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"81\"/>\n        <source>Start Chatting</source>\n        <translation>Inizia a Chattare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"82\"/>\n        <source>Chat with any LLM</source>\n        <translation>Chatta con qualsiasi LLM</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"92\"/>\n        <source>LocalDocs</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"93\"/>\n        <source>Chat with your local files</source>\n        <translation>Chatta con i tuoi file locali</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"103\"/>\n        <source>Find Models</source>\n        <translation>Trova modelli</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"104\"/>\n        <source>Explore and download models</source>\n        <translation>Esplora e scarica i modelli</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/HomeView.qml\" line=\"190\"/>\n        <source>Latest news</source>\n        <translation>Ultime notizie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"191\"/>\n        <source>Latest news from GPT4All</source>\n        <translation>Ultime notizie da GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"222\"/>\n        <source>Release Notes</source>\n        <translation>Note di rilascio</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"228\"/>\n        <source>Documentation</source>\n        <translation>Documentazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"234\"/>\n        <source>Discord</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"240\"/>\n        <source>X (Twitter)</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"246\"/>\n        <source>Github</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"257\"/>\n        <source>nomic.ai</source>\n        <translation>nomic.ai</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"282\"/>\n        <source>Subscribe to Newsletter</source>\n        <translation>Iscriviti alla Newsletter</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsSettings</name>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"19\"/>\n        <source>LocalDocs</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"29\"/>\n        <source>LocalDocs 
Settings</source>\n        <translation>Settaggi LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"38\"/>\n        <source>Indexing</source>\n        <translation>Indicizzazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"51\"/>\n        <source>Allowed File Extensions</source>\n        <translation>Estensioni di file consentite</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"52\"/>\n        <source>Comma-separated list. LocalDocs will only attempt to process files with these extensions.</source>\n        <translation>Elenco separato da virgole. LocalDocs tenterà di elaborare solo file con queste estensioni.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"100\"/>\n        <source>Embedding</source>\n        <translatorcomment>Questo termine si dovrebbe tradurre come &quot;Incorporamento&quot;. This term has been translated in other applications like A1111 and InvokeAI as &quot;Incorporamento&quot;</translatorcomment>\n        <translation>Incorporamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"112\"/>\n        <source>Use Nomic Embed API</source>\n        <translation>Utilizza l&apos;API di incorporamento Nomic Embed</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"113\"/>\n        <source>Embed documents using the fast Nomic API instead of a private local model. Requires restart.</source>\n        <translation>Incorpora documenti utilizzando la veloce API di Nomic invece di un modello locale privato. 
Richiede il riavvio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"130\"/>\n        <source>Nomic API Key</source>\n        <translation>Chiave API di Nomic</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"131\"/>\n        <source>API key to use for Nomic Embed. Get one from the Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API keys page&lt;/a&gt;. Requires restart.</source>\n        <translation>Chiave API da utilizzare per Nomic Embed. Ottienine una dalla &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;pagina delle chiavi API&lt;/a&gt; di Atlas. Richiede il riavvio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"165\"/>\n        <source>Embeddings Device</source>\n        <translation>Dispositivo per incorporamenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"166\"/>\n        <source>The compute device used for embeddings. Requires restart.</source>\n        <translation>Il dispositivo di calcolo utilizzato per gli incorporamenti. 
Richiede il riavvio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"176\"/>\n        <source>Application default</source>\n        <translation>Predefinito dell&apos;applicazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"211\"/>\n        <source>Display</source>\n        <translation>Mostra</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"224\"/>\n        <source>Show Sources</source>\n        <translation>Mostra le fonti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"225\"/>\n        <source>Display the sources used for each response.</source>\n        <translation>Visualizza le fonti utilizzate per ciascuna risposta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"242\"/>\n        <source>Advanced</source>\n        <translation>Avanzate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"258\"/>\n        <source>Warning: Advanced usage only.</source>\n        <translation>Avvertenza: solo per uso avanzato.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"259\"/>\n        <source>Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model&apos;s context window. More info &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;here&lt;/a&gt;.</source>\n        <translation>Valori troppo grandi possono causare errori di LocalDocs, risposte estremamente lente o l&apos;impossibilità di rispondere. In parole povere, {N caratteri x N frammenti} vengono aggiunti alla finestra di contesto del modello. 
Maggiori informazioni &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;qui&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"267\"/>\n        <source>Document snippet size (characters)</source>\n        <translation>Dimensioni del frammento di documento (caratteri)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"268\"/>\n        <source>Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Numero di caratteri per frammento di documento. Numeri più grandi aumentano la probabilità di risposte basate sui fatti, ma comportano anche una generazione più lenta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"293\"/>\n        <source>Max document snippets per prompt</source>\n        <translation>Numero massimo di frammenti di documento per prompt</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"294\"/>\n        <source>Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Il numero massimo di frammenti di documento recuperati che presentano le migliori corrispondenze, da includere nel contesto del prompt. 
Numeri più alti aumentano la probabilità di ricevere risposte basate sui fatti, ma comportano anche una generazione più lenta.</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsView</name>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"52\"/>\n        <source>LocalDocs</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"58\"/>\n        <source>Chat with your local files</source>\n        <translation>Chatta con i tuoi file locali</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"71\"/>\n        <source>＋ Add Collection</source>\n        <translation>＋ Aggiungi raccolta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;ERROR: The LocalDocs database cannot be accessed or is not valid.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Note: You will need to restart after trying any of the following suggested fixes.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Make sure that the folder set as &lt;b&gt;Download Path&lt;/b&gt; exists on the file system.&lt;/li&gt;&lt;li&gt;Check ownership as well as read and write permissions of the &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;If there is a &lt;b&gt;localdocs_v2.db&lt;/b&gt; file, check its ownership and read/write permissions, too.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;If the problem persists and there are any &apos;localdocs_v*.db&apos; files present, as a last resort you can&lt;br&gt;try backing them up and removing them. 
You will have to recreate your collections, however.</source>\n        <translation>&lt;h3&gt;ERRORE: Impossibile accedere al database LocalDocs o non è valido.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Nota: sarà necessario riavviare dopo aver provato una delle seguenti soluzioni suggerite.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Assicurati che la cartella impostata come &lt;b&gt;Percorso di download&lt;/b&gt; esista nel file system.&lt;/li&gt;&lt;li&gt;Controlla la proprietà e i permessi di lettura e scrittura del &lt;b&gt;Percorso di download&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;Se è presente un file &lt;b&gt;localdocs_v2.db&lt;/b&gt;, controlla anche la sua proprietà e i permessi di lettura/scrittura.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;Se il problema persiste e sono presenti file &apos;localdocs_v*.db&apos;, come ultima risorsa puoi&lt;br&gt;provare a eseguirne il backup e a rimuoverli. Tuttavia, dovrai ricreare le tue raccolte.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"109\"/>\n        <source>No Collections Installed</source>\n        <translation>Nessuna raccolta installata</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"118\"/>\n        <source>Install a collection of local documents to get started using this feature</source>\n        <translation>Installa una raccolta di documenti locali per iniziare a utilizzare questa funzionalità</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"129\"/>\n        <source>＋ Add Doc Collection</source>\n        <translation>＋ Aggiungi raccolta di documenti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"134\"/>\n        <source>Shows the add model view</source>\n        <translation>Mostra la vista aggiungi modello</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/LocalDocsView.qml\" line=\"231\"/>\n        <source>Indexing progressBar</source>\n        <translation>Barra di avanzamento indicizzazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"232\"/>\n        <source>Shows the progress made in the indexing</source>\n        <translation>Mostra lo stato di avanzamento dell&apos;indicizzazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"257\"/>\n        <source>ERROR</source>\n        <translation>ERRORE</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"261\"/>\n        <source>INDEXING</source>\n        <translation>INDICIZZAZIONE</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"265\"/>\n        <source>EMBEDDING</source>\n        <translation>INCORPORAMENTO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"268\"/>\n        <source>REQUIRES UPDATE</source>\n        <translation>RICHIEDE AGGIORNAMENTO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"271\"/>\n        <source>READY</source>\n        <translation>PRONTA</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"273\"/>\n        <source>INSTALLING</source>\n        <translation>INSTALLAZIONE</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"300\"/>\n        <source>Indexing in progress</source>\n        <translation>Indicizzazione in corso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"303\"/>\n        <source>Embedding in progress</source>\n        <translation>Incorporamento in corso</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"306\"/>\n        <source>This collection requires an update after version change</source>\n        <translation>Questa raccolta richiede un aggiornamento dopo il cambio di versione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"309\"/>\n        <source>Automatically reindexes upon changes to the folder</source>\n        <translation>Reindicizza automaticamente in caso di modifiche alla cartella</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"311\"/>\n        <source>Installation in progress</source>\n        <translation>Installazione in corso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"325\"/>\n        <source>%</source>\n        <translation>%</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n file</numerusform>\n            <numerusform>%n file</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n parola</numerusform>\n            <numerusform>%n parole</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"408\"/>\n        <source>Remove</source>\n        <translation>Rimuovi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"420\"/>\n        <source>Rebuild</source>\n        <translation>Ricostruisci</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/LocalDocsView.qml\" line=\"423\"/>\n        <source>Reindex this folder from scratch. This is slow and usually not needed.</source>\n        <translation>Reindicizzare questa cartella da zero. Lento e di solito non necessario.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"430\"/>\n        <source>Update</source>\n        <translation>Aggiorna</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"433\"/>\n        <source>Update the collection to the new version. This is a slow operation.</source>\n        <translation>Aggiorna la raccolta alla nuova versione. Questa è un&apos;operazione lenta.</translation>\n    </message>\n</context>\n<context>\n    <name>ModelList</name>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1716\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal OpenAI API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to OpenAI!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with OpenAI&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;here.&lt;/a&gt;&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Richiede una chiave API OpenAI personale.&lt;/li&gt;&lt;li&gt;ATTENZIONE: invierà le tue chat a OpenAI!&lt;/li&gt;&lt;li&gt;La tua chiave API verrà archiviata su disco&lt;/li&gt;&lt;li&gt;Verrà utilizzata solo per comunicare con OpenAI&lt;/li&gt;&lt;li&gt;Puoi richiedere una chiave API &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;qui.&lt;/a&gt;&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1735\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</source>\n        
<translation></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1764\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1796\"/>\n        <source>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1822\"/>\n        <source>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1849\"/>\n        <source>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1749\"/>\n        <source>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info.</source>\n        <translation>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Anche se paghi OpenAI per ChatGPT-4 questo non garantisce l&apos;accesso alla chiave API. 
Contatta OpenAI per maggiori informazioni.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1344\"/>\n        <location filename=\"../src/modellist.cpp\" line=\"1395\"/>\n        <source>cannot open &quot;%1&quot;: %2</source>\n        <translation>impossibile aprire &quot;%1&quot;: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1356\"/>\n        <source>cannot create &quot;%1&quot;: %2</source>\n        <translation>impossibile creare &quot;%1&quot;: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1406\"/>\n        <source>%1 (%2)</source>\n        <translation>%1 (%2)</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1407\"/>\n        <source>&lt;strong&gt;OpenAI-Compatible API Model&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API Key: %1&lt;/li&gt;&lt;li&gt;Base URL: %2&lt;/li&gt;&lt;li&gt;Model Name: %3&lt;/li&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Modello API compatibile con OpenAI&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Chiave API: %1&lt;/li&gt;&lt;li&gt;URL di base: %2&lt;/li&gt;&lt;li&gt;Nome modello: %3&lt;/li&gt;&lt;/ul&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1777\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Richiede una chiave API Mistral personale.&lt;/li&gt;&lt;li&gt;ATTENZIONE: invierà le tue chat a Mistral!&lt;/li&gt;&lt;li&gt;La 
tua chiave API verrà archiviata su disco&lt;/li&gt;&lt;li&gt;Verrà utilizzata solo per comunicare con Mistral&lt;/li&gt;&lt;li&gt;Puoi richiedere una chiave API &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;qui&lt;/a&gt;.&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1862\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal API key and the API base URL.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to the OpenAI-compatible API Server you specified!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with the OpenAI-compatible API Server&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Richiede una chiave API personale e l&apos;URL di base dell&apos;API.&lt;/li&gt;&lt;li&gt;ATTENZIONE: invierà le tue chat al server API compatibile con OpenAI che hai specificato!&lt;/li&gt;&lt;li&gt;La tua chiave API verrà archiviata su disco&lt;/li&gt;&lt;li&gt;Verrà utilizzata solo per comunicare con il server API compatibile con OpenAI&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1879\"/>\n        <source>&lt;strong&gt;Connect to OpenAI-compatible API server&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Connetti al server API compatibile con OpenAI&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"2303\"/>\n        <source>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Creato da %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Pubblicato il %2.&lt;li&gt;Questo modello 
ha %3 Mi piace.&lt;li&gt;Questo modello ha %4 download.&lt;li&gt;Altre informazioni possono essere trovate &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;qui.&lt;/a&gt;&lt;/ul&gt;</translation>\n    </message>\n</context>\n<context>\n    <name>ModelSettings</name>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"14\"/>\n        <source>Model</source>\n        <translation>Modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <source>%1 system message?</source>\n        <translation>%1 il messaggio di sistema?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Clear</source>\n        <translation>Cancella</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Reset</source>\n        <translation>Ripristina</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>The system message will be %1.</source>\n        <translation>Il messaggio di sistema verrà %1.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>removed</source>\n        <translation>rimosso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>reset to the default</source>\n        <translation>ripristinato il valore predefinito</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>%1 chat template?</source>\n        
<translation>%1 il modello di chat?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>The chat template will be %1.</source>\n        <translation>Il modello di chat verrà %1.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>erased</source>\n        <translation>cancellato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"57\"/>\n        <source>Model Settings</source>\n        <translation>Settaggi modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"108\"/>\n        <source>Clone</source>\n        <translation>Clona</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"118\"/>\n        <source>Remove</source>\n        <translation>Rimuovi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"132\"/>\n        <source>Name</source>\n        <translation>Nome</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"165\"/>\n        <source>Model File</source>\n        <translation>File del modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"190\"/>\n        <source>System Message</source>\n        <translation>Messaggio di sistema</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"191\"/>\n        <source>A message to set the context or guide the behavior of the model. Leave blank for none. NOTE: Since GPT4All 3.5, this should not contain control tokens.</source>\n        <translation>Un messaggio per impostare il contesto o guidare il comportamento del modello. Lasciare vuoto per nessuno. 
NOTA: da GPT4All 3.5, questo non dovrebbe contenere token di controllo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"218\"/>\n        <source>System message is not &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;plain text&lt;/a&gt;.</source>\n        <translation>Il messaggio di sistema non è &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;testo normale&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"288\"/>\n        <source>Chat Template</source>\n        <translation>Modello di chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"289\"/>\n        <source>This Jinja template turns the chat into input for the model.</source>\n        <translation>Questo modello Jinja trasforma la chat in input per il modello.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"371\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation>Nessun &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;modello di chat&lt;/a&gt; configurato.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"375\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation>Il &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;modello di chat&lt;/a&gt; non può essere vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"379\"/>\n        <source>&lt;a 
href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Syntax error&lt;/a&gt;: %1</source>\n        <translation>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Errore di sintassi&lt;/a&gt;: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"383\"/>\n        <source>Chat template is not in &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja format&lt;/a&gt;.</source>\n        <translation>Il modello di chat non è in &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;formato Jinja&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"409\"/>\n        <source>Chat Name Prompt</source>\n        <translation>Prompt del nome della chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"410\"/>\n        <source>Prompt used to automatically generate chat names.</source>\n        <translation>Prompt utilizzato per generare automaticamente nomi di chat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"452\"/>\n        <source>Suggested FollowUp Prompt</source>\n        <translation>Prompt di approfondimento suggerito</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"453\"/>\n        <source>Prompt used to generate suggested follow-up questions.</source>\n        <translation>Prompt utilizzato per generare le domande di approfondimento suggerite.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"506\"/>\n        <source>Context Length</source>\n        <translation>Lunghezza del contesto</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ModelSettings.qml\" line=\"507\"/>\n        <source>Number of input and output tokens the model sees.</source>\n        <translation>Numero di token di input e output visualizzati dal modello.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"528\"/>\n        <source>Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Numero massimo di token di prompt/risposta combinati prima che le informazioni vengano perse.\nL&apos;utilizzo di un contesto maggiore rispetto a quello su cui è stato addestrato il modello produrrà scarsi risultati.\nNOTA: non ha effetto finché non si ricarica il modello.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"566\"/>\n        <source>Temperature</source>\n        <translation>Temperatura</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"567\"/>\n        <source>Randomness of model output. Higher -&gt; more variation.</source>\n        <translation>Casualità dell&apos;uscita del modello. 
Più alto -&gt; più variazione.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"578\"/>\n        <source>Temperature increases the chances of choosing less likely tokens.\nNOTE: Higher temperature gives more creative but less predictable outputs.</source>\n        <translation>La temperatura aumenta le possibilità di scegliere token meno probabili.\nNOTA: una temperatura più elevata offre risultati più creativi ma meno prevedibili.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"612\"/>\n        <source>Top-P</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"613\"/>\n        <source>Nucleus Sampling factor. Lower -&gt; more predictable.</source>\n        <translation>Fattore di campionamento del nucleo. Inferiore -&gt; più prevedibile.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"623\"/>\n        <source>Only the most likely tokens up to a total probability of top_p can be chosen.\nNOTE: Prevents choosing highly unlikely tokens.</source>\n        <translation>Solo i token più probabili, fino a un totale di probabilità di Top-P, possono essere scelti.\nNOTA: impedisce la scelta di token altamente improbabili.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"657\"/>\n        <source>Min-P</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"658\"/>\n        <source>Minimum token probability. Higher -&gt; more predictable.</source>\n        <translation>Probabilità minima del token. 
Più alto -&gt; più prevedibile.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"668\"/>\n        <source>Sets the minimum relative probability for a token to be considered.</source>\n        <translation>Imposta la probabilità relativa minima affinché un token venga considerato.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"704\"/>\n        <source>Top-K</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"705\"/>\n        <source>Size of selection pool for tokens.</source>\n        <translation>Dimensione del lotto di selezione per i token.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"716\"/>\n        <source>Only the top K most likely tokens will be chosen from.</source>\n        <translation>Saranno scelti solo i primi K token più probabili.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"751\"/>\n        <source>Max Length</source>\n        <translation>Lunghezza massima</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"752\"/>\n        <source>Maximum response length, in tokens.</source>\n        <translation>Lunghezza massima della risposta, in token.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"797\"/>\n        <source>Prompt Batch Size</source>\n        <translation>Dimensioni del lotto di prompt</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"798\"/>\n        <source>The batch size used for prompt processing.</source>\n        <translation>La dimensione del lotto usata per l&apos;elaborazione dei prompt.</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"809\"/>\n        <source>Amount of prompt tokens to process at once.\nNOTE: Higher values can speed up reading prompts but will use more RAM.</source>\n        <translation>Numero di token del prompt da elaborare contemporaneamente.\nNOTA: valori più alti possono velocizzare la lettura dei prompt ma utilizzeranno più RAM.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"844\"/>\n        <source>Repeat Penalty</source>\n        <translation>Penalità di ripetizione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"845\"/>\n        <source>Repetition penalty factor. Set to 1 to disable.</source>\n        <translation>Fattore di penalità di ripetizione. Impostare su 1 per disabilitare.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"889\"/>\n        <source>Repeat Penalty Tokens</source>\n        <translation>Token di penalità ripetizione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"890\"/>\n        <source>Number of previous tokens used for penalty.</source>\n        <translation>Numero di token precedenti utilizzati per la penalità.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"935\"/>\n        <source>GPU Layers</source>\n        <translation>Livelli GPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"936\"/>\n        <source>Number of model layers to load into VRAM.</source>\n        <translation>Numero di livelli del modello da caricare nella VRAM.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"947\"/>\n        <source>How many model layers to load into VRAM. 
Decrease this if GPT4All runs out of VRAM while loading this model.\nLower values increase CPU load and RAM usage, and make inference slower.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Quanti livelli del modello caricare nella VRAM. Diminuirlo se GPT4All esaurisce la VRAM durante il caricamento di questo modello.\nValori più bassi aumentano il carico della CPU e l&apos;utilizzo della RAM e rallentano l&apos;inferenza.\nNOTA: non ha effetto finché non si ricarica il modello.</translation>\n    </message>\n</context>\n<context>\n    <name>ModelsView</name>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"40\"/>\n        <source>No Models Installed</source>\n        <translation>Nessun modello installato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"49\"/>\n        <source>Install a model to get started using GPT4All</source>\n        <translation>Installa un modello per iniziare a utilizzare GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"60\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"102\"/>\n        <source>＋ Add Model</source>\n        <translation>＋ Aggiungi Modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"65\"/>\n        <source>Shows the add model view</source>\n        <translation>Mostra la vista aggiungi modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"83\"/>\n        <source>Installed Models</source>\n        <translation>Modelli installati</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"89\"/>\n        <source>Locally installed chat models</source>\n        <translation>Modelli per chat installati localmente</translation>\n    </message>\n    <message>\n     
   <location filename=\"../qml/ModelsView.qml\" line=\"147\"/>\n        <source>Model file</source>\n        <translation>File del modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"148\"/>\n        <source>Model file to be downloaded</source>\n        <translation>File del modello da scaricare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"170\"/>\n        <source>Description</source>\n        <translation>Descrizione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"171\"/>\n        <source>File description</source>\n        <translation>Descrizione del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Cancel</source>\n        <translation>Annulla</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Resume</source>\n        <translation>Riprendi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"204\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Arresta/riavvia/avvia il download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"216\"/>\n        <source>Remove</source>\n        <translation>Rimuovi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"223\"/>\n        <source>Remove model from filesystem</source>\n        <translation>Rimuovi il modello dal sistema dei file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"237\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"271\"/>\n        <source>Install</source>\n        <translation>Installa</translation>\n 
   </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"272\"/>\n        <source>Install online model</source>\n        <translation>Installa il modello online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"282\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Errore&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"301\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;AVVISO: non consigliato per il tuo hardware. 
Il modello richiede più memoria (%1 GB) di quella disponibile nel sistema (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"399\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERRORE: $API_KEY è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"420\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>ERRORE: $BASE_URL è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"426\"/>\n        <source>enter $BASE_URL</source>\n        <translation>inserisci $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"441\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERRORE: $MODEL_NAME è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"447\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>inserisci $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>%1 GB</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>?</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"288\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Descrive un errore che si è verificato durante lo scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"307\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Errore per hardware 
incompatibile</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"345\"/>\n        <source>Download progressBar</source>\n        <translation>Barra di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"346\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Mostra lo stato di avanzamento dello scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"356\"/>\n        <source>Download speed</source>\n        <translation>Velocità di scaricamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"357\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>Velocità di scaricamento in byte/kilobyte/megabyte al secondo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"374\"/>\n        <source>Calculating...</source>\n        <translation>Calcolo in corso...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"378\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"408\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"429\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"450\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Se viene calcolato l&apos;hash del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"385\"/>\n        <source>Busy indicator</source>\n        <translation>Indicatore di occupato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"386\"/>\n        <source>Displayed when the file hash is being 
calculated</source>\n        <translation>Visualizzato durante il calcolo dell&apos;hash del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"405\"/>\n        <source>enter $API_KEY</source>\n        <translation>Inserire $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"469\"/>\n        <source>File size</source>\n        <translation>Dimensione del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"491\"/>\n        <source>RAM required</source>\n        <translation>RAM richiesta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"513\"/>\n        <source>Parameters</source>\n        <translation>Parametri</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"535\"/>\n        <source>Quant</source>\n        <translation>Quant</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"557\"/>\n        <source>Type</source>\n        <translation>Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>MyFancyLink</name>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"42\"/>\n        <source>Fancy link</source>\n        <translation>Mio link</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"43\"/>\n        <source>A stylized link</source>\n        <translation>Un link d&apos;esempio</translation>\n    </message>\n</context>\n<context>\n    <name>MyFileDialog</name>\n    <message>\n        <location filename=\"../qml/MyFileDialog.qml\" line=\"7\"/>\n        <source>Please choose a file</source>\n        <translation>Scegli un file</translation>\n    </message>\n</context>\n<context>\n    <name>MyFolderDialog</name>\n    <message>\n       
 <location filename=\"../qml/MyFolderDialog.qml\" line=\"7\"/>\n        <source>Please choose a directory</source>\n        <translation>Scegli una cartella</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsLabel</name>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Clear</source>\n        <translation>Cancella</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Reset</source>\n        <translation>Ripristina</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsTab</name>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"24\"/>\n        <source>Restore defaults?</source>\n        <translation>Ripristinare le impostazioni predefinite?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"25\"/>\n        <source>This page of settings will be reset to the defaults.</source>\n        <translation>Questa pagina di impostazioni verrà ripristinata ai valori predefiniti.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"69\"/>\n        <source>Restore Defaults</source>\n        <translation>Ripristina i valori predefiniti</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"73\"/>\n        <source>Restores settings dialog to a default state</source>\n        <translation>Ripristina la finestra di dialogo dei settaggi a uno stato predefinito</translation>\n    </message>\n</context>\n<context>\n    <name>NetworkDialog</name>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"39\"/>\n        <source>Contribute data to the GPT4All Opensource Datalake.</source>\n        <translation>Contribuisci  con i tuoi dati al Datalake Open Source di GPT4All.</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"55\"/>\n        <source>By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!</source>\n        <translation>Abilitando questa funzionalità, potrai partecipare al processo democratico di addestramento di un modello linguistico di grandi dimensioni fornendo dati per futuri miglioramenti del modello.\n\nQuando un modello di GPT4All ti risponde e tu hai aderito, la tua conversazione verrà inviata al Datalake Open Source di GPT4All. Inoltre, puoi mettere mi piace/non mi piace alla sua risposta. Se non ti piace una risposta, puoi suggerirne una alternativa. Questi dati verranno raccolti e aggregati nel Datalake di GPT4All.\n\nNOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di GPT4All. Non dovresti avere aspettative sulla privacy della chat quando questa funzione è abilitata. Dovresti, tuttavia, aspettarti un&apos;attribuzione facoltativa, se lo desideri. 
I tuoi dati di chat saranno liberamente disponibili per essere scaricati da chiunque e verranno utilizzati da Nomic AI per migliorare i futuri modelli GPT4All. Nomic AI conserverà tutte le informazioni di attribuzione allegate ai tuoi dati e verrai accreditato come collaboratore a qualsiasi versione del modello GPT4All che utilizza i tuoi dati!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"70\"/>\n        <source>Terms for opt-in</source>\n        <translation>Termini per l&apos;adesione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"71\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Descrive cosa accadrà quando effettuerai l&apos;adesione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"79\"/>\n        <source>Please provide a name for attribution (optional)</source>\n        <translation>Fornisci un nome per l&apos;attribuzione (facoltativo)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"81\"/>\n        <source>Attribution (optional)</source>\n        <translation>Attribuzione (facoltativo)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"82\"/>\n        <source>Provide attribution</source>\n        <translation>Fornire attribuzione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"95\"/>\n        <source>Enable</source>\n        <translation>Abilita</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"96\"/>\n        <source>Enable opt-in</source>\n        <translation>Abilita l&apos;adesione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" 
line=\"100\"/>\n        <source>Cancel</source>\n        <translation>Annulla</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"101\"/>\n        <source>Cancel opt-in</source>\n        <translation>Annulla l&apos;adesione</translation>\n    </message>\n</context>\n<context>\n    <name>NewVersionDialog</name>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"34\"/>\n        <source>New version is available</source>\n        <translation>Nuova versione disponibile</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"46\"/>\n        <source>Update</source>\n        <translation>Aggiorna</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"48\"/>\n        <source>Update to new version</source>\n        <translation>Aggiorna alla nuova versione</translation>\n    </message>\n</context>\n<context>\n    <name>PopupDialog</name>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"38\"/>\n        <source>Reveals a shortlived help balloon</source>\n        <translation>Rivela un messaggio di aiuto di breve durata</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"48\"/>\n        <source>Busy indicator</source>\n        <translation>Indicatore di occupato</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"49\"/>\n        <source>Displayed when the popup is showing busy</source>\n        <translation>Visualizzato quando la finestra a comparsa risulta occupata</translation>\n    </message>\n</context>\n<context>\n    <name>RemoteModelCard</name>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"92\"/>\n        <source>API Key</source>\n        <translation>Chiave API</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"104\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERRORE: $API_KEY è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"117\"/>\n        <source>enter $API_KEY</source>\n        <translation>Inserire $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"120\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Se viene calcolato l&apos;hash del file</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"127\"/>\n        <source>Base Url</source>\n        <translation>URL di base</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"138\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>ERRORE: $BASE_URL è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"144\"/>\n        <source>enter $BASE_URL</source>\n        <translation>inserisci $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"152\"/>\n        <source>Model Name</source>\n        <translation>Nome modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"163\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERRORE: $MODEL_NAME è vuoto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"169\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>inserisci $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"179\"/>\n       
 <source>Models</source>\n        <translation>Modelli</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"199\"/>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"217\"/>\n        <source>Install</source>\n        <translation>Installa</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"218\"/>\n        <source>Install remote model</source>\n        <translation>Installa modello remoto</translation>\n    </message>\n</context>\n<context>\n    <name>SettingsView</name>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"22\"/>\n        <location filename=\"../qml/SettingsView.qml\" line=\"61\"/>\n        <source>Settings</source>\n        <translation>Settaggi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"23\"/>\n        <source>Contains various application settings</source>\n        <translation>Contiene vari settaggi dell&apos;applicazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"29\"/>\n        <source>Application</source>\n        <translation>Applicazione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"32\"/>\n        <source>Model</source>\n        <translation>Modello</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"35\"/>\n        <source>LocalDocs</source>\n        <translation></translation>\n    </message>\n</context>\n<context>\n    <name>StartupDialog</name>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"50\"/>\n        <source>Welcome!</source>\n        <translation>Benvenuto!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"67\"/>\n        <source>### Release 
Notes\n%1&lt;br/&gt;\n### Contributors\n%2</source>\n        <translation>### Note di rilascio\n%1&lt;br/&gt;\n### Contributori\n%2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"71\"/>\n        <source>Release notes</source>\n        <translation>Note di rilascio</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"72\"/>\n        <source>Release notes for this version</source>\n        <translation>Note di rilascio per questa versione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"87\"/>\n        <source>### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should; however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!</source>\n        <translation>### Abilitazioni per analisi di utilizzo anonime e datalake\nAbilitando questa funzionalità, potrai partecipare al processo democratico di addestramento di un modello linguistico di grandi dimensioni fornendo dati per futuri miglioramenti del modello.\n\nQuando un modello di GPT4All ti risponde e tu hai aderito, la tua conversazione verrà inviata al Datalake Open Source di GPT4All. Inoltre, puoi mettere mi piace/non mi piace alla sua risposta. Se non ti piace una risposta, puoi suggerirne una alternativa. Questi dati verranno raccolti e aggregati nel Datalake di GPT4All.\n\nNOTA: attivando questa funzione, invierai i tuoi dati al Datalake Open Source di GPT4All. Non dovresti avere aspettative sulla privacy della chat quando questa funzione è abilitata. Dovresti, tuttavia, aspettarti un&apos;attribuzione facoltativa, se lo desideri. I tuoi dati di chat saranno liberamente disponibili per essere scaricati da chiunque e verranno utilizzati da Nomic AI per migliorare i futuri modelli GPT4All. 
Nomic AI conserverà tutte le informazioni di attribuzione allegate ai tuoi dati e verrai accreditato come collaboratore a qualsiasi versione del modello GPT4All che utilizza i tuoi dati!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"106\"/>\n        <source>Terms for opt-in</source>\n        <translation>Termini per l&apos;adesione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"107\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Descrive cosa accadrà quando effettuerai l&apos;adesione</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"118\"/>\n        <source>Opt-in to anonymous usage analytics used to improve GPT4All</source>\n        <translation>Acconsenti all&apos;analisi anonima dell&apos;uso per migliorare GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"124\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"150\"/>\n        <source>Opt-in for anonymous usage statistics</source>\n        <translation>Attiva le statistiche di utilizzo anonime</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"147\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"262\"/>\n        <source>Yes</source>\n        <translation>Sì</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"151\"/>\n        <source>Allow opt-in for anonymous usage statistics</source>\n        <translation>Consenti l&apos;attivazione delle statistiche di utilizzo anonime</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"189\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"304\"/>\n        
<source>No</source>\n        <translation>No</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"192\"/>\n        <source>Opt-out for anonymous usage statistics</source>\n        <translation>Disattiva le statistiche di utilizzo anonime</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"193\"/>\n        <source>Allow opt-out for anonymous usage statistics</source>\n        <translation>Consenti la disattivazione per le statistiche di utilizzo anonime</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"232\"/>\n        <source>Opt-in to anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Acconsenti alla condivisione anonima delle chat con il GPT4All Datalake</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"238\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"265\"/>\n        <source>Opt-in for network</source>\n        <translation>Aderisci per la rete</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"239\"/>\n        <source>Allow opt-in for network</source>\n        <translation>Consenti l&apos;adesione per la rete</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"266\"/>\n        <source>Allow opt-in anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Consenti la condivisione anonima delle chat su GPT4All Datalake</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"307\"/>\n        <source>Opt-out for network</source>\n        <translation>Disattiva per la rete</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"308\"/>\n        
<source>Allow opt-out anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Consenti la non adesione alla condivisione anonima delle chat nel GPT4All Datalake</translation>\n    </message>\n</context>\n<context>\n    <name>ThumbsDownDialog</name>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"39\"/>\n        <source>Please edit the text below to provide a better response. (optional)</source>\n        <translation>Modifica il testo seguente per fornire una risposta migliore. (opzionale)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"54\"/>\n        <source>Please provide a better response...</source>\n        <translation>Si prega di fornire una risposta migliore...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"64\"/>\n        <source>Submit</source>\n        <translation>Invia</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"65\"/>\n        <source>Submits the user&apos;s response</source>\n        <translation>Invia la risposta dell&apos;utente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"69\"/>\n        <source>Cancel</source>\n        <translation>Annulla</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"70\"/>\n        <source>Closes the response dialog</source>\n        <translation>Chiude la finestra di dialogo della risposta</translation>\n    </message>\n</context>\n<context>\n    <name>main</name>\n    <message>\n        <location filename=\"../main.qml\" line=\"149\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Incompatible hardware detected.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, your CPU does not meet the 
minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.&lt;br&gt;&lt;br&gt;See here for more information: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</source>\n        <translation>&lt;h3&gt;Si è verificato un errore all&apos;avvio:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Rilevato hardware incompatibile.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Sfortunatamente, la tua CPU non soddisfa i requisiti minimi per eseguire questo programma. In particolare, non supporta gli elementi intrinseci AVX richiesti da questo programma per eseguire con successo un modello linguistico moderno e di grandi dimensioni. L&apos;unica soluzione in questo momento è aggiornare il tuo hardware con una CPU più moderna.&lt;br&gt;&lt;br&gt;Vedi qui per ulteriori informazioni: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"24\"/>\n        <source>GPT4All v%1</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"47\"/>\n        <source>Restore</source>\n        <translation>Ripristina</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"51\"/>\n        <source>Quit</source>\n        <translation>Esci</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"165\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Inability to access settings file.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, something is 
preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help.</source>\n        <translation>&lt;h3&gt;Si è verificato un errore all&apos;avvio:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Impossibile accedere al file dei settaggi.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Sfortunatamente, qualcosa impedisce al programma di accedere al file dei settaggi. Ciò potrebbe essere causato da autorizzazioni errate nella cartella di configurazione locale dell&apos;app in cui si trova il file dei settaggi. Dai un&apos;occhiata al nostro &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canale Discord&lt;/a&gt; per ricevere assistenza.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"193\"/>\n        <source>Connection to datalake failed.</source>\n        <translation>La connessione al Datalake non è riuscita.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"204\"/>\n        <source>Saving chats.</source>\n        <translation>Salvataggio delle chat.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"215\"/>\n        <source>Network dialog</source>\n        <translation>Dialogo di rete</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"216\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>aderisci per condividere feedback/conversazioni</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"278\"/>\n        <source>Home view</source>\n        <translation>Vista iniziale</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"279\"/>\n        <source>Home view of 
application</source>\n        <translation>Vista iniziale dell&apos;applicazione</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"287\"/>\n        <source>Home</source>\n        <translation>Inizia</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"313\"/>\n        <source>Chat view</source>\n        <translation>Vista chat</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"314\"/>\n        <source>Chat view to interact with models</source>\n        <translation>Vista chat per interagire con i modelli</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"322\"/>\n        <source>Chats</source>\n        <translation>Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"347\"/>\n        <location filename=\"../main.qml\" line=\"356\"/>\n        <source>Models</source>\n        <translation>Modelli</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"348\"/>\n        <source>Models view for installed models</source>\n        <translation>Vista modelli per i modelli installati</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"381\"/>\n        <location filename=\"../main.qml\" line=\"390\"/>\n        <source>LocalDocs</source>\n        <translation></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"382\"/>\n        <source>LocalDocs view to configure and use local docs</source>\n        <translation>Vista LocalDocs per configurare e utilizzare i documenti locali</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"415\"/>\n        <location filename=\"../main.qml\" line=\"424\"/>\n        <source>Settings</source>\n        <translation>Settaggi</translation>\n    
</message>\n    <message>\n        <location filename=\"../main.qml\" line=\"416\"/>\n        <source>Settings view for application configuration</source>\n        <translation>Vista dei settaggi per la configurazione dell&apos;applicazione</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"469\"/>\n        <source>The datalake is enabled</source>\n        <translation>Il Datalake è abilitato</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"471\"/>\n        <source>Using a network model</source>\n        <translation>Utilizzando un modello di rete</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"473\"/>\n        <source>Server mode is enabled</source>\n        <translation>La modalità server è abilitata</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"684\"/>\n        <source>Installed models</source>\n        <translation>Modelli installati</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"685\"/>\n        <source>View of installed models</source>\n        <translation>Vista dei modelli installati</translation>\n    </message>\n</context>\n</TS>\n"
  },
  {
    "path": "gpt4all-chat/translations/gpt4all_pt_BR.ts",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.1\" language=\"pt_BR\">\n<context>\n    <name>AddCollectionView</name>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"45\"/>\n        <source>← Existing Collections</source>\n        <translation>← Minhas coleções</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"68\"/>\n        <source>Add Document Collection</source>\n        <translation>Adicionar Coleção de Documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"78\"/>\n        <source>Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.</source>\n        <translation>Adicione uma pasta contendo arquivos de texto simples, PDFs ou Markdown. Configure extensões adicionais nas Configurações.</translation>\n    </message>\n    <message>\n        <source>Please choose a directory</source>\n        <translation type=\"vanished\">Escolha um diretório</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"99\"/>\n        <source>Name</source>\n        <translation>Nome</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"114\"/>\n        <source>Collection name...</source>\n        <translation>Nome da coleção...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"116\"/>\n        <source>Name of the collection to add (Required)</source>\n        <translation>Nome da coleção (obrigatório)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"132\"/>\n        <source>Folder</source>\n        <translation>Pasta</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddCollectionView.qml\" line=\"149\"/>\n        <source>Folder path...</source>\n        <translation>Caminho da pasta...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"152\"/>\n        <source>Folder path to documents (Required)</source>\n        <translation>Caminho da pasta com os documentos (obrigatório)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"164\"/>\n        <source>Browse</source>\n        <translation>Procurar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"177\"/>\n        <source>Create Collection</source>\n        <translation>Criar Coleção</translation>\n    </message>\n</context>\n<context>\n    <name>AddGPT4AllModelView</name>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"31\"/>\n        <source>These models have been specifically configured for use in GPT4All. 
The first few models on the list are known to work the best, but you should only attempt to use models that will fit in your available memory.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"45\"/>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"unfinished\">Erro de rede: não foi possível obter %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"55\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"343\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"56\"/>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"unfinished\">Exibido enquanto os modelos estão sendo carregados</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"65\"/>\n        <source>All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"85\"/>\n        <source>Reasoning</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"142\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\">Arquivo do modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"143\"/>\n        <source>Model file to be downloaded</source>\n        <translation type=\"unfinished\">Arquivo do modelo a ser baixado</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddGPT4AllModelView.qml\" line=\"166\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\">Descrição</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"167\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\">Descrição do arquivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\">Retomar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\">Baixar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"208\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\">Parar/reiniciar/iniciar o download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"220\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\">Remover</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"227\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"obsolete\">Instalar</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"obsolete\">Instalar modelo online</translation>\n    </message>\n   
 <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"240\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Erro&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"246\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"259\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"265\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"303\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"304\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\">Mostra o progresso do download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"314\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\">Velocidade de download</translation>\n    </message>\n    <message>\n   
     <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"315\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\">Velocidade de download em bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"332\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\">Calculando...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"336\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"344\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"obsolete\">inserir $API_KEY</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"obsolete\">ERRO: A $BASE_URL está vazia.</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"obsolete\">inserir a $BASE_URL</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"obsolete\">inserir o $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"364\"/>\n        <source>File size</source>\n        <translation type=\"unfinished\">Tamanho do arquivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"386\"/>\n        <source>RAM required</source>\n        <translation type=\"unfinished\">RAM 
necessária</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <source>%1 GB</source>\n        <translation type=\"unfinished\">%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"413\"/>\n        <source>?</source>\n        <translation type=\"unfinished\">?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"408\"/>\n        <source>Parameters</source>\n        <translation type=\"unfinished\">Parâmetros</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"430\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\">Quant</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"452\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\">Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddHFModelView</name>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"32\"/>\n        <source>Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these will work. 
Many will require additional configuration before they can be used.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"52\"/>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"unfinished\">Pesquisar modelos...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"55\"/>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"unfinished\">Campo de texto para descobrir e filtrar modelos para download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"61\"/>\n        <source>Searching · %1</source>\n        <translation type=\"unfinished\">Pesquisando · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"131\"/>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"unfinished\">Pesquisar e filtrar modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"132\"/>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"unfinished\">Aciona a descoberta e filtragem de modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"151\"/>\n        <source>Default</source>\n        <translation type=\"unfinished\">Padrão</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"152\"/>\n        <source>Likes</source>\n        <translation type=\"unfinished\">Curtidas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"153\"/>\n        <source>Downloads</source>\n        
<translation type=\"unfinished\">Downloads</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"154\"/>\n        <source>Recent</source>\n        <translation type=\"unfinished\">Recentes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"162\"/>\n        <source>Sort by: %1</source>\n        <translation type=\"unfinished\">Ordenar por: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"176\"/>\n        <source>Asc</source>\n        <translation type=\"unfinished\">Asc</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"177\"/>\n        <source>Desc</source>\n        <translation type=\"unfinished\">Desc</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"190\"/>\n        <source>Sort dir: %1</source>\n        <translation type=\"unfinished\">Direção da ordenação: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"212\"/>\n        <source>None</source>\n        <translation type=\"unfinished\">Nenhum</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"234\"/>\n        <source>Limit: %1</source>\n        <translation type=\"unfinished\">Limite: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"297\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\">Arquivo do modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"298\"/>\n        <source>Model file to be downloaded</source>\n        <translation type=\"unfinished\">Arquivo do modelo a ser baixado</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddHFModelView.qml\" line=\"321\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\">Descrição</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"322\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\">Descrição do arquivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\">Retomar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\">Baixar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"363\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\">Parar/reiniciar/iniciar o download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"375\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\">Remover</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"382\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"396\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"430\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\">Instalar</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddHFModelView.qml\" line=\"431\"/>\n        <source>Install online model</source>\n        <translation type=\"unfinished\">Instalar modelo online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"441\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Erro&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"447\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"460\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"466\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"504\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"505\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\">Mostra o progresso do download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"515\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\">Velocidade de download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"516\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\">Velocidade de download em bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"533\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\">Calculando...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"537\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"567\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"588\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"609\"/>\n        <source>Whether the file hash is being calculated</source>\n        
<translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"544\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"545\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"558\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"564\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">inserir $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"579\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\">ERRO: A $BASE_URL está vazia.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"585\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\">inserir a $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"600\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"606\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">inserir o $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"628\"/>\n        <source>File size</source>\n        <translation 
type=\"unfinished\">Tamanho do arquivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"650\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\">Quant</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"672\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\">Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddModelView</name>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"55\"/>\n        <source>← Existing Models</source>\n        <translation>← Meus Modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"75\"/>\n        <source>Explore Models</source>\n        <translation>Descobrir Modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"86\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\">GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"93\"/>\n        <source>Remote Providers</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"100\"/>\n        <source>HuggingFace</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"vanished\">Pesquisar modelos...</translation>\n    </message>\n    <message>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"vanished\">Campo de texto para descobrir e filtrar modelos para download</translation>\n    </message>\n    <message>\n        <source>Initiate model discovery and 
filtering</source>\n        <translation type=\"vanished\">Pesquisar e filtrar modelos</translation>\n    </message>\n    <message>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"vanished\">Aciona a descoberta e filtragem de modelos</translation>\n    </message>\n    <message>\n        <source>Default</source>\n        <translation type=\"vanished\">Padrão</translation>\n    </message>\n    <message>\n        <source>Likes</source>\n        <translation type=\"vanished\">Curtidas</translation>\n    </message>\n    <message>\n        <source>Downloads</source>\n        <translation type=\"vanished\">Downloads</translation>\n    </message>\n    <message>\n        <source>Recent</source>\n        <translation type=\"vanished\">Recentes</translation>\n    </message>\n    <message>\n        <source>Asc</source>\n        <translation type=\"vanished\">Asc</translation>\n    </message>\n    <message>\n        <source>Desc</source>\n        <translation type=\"vanished\">Desc</translation>\n    </message>\n    <message>\n        <source>None</source>\n        <translation type=\"vanished\">Nenhum</translation>\n    </message>\n    <message>\n        <source>Searching · %1</source>\n        <translation type=\"vanished\">Pesquisando · %1</translation>\n    </message>\n    <message>\n        <source>Sort by: %1</source>\n        <translation type=\"vanished\">Ordenar por: %1</translation>\n    </message>\n    <message>\n        <source>Sort dir: %1</source>\n        <translation type=\"vanished\">Direção da ordenação: %1</translation>\n    </message>\n    <message>\n        <source>Limit: %1</source>\n        <translation type=\"vanished\">Limite: %1</translation>\n    </message>\n    <message>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"vanished\">Erro de rede: não foi possível obter %1</translation>\n    </message>\n    <message>\n        <source>Busy indicator</source>\n        
<translation type=\"vanished\">Indicador de processamento</translation>\n    </message>\n    <message>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"vanished\">Exibido enquanto os modelos estão sendo carregados</translation>\n    </message>\n    <message>\n        <source>Model file</source>\n        <translation type=\"vanished\">Arquivo do modelo</translation>\n    </message>\n    <message>\n        <source>Model file to be downloaded</source>\n        <translation type=\"vanished\">Arquivo do modelo a ser baixado</translation>\n    </message>\n    <message>\n        <source>Description</source>\n        <translation type=\"vanished\">Descrição</translation>\n    </message>\n    <message>\n        <source>File description</source>\n        <translation type=\"vanished\">Descrição do arquivo</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">Cancelar</translation>\n    </message>\n    <message>\n        <source>Resume</source>\n        <translation type=\"vanished\">Retomar</translation>\n    </message>\n    <message>\n        <source>Download</source>\n        <translation type=\"vanished\">Baixar</translation>\n    </message>\n    <message>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"vanished\">Parar/reiniciar/iniciar o download</translation>\n    </message>\n    <message>\n        <source>Remove</source>\n        <translation type=\"vanished\">Remover</translation>\n    </message>\n    <message>\n        <source>Remove model from filesystem</source>\n        <translation type=\"vanished\">Remover modelo do sistema</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"vanished\">Instalar</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">Instalar modelo 
online</translation>\n    </message>\n    <message>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"vanished\">&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;ATENÇÃO: Este modelo não é recomendado para seu hardware. Ele exige mais memória (%1 GB) do que seu sistema possui (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"vanished\">ERRO: A $API_KEY está vazia.</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"vanished\">ERRO: A $BASE_URL está vazia.</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"vanished\">inserir a $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"vanished\">ERRO: O $MODEL_NAME está vazio.</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"vanished\">inserir o $MODEL_NAME</translation>\n    </message>\n    <message>\n        <source>%1 GB</source>\n        <translation type=\"vanished\">%1 GB</translation>\n    </message>\n    <message>\n        <source>?</source>\n        <translation type=\"vanished\">?</translation>\n    </message>\n    <message>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"vanished\">Mostra informações sobre o erro no download</translation>\n    </message>\n    <message>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation 
type=\"vanished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Erro&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"vanished\">Aviso: Hardware não compatível</translation>\n    </message>\n    <message>\n        <source>Download progressBar</source>\n        <translation type=\"vanished\">Progresso do download</translation>\n    </message>\n    <message>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"vanished\">Mostra o progresso do download</translation>\n    </message>\n    <message>\n        <source>Download speed</source>\n        <translation type=\"vanished\">Velocidade de download</translation>\n    </message>\n    <message>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"vanished\">Velocidade de download em bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <source>Calculating...</source>\n        <translation type=\"vanished\">Calculando...</translation>\n    </message>\n    <message>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"vanished\">Quando o hash do arquivo está sendo calculado</translation>\n    </message>\n    <message>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"vanished\">Exibido durante o cálculo do hash do arquivo</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">inserir $API_KEY</translation>\n    </message>\n    <message>\n        <source>File size</source>\n        <translation type=\"vanished\">Tamanho do arquivo</translation>\n    </message>\n    <message>\n        <source>RAM required</source>\n        <translation type=\"vanished\">RAM 
necessária</translation>\n    </message>\n    <message>\n        <source>Parameters</source>\n        <translation type=\"vanished\">Parâmetros</translation>\n    </message>\n    <message>\n        <source>Quant</source>\n        <translation type=\"vanished\">Quant</translation>\n    </message>\n    <message>\n        <source>Type</source>\n        <translation type=\"vanished\">Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>AddRemoteModelView</name>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"31\"/>\n        <source>Various remote model providers that use network resources for inference.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"55\"/>\n        <source>Groq</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"57\"/>\n        <source>Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. 
Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"79\"/>\n        <source>OpenAI</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"81\"/>\n        <source>OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"96\"/>\n        <source>Mistral</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"98\"/>\n        <source>Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. 
Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"141\"/>\n        <source>Custom</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"143\"/>\n        <source>The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ApplicationSettings</name>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"16\"/>\n        <source>Application</source>\n        <translation>Aplicativo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"25\"/>\n        <source>Network dialog</source>\n        <translation>Mensagens de rede</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"26\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>Compartilhar feedback e conversas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"48\"/>\n        <source>Error dialog</source>\n        <translation>Mensagens de erro</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"72\"/>\n        <source>Application Settings</source>\n  
      <translation>Configurações</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"85\"/>\n        <source>General</source>\n        <translation>Geral</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"97\"/>\n        <source>Theme</source>\n        <translation>Tema</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"98\"/>\n        <source>The application color scheme.</source>\n        <translation>Esquema de cores.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"113\"/>\n        <source>Dark</source>\n        <translation>Modo Escuro</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"112\"/>\n        <source>Light</source>\n        <translation>Modo Claro</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"39\"/>\n        <source>ERROR: Update system could not find the MaintenanceTool used to check for updates!&lt;br/&gt;&lt;br/&gt;Did you install this application using the online installer? If so, the MaintenanceTool executable should be located one directory above where this application resides on your filesystem.&lt;br/&gt;&lt;br/&gt;If you can&apos;t start it manually, then I&apos;m afraid you&apos;ll have to reinstall.</source>\n        <translation>ERRO: O sistema de atualização não encontrou a Ferramenta de Manutenção necessária para verificar atualizações!&lt;br&gt;&lt;br&gt;Você instalou este aplicativo usando o instalador online? 
Se sim, o executável da Ferramenta de Manutenção deve estar localizado um diretório acima de onde este aplicativo está instalado.&lt;br&gt;&lt;br&gt;Se você não conseguir iniciá-lo manualmente, será necessário reinstalar o aplicativo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"114\"/>\n        <source>LegacyDark</source>\n        <translation>Modo escuro (legado)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"136\"/>\n        <source>Font Size</source>\n        <translation>Tamanho da Fonte</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"137\"/>\n        <source>The size of text in the application.</source>\n        <translation>Tamanho do texto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"151\"/>\n        <source>Small</source>\n        <translation>Pequeno</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"152\"/>\n        <source>Medium</source>\n        <translation>Médio</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"153\"/>\n        <source>Large</source>\n        <translation>Grande</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"176\"/>\n        <source>Language and Locale</source>\n        <translation>Idioma e Região</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"177\"/>\n        <source>The language and locale you wish to use.</source>\n        <translation>Selecione seu idioma e região.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"196\"/>\n        
<source>System Locale</source>\n        <translation>Local do Sistema</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"223\"/>\n        <source>Device</source>\n        <translation>Processador</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"224\"/>\n        <source>The compute device used for text generation.</source>\n        <translatorcomment>I chose to use &quot;Processador&quot; instead of &quot;Dispositivo&quot; (Device) or &quot;Dispositivo de Computação&quot; (Compute Device) to simplify the terminology and make it more straightforward and understandable. &quot;Dispositivo&quot; can be vague and could refer to various types of hardware, whereas &quot;Processador&quot; clearly and specifically indicates the component responsible for processing tasks. This improves usability by avoiding the ambiguity that might arise from using more generic terms like &quot;Dispositivo.&quot;</translatorcomment>\n        <translation>Processador usado para gerar texto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"242\"/>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"297\"/>\n        <source>Application default</source>\n        <translation>Aplicativo padrão</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"275\"/>\n        <source>Default Model</source>\n        <translation>Modelo Padrão</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"276\"/>\n        <source>The preferred model for new chats. 
Also used as the local server fallback.</source>\n        <translation>Modelo padrão para novos chats e em caso de falha do modelo principal.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"339\"/>\n        <source>Suggestion Mode</source>\n        <translation>Modo de sugestões</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"340\"/>\n        <source>Generate suggested follow-up questions at the end of responses.</source>\n        <translation>Sugerir perguntas após as respostas.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"353\"/>\n        <source>When chatting with LocalDocs</source>\n        <translation>Ao conversar com o LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"354\"/>\n        <source>Whenever possible</source>\n        <translation>Sempre que possível</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"355\"/>\n        <source>Never</source>\n        <translation>Nunca</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"368\"/>\n        <source>Download Path</source>\n        <translation>Diretório de Download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"369\"/>\n        <source>Where to store local models and the LocalDocs database.</source>\n        <translation>Pasta para modelos e banco de dados do LocalDocs.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"401\"/>\n        <source>Browse</source>\n        <translation>Procurar</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ApplicationSettings.qml\" line=\"402\"/>\n        <source>Choose where to save model files</source>\n        <translation>Local para armazenar os modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"413\"/>\n        <source>Enable Datalake</source>\n        <translation>Habilitar Datalake</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"414\"/>\n        <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>\n        <translation>Contribua para o Datalake de código aberto do GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"447\"/>\n        <source>Advanced</source>\n        <translation>Avançado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"459\"/>\n        <source>CPU Threads</source>\n        <translation>Threads de CPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"460\"/>\n        <source>The number of CPU threads used for inference and embedding.</source>\n        <translation>Quantidade de núcleos (threads) do processador usados para processar e responder às suas perguntas.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"491\"/>\n        <source>Enable System Tray</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"492\"/>\n        <source>The application will minimize to the system tray when the window is closed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>Save Chat Context</source>\n        <translatorcomment>I used 
&quot;Histórico do Chat&quot; (Chat History) instead of &quot;Contexto do Chat&quot; (Chat Context) to clearly convey that it refers to saving past messages, making it more intuitive and avoiding potential confusion with abstract terms.</translatorcomment>\n        <translation type=\"vanished\">Salvar Histórico do Chat</translation>\n    </message>\n    <message>\n        <source>Save the chat model&apos;s state to disk for faster loading. WARNING: Uses ~2GB per chat.</source>\n        <translation type=\"vanished\">Salvar histórico do chat para carregamento mais rápido. (Usa aprox. 2GB por chat).</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"508\"/>\n        <source>Enable Local API Server</source>\n        <translation>Ativar servidor de API local</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"509\"/>\n        <source>Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage.</source>\n        <translation>Ativar servidor local compatível com OpenAI (uso de recursos elevado).</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"525\"/>\n        <source>API Server Port</source>\n        <translation>Porta da API</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"526\"/>\n        <source>The port to use for the local server. Requires restart.</source>\n        <translation>Porta de acesso ao servidor local. 
(requer reinicialização).</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"578\"/>\n        <source>Check For Updates</source>\n        <translation>Procurar por Atualizações</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"579\"/>\n        <source>Manually check for an update to GPT4All.</source>\n        <translation>Verifica se há novas atualizações para o GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"588\"/>\n        <source>Updates</source>\n        <translation>Atualizações</translation>\n    </message>\n</context>\n<context>\n    <name>Chat</name>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"33\"/>\n        <location filename=\"../src/chat.h\" line=\"84\"/>\n        <source>New Chat</source>\n        <translation>Novo Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"46\"/>\n        <source>Server Chat</source>\n        <translation>Chat com o Servidor</translation>\n    </message>\n</context>\n<context>\n    <name>ChatAPIWorker</name>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"263\"/>\n        <source>ERROR: Network error occurred while connecting to the API server</source>\n        <translation>ERRO: Ocorreu um erro de rede ao conectar-se ao servidor da API</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"276\"/>\n        <source>ChatAPIWorker::handleFinished got HTTP Error %1 %2</source>\n        <translation>ChatAPIWorker::handleFinished recebeu erro HTTP %1 %2</translation>\n    </message>\n</context>\n<context>\n    <name>ChatCollapsibleItem</name>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"37\"/>\n        <source>Analysis encountered 
error</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Thinking</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Analyzing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"41\"/>\n        <source>Thought for %1 %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>seconds</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"44\"/>\n        <source>Analyzed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatDrawer</name>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"37\"/>\n        <source>Drawer</source>\n        <translation>Menu Lateral</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"38\"/>\n        <source>Main navigation drawer</source>\n        <translation>Menu de navegação principal</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"49\"/>\n        <source>＋ New Chat</source>\n        <translation>＋ Novo Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" 
line=\"50\"/>\n        <source>Create a new chat</source>\n        <translation>Criar um novo chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"199\"/>\n        <source>Select the current chat or edit the chat when in edit mode</source>\n        <translation>Selecione o chat atual ou edite o chat quando estiver no modo de edição</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"216\"/>\n        <source>Edit chat name</source>\n        <translation>Editar nome do chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"229\"/>\n        <source>Save chat name</source>\n        <translation>Salvar nome do chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"246\"/>\n        <source>Delete chat</source>\n        <translation>Excluir chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"283\"/>\n        <source>Confirm chat deletion</source>\n        <translation>Confirmar exclusão do chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"305\"/>\n        <source>Cancel chat deletion</source>\n        <translation>Cancelar exclusão do chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"317\"/>\n        <source>List of chats</source>\n        <translation>Lista de chats</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"318\"/>\n        <source>List of chats in the drawer dialog</source>\n        <translation>Lista de chats na caixa de diálogo do menu lateral</translation>\n    </message>\n</context>\n<context>\n    <name>ChatItemView</name>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" 
line=\"83\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\">GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"84\"/>\n        <source>You</source>\n        <translation type=\"unfinished\">Você</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"107\"/>\n        <source>response stopped ...</source>\n        <translation type=\"unfinished\">resposta interrompida...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"108\"/>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"unfinished\">Recuperando dados em LocalDocs: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"109\"/>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"unfinished\">Buscando em LocalDocs: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"110\"/>\n        <source>processing ...</source>\n        <translation type=\"unfinished\">processando...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"111\"/>\n        <source>generating response ...</source>\n        <translation type=\"unfinished\">gerando resposta...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"112\"/>\n        <source>generating questions ...</source>\n        <translation type=\"unfinished\">gerando perguntas...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"113\"/>\n        <source>generating toolcall ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" 
line=\"545\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">Copiar</translation>\n    </message>\n    <message>\n        <source>Copy Message</source>\n        <translation type=\"obsolete\">Copiar Mensagem</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"obsolete\">Desativar markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"obsolete\">Ativar markdown</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/ChatItemView.qml\" line=\"283\"/>\n        <source>%n Source(s)</source>\n        <translation type=\"unfinished\">\n            <numerusform>%n Origem</numerusform>\n            <numerusform>%n Origens</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"430\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\">LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"460\"/>\n        <source>Edit this message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"461\"/>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"472\"/>\n        <source>All following messages will be permanently erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"471\"/>\n        <source>Redo this response?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"495\"/>\n        <source>Cannot edit chat without a loaded model.</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"497\"/>\n        <source>Cannot edit chat while the model is generating.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"506\"/>\n        <source>Edit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"517\"/>\n        <source>Cannot redo response without a loaded model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"519\"/>\n        <source>Cannot redo response while the model is generating.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"528\"/>\n        <source>Redo</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"565\"/>\n        <source>Like response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"594\"/>\n        <source>Dislike response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"657\"/>\n        <source>Suggested follow-ups</source>\n        <translation type=\"unfinished\">Perguntas relacionadas</translation>\n    </message>\n</context>\n<context>\n    <name>ChatLLM</name>\n    <message>\n        <location filename=\"../src/chatllm.cpp\" line=\"1047\"/>\n        <source>Your message was too long and could not be processed (%1 &gt; %2). 
Please try again with something shorter.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatListModel</name>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"94\"/>\n        <source>TODAY</source>\n        <translation>HOJE</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"96\"/>\n        <source>THIS WEEK</source>\n        <translation>ESTA SEMANA</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"98\"/>\n        <source>THIS MONTH</source>\n        <translation>ESTE MÊS</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"100\"/>\n        <source>LAST SIX MONTHS</source>\n        <translation>ÚLTIMOS SEIS MESES</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"102\"/>\n        <source>THIS YEAR</source>\n        <translation>ESTE ANO</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"104\"/>\n        <source>LAST YEAR</source>\n        <translation>ANO PASSADO</translation>\n    </message>\n</context>\n<context>\n    <name>ChatTextItem</name>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"67\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">Copiar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"73\"/>\n        <source>Copy Message</source>\n        <translation type=\"unfinished\">Copiar Mensagem</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Disable markdown</source>\n        <translation type=\"unfinished\">Desativar markdown</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Enable markdown</source>\n        <translation type=\"unfinished\">Ativar markdown</translation>\n    </message>\n</context>\n<context>\n    <name>ChatView</name>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;Warning&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</source>\n        <translation>&lt;h3&gt;Aviso&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</translation>\n    </message>\n    <message>\n        <source>Switch model dialog</source>\n        <translation type=\"vanished\">Mensagem ao troca de modelo</translation>\n    </message>\n    <message>\n        <source>Warn the user if they switch models, then context will be erased</source>\n        <translation type=\"vanished\">Ao trocar de modelo, o contexto da conversa será apagado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"99\"/>\n        <source>Conversation copied to clipboard.</source>\n        <translation>Conversa copiada.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"106\"/>\n        <source>Code copied to clipboard.</source>\n        <translation>Código copiado.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"113\"/>\n        <source>The entire chat will be erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"248\"/>\n        <source>Chat panel</source>\n        <translation>Painel de chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"249\"/>\n        <source>Chat panel with options</source>\n        <translation>Painel de chat com opções</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"356\"/>\n        
<source>Reload the currently loaded model</source>\n        <translation>Recarregar modelo atual</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"370\"/>\n        <source>Eject the currently loaded model</source>\n        <translation>Ejetar o modelo carregado atualmente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"382\"/>\n        <source>No model installed.</source>\n        <translation>Nenhum modelo instalado.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"384\"/>\n        <source>Model loading error.</source>\n        <translation>Erro ao carregar o modelo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"386\"/>\n        <source>Waiting for model...</source>\n        <translation>Aguardando modelo...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"388\"/>\n        <source>Switching context...</source>\n        <translation>Mudando de contexto...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"390\"/>\n        <source>Choose a model...</source>\n        <translation>Escolha um modelo...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"392\"/>\n        <source>Not found: %1</source>\n        <translation>Não encontrado: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"480\"/>\n        <source>The top item is the current model</source>\n        <translation>O modelo atual é exibido no topo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"566\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/ChatView.qml\" line=\"584\"/>\n        <source>Add documents</source>\n        <translation>Adicionar documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"585\"/>\n        <source>add collections of documents to the chat</source>\n        <translation>Adicionar Coleção de Documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"756\"/>\n        <source>Load the default model</source>\n        <translation>Carregar o modelo padrão</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"757\"/>\n        <source>Loads the default model which can be changed in settings</source>\n        <translation>Carrega o modelo padrão (personalizável nas configurações)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"768\"/>\n        <source>No Model Installed</source>\n        <translation>Nenhum Modelo Instalado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"777\"/>\n        <source>GPT4All requires that you install at least one\nmodel to get started</source>\n        <translation>O GPT4All precisa de pelo menos um\nmodelo instalado para funcionar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"789\"/>\n        <source>Install a Model</source>\n        <translation>Instalar um Modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"794\"/>\n        <source>Shows the add model view</source>\n        <translation>Mostra a visualização para adicionar modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"819\"/>\n        <source>Conversation with the model</source>\n        <translation>Conversa com o 
modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"820\"/>\n        <source>prompt / response pairs from the conversation</source>\n        <translation>Pares de pergunta/resposta da conversa</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1095\"/>\n        <source>Legacy prompt template needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1099\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1102\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1105\"/>\n        <source>Legacy system prompt needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>GPT4All</source>\n        <translation type=\"vanished\">GPT4All</translation>\n    </message>\n    <message>\n        <source>You</source>\n        <translation type=\"vanished\">Você</translation>\n    </message>\n    <message>\n        <source>response stopped ...</source>\n        <translation type=\"vanished\">resposta interrompida...</translation>\n    </message>\n    <message>\n        
<source>processing ...</source>\n        <translation type=\"vanished\">processando...</translation>\n    </message>\n    <message>\n        <source>generating response ...</source>\n        <translation type=\"vanished\">gerando resposta...</translation>\n    </message>\n    <message>\n        <source>generating questions ...</source>\n        <translation type=\"vanished\">gerando perguntas...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1293\"/>\n        <source>Copy</source>\n        <translation>Copiar</translation>\n    </message>\n    <message>\n        <source>Copy Message</source>\n        <translation type=\"vanished\">Copiar Mensagem</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"vanished\">Desativar markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"vanished\">Ativar markdown</translation>\n    </message>\n    <message>\n        <source>Thumbs up</source>\n        <translation type=\"vanished\">Resposta boa</translation>\n    </message>\n    <message>\n        <source>Gives a thumbs up to the response</source>\n        <translation type=\"vanished\">Curte a resposta</translation>\n    </message>\n    <message>\n        <source>Thumbs down</source>\n        <translation type=\"vanished\">Resposta ruim</translation>\n    </message>\n    <message>\n        <source>Opens thumbs down dialog</source>\n        <translation type=\"vanished\">Abrir diálogo de joinha para baixo</translation>\n    </message>\n    <message>\n        <source>Suggested follow-ups</source>\n        <translation type=\"vanished\">Perguntas relacionadas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"924\"/>\n        <source>Erase and reset chat session</source>\n        <translation>Apagar e redefinir sessão de chat</translation>\n 
   </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"942\"/>\n        <source>Copy chat session to clipboard</source>\n        <translation>Copiar histórico da conversa</translation>\n    </message>\n    <message>\n        <source>Redo last chat response</source>\n        <translation type=\"vanished\">Refazer última resposta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1216\"/>\n        <source>Add media</source>\n        <translation>Adicionar mídia</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1217\"/>\n        <source>Adds media to the prompt</source>\n        <translation>Adiciona mídia ao prompt</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1351\"/>\n        <source>Stop generating</source>\n        <translation>Parar de gerar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1352\"/>\n        <source>Stop the current response generation</source>\n        <translation>Parar a geração da resposta atual</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1396\"/>\n        <source>Attach</source>\n        <translation>Anexar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1398\"/>\n        <source>Single File</source>\n        <translation>Arquivo Único</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1007\"/>\n        <source>Reloads the model</source>\n        <translation>Recarrega modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"66\"/>\n        <source>&lt;h3&gt;Encountered an error loading model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model 
loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a compatible format and type&lt;li&gt;Check the model file is complete in the download folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If you&apos;ve sideloaded the model ensure the file is not corrupt by checking md5sum&lt;li&gt;Read more about what models are supported in our &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the gui&lt;li&gt;Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation>&lt;h3&gt;Ocorreu um erro ao carregar o modelo:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Falhas no carregamento do modelo podem acontecer por vários motivos, mas as causas mais comuns incluem um formato de arquivo incorreto, um download incompleto ou corrompido, o tipo de arquivo errado, memória RAM do sistema insuficiente ou um tipo de modelo incompatível. 
Aqui estão algumas sugestões para resolver o problema:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Certifique-se de que o arquivo do modelo tenha um formato e tipo compatíveis&lt;li&gt;Verifique se o arquivo do modelo está completo na pasta de download&lt;li&gt;Você pode encontrar a pasta de download na caixa de diálogo de configurações&lt;li&gt;Se você carregou o modelo, certifique-se de que o arquivo não esteja corrompido verificando o md5sum&lt;li&gt;Leia mais sobre quais modelos são suportados em nossa &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentação&lt;/a&gt; para a interface gráfica&lt;li&gt;Confira nosso &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canal do Discord&lt;/a&gt; para obter ajuda</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"92\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"112\"/>\n        <source>Erase conversation?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"93\"/>\n        <source>Changing the model will erase the current conversation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"394\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"1005\"/>\n        <source>Reload · %1</source>\n        <translation>Recarregar · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"396\"/>\n        <source>Loading · %1</source>\n        <translation>Carregando · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"732\"/>\n        <source>Load · %1 (default) →</source>\n        <translation>Carregar · %1 (padrão) →</translation>\n    </message>\n    <message>\n        <source>restoring from text ...</source>\n        <translation 
type=\"vanished\">Recuperando do texto...</translation>\n    </message>\n    <message>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"vanished\">Recuperando dados em LocalDocs: %1 ...</translation>\n    </message>\n    <message>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"vanished\">Buscando em LocalDocs: %1 ...</translation>\n    </message>\n    <message numerus=\"yes\">\n        <source>%n Source(s)</source>\n        <translation type=\"vanished\">\n            <numerusform>%n Origem</numerusform>\n            <numerusform>%n Origens</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Send a message...</source>\n        <translation>Enviar uma mensagem...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Load a model to continue...</source>\n        <translation>Carregue um modelo para continuar...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1246\"/>\n        <source>Send messages/prompts to the model</source>\n        <translation>Enviar mensagens/prompts para o modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1287\"/>\n        <source>Cut</source>\n        <translation>Recortar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1299\"/>\n        <source>Paste</source>\n        <translation>Colar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1303\"/>\n        <source>Select All</source>\n        <translation>Selecionar tudo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1374\"/>\n        <source>Send message</source>\n       
 <translation>Enviar mensagem</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1375\"/>\n        <source>Sends the message/prompt contained in textfield to the model</source>\n        <translation>Envia a mensagem/prompt contida no campo de texto para o modelo</translation>\n    </message>\n</context>\n<context>\n    <name>CodeInterpreter</name>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"79\"/>\n        <source>Code Interpreter</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"80\"/>\n        <source>compute javascript code using console.log as output</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>CollectionsDrawer</name>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"70\"/>\n        <source>Warning: searching collections while indexing can return incomplete results</source>\n        <translation>Aviso: pesquisar coleções durante a indexação pode retornar resultados incompletos</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n arquivo(s)</numerusform>\n            <numerusform>%n arquivo(s)</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n palavra(s)</numerusform>\n            <numerusform>%n palavra(s)</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"103\"/>\n        <source>Updating</source>\n        
<translation>Atualizando</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"128\"/>\n        <source>＋ Add Docs</source>\n        <translation>＋ Adicionar Documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"137\"/>\n        <source>Select a collection to make it available to the chat model.</source>\n        <translation>Selecione uma coleção para disponibilizá-la ao modelo de chat.</translation>\n    </message>\n</context>\n<context>\n    <name>ConfirmationDialog</name>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"42\"/>\n        <source>OK</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"49\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">Cancelar</translation>\n    </message>\n</context>\n<context>\n    <name>Download</name>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"278\"/>\n        <source>Model &quot;%1&quot; is installed successfully.</source>\n        <translation>Modelo &quot;%1&quot; instalado com sucesso.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"288\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERRO: O nome do modelo ($MODEL_NAME) está vazio.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"294\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERRO: A chave da API ($API_KEY) está vazia.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"300\"/>\n        <source>ERROR: $BASE_URL is invalid.</source>\n        <translation>ERRO: A URL base ($BASE_URL) é inválida.</translation>\n    
</message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"306\"/>\n        <source>ERROR: Model &quot;%1 (%2)&quot; is conflict.</source>\n        <translation>ERRO: Conflito com o modelo &quot;%1 (%2)&quot;.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"325\"/>\n        <source>Model &quot;%1 (%2)&quot; is installed successfully.</source>\n        <translation>Modelo &quot;%1 (%2)&quot; instalado com sucesso.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"349\"/>\n        <source>Model &quot;%1&quot; is removed.</source>\n        <translation>Modelo &quot;%1&quot; removido.</translation>\n    </message>\n</context>\n<context>\n    <name>HomeView</name>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"49\"/>\n        <source>Welcome to GPT4All</source>\n        <translation>Bem-vindo ao GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"56\"/>\n        <source>The privacy-first LLM chat application</source>\n        <translation>O aplicativo de chat LLM que prioriza a privacidade</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"66\"/>\n        <source>Start chatting</source>\n        <translation>Iniciar chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"81\"/>\n        <source>Start Chatting</source>\n        <translation>Iniciar Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"82\"/>\n        <source>Chat with any LLM</source>\n        <translation>Converse com qualquer LLM</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"92\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"93\"/>\n        <source>Chat with your local files</source>\n        <translation>Converse com seus arquivos locais</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"103\"/>\n        <source>Find Models</source>\n        <translation>Encontrar Modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"104\"/>\n        <source>Explore and download models</source>\n        <translation>Descubra e baixe modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"190\"/>\n        <source>Latest news</source>\n        <translation>Últimas novidades</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"191\"/>\n        <source>Latest news from GPT4All</source>\n        <translation>Últimas novidades do GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"222\"/>\n        <source>Release Notes</source>\n        <translation>Notas de versão</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"228\"/>\n        <source>Documentation</source>\n        <translation>Documentação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"234\"/>\n        <source>Discord</source>\n        <translation>Discord</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"240\"/>\n        <source>X (Twitter)</source>\n        <translation>X (Twitter)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"246\"/>\n        <source>Github</source>\n        <translation>Github</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/HomeView.qml\" line=\"257\"/>\n        <source>nomic.ai</source>\n        <translation>nomic.ai</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"282\"/>\n        <source>Subscribe to Newsletter</source>\n        <translation>Assine nossa Newsletter</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsSettings</name>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"19\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"29\"/>\n        <source>LocalDocs Settings</source>\n        <translation>Configurações do LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"38\"/>\n        <source>Indexing</source>\n        <translation>Indexação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"51\"/>\n        <source>Allowed File Extensions</source>\n        <translation>Extensões de Arquivo Permitidas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"52\"/>\n        <source>Comma-separated list. LocalDocs will only attempt to process files with these extensions.</source>\n        <translation>Lista separada por vírgulas. 
O LocalDocs tentará processar apenas arquivos com essas extensões.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"100\"/>\n        <source>Embedding</source>\n        <translation>Incorporação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"112\"/>\n        <source>Use Nomic Embed API</source>\n        <translation>Usar a API Nomic Embed</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"113\"/>\n        <source>Embed documents using the fast Nomic API instead of a private local model. Requires restart.</source>\n        <translation>Incorporar documentos usando a API Nomic rápida em vez de um modelo local privado. Requer reinicialização.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"130\"/>\n        <source>Nomic API Key</source>\n        <translation>Chave da API Nomic</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"131\"/>\n        <source>API key to use for Nomic Embed. Get one from the Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API keys page&lt;/a&gt;. Requires restart.</source>\n        <translation>Chave da API a ser usada para Nomic Embed. Obtenha uma na página de &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;chaves de API do Atlas&lt;/a&gt;. Requer reinicialização.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"165\"/>\n        <source>Embeddings Device</source>\n        <translation>Processamento de Incorporações</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"166\"/>\n        <source>The compute device used for embeddings. 
Requires restart.</source>\n        <translation>Dispositivo usado para processar as incorporações. Requer reinicialização.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"176\"/>\n        <source>Application default</source>\n        <translation>Padrão do aplicativo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"211\"/>\n        <source>Display</source>\n        <translation>Exibir</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"224\"/>\n        <source>Show Sources</source>\n        <translation>Mostrar Fontes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"225\"/>\n        <source>Display the sources used for each response.</source>\n        <translation>Mostra as fontes usadas para cada resposta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"242\"/>\n        <source>Advanced</source>\n        <translation>Apenas para usuários avançados</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"258\"/>\n        <source>Warning: Advanced usage only.</source>\n        <translation>Atenção: Apenas para usuários avançados.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"259\"/>\n        <source>Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model&apos;s context window. 
More info &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;here&lt;/a&gt;.</source>\n        <translation>Valores muito altos podem causar falhas no LocalDocs, respostas extremamente lentas ou até mesmo nenhuma resposta. De forma geral, o valor {Número de Caracteres x Número de Trechos} é adicionado à janela de contexto do modelo. Clique &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;aqui&lt;/a&gt; para mais informações.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"267\"/>\n        <source>Document snippet size (characters)</source>\n        <translatorcomment>I translated &quot;snippet&quot; as &quot;trecho&quot; to make the term feel more natural and understandable in Portuguese. &quot;Trecho&quot; effectively conveys the idea of a portion or section of a document, fitting well within the context, whereas a more literal translation might sound less intuitive or awkward for users.</translatorcomment>\n        <translation>Tamanho do trecho de documento (caracteres)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"268\"/>\n        <source>Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Número de caracteres por trecho de documento. 
Valores maiores aumentam a chance de respostas factuais, mas também tornam a geração mais lenta.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"293\"/>\n        <source>Max document snippets per prompt</source>\n        <translation>Máximo de Trechos de Documento por Prompt</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"294\"/>\n        <source>Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Número máximo de trechos de documentos a serem adicionados ao contexto do prompt. Valores maiores aumentam a chance de respostas factuais, mas também tornam a geração mais lenta.</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsView</name>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"52\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"58\"/>\n        <source>Chat with your local files</source>\n        <translation>Converse com seus arquivos locais</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"71\"/>\n        <source>＋ Add Collection</source>\n        <translation>＋ Adicionar Coleção</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;ERROR: The LocalDocs database cannot be accessed or is not valid.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Note: You will need to restart after trying any of the following suggested fixes.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Make sure that the folder set as &lt;b&gt;Download Path&lt;/b&gt; exists on the file 
system.&lt;/li&gt;&lt;li&gt;Check ownership as well as read and write permissions of the &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;If there is a &lt;b&gt;localdocs_v2.db&lt;/b&gt; file, check its ownership and read/write permissions, too.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;If the problem persists and there are any &apos;localdocs_v*.db&apos; files present, as a last resort you can&lt;br&gt;try backing them up and removing them. You will have to recreate your collections, however.</source>\n        <translation>&lt;h3&gt;ERRO: Não foi possível acessar o banco de dados do LocalDocs ou ele não é válido.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Observação: Será necessário reiniciar o aplicativo após tentar qualquer uma das seguintes correções sugeridas.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Certifique-se de que a pasta definida como &lt;b&gt;Caminho de Download&lt;/b&gt; existe no sistema de arquivos.&lt;/li&gt;&lt;li&gt;Verifique a propriedade, bem como as permissões de leitura e gravação do &lt;b&gt;Caminho de Download&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;Se houver um arquivo &lt;b&gt;localdocs_v2.db&lt;/b&gt;, verifique também sua propriedade e permissões de leitura/gravação.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;Se o problema persistir e houver algum arquivo &apos;localdocs_v*.db&apos; presente, como último recurso, você pode&lt;br&gt;tentar fazer backup deles e removê-los. 
No entanto, você terá que recriar suas coleções.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"109\"/>\n        <source>No Collections Installed</source>\n        <translation>Nenhuma Coleção Instalada</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"118\"/>\n        <source>Install a collection of local documents to get started using this feature</source>\n        <translation>Instale uma coleção de documentos locais para começar a usar este recurso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"129\"/>\n        <source>＋ Add Doc Collection</source>\n        <translation>＋ Adicionar Coleção de Documentos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"134\"/>\n        <source>Shows the add model view</source>\n        <translation>Mostra a visualização para adicionar modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"231\"/>\n        <source>Indexing progressBar</source>\n        <translation>Barra de progresso de indexação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"232\"/>\n        <source>Shows the progress made in the indexing</source>\n        <translation>Mostra o progresso da indexação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"257\"/>\n        <source>ERROR</source>\n        <translation>ERRO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"261\"/>\n        <source>INDEXING</source>\n        <translation>INDEXANDO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"265\"/>\n        
<source>EMBEDDING</source>\n        <translation>INCORPORANDO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"268\"/>\n        <source>REQUIRES UPDATE</source>\n        <translation>REQUER ATUALIZAÇÃO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"271\"/>\n        <source>READY</source>\n        <translation>PRONTO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"273\"/>\n        <source>INSTALLING</source>\n        <translation>INSTALANDO</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"300\"/>\n        <source>Indexing in progress</source>\n        <translation>Indexação em andamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"303\"/>\n        <source>Embedding in progress</source>\n        <translation>Incorporação em andamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"306\"/>\n        <source>This collection requires an update after version change</source>\n        <translation>Esta coleção precisa ser atualizada após a mudança de versão</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"309\"/>\n        <source>Automatically reindexes upon changes to the folder</source>\n        <translation>Reindexa automaticamente após alterações na pasta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"311\"/>\n        <source>Installation in progress</source>\n        <translation>Instalação em andamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"325\"/>\n        <source>%</source>\n        <translation>%</translation>\n    
</message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n arquivo(s)</numerusform>\n            <numerusform>%n arquivo(s)</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n palavra(s)</numerusform>\n            <numerusform>%n palavra(s)</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"408\"/>\n        <source>Remove</source>\n        <translation>Remover</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"420\"/>\n        <source>Rebuild</source>\n        <translation>Reconstruir</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"423\"/>\n        <source>Reindex this folder from scratch. This is slow and usually not needed.</source>\n        <translation>Reindexar pasta do zero. Lento e geralmente desnecessário.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"430\"/>\n        <source>Update</source>\n        <translation>Atualizar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"433\"/>\n        <source>Update the collection to the new version. This is a slow operation.</source>\n        <translation>Atualizar coleção para nova versão. 
Pode demorar.</translation>\n    </message>\n</context>\n<context>\n    <name>ModelList</name>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1716\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal OpenAI API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to OpenAI!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with OpenAI&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;here.&lt;/a&gt;&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;É necessária uma chave de API da OpenAI.&lt;/li&gt;&lt;li&gt;AVISO: Seus chats serão enviados para a OpenAI!&lt;/li&gt;&lt;li&gt;Sua chave de API será armazenada localmente&lt;/li&gt;&lt;li&gt;Ela será usada apenas para comunicação com a OpenAI&lt;/li&gt;&lt;li&gt;Você pode solicitar uma chave de API &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;aqui.&lt;/a&gt;&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1735\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo ChatGPT GPT-3.5 Turbo da OpenAI&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1764\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</source>\n        <translation>&lt;strong&gt;Modelo ChatGPT GPT-4 da OpenAI&lt;/strong&gt;&lt;br&gt; %1 %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1796\"/>\n        <source>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo Mistral Tiny&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    
<message>\n        <location filename=\"../src/modellist.cpp\" line=\"1822\"/>\n        <source>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo Mistral Small&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1849\"/>\n        <source>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelo Mistral Medium&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1749\"/>\n        <source>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info.</source>\n        <translation>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Mesmo que você pague pelo ChatGPT-4 da OpenAI, isso não garante acesso à chave de API. Contate a OpenAI para mais informações.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1344\"/>\n        <location filename=\"../src/modellist.cpp\" line=\"1395\"/>\n        <source>cannot open &quot;%1&quot;: %2</source>\n        <translation>não é possível abrir &quot;%1&quot;: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1356\"/>\n        <source>cannot create &quot;%1&quot;: %2</source>\n        <translation>não é possível criar &quot;%1&quot;: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1406\"/>\n        <source>%1 (%2)</source>\n        <translation>%1 (%2)</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1407\"/>\n        <source>&lt;strong&gt;OpenAI-Compatible API Model&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API Key: %1&lt;/li&gt;&lt;li&gt;Base URL: 
%2&lt;/li&gt;&lt;li&gt;Model Name: %3&lt;/li&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Modelo de API Compatível com OpenAI&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Chave da API: %1&lt;/li&gt;&lt;li&gt;URL Base: %2&lt;/li&gt;&lt;li&gt;Nome do Modelo: %3&lt;/li&gt;&lt;/ul&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1777\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;É necessária uma chave de API da Mistral.&lt;/li&gt;&lt;li&gt;AVISO: Seus chats serão enviados para a Mistral!&lt;/li&gt;&lt;li&gt;Sua chave de API será armazenada localmente&lt;/li&gt;&lt;li&gt;Ela será usada apenas para comunicação com a Mistral&lt;/li&gt;&lt;li&gt;Você pode solicitar uma chave de API &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;aqui&lt;/a&gt;.&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1862\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal API key and the API base URL.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to the OpenAI-compatible API Server you specified!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with the OpenAI-compatible API Server&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;É necessária uma chave de API e a URL da API.&lt;/li&gt;&lt;li&gt;AVISO: Seus chats serão enviados para o servidor de API compatível com OpenAI que você especificou!&lt;/li&gt;&lt;li&gt;Sua chave de API será armazenada no 
disco&lt;/li&gt;&lt;li&gt;Será usada apenas para comunicação com o servidor de API compatível com OpenAI&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1879\"/>\n        <source>&lt;strong&gt;Connect to OpenAI-compatible API server&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Conectar a um servidor de API compatível com OpenAI&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"2303\"/>\n        <source>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Criado por %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Publicado em %2.&lt;li&gt;Este modelo tem %3 curtidas.&lt;li&gt;Este modelo tem %4 downloads.&lt;li&gt;Mais informações podem ser encontradas &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;aqui.&lt;/a&gt;&lt;/ul&gt;</translation>\n    </message>\n</context>\n<context>\n    <name>ModelSettings</name>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"14\"/>\n        <source>Model</source>\n        <translation>Modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <source>%1 system message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n  
      <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>The system message will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>removed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>reset to the default</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>%1 chat template?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>The chat template will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>erased</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"57\"/>\n        <source>Model Settings</source>\n        <translation>Configurações do Modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"108\"/>\n        <source>Clone</source>\n        <translation>Clonar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"118\"/>\n        <source>Remove</source>\n        
<translation>Remover</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"132\"/>\n        <source>Name</source>\n        <translation>Nome</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"165\"/>\n        <source>Model File</source>\n        <translation>Arquivo do Modelo</translation>\n    </message>\n    <message>\n        <source>System Prompt</source>\n        <translation type=\"vanished\">Prompt do Sistema</translation>\n    </message>\n    <message>\n        <source>Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens.</source>\n        <translation type=\"vanished\">Prefixado no início de cada conversa. Deve conter os tokens de enquadramento apropriados.</translation>\n    </message>\n    <message>\n        <source>Prompt Template</source>\n        <translation type=\"vanished\">Modelo de Prompt</translation>\n    </message>\n    <message>\n        <source>The template that wraps every prompt.</source>\n        <translation type=\"vanished\">Modelo para cada prompt.</translation>\n    </message>\n    <message>\n        <source>Must contain the string &quot;%1&quot; to be replaced with the user&apos;s input.</source>\n        <translation type=\"vanished\">Deve incluir &quot;%1&quot; para a entrada do usuário.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"190\"/>\n        <source>System Message</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"191\"/>\n        <source>A message to set the context or guide the behavior of the model. Leave blank for none. 
NOTE: Since GPT4All 3.5, this should not contain control tokens.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"218\"/>\n        <source>System message is not &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;plain text&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"288\"/>\n        <source>Chat Template</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"289\"/>\n        <source>This Jinja template turns the chat into input for the model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"371\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"375\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"379\"/>\n        <source>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Syntax error&lt;/a&gt;: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"383\"/>\n        <source>Chat template is not in &lt;a 
href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja format&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"409\"/>\n        <source>Chat Name Prompt</source>\n        <translation>Prompt para Nome do Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"410\"/>\n        <source>Prompt used to automatically generate chat names.</source>\n        <translation>Prompt usado para gerar automaticamente nomes de chats.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"452\"/>\n        <source>Suggested FollowUp Prompt</source>\n        <translation>Prompt de Sugestão de Acompanhamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"453\"/>\n        <source>Prompt used to generate suggested follow-up questions.</source>\n        <translation>Prompt usado para gerar sugestões de perguntas.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"506\"/>\n        <source>Context Length</source>\n        <translation>Tamanho do Contexto</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"507\"/>\n        <source>Number of input and output tokens the model sees.</source>\n        <translation>Tamanho da Janela de Contexto.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"528\"/>\n        <source>Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Máximo de tokens combinados (prompt + resposta) antes da 
perda de informações.\nUsar mais contexto do que o modelo foi treinado pode gerar resultados ruins.\nObs.: Só entrará em vigor após recarregar o modelo.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"566\"/>\n        <source>Temperature</source>\n        <translation>Temperatura</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"567\"/>\n        <source>Randomness of model output. Higher -&gt; more variation.</source>\n        <translation>Aleatoriedade das respostas. Quanto maior, mais variadas.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"578\"/>\n        <source>Temperature increases the chances of choosing less likely tokens.\nNOTE: Higher temperature gives more creative but less predictable outputs.</source>\n        <translation>Aumenta a chance de escolher tokens menos prováveis.\nObs.: Uma temperatura mais alta gera resultados mais criativos, mas menos previsíveis.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"612\"/>\n        <source>Top-P</source>\n        <translation>Top-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"613\"/>\n        <source>Nucleus Sampling factor. Lower -&gt; more predictable.</source>\n        <translation>Amostragem por núcleo. 
Menor valor, respostas mais previsíveis.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"623\"/>\n        <source>Only the most likely tokens up to a total probability of top_p can be chosen.\nNOTE: Prevents choosing highly unlikely tokens.</source>\n        <translation>Apenas tokens com probabilidade total até o valor de top_p serão escolhidos.\nObs.: Evita tokens muito improváveis.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"657\"/>\n        <source>Min-P</source>\n        <translation>Min-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"658\"/>\n        <source>Minimum token probability. Higher -&gt; more predictable.</source>\n        <translation>Probabilidade mínima do token. Quanto maior -&gt; mais previsível.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"668\"/>\n        <source>Sets the minimum relative probability for a token to be considered.</source>\n        <translation>Define a probabilidade relativa mínima para um token ser considerado.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"704\"/>\n        <source>Top-K</source>\n        <translation>Top-K</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"705\"/>\n        <source>Size of selection pool for tokens.</source>\n        <translation>Número de tokens considerados na amostragem.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"716\"/>\n        <source>Only the top K most likely tokens will be chosen from.</source>\n        <translation>Serão escolhidos apenas os K tokens mais prováveis.</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ModelSettings.qml\" line=\"751\"/>\n        <source>Max Length</source>\n        <translation>Comprimento Máximo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"752\"/>\n        <source>Maximum response length, in tokens.</source>\n        <translation>Comprimento máximo da resposta, em tokens.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"797\"/>\n        <source>Prompt Batch Size</source>\n        <translation>Tamanho do Lote de Processamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"798\"/>\n        <source>The batch size used for prompt processing.</source>\n        <translation>Tokens processados por lote.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"809\"/>\n        <source>Amount of prompt tokens to process at once.\nNOTE: Higher values can speed up reading prompts but will use more RAM.</source>\n        <translation>Quantidade de tokens de prompt para processar de uma vez.\nOBS.: Valores mais altos podem acelerar a leitura dos prompts, mas usarão mais RAM.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"844\"/>\n        <source>Repeat Penalty</source>\n        <translation>Penalidade de Repetição</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"845\"/>\n        <source>Repetition penalty factor. 
Set to 1 to disable.</source>\n        <translation>Penalidade de Repetição (1 para desativar).</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"889\"/>\n        <source>Repeat Penalty Tokens</source>\n        <translation>Tokens para penalizar repetição</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"890\"/>\n        <source>Number of previous tokens used for penalty.</source>\n        <translation>Número de tokens anteriores usados para penalidade.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"935\"/>\n        <source>GPU Layers</source>\n        <translation>Camadas na GPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"936\"/>\n        <source>Number of model layers to load into VRAM.</source>\n        <translation>Camadas Carregadas na GPU.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"947\"/>\n        <source>How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model.\nLower values increase CPU load and RAM usage, and make inference slower.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Número de camadas do modelo carregadas na VRAM. 
Diminua se faltar VRAM ao carregar o modelo.\nValores menores aumentam o uso de CPU e RAM, e deixam a inferência mais lenta.\nObs.: Só entrará em vigor após recarregar o modelo.</translation>\n    </message>\n</context>\n<context>\n    <name>ModelsView</name>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"40\"/>\n        <source>No Models Installed</source>\n        <translation>Nenhum Modelo Instalado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"49\"/>\n        <source>Install a model to get started using GPT4All</source>\n        <translation>Instale um modelo para começar a usar o GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"60\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"102\"/>\n        <source>＋ Add Model</source>\n        <translation>＋ Adicionar Modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"65\"/>\n        <source>Shows the add model view</source>\n        <translation>Mostra a visualização para adicionar modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"83\"/>\n        <source>Installed Models</source>\n        <translation>Modelos Instalados</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"89\"/>\n        <source>Locally installed chat models</source>\n        <translation>Modelos de chat instalados localmente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"147\"/>\n        <source>Model file</source>\n        <translation>Arquivo do modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"148\"/>\n        <source>Model file to be downloaded</source>\n        <translation>Arquivo do 
modelo a ser baixado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"170\"/>\n        <source>Description</source>\n        <translation>Descrição</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"171\"/>\n        <source>File description</source>\n        <translation>Descrição do arquivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Cancel</source>\n        <translation>Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Resume</source>\n        <translation>Retomar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"204\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Parar/reiniciar/iniciar o download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"216\"/>\n        <source>Remove</source>\n        <translation>Remover</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"223\"/>\n        <source>Remove model from filesystem</source>\n        <translation>Remover modelo do sistema de arquivos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"237\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"271\"/>\n        <source>Install</source>\n        <translation>Instalar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"272\"/>\n        <source>Install online model</source>\n        <translation>Instalar modelo online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"282\"/>\n        
<source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Erro&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"301\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;AVISO: Não recomendado para seu hardware. O modelo requer mais memória (%1 GB) do que seu sistema tem disponível (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"399\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>ERRO: A $API_KEY está vazia.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"420\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>ERRO: A $BASE_URL está vazia.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"426\"/>\n        <source>enter $BASE_URL</source>\n        <translation>inserir a $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"441\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>ERRO: O $MODEL_NAME está vazio.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"447\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>inserir o $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n 
       <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>?</source>\n        <translation>?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"288\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Descreve um erro que ocorreu durante o download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"307\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Erro para hardware incompatível</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"345\"/>\n        <source>Download progressBar</source>\n        <translation>Barra de progresso do download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"346\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Mostra o progresso do download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"356\"/>\n        <source>Download speed</source>\n        <translation>Velocidade de download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"357\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>Velocidade de download em bytes/kilobytes/megabytes por segundo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"374\"/>\n        <source>Calculating...</source>\n        <translation>Calculando...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"378\"/>\n        <location filename=\"../qml/ModelsView.qml\" 
line=\"408\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"429\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"450\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Se o hash do arquivo está sendo calculado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"385\"/>\n        <source>Busy indicator</source>\n        <translation>Indicador de ocupado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"386\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>Exibido quando o hash do arquivo está sendo calculado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"405\"/>\n        <source>enter $API_KEY</source>\n        <translation>inserir $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"469\"/>\n        <source>File size</source>\n        <translation>Tamanho do arquivo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"491\"/>\n        <source>RAM required</source>\n        <translation>RAM necessária</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"513\"/>\n        <source>Parameters</source>\n        <translation>Parâmetros</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"535\"/>\n        <source>Quant</source>\n        <translation>Quant</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"557\"/>\n        <source>Type</source>\n        <translation>Tipo</translation>\n    </message>\n</context>\n<context>\n    <name>MyFancyLink</name>\n    <message>\n        <location 
filename=\"../qml/MyFancyLink.qml\" line=\"42\"/>\n        <source>Fancy link</source>\n        <translation>Link personalizado</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"43\"/>\n        <source>A stylized link</source>\n        <translation>Um link personalizado</translation>\n    </message>\n</context>\n<context>\n    <name>MyFileDialog</name>\n    <message>\n        <location filename=\"../qml/MyFileDialog.qml\" line=\"7\"/>\n        <source>Please choose a file</source>\n        <translation>Por favor escolha um arquivo</translation>\n    </message>\n</context>\n<context>\n    <name>MyFolderDialog</name>\n    <message>\n        <location filename=\"../qml/MyFolderDialog.qml\" line=\"7\"/>\n        <source>Please choose a directory</source>\n        <translation>Escolha um diretório</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsLabel</name>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsStack</name>\n    <message>\n        <source>Please choose a directory</source>\n        <translation type=\"vanished\">Escolha um diretório</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsTab</name>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"24\"/>\n        <source>Restore defaults?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"25\"/>\n        <source>This page of settings will be reset to the defaults.</source>\n        <translation 
type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"69\"/>\n        <source>Restore Defaults</source>\n        <translation>Restaurar Configurações Padrão</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"73\"/>\n        <source>Restores settings dialog to a default state</source>\n        <translation>Restaura as configurações para o estado padrão</translation>\n    </message>\n</context>\n<context>\n    <name>NetworkDialog</name>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"39\"/>\n        <source>Contribute data to the GPT4All Opensource Datalake.</source>\n        <translation>Contribuir com dados para o Datalake de código aberto GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"55\"/>\n        <source>By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!</source>\n        <translation>Ao habilitar este recurso, você poderá participar do processo democrático de treinamento de um grande modelo de linguagem, contribuindo com dados para futuras melhorias do modelo.\n\nQuando um modelo GPT4All responder a você e você tiver optado por participar, sua conversa será enviada para o Datalake de Código Aberto do GPT4All. Além disso, você pode curtir/não curtir a resposta. Se você não gostar de uma resposta, pode sugerir uma resposta alternativa. Esses dados serão coletados e agregados no Datalake do GPT4All.\n\nOBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake de Código Aberto do GPT4All. Você não deve ter nenhuma expectativa de privacidade no chat quando este recurso estiver ativado. No entanto, você deve ter a expectativa de uma atribuição opcional, se desejar. Seus dados de chat estarão disponíveis para qualquer pessoa baixar e serão usados pela Nomic AI para melhorar os futuros modelos GPT4All. 
A Nomic AI manterá todas as informações de atribuição anexadas aos seus dados e você será creditado como colaborador em qualquer versão do modelo GPT4All que utilize seus dados!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"70\"/>\n        <source>Terms for opt-in</source>\n        <translation>Termos de participação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"71\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Descrição do que acontece ao participar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"79\"/>\n        <source>Please provide a name for attribution (optional)</source>\n        <translation>Forneça um nome para atribuição (opcional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"81\"/>\n        <source>Attribution (optional)</source>\n        <translation>Atribuição (opcional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"82\"/>\n        <source>Provide attribution</source>\n        <translation>Fornecer atribuição</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"95\"/>\n        <source>Enable</source>\n        <translation>Habilitar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"96\"/>\n        <source>Enable opt-in</source>\n        <translation>Ativar participação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"100\"/>\n        <source>Cancel</source>\n        <translation>Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"101\"/>\n        <source>Cancel 
opt-in</source>\n        <translation>Cancelar participação</translation>\n    </message>\n</context>\n<context>\n    <name>NewVersionDialog</name>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"34\"/>\n        <source>New version is available</source>\n        <translation>Atualização disponível</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"46\"/>\n        <source>Update</source>\n        <translation>Atualizar agora</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"48\"/>\n        <source>Update to new version</source>\n        <translation>Baixa e instala a última versão do GPT4All</translation>\n    </message>\n</context>\n<context>\n    <name>PopupDialog</name>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"38\"/>\n        <source>Reveals a shortlived help balloon</source>\n        <translation>Exibe uma dica rápida</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"48\"/>\n        <source>Busy indicator</source>\n        <translatorcomment>The literal translation of &quot;busy indicator&quot; as &quot;indicador de ocupado&quot; might create ambiguity in Portuguese, as it doesn&apos;t clearly convey whether the system is processing something or simply unavailable. 
&quot;Progresso&quot; (progress) was chosen to more clearly indicate that an activity is in progress and that the user should wait for its completion.</translatorcomment>\n        <translation>Indicador de progresso</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"49\"/>\n        <source>Displayed when the popup is showing busy</source>\n        <translation>Visível durante o processamento</translation>\n    </message>\n</context>\n<context>\n    <name>RemoteModelCard</name>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"92\"/>\n        <source>API Key</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"104\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"117\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">inserir $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"120\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"127\"/>\n        <source>Base Url</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"138\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\">ERRO: A $BASE_URL está vazia.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"144\"/>\n        <source>enter $BASE_URL</source>\n        <translation 
type=\"unfinished\">inserir a $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"152\"/>\n        <source>Model Name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"163\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"169\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">inserir o $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"179\"/>\n        <source>Models</source>\n        <translation type=\"unfinished\">Modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"199\"/>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"217\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\">Instalar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"218\"/>\n        <source>Install remote model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>SettingsView</name>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"22\"/>\n        <location filename=\"../qml/SettingsView.qml\" line=\"61\"/>\n        <source>Settings</source>\n        <translatorcomment>I used &quot;Config&quot; instead of &quot;Configurações&quot; to keep the UI concise and visually balanced. 
&quot;Config&quot; is a widely recognized abbreviation that maintains clarity while saving space, making the interface cleaner and more user-friendly, especially in areas with limited space.</translatorcomment>\n        <translation>Config</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"23\"/>\n        <source>Contains various application settings</source>\n        <translation>Acessar as configurações do aplicativo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"29\"/>\n        <source>Application</source>\n        <translation>Aplicativo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"32\"/>\n        <source>Model</source>\n        <translation>Modelo</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"35\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n</context>\n<context>\n    <name>StartupDialog</name>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"50\"/>\n        <source>Welcome!</source>\n        <translation>Bem-vindo(a)!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"67\"/>\n        <source>### Release Notes\n%1&lt;br/&gt;\n### Contributors\n%2</source>\n        <translation>### Notas de lançamento\n%1&lt;br/&gt;\n### Colaboradores\n%2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"71\"/>\n        <source>Release notes</source>\n        <translation>Notas de lançamento</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"72\"/>\n        <source>Release notes for this version</source>\n        <translation>Notas de lançamento desta versão</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"87\"/>\n        <source>### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should; however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!</source>\n        <translation>### Opções para análise de uso anônimo e datalake\nAo habilitar esses recursos, você poderá participar do processo democrático de treinamento de um\ngrande modelo de linguagem, contribuindo com dados para futuras melhorias do modelo.\n\nQuando um modelo GPT4All responder a você e você tiver optado por participar, sua conversa será enviada para o Datalake de\nCódigo Aberto do GPT4All. Além disso, você pode curtir/não curtir a resposta. Se você não gostar de uma resposta,\npode sugerir uma resposta alternativa. 
Esses dados serão coletados e agregados no Datalake do GPT4All.\n\nOBS.: Ao ativar este recurso, você estará enviando seus dados para o Datalake de Código Aberto do GPT4All.\nVocê não deve ter nenhuma expectativa de privacidade no chat quando este recurso estiver ativado. No entanto,\nvocê deve ter a expectativa de uma atribuição opcional, se desejar. Seus dados de chat estarão disponíveis para\nqualquer pessoa baixar e serão usados pela Nomic AI para melhorar os futuros modelos GPT4All. A Nomic AI manterá\ntodas as informações de atribuição anexadas aos seus dados e você será creditado como colaborador em qualquer\nversão do modelo GPT4All que utilize seus dados!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"106\"/>\n        <source>Terms for opt-in</source>\n        <translation>Termos de participação</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"107\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Descrição do que acontece ao participar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"118\"/>\n        <source>Opt-in to anonymous usage analytics used to improve GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"124\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"150\"/>\n        <source>Opt-in for anonymous usage statistics</source>\n        <translation>Enviar estatísticas de uso anônimas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"147\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"262\"/>\n        <source>Yes</source>\n        <translation>Sim</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/StartupDialog.qml\" line=\"151\"/>\n        <source>Allow opt-in for anonymous usage statistics</source>\n        <translation>Permitir o envio de estatísticas de uso anônimas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"189\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"304\"/>\n        <source>No</source>\n        <translation>Não</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"192\"/>\n        <source>Opt-out for anonymous usage statistics</source>\n        <translation>Recusar envio de estatísticas de uso anônimas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"193\"/>\n        <source>Allow opt-out for anonymous usage statistics</source>\n        <translation>Permitir recusar envio de estatísticas de uso anônimas</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"232\"/>\n        <source>Opt-in to anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"238\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"265\"/>\n        <source>Opt-in for network</source>\n        <translation>Aceitar na rede</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"239\"/>\n        <source>Allow opt-in for network</source>\n        <translation>Permitir aceitação na rede</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"266\"/>\n        <source>Allow opt-in anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Permitir compartilhamento anônimo de chats no Datalake GPT4All</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"307\"/>\n        <source>Opt-out for network</source>\n        <translation>Recusar na rede</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"308\"/>\n        <source>Allow opt-out anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Permitir recusar compartilhamento anônimo de chats no Datalake GPT4All</translation>\n    </message>\n</context>\n<context>\n    <name>SwitchModelDialog</name>\n    <message>\n        <source>&lt;b&gt;Warning:&lt;/b&gt; changing the model will erase the current conversation. Do you wish to continue?</source>\n        <translation type=\"vanished\">&lt;b&gt;Atenção:&lt;/b&gt; Ao trocar o modelo a conversa atual será perdida. Continuar?</translation>\n    </message>\n    <message>\n        <source>Continue</source>\n        <translation type=\"vanished\">Continuar</translation>\n    </message>\n    <message>\n        <source>Continue with model loading</source>\n        <translation type=\"vanished\">Confirma a troca do modelo</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">Cancelar</translation>\n    </message>\n</context>\n<context>\n    <name>ThumbsDownDialog</name>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"39\"/>\n        <source>Please edit the text below to provide a better response. 
(optional)</source>\n        <translation>Editar resposta (opcional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"54\"/>\n        <source>Please provide a better response...</source>\n        <translation>Digite sua resposta...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"64\"/>\n        <source>Submit</source>\n        <translation>Enviar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"65\"/>\n        <source>Submits the user&apos;s response</source>\n        <translation>Enviar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"69\"/>\n        <source>Cancel</source>\n        <translation>Cancelar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"70\"/>\n        <source>Closes the response dialog</source>\n        <translation>Fecha a caixa de diálogo de resposta</translation>\n    </message>\n</context>\n<context>\n    <name>main</name>\n    <message>\n        <location filename=\"../main.qml\" line=\"149\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Incompatible hardware detected.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. 
The only solution at this time is to upgrade your hardware to a more modern CPU.&lt;br&gt;&lt;br&gt;See here for more information: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</source>\n        <translation>&lt;h3&gt;Ocorreu um erro ao iniciar:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Hardware incompatível detectado.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Infelizmente, seu processador não atende aos requisitos mínimos para executar este programa. Especificamente, ele não possui suporte às instruções AVX, que são necessárias para executar modelos de linguagem grandes e modernos. A única solução, no momento, é atualizar seu hardware para um processador mais recente.&lt;br&gt;&lt;br&gt;Para mais informações, consulte: &lt;a href=&quot;https://pt.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://pt.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"24\"/>\n        <source>GPT4All v%1</source>\n        <translation>GPT4All v%1</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"47\"/>\n        <source>Restore</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"51\"/>\n        <source>Quit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"165\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Inability to access settings file.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. 
Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help.</source>\n        <translation>&lt;h3&gt;Ocorreu um erro ao iniciar:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Não foi possível acessar o arquivo de configurações.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Infelizmente, algo está impedindo o programa de acessar o arquivo de configurações. Isso pode acontecer devido a permissões incorretas na pasta de configurações do aplicativo. Para obter ajuda, acesse nosso &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canal no Discord&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"193\"/>\n        <source>Connection to datalake failed.</source>\n        <translation>Falha na conexão com o datalake.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"204\"/>\n        <source>Saving chats.</source>\n        <translation>Salvando chats.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"215\"/>\n        <source>Network dialog</source>\n        <translation>Avisos de rede</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"216\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>permitir compartilhamento de feedback/conversas</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"278\"/>\n        <source>Home view</source>\n        <translation>Tela inicial</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"279\"/>\n        <source>Home view of application</source>\n        <translation>Tela inicial do aplicativo</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"287\"/>\n        <source>Home</source>\n        <translation>Início</translation>\n    </message>\n    <message>\n   
     <location filename=\"../main.qml\" line=\"313\"/>\n        <source>Chat view</source>\n        <translation>Visualização do Chat</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"314\"/>\n        <source>Chat view to interact with models</source>\n        <translation>Visualização do chat para interagir com os modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"322\"/>\n        <source>Chats</source>\n        <translation>Chats</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"347\"/>\n        <location filename=\"../main.qml\" line=\"356\"/>\n        <source>Models</source>\n        <translation>Modelos</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"348\"/>\n        <source>Models view for installed models</source>\n        <translation>Tela de modelos instalados</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"381\"/>\n        <location filename=\"../main.qml\" line=\"390\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"382\"/>\n        <source>LocalDocs view to configure and use local docs</source>\n        <translation>Tela de configuração e uso de documentos locais do LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"415\"/>\n        <location filename=\"../main.qml\" line=\"424\"/>\n        <source>Settings</source>\n        <translation>Config</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"416\"/>\n        <source>Settings view for application configuration</source>\n        <translation>Tela de configurações do aplicativo</translation>\n    </message>\n    <message>\n        <location 
filename=\"../main.qml\" line=\"469\"/>\n        <source>The datalake is enabled</source>\n        <translation>O datalake está ativado</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"471\"/>\n        <source>Using a network model</source>\n        <translation>Usando um modelo de rede</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"473\"/>\n        <source>Server mode is enabled</source>\n        <translation>Modo servidor ativado</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"684\"/>\n        <source>Installed models</source>\n        <translation>Modelos instalados</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"685\"/>\n        <source>View of installed models</source>\n        <translation>Exibe os modelos instalados</translation>\n    </message>\n</context>\n</TS>\n"
  },
  {
    "path": "gpt4all-chat/translations/gpt4all_ro_RO.ts",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.1\" language=\"ro_RO\">\n<context>\n    <name>AddCollectionView</name>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"45\"/>\n        <source>← Existing Collections</source>\n        <translation>← Colecţiile curente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"68\"/>\n        <source>Add Document Collection</source>\n        <translation>Adaugă o Colecţie de documente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"78\"/>\n        <source>Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.</source>\n        <translation>Adaugă un folder cu fişiere în format text, PDF sau Markdown. Alte extensii pot fi specificate în Configurare.</translation>\n    </message>\n    <message>\n        <source>Please choose a directory</source>\n        <translation type=\"vanished\">Selectează un director/folder</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"99\"/>\n        <source>Name</source>\n        <translation>Denumire</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"114\"/>\n        <source>Collection name...</source>\n        <translation>Denumirea Colecţiei...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"116\"/>\n        <source>Name of the collection to add (Required)</source>\n        <translation>Denumirea Colecţiei de adăugat (necesar)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"132\"/>\n        <source>Folder</source>\n        <translation>Folder</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/AddCollectionView.qml\" line=\"149\"/>\n        <source>Folder path...</source>\n        <translation>Calea spre folder...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"152\"/>\n        <source>Folder path to documents (Required)</source>\n        <translation>Calea spre documente (necesar)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"164\"/>\n        <source>Browse</source>\n        <translation>Căutare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"177\"/>\n        <source>Create Collection</source>\n        <translation>Creează Colecţia</translation>\n    </message>\n</context>\n<context>\n    <name>AddGPT4AllModelView</name>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"31\"/>\n        <source>These models have been specifically configured for use in GPT4All. The first few models on the list are known to work the best, but you should only attempt to use models that will fit in your available memory.</source>\n        <translation>Aceste modele au fost configurate special pentru utilizarea în GPT4All. 
Primele câteva modele din listă sunt cunoscute ca fiind cele mai bune, dar ar trebui să încercați să utilizați doar modele care se încadrează în RAM.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"45\"/>\n        <source>Network error: could not retrieve %1</source>\n        <translation>Eroare de reţea: nu se poate prelua %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"55\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"343\"/>\n        <source>Busy indicator</source>\n        <translation>Indicator de activitate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"56\"/>\n        <source>Displayed when the models request is ongoing</source>\n        <translation>Afişat în timpul solicitării modelului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"65\"/>\n        <source>All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"85\"/>\n        <source>Reasoning</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"142\"/>\n        <source>Model file</source>\n        <translation>Fişierul modelului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"143\"/>\n        <source>Model file to be downloaded</source>\n        <translation>Fişierul modelului ce va fi descărcat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"166\"/>\n        <source>Description</source>\n        <translation>Descriere</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"167\"/>\n        <source>File description</source>\n        <translation>Descrierea fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Cancel</source>\n        <translation>Anulare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Resume</source>\n        <translation>Continuare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Download</source>\n        <translation>Download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"208\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Oprirea/Repornirea/Iniţierea descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"220\"/>\n        <source>Remove</source>\n        <translation>Şterg</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"227\"/>\n        <source>Remove model from filesystem</source>\n        <translation>Şterg modelul din sistemul de fişiere</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"vanished\">Instalare</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">Instalez un model din online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"240\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        
<translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Eroare&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"246\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Descrie eroarea apărută în timpul descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"259\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;ATENŢIE: Nerecomandat pentru acest hardware. Modelul necesită mai multă memorie (%1 GB) decât are acest sistem (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"265\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Eroare: hardware incompatibil</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"303\"/>\n        <source>Download progressBar</source>\n        <translation>Progresia descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"304\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Afişează progresia descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"314\"/>\n        <source>Download speed</source>\n        <translation>Viteza de download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"315\"/>\n        <source>Download 
speed in bytes/kilobytes/megabytes per second</source>\n        <translation>Viteza de download în bytes/kilobytes/megabytes pe secundă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"332\"/>\n        <source>Calculating...</source>\n        <translation>Calculare...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"336\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Dacă se calculează hash-ul fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"344\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>Se afişează când se calculează hash-ul fişierului</translation>\n    </message>\n    <message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"vanished\">EROARE: $API_KEY absentă.</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">introdu cheia $API_KEY</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"vanished\">EROARE: $BASE_URL absentă.</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"vanished\">introdu $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"vanished\">EROARE: $MODEL_NAME absent</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"vanished\">introdu $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"364\"/>\n        <source>File size</source>\n        <translation>Dimensiunea 
fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"386\"/>\n        <source>RAM required</source>\n        <translation>RAM necesară</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"413\"/>\n        <source>?</source>\n        <translation>?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"408\"/>\n        <source>Parameters</source>\n        <translation>Parametri</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"430\"/>\n        <source>Quant</source>\n        <translation>Quant(ificare)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"452\"/>\n        <source>Type</source>\n        <translation>Tip</translation>\n    </message>\n</context>\n<context>\n    <name>AddHFModelView</name>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"32\"/>\n        <source>Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these will work. Many will require additional configuration before they can be used.</source>\n        <translation>Utilizați funcția de căutare pentru a găsi și descărca modele de pe HuggingFace. NU E GARANTAT că acestea vor funcționa. 
Multe dintre ele vor necesita configurări suplimentare înainte de a putea fi utilizate.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"52\"/>\n        <source>Discover and download models by keyword search...</source>\n        <translation>Caută şi descarcă modele după un cuvânt-cheie...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"55\"/>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation>Câmp pentru căutarea şi filtrarea modelelor ce pot fi descărcate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"61\"/>\n        <source>Searching · %1</source>\n        <translation>Căutare · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"131\"/>\n        <source>Initiate model discovery and filtering</source>\n        <translation>Iniţiază căutarea şi filtrarea modelelor</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"132\"/>\n        <source>Triggers discovery and filtering of models</source>\n        <translation>Activează căutarea şi filtrarea modelelor</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"151\"/>\n        <source>Default</source>\n        <translation>Implicit</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"152\"/>\n        <source>Likes</source>\n        <translation>Likes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"153\"/>\n        <source>Downloads</source>\n        <translation>Download-uri</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" 
line=\"154\"/>\n        <source>Recent</source>\n        <translation>Recent/e</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"162\"/>\n        <source>Sort by: %1</source>\n        <translation>Ordonare după: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"176\"/>\n        <source>Asc</source>\n        <translation>Asc. (A-&gt;Z)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"177\"/>\n        <source>Desc</source>\n        <translation>Desc. (Z-&gt;A)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"190\"/>\n        <source>Sort dir: %1</source>\n        <translation>Sensul ordonării: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"212\"/>\n        <source>None</source>\n        <translation>Niciunul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"234\"/>\n        <source>Limit: %1</source>\n        <translation>Límită: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"297\"/>\n        <source>Model file</source>\n        <translation>Fişierul modelului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"298\"/>\n        <source>Model file to be downloaded</source>\n        <translation>Fişierul modelului ce va fi descărcat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"321\"/>\n        <source>Description</source>\n        <translation>Descriere</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"322\"/>\n        <source>File description</source>\n        
<translation>Descrierea fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Cancel</source>\n        <translation>Anulare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Resume</source>\n        <translation>Continuare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Download</source>\n        <translation>Download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"363\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Oprirea/Repornirea/Iniţierea descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"375\"/>\n        <source>Remove</source>\n        <translation>Şterg</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"382\"/>\n        <source>Remove model from filesystem</source>\n        <translation>Şterg modelul din sistemul de fişiere</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"396\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"430\"/>\n        <source>Install</source>\n        <translation>Instalare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"431\"/>\n        <source>Install online model</source>\n        <translation>Instalez un model din online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"441\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        
<translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Eroare&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"447\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Descrie o eroare aparută la download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"460\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;ATENŢIE: Nerecomandat pentru acest hardware. Modelul necesită mai multă memorie (%1 GB) decât are acest sistem (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"466\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Eroare - hardware incompatibil</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"504\"/>\n        <source>Download progressBar</source>\n        <translation>Bara de progresie a descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"505\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Afişează progresia descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"515\"/>\n        <source>Download speed</source>\n        <translation>Viteza de download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"516\"/>\n        <source>Download speed in bytes/kilobytes/megabytes 
per second</source>\n        <translation>Viteza de download în bytes/kilobytes/megabytes pe secundă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"533\"/>\n        <source>Calculating...</source>\n        <translation>Calculare...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"537\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"567\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"588\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"609\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Dacă se calculează hash-ul fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"544\"/>\n        <source>Busy indicator</source>\n        <translation>Indicator de activitate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"545\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>Afişat la calcularea hash-ului fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"558\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>EROARE: $API_KEY absentă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"564\"/>\n        <source>enter $API_KEY</source>\n        <translation>introdu cheia $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"579\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>EROARE: $BASE_URL absentă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"585\"/>\n        
<source>enter $BASE_URL</source>\n        <translation>introdu $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"600\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"606\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>introdu $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"628\"/>\n        <source>File size</source>\n        <translation>Dimensiunea fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"650\"/>\n        <source>Quant</source>\n        <translation>Quant(ificare)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"672\"/>\n        <source>Type</source>\n        <translation>Tip</translation>\n    </message>\n</context>\n<context>\n    <name>AddModelView</name>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"55\"/>\n        <source>← Existing Models</source>\n        <translation>← Modelele curente/instalate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"75\"/>\n        <source>Explore Models</source>\n        <translation>Caută modele</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"86\"/>\n        <source>GPT4All</source>\n        <translation>GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"93\"/>\n        <source>Remote Providers</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" 
line=\"100\"/>\n        <source>HuggingFace</source>\n        <translation>HuggingFace</translation>\n    </message>\n    <message>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"vanished\">Caută şi descarcă modele după un cuvânt-cheie...</translation>\n    </message>\n    <message>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"vanished\">Câmp pentru căutarea şi filtrarea modelelor ce pot fi descărcate</translation>\n    </message>\n    <message>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"vanished\">Iniţiază căutarea şi filtrarea modelelor</translation>\n    </message>\n    <message>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"vanished\">Activează căutarea şi filtrarea modelelor</translation>\n    </message>\n    <message>\n        <source>Default</source>\n        <translation type=\"vanished\">Implicit</translation>\n    </message>\n    <message>\n        <source>Likes</source>\n        <translation type=\"vanished\">Likes</translation>\n    </message>\n    <message>\n        <source>Downloads</source>\n        <translation type=\"vanished\">Download-uri</translation>\n    </message>\n    <message>\n        <source>Recent</source>\n        <translation type=\"vanished\">Recent/e</translation>\n    </message>\n    <message>\n        <source>Asc</source>\n        <translation type=\"vanished\">Asc. (A-&gt;Z)</translation>\n    </message>\n    <message>\n        <source>Desc</source>\n        <translation type=\"vanished\">Desc. 
(Z-&gt;A)</translation>\n    </message>\n    <message>\n        <source>None</source>\n        <translation type=\"vanished\">Niciunul</translation>\n    </message>\n    <message>\n        <source>Searching · %1</source>\n        <translation type=\"vanished\">Căutare · %1</translation>\n    </message>\n    <message>\n        <source>Sort by: %1</source>\n        <translation type=\"vanished\">Ordonare după: %1</translation>\n    </message>\n    <message>\n        <source>Sort dir: %1</source>\n        <translation type=\"vanished\">Sensul ordonării: %1</translation>\n    </message>\n    <message>\n        <source>Limit: %1</source>\n        <translation type=\"vanished\">Límită: %1</translation>\n    </message>\n    <message>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"vanished\">Eroare de reţea: nu se poate prelua %1</translation>\n    </message>\n    <message>\n        <source>Busy indicator</source>\n        <translation type=\"vanished\">Indicator de activitate</translation>\n    </message>\n    <message>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"vanished\">Afişat în timpul solicitării modelului</translation>\n    </message>\n    <message>\n        <source>Model file</source>\n        <translation type=\"vanished\">Fişierul modelului</translation>\n    </message>\n    <message>\n        <source>Model file to be downloaded</source>\n        <translation type=\"vanished\">Fişierul modelului de descărcat</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">Instalez un model din online</translation>\n    </message>\n    <message>\n        <source>%1 GB</source>\n        <translation type=\"vanished\">%1 GB</translation>\n    </message>\n    <message>\n        <source>?</source>\n        <translation type=\"vanished\">?</translation>\n    </message>\n    <message>\n        
<source>Shows the progress made in the download</source>\n        <translation type=\"vanished\">Afişează progresia descărcării</translation>\n    </message>\n    <message>\n        <source>Download speed</source>\n        <translation type=\"vanished\">Viteza de download</translation>\n    </message>\n    <message>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"vanished\">Viteza de download în bytes/kilobytes/megabytes pe secundă</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">introdu cheia $API_KEY</translation>\n    </message>\n    <message>\n        <source>File size</source>\n        <translation type=\"vanished\">Dimensiunea fişierului</translation>\n    </message>\n    <message>\n        <source>RAM required</source>\n        <translation type=\"vanished\">RAM necesară</translation>\n    </message>\n    <message>\n        <source>Parameters</source>\n        <translation type=\"vanished\">Parametri</translation>\n    </message>\n    <message>\n        <source>Quant</source>\n        <translation type=\"vanished\">Quant(ificare)</translation>\n    </message>\n    <message>\n        <source>Type</source>\n        <translation type=\"vanished\">Tip</translation>\n    </message>\n</context>\n<context>\n    <name>AddRemoteModelView</name>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"31\"/>\n        <source>Various remote model providers that use network resources for inference.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"55\"/>\n        <source>Groq</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"57\"/>\n        <source>Groq offers a high-performance 
AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"79\"/>\n        <source>OpenAI</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"81\"/>\n        <source>OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"96\"/>\n        <source>Mistral</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"98\"/>\n        <source>Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. 
Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"141\"/>\n        <source>Custom</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"143\"/>\n        <source>The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ApplicationSettings</name>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"16\"/>\n        <source>Application</source>\n        <translation>Program</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"25\"/>\n        <source>Network dialog</source>\n        <translation>Reţea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"26\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>optional: partajarea (share) de comentarii/conversaţii</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"39\"/>\n        <source>ERROR: Update system could not find the MaintenanceTool used to check for updates!&lt;br/&gt;&lt;br/&gt;Did you install this application using the online installer? 
If so, the MaintenanceTool executable should be located one directory above where this application resides on your filesystem.&lt;br/&gt;&lt;br/&gt;If you can&apos;t start it manually, then I&apos;m afraid you&apos;ll have to reinstall.</source>\n        <translation>EROARE: Sistemul de Update nu poate găsi componenta MaintenanceTool necesară căutării de versiuni noi!&lt;br&gt;&lt;br&gt; Ai instalat acest program folosind kitul online? Dacă da, atunci MaintenanceTool trebuie să fie un nivel mai sus de folderul unde ai instalat programul.&lt;br&gt;&lt;br&gt; Dacă nu poate fi lansată manual, atunci programul trebuie reinstalat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"48\"/>\n        <source>Error dialog</source>\n        <translation>Eroare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"72\"/>\n        <source>Application Settings</source>\n        <translation>Configurarea programului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"85\"/>\n        <source>General</source>\n        <translation>General</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"97\"/>\n        <source>Theme</source>\n        <translation>Tema pentru interfaţă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"98\"/>\n        <source>The application color scheme.</source>\n        <translation>Schema de culori a programului.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"113\"/>\n        <source>Dark</source>\n        <translation>Întunecat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"112\"/>\n        <source>Light</source>\n  
      <translation>Luminos</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"114\"/>\n        <source>LegacyDark</source>\n        <translation>Întunecat-vechi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"136\"/>\n        <source>Font Size</source>\n        <translation>Dimensiunea textului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"137\"/>\n        <source>The size of text in the application.</source>\n        <translation>Dimensiunea textului în program.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"223\"/>\n        <source>Device</source>\n        <translation>Dispozitiv/Device</translation>\n    </message>\n    <message>\n        <source>The compute device used for text generation. &quot;Auto&quot; uses Vulkan or\n                Metal.</source>\n        <translation type=\"vanished\">Dispozitivul de calcul utilizat pentru generarea de text. 
&quot;Auto&quot; apelează la Vulkan sau la Metal.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"151\"/>\n        <source>Small</source>\n        <translation>Mic</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"152\"/>\n        <source>Medium</source>\n        <translation>Mediu</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"153\"/>\n        <source>Large</source>\n        <translation>Mare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"176\"/>\n        <source>Language and Locale</source>\n        <translation>Limbă şi Localizare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"177\"/>\n        <source>The language and locale you wish to use.</source>\n        <translation>Limba şi Localizarea de utilizat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"196\"/>\n        <source>System Locale</source>\n        <translation>Localizare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"224\"/>\n        <source>The compute device used for text generation.</source>\n        <translation>Dispozitivul de calcul utilizat pentru generarea de text.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"242\"/>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"297\"/>\n        <source>Application default</source>\n        <translation>Implicit</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"275\"/>\n        <source>Default Model</source>\n        <translation>Modelul 
implicit</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"276\"/>\n        <source>The preferred model for new chats. Also used as the local server fallback.</source>\n        <translation>Modelul preferat pentru noile conversaţii. Va fi folosit drept rezervă pentru serverul local.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"339\"/>\n        <source>Suggestion Mode</source>\n        <translation>Modul de sugerare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"340\"/>\n        <source>Generate suggested follow-up questions at the end of responses.</source>\n        <translation>Generarea de întrebări în continuarea replicilor.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"353\"/>\n        <source>When chatting with LocalDocs</source>\n        <translation>Când se discută cu LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"354\"/>\n        <source>Whenever possible</source>\n        <translation>Oricând e posibil</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"355\"/>\n        <source>Never</source>\n        <translation>Niciodată</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"368\"/>\n        <source>Download Path</source>\n        <translation>Calea pentru download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"369\"/>\n        <source>Where to store local models and the LocalDocs database.</source>\n        <translation>Unde să fie plasate modelele şi baza de date LocalDocs.</translation>\n    </message>\n    <message>\n  
      <location filename=\"../qml/ApplicationSettings.qml\" line=\"401\"/>\n        <source>Browse</source>\n        <translation>Căutare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"402\"/>\n        <source>Choose where to save model files</source>\n        <translation>Selectează locul unde vor fi plasate fişierele modelelor</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"413\"/>\n        <source>Enable Datalake</source>\n        <translation>Activează DataLake</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"414\"/>\n        <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>\n        <translation>Trimite conversaţii şi comentarii către componenta Open-source DataLake a GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"447\"/>\n        <source>Advanced</source>\n        <translation>Avansate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"459\"/>\n        <source>CPU Threads</source>\n        <translation>Thread-uri CPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"460\"/>\n        <source>The number of CPU threads used for inference and embedding.</source>\n        <translation>Numărul de thread-uri CPU utilizate pentru inferenţă şi embedding.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"491\"/>\n        <source>Enable System Tray</source>\n        <translation>Trimit pe SysTray (pe bara)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"492\"/>\n        <source>The application will minimize to the 
system tray when the window is closed.</source>\n        <translation>Programul va fi minimizat pe bara de jos</translation>\n    </message>\n    <message>\n        <source>Save Chat Context</source>\n        <translation type=\"vanished\">Salvarea contextului conversaţiei</translation>\n    </message>\n    <message>\n        <source>Save the chat model&apos;s state to disk for faster loading. WARNING: Uses ~2GB per chat.</source>\n        <translation type=\"vanished\">Salvează pe disc starea modelului pentru încărcare mai rapidă. ATENŢIE: Consumă ~2GB/conversaţie.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"508\"/>\n        <source>Enable Local API Server</source>\n        <translation>Activez Serverul API local</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"509\"/>\n        <source>Expose an OpenAI-Compatible server to localhost. WARNING: Results in increased resource usage.</source>\n        <translation>Activează pe localhost un Server compatibil cu OpenAI. ATENŢIE: Creşte consumul de resurse.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"525\"/>\n        <source>API Server Port</source>\n        <translation>Portul Serverului API</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"526\"/>\n        <source>The port to use for the local server. Requires restart.</source>\n        <translation>Portul utilizat pentru Serverul local. 
Necesită repornirea programului.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"578\"/>\n        <source>Check For Updates</source>\n        <translation>Caută update-uri</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"579\"/>\n        <source>Manually check for an update to GPT4All.</source>\n        <translation>Caută manual update-uri pentru GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"588\"/>\n        <source>Updates</source>\n        <translation>Update-uri/Actualizări</translation>\n    </message>\n</context>\n<context>\n    <name>Chat</name>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"33\"/>\n        <location filename=\"../src/chat.h\" line=\"84\"/>\n        <source>New Chat</source>\n        <translation>Conversaţie Nouă</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"46\"/>\n        <source>Server Chat</source>\n        <translation>Conversaţie cu Serverul</translation>\n    </message>\n</context>\n<context>\n    <name>ChatAPIWorker</name>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"263\"/>\n        <source>ERROR: Network error occurred while connecting to the API server</source>\n        <translation>EROARE: Eroare de reţea - conectarea la serverul API</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"276\"/>\n        <source>ChatAPIWorker::handleFinished got HTTP Error %1 %2</source>\n        <translation>ChatAPIWorker::handleFinished - eroare: HTTP Error %1 %2</translation>\n    </message>\n</context>\n<context>\n    <name>ChatCollapsibleItem</name>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"37\"/>\n        <source>Analysis encountered 
error</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Thinking</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Analyzing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"41\"/>\n        <source>Thought for %1 %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>seconds</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"44\"/>\n        <source>Analyzed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatDrawer</name>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"37\"/>\n        <source>Drawer</source>\n        <translation>Sertar</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"38\"/>\n        <source>Main navigation drawer</source>\n        <translation>Sertarul principal de navigare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"49\"/>\n        <source>＋ New Chat</source>\n        <translation>＋ Conversaţie nouă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" 
line=\"50\"/>\n        <source>Create a new chat</source>\n        <translation>Creează o Conversaţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"199\"/>\n        <source>Select the current chat or edit the chat when in edit mode</source>\n        <translation>Selectează conversaţia curentă sau editeaz-o în modul editare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"216\"/>\n        <source>Edit chat name</source>\n        <translation>Editează denumirea conversaţiei</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"229\"/>\n        <source>Save chat name</source>\n        <translation>Salvează denumirea conversaţiei</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"246\"/>\n        <source>Delete chat</source>\n        <translation>Şterge conversaţia</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"283\"/>\n        <source>Confirm chat deletion</source>\n        <translation>CONFIRM ştergerea conversaţiei</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"305\"/>\n        <source>Cancel chat deletion</source>\n        <translation>ANULEZ ştergerea conversaţiei</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"317\"/>\n        <source>List of chats</source>\n        <translation>Lista conversaţiilor</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"318\"/>\n        <source>List of chats in the drawer dialog</source>\n        <translation>Lista conversaţiilor în secţiunea-sertar</translation>\n    </message>\n</context>\n<context>\n    <name>ChatItemView</name>\n    <message>\n        <location 
filename=\"../qml/ChatItemView.qml\" line=\"83\"/>\n        <source>GPT4All</source>\n        <translation>GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"84\"/>\n        <source>You</source>\n        <translation>Tu</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"107\"/>\n        <source>response stopped ...</source>\n        <translation>replică întreruptă...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"108\"/>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation>se preia din LocalDocs: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"109\"/>\n        <source>searching localdocs: %1 ...</source>\n        <translation>se caută în LocalDocs: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"110\"/>\n        <source>processing ...</source>\n        <translation>procesare...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"111\"/>\n        <source>generating response ...</source>\n        <translation>se generează replica...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"112\"/>\n        <source>generating questions ...</source>\n        <translation>se generează întrebări...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"113\"/>\n        <source>generating toolcall ...</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"545\"/>\n        <source>Copy</source>\n        <translation>Copiere</translation>\n    </message>\n    <message>\n        
<source>Copy Message</source>\n        <translation type=\"vanished\">Copiez mesajul</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"vanished\">Dezactivez markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"vanished\">Activez markdown</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/ChatItemView.qml\" line=\"283\"/>\n        <source>%n Source(s)</source>\n        <translation>\n            <numerusform>%n Sursa</numerusform>\n            <numerusform>%n Surse</numerusform>\n            <numerusform>%n de Surse</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"430\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"460\"/>\n        <source>Edit this message?</source>\n        <translation>Editez mesajul?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"461\"/>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"472\"/>\n        <source>All following messages will be permanently erased.</source>\n        <translation>Toate aceste mesaje vor fi şterse.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"471\"/>\n        <source>Redo this response?</source>\n        <translation>Refă răspunsul?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"495\"/>\n        <source>Cannot edit chat without a loaded model.</source>\n        <translation>Nu se poate edita conversaţia fără un model încărcat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" 
line=\"497\"/>\n        <source>Cannot edit chat while the model is generating.</source>\n        <translation>Nu se poate edita conversaţia când un model generează text.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"506\"/>\n        <source>Edit</source>\n        <translation>Editare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"517\"/>\n        <source>Cannot redo response without a loaded model.</source>\n        <translation>Nu se poate reface un răspuns fără un model încărcat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"519\"/>\n        <source>Cannot redo response while the model is generating.</source>\n        <translation>Nu se poate reface un răspuns când un model generează text.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"528\"/>\n        <source>Redo</source>\n        <translation>Refacere</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"565\"/>\n        <source>Like response</source>\n        <translation>Îmi Place răspunsul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"594\"/>\n        <source>Dislike response</source>\n        <translation>NU Îmi Place răspunsul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"657\"/>\n        <source>Suggested follow-ups</source>\n        <translation>Continuări sugerate</translation>\n    </message>\n</context>\n<context>\n    <name>ChatLLM</name>\n    <message>\n        <location filename=\"../src/chatllm.cpp\" line=\"1047\"/>\n        <source>Your message was too long and could not be processed (%1 &gt; %2). 
Please try again with something shorter.</source>\n        <translation>Mesajul tău e prea lung şi nu poate fi procesat (%1 &gt; %2). Încearcă iar cu un mesaj mai scurt.</translation>\n    </message>\n</context>\n<context>\n    <name>ChatListModel</name>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"94\"/>\n        <source>TODAY</source>\n        <translation>ASTĂZI</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"96\"/>\n        <source>THIS WEEK</source>\n        <translation>SĂPTĂMÂNA ACEASTA</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"98\"/>\n        <source>THIS MONTH</source>\n        <translation>LUNA ACEASTA</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"100\"/>\n        <source>LAST SIX MONTHS</source>\n        <translation>ULTIMELE ŞASE LUNI</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"102\"/>\n        <source>THIS YEAR</source>\n        <translation>ANUL ACESTA</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"104\"/>\n        <source>LAST YEAR</source>\n        <translation>ANUL TRECUT</translation>\n    </message>\n</context>\n<context>\n    <name>ChatTextItem</name>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"67\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">Copiere</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"73\"/>\n        <source>Copy Message</source>\n        <translation type=\"unfinished\">Copiez mesajul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Disable markdown</source>\n        <translation 
type=\"unfinished\">Dezactivez markdown</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Enable markdown</source>\n        <translation type=\"unfinished\">Activez markdown</translation>\n    </message>\n</context>\n<context>\n    <name>ChatView</name>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;Warning&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</source>\n        <translation>&lt;h3&gt;Atenţie&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</translation>\n    </message>\n    <message>\n        <source>Switch model dialog</source>\n        <translation type=\"vanished\">Schimbarea modelului</translation>\n    </message>\n    <message>\n        <source>Warn the user if they switch models, then context will be erased</source>\n        <translation type=\"vanished\">Avertizează utilizatorul că la schimbarea modelului va fi şters contextul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"99\"/>\n        <source>Conversation copied to clipboard.</source>\n        <translation>Conversaţia a fost plasată în Clipboard.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"106\"/>\n        <source>Code copied to clipboard.</source>\n        <translation>Codul a fost plasat în Clipboard.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"113\"/>\n        <source>The entire chat will be erased.</source>\n        <translation>Toată conversaţia va fi ŞTEARSĂ</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"248\"/>\n        <source>Chat panel</source>\n        <translation>Secţiunea de chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"249\"/>\n        <source>Chat panel with options</source>\n   
     <translation>Secţiunea de chat cu opţiuni</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"356\"/>\n        <source>Reload the currently loaded model</source>\n        <translation>Reîncarcă modelul curent</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"370\"/>\n        <source>Eject the currently loaded model</source>\n        <translation>Ejectează modelul curent</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"382\"/>\n        <source>No model installed.</source>\n        <translation>Niciun model instalat.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"384\"/>\n        <source>Model loading error.</source>\n        <translation>Eroare la încărcarea modelului.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"386\"/>\n        <source>Waiting for model...</source>\n        <translation>Se aşteaptă modelul...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"388\"/>\n        <source>Switching context...</source>\n        <translation>Se schimbă contextul...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"390\"/>\n        <source>Choose a model...</source>\n        <translation>Selectează un model...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"392\"/>\n        <source>Not found: %1</source>\n        <translation>Absent: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"480\"/>\n        <source>The top item is the current model</source>\n        <translation>Primul element e modelul curent</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"566\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"584\"/>\n        <source>Add documents</source>\n        <translation>Adaug documente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"585\"/>\n        <source>add collections of documents to the chat</source>\n        <translation>adaugă Colecţii de documente la conversaţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"756\"/>\n        <source>Load the default model</source>\n        <translation>Încarcă modelul implicit</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"757\"/>\n        <source>Loads the default model which can be changed in settings</source>\n        <translation>Încarcă modelul implicit care poate fi stabilit în Configurare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"768\"/>\n        <source>No Model Installed</source>\n        <translation>Niciun model instalat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1095\"/>\n        <source>Legacy prompt template needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation>Vechiul Prompt-Template trebuie să fie &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;actualizat&lt;/a&gt; în Configurare.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1099\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        
<translation>Nu e configurat niciun &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;model de conversaţie&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1102\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Modelul de conversaţie&lt;/a&gt; nu poate lipsi.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1105\"/>\n        <source>Legacy system prompt needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation>Vechiul System Prompt trebuie să fie &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;actualizat&lt;/a&gt; în Configurare.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"66\"/>\n        <source>&lt;h3&gt;Encountered an error loading model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a compatible format and type&lt;li&gt;Check the model file is complete in the download folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If you&apos;ve sideloaded the model ensure the file is not corrupt by checking md5sum&lt;li&gt;Read more about what models are supported in our &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the gui&lt;li&gt;Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation>&lt;h3&gt;EROARE la încărcarea\n                modelului:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Astfel\n                de erori pot apărea din mai multe cauze, dintre care cele mai comune\n                includ un format inadecvat al fişierului, un download incomplet sau întrerupt,\n                un tip inadecvat de fişier, RAM insuficientă, sau un tip incompatibil de model.\n                Sugestii pentru rezolvarea problemei: verifică dacă fişierul modelului are\n                un format şi un tip compatibile; verifică dacă fişierul modelului este complet\n                în folderul dedicat - acest folder este afişat în secţiunea Configurare; \n                dacă ai descărcat modelul dinafara programului, asigură-te că fişierul nu e corupt\n                după ce îi verifici amprenta MD5 (md5sum)&lt;li&gt;Află mai mult despre modelele compatibile\n                în pagina unde am plasat &lt;a\n                href=&quot;https://docs.gpt4all.io/&quot;&gt;documentaţia&lt;/a&gt; pentru\n                interfaţa gráfică&lt;li&gt;poţi găsi &lt;a\n                href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canalul nostru Discord&lt;/a&gt; unde\n                se oferă ajutor</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" 
line=\"92\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"112\"/>\n        <source>Erase conversation?</source>\n        <translation>ŞTERG conversaţia?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"93\"/>\n        <source>Changing the model will erase the current conversation.</source>\n        <translation>Schimbarea modelului va ŞTERGE conversaţia curentă.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"777\"/>\n        <source>GPT4All requires that you install at least one\nmodel to get started</source>\n        <translation>GPT4All necesită cel puţin un model pentru a putea rula</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"789\"/>\n        <source>Install a Model</source>\n        <translation>Instalează un model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"794\"/>\n        <source>Shows the add model view</source>\n        <translation>Afişează secţiunea de adăugare a unui model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"819\"/>\n        <source>Conversation with the model</source>\n        <translation>Conversaţie cu modelul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"820\"/>\n        <source>prompt / response pairs from the conversation</source>\n        <translation>perechi prompt/replică din conversaţie</translation>\n    </message>\n    <message>\n        <source>GPT4All</source>\n        <translation type=\"vanished\">GPT4All</translation>\n    </message>\n    <message>\n        <source>You</source>\n        <translation type=\"vanished\">Tu</translation>\n    </message>\n    <message>\n        <source>response stopped ...</source>\n        <translation type=\"vanished\">replică 
întreruptă...</translation>\n    </message>\n    <message>\n        <source>processing ...</source>\n        <translation type=\"vanished\">procesare...</translation>\n    </message>\n    <message>\n        <source>generating response ...</source>\n        <translation type=\"vanished\">se generează replica...</translation>\n    </message>\n    <message>\n        <source>generating questions ...</source>\n        <translation type=\"vanished\">se generează întrebări...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1293\"/>\n        <source>Copy</source>\n        <translation>Copiere</translation>\n    </message>\n    <message>\n        <source>Copy Message</source>\n        <translation type=\"vanished\">Copiez mesajul</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"vanished\">Dezactivez markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"vanished\">Activez markdown</translation>\n    </message>\n    <message>\n        <source>Thumbs up</source>\n        <translation type=\"vanished\">Bravo</translation>\n    </message>\n    <message>\n        <source>Gives a thumbs up to the response</source>\n        <translation type=\"vanished\">Dă un Bravo acestei replici</translation>\n    </message>\n    <message>\n        <source>Thumbs down</source>\n        <translation type=\"vanished\">Aiurea</translation>\n    </message>\n    <message>\n        <source>Opens thumbs down dialog</source>\n        <translation type=\"vanished\">Deschide reacţia Aiurea</translation>\n    </message>\n    <message>\n        <source>Suggested follow-ups</source>\n        <translation type=\"vanished\">Continuări sugerate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"924\"/>\n        <source>Erase and reset chat session</source>\n        
<translation>Şterge şi resetează sesiunea de chat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"942\"/>\n        <source>Copy chat session to clipboard</source>\n        <translation>Copiez sesiunea de chat (conversaţia) în Clipboard</translation>\n    </message>\n    <message>\n        <source>Redo last chat response</source>\n        <translation type=\"vanished\">Reface ultima replică</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1216\"/>\n        <source>Add media</source>\n        <translation>Adaugă media (un fişier)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1217\"/>\n        <source>Adds media to the prompt</source>\n        <translation>Adaugă media (un fişier) la prompt</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1351\"/>\n        <source>Stop generating</source>\n        <translation>Opreşte generarea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1352\"/>\n        <source>Stop the current response generation</source>\n        <translation>Opreşte generarea replicii curente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1396\"/>\n        <source>Attach</source>\n        <translation>Ataşează</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1398\"/>\n        <source>Single File</source>\n        <translation>Un singur fişier</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1007\"/>\n        <source>Reloads the model</source>\n        <translation>Reîncarc modelul</translation>\n    </message>\n    <message>\n        <source>&lt;h3&gt;Encountered an error loading\n                
model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model\n                loading failures can happen for a variety of reasons, but the most common causes\n                include a bad file format, an incomplete or corrupted download, the wrong file type,\n                not enough system RAM or an incompatible model type. Here are some suggestions for\n                resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a\n                compatible format and type&lt;li&gt;Check the model file is complete in the download\n                folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If\n                you&apos;ve sideloaded the model ensure the file is not corrupt by checking\n                md5sum&lt;li&gt;Read more about what models are supported in our &lt;a\n                href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the\n                gui&lt;li&gt;Check out our &lt;a\n                href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation type=\"vanished\">&lt;h3&gt;EROARE la Încărcarea\n                modelului:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Astfel\n                de erori pot apărea din mai multe cauze, dintre care cele mai comune\n                includ un format inadecvat al fişierului, un download incomplet sau întrerupt,\n                un tip inadecvat de fişier, RAM insuficientă, sau un tip incompatibil de model.\n                Sugestii pentru rezolvarea problemei: verifică dacă fişierul modelului are\n                un format si un tip compatibile; verifică dacă fişierul modelului este complet\n                în folderul dedicat - acest folder este afişat în secţiunea Configurare; \n                dacă ai descărcat modelul dinafara programului, asigură-te că fişierul nu e corupt\n                după ce îi verifici 
amprenta MD5 (md5sum)&lt;li&gt;Află mai multe despre care modele sunt compatibile\n                în pagina unde am plasat &lt;a\n                href=&quot;https://docs.gpt4all.io/&quot;&gt;documentaţia&lt;/a&gt; pentru\n                interfaţa gráfică&lt;li&gt;poţi parcurge &lt;a\n                href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;canalul nostru Discord&lt;/a&gt; unde\n                se oferă ajutor</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"394\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"1005\"/>\n        <source>Reload · %1</source>\n        <translation>Reîncărcare · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"396\"/>\n        <source>Loading · %1</source>\n        <translation>Încărcare · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"732\"/>\n        <source>Load · %1 (default) →</source>\n        <translation>Încarcă · %1 (implicit) →</translation>\n    </message>\n    <message>\n        <source>restoring from text ...</source>\n        <translation type=\"vanished\">restaurare din text...</translation>\n    </message>\n    <message>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"vanished\">se preia din LocalDocs: %1 ...</translation>\n    </message>\n    <message>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"vanished\">se caută în LocalDocs: %1 ...</translation>\n    </message>\n    <message numerus=\"yes\">\n        <source>%n Source(s)</source>\n        <translation type=\"vanished\">\n            <numerusform>%n Sursa</numerusform>\n            <numerusform>%n Surse</numerusform>\n            <numerusform>%n de Surse</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        
<source>Send a message...</source>\n        <translation>Trimite un mesaj...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Load a model to continue...</source>\n        <translation>Încarcă un model pentru a continua...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1246\"/>\n        <source>Send messages/prompts to the model</source>\n        <translation>Trimite mesaje/prompt-uri către model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1287\"/>\n        <source>Cut</source>\n        <translation>Decupare (Cut)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1299\"/>\n        <source>Paste</source>\n        <translation>Alipire (Paste)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1303\"/>\n        <source>Select All</source>\n        <translation>Selectez tot</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1374\"/>\n        <source>Send message</source>\n        <translation>Trimit mesajul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1375\"/>\n        <source>Sends the message/prompt contained in textfield to the model</source>\n        <translation>Trimite modelului mesajul/prompt-ul din câmpul-text</translation>\n    </message>\n</context>\n<context>\n    <name>CodeInterpreter</name>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"79\"/>\n        <source>Code Interpreter</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"80\"/>\n        <source>compute javascript code using console.log as 
output</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>CollectionsDrawer</name>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"70\"/>\n        <source>Warning: searching collections while indexing can return incomplete results</source>\n        <translation>Atenţie: căutarea în Colecţii în timp ce sunt Indexate poate întoarce rezultate incomplete</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n fişier</numerusform>\n            <numerusform>%n fişiere</numerusform>\n            <numerusform>%n de fişiere</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n cuvânt</numerusform>\n            <numerusform>%n cuvinte</numerusform>\n            <numerusform>%n de cuvinte</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"103\"/>\n        <source>Updating</source>\n        <translation>Actualizare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"128\"/>\n        <source>＋ Add Docs</source>\n        <translation>＋ Adaug documente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"137\"/>\n        <source>Select a collection to make it available to the chat model.</source>\n        <translation>Selectează o Colecţie pentru ca modelul să o poată accesa.</translation>\n    </message>\n</context>\n<context>\n    <name>ConfirmationDialog</name>\n    <message>\n        <location 
filename=\"../qml/ConfirmationDialog.qml\" line=\"42\"/>\n        <source>OK</source>\n        <translation>OK</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"49\"/>\n        <source>Cancel</source>\n        <translation>Anulare</translation>\n    </message>\n</context>\n<context>\n    <name>Download</name>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"278\"/>\n        <source>Model &quot;%1&quot; is installed successfully.</source>\n        <translation>Modelul &quot;%1&quot; - instalat cu succes.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"288\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>EROARE: $MODEL_NAME absent.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"294\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>EROARE: $API_KEY absentă.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"300\"/>\n        <source>ERROR: $BASE_URL is invalid.</source>\n        <translation>EROARE: $BASE_URL incorect.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"306\"/>\n        <source>ERROR: Model &quot;%1 (%2)&quot; is conflict.</source>\n        <translation>EROARE: Model &quot;%1 (%2)&quot; conflictual.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"325\"/>\n        <source>Model &quot;%1 (%2)&quot; is installed successfully.</source>\n        <translation>Modelul &quot;%1 (%2)&quot; - instalat cu succes.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"349\"/>\n        <source>Model &quot;%1&quot; is removed.</source>\n        <translation>Modelul &quot;%1&quot; - 
îndepărtat</translation>\n    </message>\n</context>\n<context>\n    <name>HomeView</name>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"49\"/>\n        <source>Welcome to GPT4All</source>\n        <translation>Bun venit în GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"56\"/>\n        <source>The privacy-first LLM chat application</source>\n        <translation>Programul ce Prioritizează Confidenţialitatea (Privacy)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"66\"/>\n        <source>Start chatting</source>\n        <translation>Începe o conversaţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"81\"/>\n        <source>Start Chatting</source>\n        <translation>Începe o conversaţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"82\"/>\n        <source>Chat with any LLM</source>\n        <translation>Dialoghează cu orice LLM</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"92\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"93\"/>\n        <source>Chat with your local files</source>\n        <translation>Dialoghează cu fişiere locale</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"103\"/>\n        <source>Find Models</source>\n        <translation>Caută modele</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"104\"/>\n        <source>Explore and download models</source>\n        <translation>Explorează şi descarcă modele</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/HomeView.qml\" line=\"190\"/>\n        <source>Latest news</source>\n        <translation>Ultimele ştiri</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"191\"/>\n        <source>Latest news from GPT4All</source>\n        <translation>Ultimele ştiri de la GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"222\"/>\n        <source>Release Notes</source>\n        <translation>Despre această versiune</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"228\"/>\n        <source>Documentation</source>\n        <translation>Documentaţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"234\"/>\n        <source>Discord</source>\n        <translation>Discord</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"240\"/>\n        <source>X (Twitter)</source>\n        <translation>X (Twitter)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"246\"/>\n        <source>Github</source>\n        <translation>GitHub</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"257\"/>\n        <source>nomic.ai</source>\n        <translation>nomic.ai</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"282\"/>\n        <source>Subscribe to Newsletter</source>\n        <translation>Abonare la Newsletter</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsSettings</name>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"19\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" 
line=\"29\"/>\n        <source>LocalDocs Settings</source>\n        <translation>Configurarea LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"38\"/>\n        <source>Indexing</source>\n        <translation>Indexare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"51\"/>\n        <source>Allowed File Extensions</source>\n        <translation>Extensii compatibile de fişier</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"100\"/>\n        <source>Embedding</source>\n        <translation>Embedding</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"112\"/>\n        <source>Use Nomic Embed API</source>\n        <translation>Folosesc API: Nomic Embed</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"130\"/>\n        <source>Nomic API Key</source>\n        <translation>Cheia API Nomic</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"165\"/>\n        <source>Embeddings Device</source>\n        <translation>Dispozitivul pentru Embeddings</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"166\"/>\n        <source>The compute device used for embeddings. Requires restart.</source>\n        <translation>Dispozitivul pentru Embeddings. Necesită repornire.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"52\"/>\n        <source>Comma-separated list. LocalDocs will only attempt to process files with these extensions.</source>\n        <translation>Extensiile, separate prin virgulă. 
LocalDocs va încerca procesarea numai a fişierelor cu aceste extensii.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"113\"/>\n        <source>Embed documents using the fast Nomic API instead of a private local model. Requires restart.</source>\n        <translation>Embedding pe documente folosind API de la Nomic în locul unui model local. Necesită repornire.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"131\"/>\n        <source>API key to use for Nomic Embed. Get one from the Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API keys page&lt;/a&gt;. Requires restart.</source>\n        <translation>Cheia API de utilizat cu Nomic Embed. Obţine o cheie prin Atlas: &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;pagina cheilor API&lt;/a&gt; Necesită repornire.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"176\"/>\n        <source>Application default</source>\n        <translation>Implicit</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"211\"/>\n        <source>Display</source>\n        <translation>Vizualizare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"224\"/>\n        <source>Show Sources</source>\n        <translation>Afişarea Surselor</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"225\"/>\n        <source>Display the sources used for each response.</source>\n        <translation>Afişează Sursele utilizate pentru fiecare replică.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"242\"/>\n        <source>Advanced</source>\n        <translation>Avansate</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"258\"/>\n        <source>Warning: Advanced usage only.</source>\n        <translation>Atenţie: Numai pentru utilizare avansată.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"259\"/>\n        <source>Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model&apos;s context window. More info &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;here&lt;/a&gt;.</source>\n        <translation>Valori prea mari pot cauza erori cu LocalDocs, replici foarte lente sau chiar absenţa lor. În mare, numărul {N caractere x N citate} este adăugat la Context Window/Size/Length a modelului. Mai multe informaţii: &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;aici&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"268\"/>\n        <source>Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Numărul caracterelor din fiecare citat. Numere mari amplifică probabilitatea unor replici corecte, dar de asemenea cauzează generare lentă.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"294\"/>\n        <source>Max best N matches of retrieved document snippets to add to the context for prompt. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>Numărul maxim al citatelor ce corespund şi care vor fi adăugate la contextul pentru prompt. 
Numere mari amplifică probabilitatea unor replici corecte, dar de asemenea cauzează generare lentă.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"267\"/>\n        <source>Document snippet size (characters)</source>\n        <translation>Lungimea (în caractere) a citatelor din documente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"293\"/>\n        <source>Max document snippets per prompt</source>\n        <translation>Numărul maxim de citate per prompt</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsView</name>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"52\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"58\"/>\n        <source>Chat with your local files</source>\n        <translation>Dialoghează cu fişiere locale</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"71\"/>\n        <source>＋ Add Collection</source>\n        <translation>＋ Adaugă o Colecţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;ERROR: The LocalDocs database cannot be accessed or is not valid.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Note: You will need to restart after trying any of the following suggested fixes.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Make sure that the folder set as &lt;b&gt;Download Path&lt;/b&gt; exists on the file system.&lt;/li&gt;&lt;li&gt;Check ownership as well as read and write permissions of the &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;If there is a &lt;b&gt;localdocs_v2.db&lt;/b&gt; file, check its ownership and read/write permissions, too.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;If the problem 
persists and there are any &apos;localdocs_v*.db&apos; files present, as a last resort you can&lt;br&gt;try backing them up and removing them. You will have to recreate your collections, however.</source>\n        <translation>&lt;h3&gt;EROARE: Baza de date LocalDocs nu poate fi accesată sau nu e validă.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Notă: Programul trebuie repornit după ce se încearcă oricare din următoarele remedii sugerate.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Asigură-te că folderul pentru &lt;b&gt;Download Path&lt;/b&gt; există în sistemul de fişiere.&lt;/li&gt;&lt;li&gt;Verifică permisiunile şi apartenenţa folderului pentru &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;Dacă există fişierul &lt;b&gt;localdocs_v2.db&lt;/b&gt;, verifică-i apartenenţa şi permisiunile citire/scriere (read/write).&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;Dacă problema persistă şi există vreun fişier &apos;localdocs_v*.db&apos;, ca ultimă soluţie poţi&lt;br&gt;încerca duplicarea (backup) şi apoi ştergerea lor. Oricum, va trebui să re-creezi Colecţiile.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"109\"/>\n        <source>No Collections Installed</source>\n        <translation>Nu există Colecţii instalate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"118\"/>\n        <source>Install a collection of local documents to get started using this feature</source>\n        <translation>Instalează o Colecţie de documente pentru a putea utiliza funcţionalitatea aceasta</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"129\"/>\n        <source>＋ Add Doc Collection</source>\n        <translation>＋ Adaug o Colecţie de documente</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"134\"/>\n        <source>Shows the add model view</source>\n        <translation>Afişează secţiunea de adăugare a unui 
model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"231\"/>\n        <source>Indexing progressBar</source>\n        <translation>Bara de progresie a Indexării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"232\"/>\n        <source>Shows the progress made in the indexing</source>\n        <translation>Afişează progresia Indexării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"257\"/>\n        <source>ERROR</source>\n        <translation>EROARE</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"261\"/>\n        <source>INDEXING</source>\n        <translation>...INDEXARE...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"265\"/>\n        <source>EMBEDDING</source>\n        <translation>...EMBEDDINGs...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"268\"/>\n        <source>REQUIRES UPDATE</source>\n        <translation>NECESITĂ UPDATE</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"271\"/>\n        <source>READY</source>\n        <translation>GATA</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"273\"/>\n        <source>INSTALLING</source>\n        <translation>...INSTALARE...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"300\"/>\n        <source>Indexing in progress</source>\n        <translation>...Se Indexează...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"303\"/>\n        <source>Embedding in progress</source>\n        <translation>...Se calculează 
Embeddings...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"306\"/>\n        <source>This collection requires an update after version change</source>\n        <translation>Colecţia necesită update după schimbarea versiunii</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"309\"/>\n        <source>Automatically reindexes upon changes to the folder</source>\n        <translation>Se reindexează automat după schimbări ale folderului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"311\"/>\n        <source>Installation in progress</source>\n        <translation>...Instalare în curs...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"325\"/>\n        <source>%</source>\n        <translation>%</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n fişier</numerusform>\n            <numerusform>%n fişiere</numerusform>\n            <numerusform>%n de fişiere</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n cuvânt</numerusform>\n            <numerusform>%n cuvinte</numerusform>\n            <numerusform>%n de cuvinte</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"408\"/>\n        <source>Remove</source>\n        <translation>Şterg</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"420\"/>\n        <source>Rebuild</source>\n        
<translation>Reconstrucţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"423\"/>\n        <source>Reindex this folder from scratch. This is slow and usually not needed.</source>\n        <translation>Reindexează de la zero acest folder. Procesul e lent şi de obicei inutil.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"430\"/>\n        <source>Update</source>\n        <translation>Update/Actualizare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"433\"/>\n        <source>Update the collection to the new version. This is a slow operation.</source>\n        <translation>Actualizează Colecţia la noua versiune. Această procedură e lentă.</translation>\n    </message>\n</context>\n<context>\n    <name>ModelList</name>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1344\"/>\n        <location filename=\"../src/modellist.cpp\" line=\"1395\"/>\n        <source>cannot open &quot;%1&quot;: %2</source>\n        <translation>nu se poate deschide „%1”: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1356\"/>\n        <source>cannot create &quot;%1&quot;: %2</source>\n        <translation>nu se poate crea „%1”: %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1406\"/>\n        <source>%1 (%2)</source>\n        <translation>%1 (%2)</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1407\"/>\n        <source>&lt;strong&gt;OpenAI-Compatible API Model&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API Key: %1&lt;/li&gt;&lt;li&gt;Base URL: %2&lt;/li&gt;&lt;li&gt;Model Name: %3&lt;/li&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Model API compatibil cu 
OpenAI&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Cheia API: %1&lt;/li&gt;&lt;li&gt;Base URL: %2&lt;/li&gt;&lt;li&gt;Numele modelului: %3&lt;/li&gt;&lt;/ul&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1716\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal OpenAI API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to OpenAI!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with OpenAI&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;here.&lt;/a&gt;&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Necesită o cheie API OpenAI personală. &lt;/li&gt;&lt;li&gt;ATENŢIE: Conversaţiile tale vor fi trimise la OpenAI!&lt;/li&gt;&lt;li&gt;Cheia ta API va fi stocată pe disc (local) &lt;/li&gt;&lt;li&gt;Va fi utilizată numai pentru comunicarea cu OpenAI&lt;/li&gt;&lt;li&gt;Poţi solicita o cheie API aici: &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;aici.&lt;/a&gt;&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1735\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelul OpenAI&apos;s ChatGPT GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1749\"/>\n        <source>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info.</source>\n        <translation>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Chiar dacă plăteşti la OpenAI pentru ChatGPT-4, aceasta nu garantează accesul la cheia API. 
Contactează OpenAI pentru mai multe informaţii.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1764\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</source>\n        <translation>&lt;strong&gt;Modelul ChatGPT GPT-4 al OpenAI&lt;/strong&gt;&lt;br&gt; %1 %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1777\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Necesită cheia personală Mistral API. &lt;/li&gt;&lt;li&gt;ATENŢIE: Conversaţiile tale vor fi trimise la Mistral!&lt;/li&gt;&lt;li&gt;Cheia ta API va fi stocată pe disc (local)&lt;/li&gt;&lt;li&gt;Va fi utilizată numai pentru comunicarea cu Mistral&lt;/li&gt;&lt;li&gt;Poţi solicita o cheie API aici: &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;aici&lt;/a&gt;.&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1796\"/>\n        <source>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelul Mistral Tiny&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1822\"/>\n        <source>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelul Mistral Small&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" 
line=\"1849\"/>\n        <source>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Modelul Mistral Medium&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1862\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal API key and the API base URL.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to the OpenAI-compatible API Server you specified!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with the OpenAI-compatible API Server&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Necesită cheia personală API şi base-URL a API.&lt;/li&gt;&lt;li&gt;ATENŢIE: Conversaţiile tale vor fi trimise la serverul API compatibil cu OpenAI specificat!&lt;/li&gt;&lt;li&gt;Cheia ta API va fi stocată pe disc (local)&lt;/li&gt;&lt;li&gt;Va fi utilizată numai pentru comunicarea cu serverul API compatibil cu OpenAI&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1879\"/>\n        <source>&lt;strong&gt;Connect to OpenAI-compatible API server&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Conectare la un server API compatibil cu OpenAI&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"2303\"/>\n        <source>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Creat de către %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Publicat în: %2.&lt;li&gt;Acest model are %3 Likes.&lt;li&gt;Acest model are %4 download-uri.&lt;li&gt;Mai multe 
informaţii pot fi găsite la: &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;aici.&lt;/a&gt;&lt;/ul&gt;</translation>\n    </message>\n</context>\n<context>\n    <name>ModelSettings</name>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"14\"/>\n        <source>Model</source>\n        <translation>Model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <source>%1 system message?</source>\n        <translation>%1 mesajul de la sistem?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Clear</source>\n        <translation>Ştergere</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Reset</source>\n        <translation>Resetare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>The system message will be %1.</source>\n        <translation>Mesajul de la sistem va fi %1.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>removed</source>\n        <translation>îndepărtat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>reset to the default</source>\n        <translation>resetare la valoarea implicită</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>%1 chat template?</source>\n        <translation>%1 modelul de conversaţie?</translation>\n    </message>\n    <message>\n  
      <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>The chat template will be %1.</source>\n        <translation>Modelul de conversaţie va fi %1.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>erased</source>\n        <translation>şters</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"57\"/>\n        <source>Model Settings</source>\n        <translation>Configurez modelul</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"108\"/>\n        <source>Clone</source>\n        <translation>Clonez</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"118\"/>\n        <source>Remove</source>\n        <translation>Şterg</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"132\"/>\n        <source>Name</source>\n        <translation>Denumire</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"165\"/>\n        <source>Model File</source>\n        <translation>Fişierul modelului</translation>\n    </message>\n    <message>\n        <source>System Prompt</source>\n        <translation type=\"vanished\">System Prompt</translation>\n    </message>\n    <message>\n        <source>Prompt Template</source>\n        <translation type=\"vanished\">Prompt Template</translation>\n    </message>\n    <message>\n        <source>The template that wraps every prompt.</source>\n        <translation type=\"vanished\">Standardul de formulare a fiecărui prompt.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"409\"/>\n        <source>Chat Name Prompt</source>\n        <translation>Denumirea conversaţiei</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"410\"/>\n        <source>Prompt used to automatically generate chat names.</source>\n        <translation>Standardul de formulare a denumirii conversaţiilor.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"452\"/>\n        <source>Suggested FollowUp Prompt</source>\n        <translation>Prompt-ul sugerat pentru a continua</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"453\"/>\n        <source>Prompt used to generate suggested follow-up questions.</source>\n        <translation>Prompt-ul folosit pentru generarea întrebărilor de continuare.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"506\"/>\n        <source>Context Length</source>\n        <translation>Lungimea Contextului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"507\"/>\n        <source>Number of input and output tokens the model sees.</source>\n        <translation>Numărul token-urilor de input şi de output văzute de model.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"566\"/>\n        <source>Temperature</source>\n        <translation>Temperatura</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"567\"/>\n        <source>Randomness of model output. Higher -&gt; more variation.</source>\n        <translation>Libertatea/Confuzia din replica modelului. 
Mai mare -&gt; mai multă libertate.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"612\"/>\n        <source>Top-P</source>\n        <translation>Top-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"613\"/>\n        <source>Nucleus Sampling factor. Lower -&gt; more predictable.</source>\n        <translation>Factorul de Nucleus Sampling. Mai mic -&gt; predictibilitate mai mare.</translation>\n    </message>\n    <message>\n        <source>Prefixed at the beginning of every conversation. Must contain the appropriate framing tokens.</source>\n        <translation type=\"vanished\">Plasat la începutul fiecărei conversaţii. Trebuie să conţină token-uri(le) adecvate de încadrare.</translation>\n    </message>\n    <message>\n        <source>Must contain the string &quot;%1&quot; to be replaced with the user&apos;s input.</source>\n        <translation type=\"vanished\">Trebuie să conţină textul &quot;%1&quot; care va fi înlocuit cu ceea ce scrie utilizatorul.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"528\"/>\n        <source>Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Numărul maxim combinat al token-urilor în prompt+replică înainte de a se pierde informaţie. Utilizarea unui context mai mare decât cel cu care a fost instruit modelul va întoarce rezultate mai slabe. 
NOTĂ: Nu are efect până la reîncărcarea modelului.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"578\"/>\n        <source>Temperature increases the chances of choosing less likely tokens.\nNOTE: Higher temperature gives more creative but less predictable outputs.</source>\n        <translation>Temperatura creşte probabilitatea de alegere a unor token-uri puţin probabile. NOTĂ: O temperatură tot mai înaltă determină replici tot mai creative şi mai puţin predictibile.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"623\"/>\n        <source>Only the most likely tokens up to a total probability of top_p can be chosen.\nNOTE: Prevents choosing highly unlikely tokens.</source>\n        <translation>Pot fi alese numai cele mai probabile token-uri a căror probabilitate totală este Top-P. NOTĂ: Evită selectarea token-urilor foarte improbabile.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"657\"/>\n        <source>Min-P</source>\n        <translation>Min-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"658\"/>\n        <source>Minimum token probability. Higher -&gt; more predictable.</source>\n        <translation>Probabilitatea minimă a unui token. 
Mai mare -&gt; mai predictibil.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"668\"/>\n        <source>Sets the minimum relative probability for a token to be considered.</source>\n        <translation>Stabileşte probabilitatea minimă relativă a unui token de luat în considerare.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"704\"/>\n        <source>Top-K</source>\n        <translation>Top-K</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"705\"/>\n        <source>Size of selection pool for tokens.</source>\n        <translation>Dimensiunea setului de token-uri.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"716\"/>\n        <source>Only the top K most likely tokens will be chosen from.</source>\n        <translation>Se va alege numai din cele mai probabile K token-uri.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"751\"/>\n        <source>Max Length</source>\n        <translation>Lungimea maximă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"752\"/>\n        <source>Maximum response length, in tokens.</source>\n        <translation>Lungimea maximă - în token-uri - a replicii.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"797\"/>\n        <source>Prompt Batch Size</source>\n        <translation>Prompt Batch Size</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"798\"/>\n        <source>The batch size used for prompt processing.</source>\n        <translation>Dimensiunea setului de token-uri citite simultan din prompt.</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/ModelSettings.qml\" line=\"809\"/>\n        <source>Amount of prompt tokens to process at once.\nNOTE: Higher values can speed up reading prompts but will use more RAM.</source>\n        <translation>Numărul token-urilor procesate simultan. NOTĂ: Valori tot mai mari pot accelera citirea prompt-urilor, dar şi utiliza mai multă RAM.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"947\"/>\n        <source>How many model layers to load into VRAM. Decrease this if GPT4All runs out of VRAM while loading this model.\nLower values increase CPU load and RAM usage, and make inference slower.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>Cât de multe layere ale modelului să fie încărcate în VRAM. Valori mici trebuie folosite dacă GPT4All rămâne fără VRAM în timp ce încarcă modelul. Valorile tot mai mici cresc utilizarea CPU şi a RAM şi încetinesc inferenţa. NOTĂ: Nu are efect până la reîncărcarea modelului.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"844\"/>\n        <source>Repeat Penalty</source>\n        <translation>Penalizarea pentru repetare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"190\"/>\n        <source>System Message</source>\n        <translation>Mesaj de la Sistem</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"191\"/>\n        <source>A message to set the context or guide the behavior of the model. Leave blank for none. NOTE: Since GPT4All 3.5, this should not contain control tokens.</source>\n        <translation>Un mesaj pentru stabilirea contextului sau ghidarea comportamentului modelului. Poate fi nespecificat.  
NOTĂ: De la GPT4All 3.5, acesta nu trebuie să conţină tokenuri de control.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"218\"/>\n        <source>System message is not &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;plain text&lt;/a&gt;.</source>\n        <translation>Mesajul de la Sistem nu e &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;text-simplu&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"288\"/>\n        <source>Chat Template</source>\n        <translation>Model de conversaţie</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"289\"/>\n        <source>This Jinja template turns the chat into input for the model.</source>\n        <translation>Acest model Jinja transformă conversaţia în input pentru model.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"371\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation>Nu e configurat niciun &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;model de conversaţie&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"375\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Modelul de conversaţie&lt;/a&gt; nu poate lipsi.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"379\"/>\n        <source>&lt;a 
href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Syntax error&lt;/a&gt;: %1</source>\n        <translation>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Eroare de sintaxă&lt;/a&gt;: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"383\"/>\n        <source>Chat template is not in &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja format&lt;/a&gt;.</source>\n        <translation>Modelul de conversaţie nu este în &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;format Jinja&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"845\"/>\n        <source>Repetition penalty factor. Set to 1 to disable.</source>\n        <translation>Factorul de penalizare a repetării ce se dezactivează cu valoarea 1.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"889\"/>\n        <source>Repeat Penalty Tokens</source>\n        <translation>Token-uri pentru penalizare a repetării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"890\"/>\n        <source>Number of previous tokens used for penalty.</source>\n        <translation>Numărul token-urilor anterioare considerate pentru penalizare.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"935\"/>\n        <source>GPU Layers</source>\n        <translation>Layere în GPU</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"936\"/>\n        <source>Number of model layers to load into VRAM.</source>\n        <translation>Numărul layerelor modelului ce vor fi încărcate în VRAM.</translation>\n    </message>\n</context>\n<context>\n    
<name>ModelsView</name>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"40\"/>\n        <source>No Models Installed</source>\n        <translation>Nu există modele instalate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"49\"/>\n        <source>Install a model to get started using GPT4All</source>\n        <translation>Instalează un model pentru a începe să foloseşti GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"60\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"102\"/>\n        <source>＋ Add Model</source>\n        <translation>＋ Adaugă un model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"65\"/>\n        <source>Shows the add model view</source>\n        <translation>Afişează secţiunea de adăugare a unui model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"83\"/>\n        <source>Installed Models</source>\n        <translation>Modele instalate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"89\"/>\n        <source>Locally installed chat models</source>\n        <translation>Modele conversaţionale instalate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"147\"/>\n        <source>Model file</source>\n        <translation>Fişierul modelului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"148\"/>\n        <source>Model file to be downloaded</source>\n        <translation>Fişierul modelului ce va fi descărcat</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"170\"/>\n        <source>Description</source>\n        <translation>Descriere</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"171\"/>\n        <source>File description</source>\n        <translation>Descrierea fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Cancel</source>\n        <translation>Anulare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Resume</source>\n        <translation>Continuare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"204\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>Oprirea/Repornirea/Iniţierea descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"216\"/>\n        <source>Remove</source>\n        <translation>Şterg</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"223\"/>\n        <source>Remove model from filesystem</source>\n        <translation>Şterg modelul din sistemul de fişiere</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"237\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"271\"/>\n        <source>Install</source>\n        <translation>Instalez</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"272\"/>\n        <source>Install online model</source>\n        <translation>Instalez un model din online</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>?</source>\n        <translation>?</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"288\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>Descrie o eroare apărută la download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"282\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Eroare&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"301\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;ATENŢIE: Nerecomandat pentru acest hardware. 
Modelul necesită mai multă memorie (%1 GB) decât are sistemul tău (%2).&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"307\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>Eroare - hardware incompatibil</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"345\"/>\n        <source>Download progressBar</source>\n        <translation>Bara de progresie a descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"346\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>Afişează progresia descărcării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"356\"/>\n        <source>Download speed</source>\n        <translation>Viteza de download</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"357\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>Viteza de download în bytes/kilobytes/megabytes pe secundă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"374\"/>\n        <source>Calculating...</source>\n        <translation>Se calculează...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"378\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"408\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"429\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"450\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>Dacă se va calcula hash-ul fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" 
line=\"385\"/>\n        <source>Busy indicator</source>\n        <translation>Indicator de activitate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"386\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>Afişat când se calculează hash-ul unui fişier</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"399\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>EROARE: $API_KEY absentă.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"405\"/>\n        <source>enter $API_KEY</source>\n        <translation>introdu cheia $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"420\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>EROARE: $BASE_URL absentă.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"426\"/>\n        <source>enter $BASE_URL</source>\n        <translation>introdu $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"441\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>EROARE: $MODEL_NAME absent.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"447\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>introdu $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"469\"/>\n        <source>File size</source>\n        <translation>Dimensiunea fişierului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"491\"/>\n        <source>RAM required</source>\n        <translation>RAM 
necesară</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"513\"/>\n        <source>Parameters</source>\n        <translation>Parametri</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"535\"/>\n        <source>Quant</source>\n        <translation>Quant(ificare)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"557\"/>\n        <source>Type</source>\n        <translation>Tip</translation>\n    </message>\n</context>\n<context>\n    <name>MyFancyLink</name>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"42\"/>\n        <source>Fancy link</source>\n        <translation>Link haios</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"43\"/>\n        <source>A stylized link</source>\n        <translation>Un link cu stil</translation>\n    </message>\n</context>\n<context>\n    <name>MyFileDialog</name>\n    <message>\n        <location filename=\"../qml/MyFileDialog.qml\" line=\"7\"/>\n        <source>Please choose a file</source>\n        <translation>Selectează un fişier</translation>\n    </message>\n</context>\n<context>\n    <name>MyFolderDialog</name>\n    <message>\n        <location filename=\"../qml/MyFolderDialog.qml\" line=\"7\"/>\n        <source>Please choose a directory</source>\n        <translation>Selectează un director (folder)</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsLabel</name>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Clear</source>\n        <translation>Ştergere</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Reset</source>\n        <translation>Resetare</translation>\n    </message>\n</context>\n<context>\n    
<name>MySettingsStack</name>\n    <message>\n        <source>Please choose a directory</source>\n        <translation type=\"vanished\">Selectează un director (folder)</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsTab</name>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"24\"/>\n        <source>Restore defaults?</source>\n        <translation>Restaurare la valorile implicite?</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"25\"/>\n        <source>This page of settings will be reset to the defaults.</source>\n        <translation>Setările de pe această pagină vor fi resetate la valorile implicite.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"69\"/>\n        <source>Restore Defaults</source>\n        <translation>Restaurez valorile implicite</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"73\"/>\n        <source>Restores settings dialog to a default state</source>\n        <translation>Restaurez secţiunea Configurare la starea sa implicită</translation>\n    </message>\n</context>\n<context>\n    <name>NetworkDialog</name>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"39\"/>\n        <source>Contribute data to the GPT4All Opensource Datalake.</source>\n        <translation>Contribui cu date/informaţii la componenta Open-source DataLake a GPT4All.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"55\"/>\n        <source>By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. 
Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!</source>\n        <translation>Dacă activezi această funcţionalitate, vei participa la procesul democratic de instruire a unui model LLM prin contribuţia ta cu date la îmbunătăţirea modelului.\n\nCând un model în GPT4All îţi răspunde şi îi accepţi replica, atunci conversaţia va fi trimisă la componenta Open-source DataLake a GPT4All. Mai mult - îi poţi aprecia replica, Dacă răspunsul Nu Îti Place, poţi sugera unul alternativ. Aceste date vor fi colectate şi agregate în componenta DataLake a GPT4All.\n\nNOTĂ: Dacă activezi această funcţionalitate, vei trimite datele tale la componenta DataLake a GPT4All. Atunci nu te vei putea aştepta la intimitatea (privacy) conversaţiei dacă activezi această funcţionalitate. Totuşi, te poţi aştepta la a beneficia de apreciere - opţional, dacă doreşti. Datele din conversaţie vor fi disponibile pentru oricine vrea să le descarce şi vor fi utilizate de către Nomic AI pentru a îmbunătăţi modele viitoare în GPT4All. 
Nomic AI va păstra toate informaţiile despre atribuire asociate datelor tale şi vei fi menţionat ca participant contribuitor la orice lansare a unui model GPT4All care foloseşte datele tale!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"70\"/>\n        <source>Terms for opt-in</source>\n        <translation>Termenii participării</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"71\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Descrie ce se întâmplă când participi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"79\"/>\n        <source>Please provide a name for attribution (optional)</source>\n        <translation>Specifică un nume pentru această apreciere (opţional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"81\"/>\n        <source>Attribution (optional)</source>\n        <translation>Apreciere (opţional)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"82\"/>\n        <source>Provide attribution</source>\n        <translation>Apreciază</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"95\"/>\n        <source>Enable</source>\n        <translation>Activează</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"96\"/>\n        <source>Enable opt-in</source>\n        <translation>Activez participarea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"100\"/>\n        <source>Cancel</source>\n        <translation>Anulare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"101\"/>\n        
<source>Cancel opt-in</source>\n        <translation>Anulez participarea</translation>\n    </message>\n</context>\n<context>\n    <name>NewVersionDialog</name>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"34\"/>\n        <source>New version is available</source>\n        <translation>O nouă versiune disponibilă!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"46\"/>\n        <source>Update</source>\n        <translation>Update/Actualizare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"48\"/>\n        <source>Update to new version</source>\n        <translation>Actualizez la noua versiune</translation>\n    </message>\n</context>\n<context>\n    <name>PopupDialog</name>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"38\"/>\n        <source>Reveals a shortlived help balloon</source>\n        <translation>Afişează un mesaj scurt de asistenţă</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"48\"/>\n        <source>Busy indicator</source>\n        <translation>Indicator de activitate</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"49\"/>\n        <source>Displayed when the popup is showing busy</source>\n        <translation>Se afişează când procedura este în desfăşurare</translation>\n    </message>\n</context>\n<context>\n    <name>RemoteModelCard</name>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"92\"/>\n        <source>API Key</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"104\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"117\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">introdu cheia $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"120\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"127\"/>\n        <source>Base Url</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"138\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"144\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\">introdu $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"152\"/>\n        <source>Model Name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"163\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"169\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">introdu $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"179\"/>\n        <source>Models</source>\n        <translation type=\"unfinished\">Modele</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" 
line=\"199\"/>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"217\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"218\"/>\n        <source>Install remote model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>SettingsView</name>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"22\"/>\n        <location filename=\"../qml/SettingsView.qml\" line=\"61\"/>\n        <source>Settings</source>\n        <translation>Configurare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"23\"/>\n        <source>Contains various application settings</source>\n        <translation>Conţine setări ale programului</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"29\"/>\n        <source>Application</source>\n        <translation>Program</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"32\"/>\n        <source>Model</source>\n        <translation>Model</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"35\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n</context>\n<context>\n    <name>StartupDialog</name>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"50\"/>\n        <source>Welcome!</source>\n        <translation>Bun venit!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"71\"/>\n        <source>Release notes</source>\n        <translation>Despre versiune</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" 
line=\"72\"/>\n        <source>Release notes for this version</source>\n        <translation>Despre această versiune</translation>\n    </message>\n    <message>\n        <source>### Opt-ins for anonymous usage analytics and datalake\n                By enabling these features, you will be able to participate in the democratic\n                process of training a\n                large language model by contributing data for future model improvements.\n\n                When a GPT4All model responds to you and you have opted-in, your conversation will\n                be sent to the GPT4All\n                Open Source Datalake. Additionally, you can like/dislike its response. If you\n                dislike a response, you\n                can suggest an alternative response. This data will be collected and aggregated in\n                the GPT4All Datalake.\n\n                NOTE: By turning on this feature, you will be sending your data to the GPT4All Open\n                Source Datalake.\n                You should have no expectation of chat privacy when this feature is enabled. You\n                should; however, have\n                an expectation of an optional attribution if you wish. Your chat data will be openly\n                available for anyone\n                to download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI\n                will retain all\n                attribution information attached to your data and you will be credited as a\n                contributor to any GPT4All\n                model release that uses your data!</source>\n        <translation type=\"vanished\">### Acceptul pentru analizarea utilizării anonime şi pentru DataLake\n                Activând aceste functionalităţi vei putea participa la procesul democratic\n                de instruire a unui\n                model conversaţional prin contribuirea cu date/informaţii pentru îmbunătăţirea unor modele.\n                Când un model în GPT4All îţi răspunde şi îi accepţi răspunsul, conversaţia este\n                trimisă la componenta\n                Open-source DataLake a GPT4All. Mai mult - poţi aprecia (Like/Dislike) răspunsul. Dacă\n                un răspuns Nu Îţi Place (e &quot;Aiurea&quot;). poţi\n                sugera un răspuns alternativ. Aceste date vor fi colectate şi agregate în\n                componenta DataLake a GPT4All.\n\n                NOTă: Dacă activezi această funcţionalitate, vei trimite datele tale la componenta\n                DataLake a GPT4All.\n                Atunci nu te vei putea aştepta la intimitatea (privacy) conversaţiei dacă activezi această funcţionalitate.\n                Totuşi, te poţi aştepta la a beneficia de apreciere - \n                opţional, dacă doreşti. 
Datele din conversaţie vor fi disponibile \n                pentru oricine vrea să le descarce şi vor fi utilizate de către Nomic AI\n                pentru a îmbunătăţi modele viitoare în GPT4All.\n                Nomic AI va păstra \n                toate informaţiile despre atribuire asociate datelor tale şi vei fi menţionat ca\n                participant contribuitor la orice lansare a unui model GPT4All\n                care foloseşte datele tale!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"67\"/>\n        <source>### Release Notes\n%1&lt;br/&gt;\n### Contributors\n%2</source>\n        <translation>### Despre versiune\n%1&lt;br/&gt;\n### Contributori\n%2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"87\"/>\n        <source>### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should; however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!</source>\n        <translation>### Acordul pentru analizarea utilizării anonime şi pentru DataLake\n                Activând aceste funcţionalităţi vei putea participa la procesul democratic de instruire \na unui model conversaţional prin contribuirea cu date/informaţii pentru îmbunătăţirea unor modele.\n\nCând un model în GPT4All îţi răspunde şi îi accepţi răspunsul, conversaţia este\ntrimisă la componenta Open-source DataLake a GPT4All. Mai mult - poţi aprecia (Bravo/Aiurea) răspunsul. Dacă\nun răspuns e Aiurea, poţi sugera un răspuns alternativ. Aceste date vor fi colectate şi agregate în\ncomponenta DataLake a GPT4All.\n\nNOTĂ: Dacă activezi această funcţionalitate, vei trimite datele tale la componenta DataLake a GPT4All.\nAtunci nu te vei putea aştepta la confidenţialitatea (privacy) conversaţiei dacă activezi această funcţionalitate.\nTotuşi, te poţi aştepta la a beneficia de apreciere - \nopţional, dacă doreşti. 
Datele din conversaţie vor fi disponibile \npentru oricine vrea să le descarce şi vor fi utilizate de către Nomic AI\npentru a îmbunătăţi modele viitoare în GPT4All.\nNomic AI va păstra \ntoate informaţiile despre atribuire asociate datelor tale şi vei fi menţionat ca\nparticipant contribuitor la orice lansare a unui model GPT4All\ncare foloseşte datele tale!</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"106\"/>\n        <source>Terms for opt-in</source>\n        <translation>Termenii pentru participare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"107\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>Descrie ce se întâmplă când participi</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"118\"/>\n        <source>Opt-in to anonymous usage analytics used to improve GPT4All</source>\n        <translation>Optați pentru trimiterea anonimă a evidenței utilizării, folosite pentru a îmbunătăți GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"124\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"150\"/>\n        <source>Opt-in for anonymous usage statistics</source>\n        <translation>Acceptă colectarea de statistici despre utilizare -anonimă-</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"147\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"262\"/>\n        <source>Yes</source>\n        <translation>Da</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"151\"/>\n        <source>Allow opt-in for anonymous usage statistics</source>\n        <translation>Acceptă participarea la colectarea de statistici despre utilizare 
-anonimă-</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"189\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"304\"/>\n        <source>No</source>\n        <translation>Nu</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"192\"/>\n        <source>Opt-out for anonymous usage statistics</source>\n        <translation>Anulează participarea la colectarea de statistici despre utilizare -anonimă-</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"193\"/>\n        <source>Allow opt-out for anonymous usage statistics</source>\n        <translation>Permite anularea participării la colectarea de statistici despre utilizare -anonimă-</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"232\"/>\n        <source>Opt-in to anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Optați pentru partajarea anonimă a conversațiilor în GPT4All Datalake</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"238\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"265\"/>\n        <source>Opt-in for network</source>\n        <translation>Acceptă pentru reţea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"239\"/>\n        <source>Allow opt-in for network</source>\n        <translation>Permite participarea pentru reţea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"266\"/>\n        <source>Allow opt-in anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Permite participarea la partajarea (share) -anonimă- a conversaţiilor către DataLake a GPT4All</translation>\n    </message>\n    <message>\n    
    <location filename=\"../qml/StartupDialog.qml\" line=\"307\"/>\n        <source>Opt-out for network</source>\n        <translation>Refuz participarea, pentru reţea</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"308\"/>\n        <source>Allow opt-out anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>Permite anularea participării la partajarea -anonimă- a conversaţiilor către DataLake a GPT4All</translation>\n    </message>\n</context>\n<context>\n    <name>SwitchModelDialog</name>\n    <message>\n        <source>&lt;b&gt;Warning:&lt;/b&gt; changing the model will erase the current conversation. Do you wish to continue?</source>\n        <translation type=\"vanished\">&lt;b&gt;Atenţie:&lt;/b&gt; schimbarea modelului va şterge conversaţia curentă. Confirmi aceasta?</translation>\n    </message>\n    <message>\n        <source>Continue</source>\n        <translation type=\"vanished\">Continuă</translation>\n    </message>\n    <message>\n        <source>Continue with model loading</source>\n        <translation type=\"vanished\">Continuă încărcarea modelului</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">Anulare</translation>\n    </message>\n</context>\n<context>\n    <name>ThumbsDownDialog</name>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"39\"/>\n        <source>Please edit the text below to provide a better response. 
(optional)</source>\n        <translation>Te rog, editează textul de mai jos pentru a oferi o replică mai bună (opţional).</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"54\"/>\n        <source>Please provide a better response...</source>\n        <translation>Te rog, oferă o replică mai bună...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"64\"/>\n        <source>Submit</source>\n        <translation>Trimite</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"65\"/>\n        <source>Submits the user&apos;s response</source>\n        <translation>Trimite răspunsul dat de utilizator</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"69\"/>\n        <source>Cancel</source>\n        <translation>Anulare</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"70\"/>\n        <source>Closes the response dialog</source>\n        <translation>Închide afişarea răspunsului</translation>\n    </message>\n</context>\n<context>\n    <name>main</name>\n    <message>\n        <location filename=\"../main.qml\" line=\"24\"/>\n        <source>GPT4All v%1</source>\n        <translation>GPT4All v%1</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"47\"/>\n        <source>Restore</source>\n        <translation>Restaurare</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"51\"/>\n        <source>Quit</source>\n        <translation>Abandon</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"149\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Incompatible hardware 
detected.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.&lt;br&gt;&lt;br&gt;See here for more information: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</source>\n        <translation>&lt;h3&gt;A apărut o eroare la iniţializare:; &lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Hardware incompatibil. &quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Din păcate, procesorul (CPU) nu întruneşte condiţiile minime pentru a rula acest program. În particular, nu suportă instrucţiunile AVX pe care programul le necesită pentru a integra un model conversaţional modern. În acest moment, unica soluţie este să îţi aduci la zi sistemul hardware cu un CPU mai recent.&lt;br&gt;&lt;br&gt;Aici sunt mai multe informaţii: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"165\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Inability to access settings file.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. 
Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help.</source>\n        <translation>&lt;h3&gt;A apărut o eroare la iniţializare:; &lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Nu poate fi accesat fişierul de configurare a programului.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Din păcate, ceva împiedică programul în a accesa acel fişier. Cauza poate fi un set de permisiuni incorecte pe directorul/folderul local de configurare unde se află acel fişier. Poţi parcurge canalul nostru &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;Discord&lt;/a&gt; unde vei putea primi asistenţă.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"193\"/>\n        <source>Connection to datalake failed.</source>\n        <translation>Conectarea la DataLake a eşuat.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"204\"/>\n        <source>Saving chats.</source>\n        <translation>Se salvează conversaţiile.</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"215\"/>\n        <source>Network dialog</source>\n        <translation>Dialogul despre reţea</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"216\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>acceptă partajarea (share) de comentarii/conversaţii</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"278\"/>\n        <source>Home view</source>\n        <translation>Secţiunea de Început</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"279\"/>\n        <source>Home view of application</source>\n        <translation>Secţiunea de Început a programului</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"287\"/>\n        <source>Home</source>\n  
      <translation>Prima&lt;br&gt;pagină</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"313\"/>\n        <source>Chat view</source>\n        <translation>Secţiunea conversaţiilor</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"314\"/>\n        <source>Chat view to interact with models</source>\n        <translation>Secţiunea de chat pentru interacţiune cu modele</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"322\"/>\n        <source>Chats</source>\n        <translation>Conversaţii</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"347\"/>\n        <location filename=\"../main.qml\" line=\"356\"/>\n        <source>Models</source>\n        <translation>Modele</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"348\"/>\n        <source>Models view for installed models</source>\n        <translation>Secţiunea modelelor instalate</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"381\"/>\n        <location filename=\"../main.qml\" line=\"390\"/>\n        <source>LocalDocs</source>\n        <translation>LocalDocs</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"382\"/>\n        <source>LocalDocs view to configure and use local docs</source>\n        <translation>Secţiunea LocalDocs de configurare şi folosire a Documentelor Locale</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"415\"/>\n        <location filename=\"../main.qml\" line=\"424\"/>\n        <source>Settings</source>\n        <translation>Configurare</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"416\"/>\n        <source>Settings view for application configuration</source>\n        
<translation>Secţiunea de Configurare a programului</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"469\"/>\n        <source>The datalake is enabled</source>\n        <translation>DataLake: ACTIV</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"471\"/>\n        <source>Using a network model</source>\n        <translation>Se foloseşte un model pe reţea</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"473\"/>\n        <source>Server mode is enabled</source>\n        <translation>Modul Server: ACTIV</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"684\"/>\n        <source>Installed models</source>\n        <translation>Modele instalate</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"685\"/>\n        <source>View of installed models</source>\n        <translation>Secţiunea modelelor instalate</translation>\n    </message>\n</context>\n</TS>\n"
  },
  {
    "path": "gpt4all-chat/translations/gpt4all_zh_CN.ts",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.1\" language=\"zh_CN\">\n<context>\n    <name>AddCollectionView</name>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"45\"/>\n        <source>← Existing Collections</source>\n        <translation>← 存在集合</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"68\"/>\n        <source>Add Document Collection</source>\n        <translation>添加文档集合</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"78\"/>\n        <source>Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.</source>\n        <translation>添加一个包含纯文本文件、PDF或Markdown的文件夹。在“设置”中配置其他扩展。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"99\"/>\n        <source>Name</source>\n        <translation>名称</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"114\"/>\n        <source>Collection name...</source>\n        <translation>集合名称...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"116\"/>\n        <source>Name of the collection to add (Required)</source>\n        <translation>集合名称 (必须)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"132\"/>\n        <source>Folder</source>\n        <translation>目录</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"149\"/>\n        <source>Folder path...</source>\n        <translation>目录地址...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"152\"/>\n        <source>Folder path to documents (Required)</source>\n       
 <translation>文档的目录地址（必须）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"164\"/>\n        <source>Browse</source>\n        <translation>查看</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"177\"/>\n        <source>Create Collection</source>\n        <translation>创建集合</translation>\n    </message>\n</context>\n<context>\n    <name>AddGPT4AllModelView</name>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"31\"/>\n        <source>These models have been specifically configured for use in GPT4All. The first few models on the list are known to work the best, but you should only attempt to use models that will fit in your available memory.</source>\n        <translation>这些模型已专门为GPT4All配置。列表前几个模型的效果最好，但你最好只使用那些满足内存的模型。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"45\"/>\n        <source>Network error: could not retrieve %1</source>\n        <translation>网络错误：无法检索 %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"55\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"343\"/>\n        <source>Busy indicator</source>\n        <translation>繁忙程度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"56\"/>\n        <source>Displayed when the models request is ongoing</source>\n        <translation>当模型请求处于进行中时显示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"65\"/>\n        <source>All</source>\n        <translation>全选</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"85\"/>\n        <source>Reasoning</source>\n        <translation>推理</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"142\"/>\n        <source>Model file</source>\n        <translation>模型文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"143\"/>\n        <source>Model file to be downloaded</source>\n        <translation>待下载的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"166\"/>\n        <source>Description</source>\n        <translation>描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"167\"/>\n        <source>File description</source>\n        <translation>文件描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Resume</source>\n        <translation>继续</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Download</source>\n        <translation>下载</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"208\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>停止/重启/开始下载</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"220\"/>\n        <source>Remove</source>\n        <translation>删除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"227\"/>\n        <source>Remove model from filesystem</source>\n        <translation>从系统中删除模型</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation 
type=\"vanished\">安装</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">安装在线模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"240\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;错误&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"246\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>描述下载时发生的错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"259\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;警告：不推荐用于您的硬件。模型需要的内存（%1 GB）超过了您系统的可用内存（%2）&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"265\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>硬件不兼容的错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"303\"/>\n        <source>Download progressBar</source>\n        <translation>下载进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"304\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>显示下载进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"314\"/>\n        <source>Download speed</source>\n        <translation>下载速度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"315\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>下载速度  b/kb/mb 每秒</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"332\"/>\n        <source>Calculating...</source>\n        <translation>计算中...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"336\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>是否正在计算文件哈希</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"344\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>在计算文件哈希时显示</translation>\n    </message>\n    
<message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"vanished\">错误：$API_KEY为空</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">输入 $API_KEY</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"vanished\">错误：$BASE_URL 为空</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"vanished\">输入 $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"vanished\">错误：$MODEL_NAME 为空</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"vanished\">输入 $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"364\"/>\n        <source>File size</source>\n        <translation>文件大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"386\"/>\n        <source>RAM required</source>\n        <translation>需要 RAM</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"413\"/>\n        <source>?</source>\n        <translation>？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"408\"/>\n        <source>Parameters</source>\n        <translation>参数</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"430\"/>\n        
<source>Quant</source>\n        <translation>量化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"452\"/>\n        <source>Type</source>\n        <translation>类型</translation>\n    </message>\n</context>\n<context>\n    <name>AddHFModelView</name>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"32\"/>\n        <source>Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these will work. Many will require additional configuration before they can be used.</source>\n        <translation>在 Hugging Face 上查找并下载模型。不能保证这些模型可以正常工作。许多模型在使用前需要额外的配置。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"52\"/>\n        <source>Discover and download models by keyword search...</source>\n        <translation>通过关键词查找并下载模型 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"55\"/>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation>用于发现和筛选可下载模型的文本字段</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"61\"/>\n        <source>Searching · %1</source>\n        <translation>搜索中 · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"131\"/>\n        <source>Initiate model discovery and filtering</source>\n        <translation>启动模型发现和过滤</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"132\"/>\n        <source>Triggers discovery and filtering of models</source>\n        <translation>触发模型发现和过滤</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"151\"/>\n        <source>Default</source>\n        <translation>默认</translation>\n    </message>\n    <message>\n 
       <location filename=\"../qml/AddHFModelView.qml\" line=\"152\"/>\n        <source>Likes</source>\n        <translation>热门</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"153\"/>\n        <source>Downloads</source>\n        <translation>下载量</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"154\"/>\n        <source>Recent</source>\n        <translation>最近</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"162\"/>\n        <source>Sort by: %1</source>\n        <translation>排序: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"176\"/>\n        <source>Asc</source>\n        <translation>升序</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"177\"/>\n        <source>Desc</source>\n        <translation>降序</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"190\"/>\n        <source>Sort dir: %1</source>\n        <translation>排序目录: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"212\"/>\n        <source>None</source>\n        <translation>无</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"234\"/>\n        <source>Limit: %1</source>\n        <translation>数量: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"297\"/>\n        <source>Model file</source>\n        <translation>模型文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"298\"/>\n        <source>Model file to be downloaded</source>\n        <translation>待下载的模型</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/AddHFModelView.qml\" line=\"321\"/>\n        <source>Description</source>\n        <translation>描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"322\"/>\n        <source>File description</source>\n        <translation>文件描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Resume</source>\n        <translation>继续</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Download</source>\n        <translation>下载</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"363\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>停止/重启/开始下载</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"375\"/>\n        <source>Remove</source>\n        <translation>删除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"382\"/>\n        <source>Remove model from filesystem</source>\n        <translation>从系统中删除模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"396\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"430\"/>\n        <source>Install</source>\n        <translation>安装</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"431\"/>\n        <source>Install online model</source>\n        <translation>安装在线模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"441\"/>\n   
     <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;错误&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"447\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>描述下载时发生的错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"460\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;警告：不推荐用于您的硬件。模型需要的内存（%1 GB）超过了您系统的可用内存（%2）&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"466\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>硬件不兼容的错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"504\"/>\n        <source>Download progressBar</source>\n        <translation>下载进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"505\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>显示下载进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"515\"/>\n        <source>Download speed</source>\n        <translation>下载速度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"516\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        
<translation>下载速度  b/kb/mb 每秒</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"533\"/>\n        <source>Calculating...</source>\n        <translation>计算中...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"537\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"567\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"588\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"609\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>是否正在计算文件哈希</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"544\"/>\n        <source>Busy indicator</source>\n        <translation>繁忙程度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"545\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>在计算文件哈希时显示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"558\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>错误：$API_KEY为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"564\"/>\n        <source>enter $API_KEY</source>\n        <translation>输入 $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"579\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>错误：$BASE_URL 为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"585\"/>\n        <source>enter $BASE_URL</source>\n        <translation>输入 $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"600\"/>\n        
<source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>错误：$MODEL_NAME 为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"606\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>输入 $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"628\"/>\n        <source>File size</source>\n        <translation>文件大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"650\"/>\n        <source>Quant</source>\n        <translation>量化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"672\"/>\n        <source>Type</source>\n        <translation>类型</translation>\n    </message>\n</context>\n<context>\n    <name>AddModelView</name>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"55\"/>\n        <source>← Existing Models</source>\n        <translation>← 已安装的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"75\"/>\n        <source>Explore Models</source>\n        <translation>发现模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"86\"/>\n        <source>GPT4All</source>\n        <translation>GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"93\"/>\n        <source>Remote Providers</source>\n        <translation type=\"unfinished\">远程提供商</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"100\"/>\n        <source>HuggingFace</source>\n        <translation>HuggingFace</translation>\n    </message>\n</context>\n<context>\n    <name>AddRemoteModelView</name>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"31\"/>\n   
     <source>Various remote model providers that use network resources for inference.</source>\n        <translation type=\"unfinished\">使用网络资源进行推理的各种远程模型提供商。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"55\"/>\n        <source>Groq</source>\n        <translation type=\"unfinished\">Groq</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"57\"/>\n        <source>Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\">Groq 提供高性能 AI 推理引擎，专为低延迟和高效处理而设计。其技术经过优化，适用于实时应用，非常适合需要快速响应的大型开源语言模型和其他 AI 任务的用户。&lt;br&gt;&lt;br&gt;获取您的 API 密钥：&lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"79\"/>\n        <source>OpenAI</source>\n        <translation type=\"unfinished\">OpenAI</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"81\"/>\n        <source>OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\">OpenAI 提供先进的 AI 模型访问权限，包括支持广泛应用的 GPT-4，涵盖对话 AI、内容生成和代码补全等场景。&lt;br&gt;&lt;br&gt;获取您的 API 密钥：&lt;a 
href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"96\"/>\n        <source>Mistral</source>\n        <translation type=\"unfinished\">Mistral</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"98\"/>\n        <source>Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</source>\n        <translation type=\"unfinished\">Mistral AI 专注于高效的开源语言模型，针对各种自然语言处理任务进行了优化。其模型具备灵活性和高性能，是需要可扩展 AI 解决方案的应用的理想选择。&lt;br&gt;&lt;br&gt;获取您的 API 密钥：&lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"141\"/>\n        <source>Custom</source>\n        <translation type=\"unfinished\">自定义</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"143\"/>\n        <source>The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. 
This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.</source>\n        <translation type=\"unfinished\">自定义提供商选项允许用户连接自己的 OpenAI 兼容 AI 模型或第三方推理服务。这对于拥有专有模型的组织，或使用此处未列出的特定 AI 提供商的用户非常有用。</translation>\n    </message>\n</context>\n<context>\n    <name>ApplicationSettings</name>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"16\"/>\n        <source>Application</source>\n        <translation>应用</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"25\"/>\n        <source>Network dialog</source>\n        <translation>网络对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"26\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>选择加入以共享反馈/对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"48\"/>\n        <source>Error dialog</source>\n        <translation>错误对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"72\"/>\n        <source>Application Settings</source>\n        <translation>应用设置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"85\"/>\n        <source>General</source>\n        <translation>通用设置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"97\"/>\n        <source>Theme</source>\n        <translation>主题</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"98\"/>\n        <source>The application color scheme.</source>\n        <translation>应用的主题颜色</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"113\"/>\n        
<source>Dark</source>\n        <translation>深色</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"112\"/>\n        <source>Light</source>\n        <translation>亮色</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"39\"/>\n        <source>ERROR: Update system could not find the MaintenanceTool used to check for updates!&lt;br/&gt;&lt;br/&gt;Did you install this application using the online installer? If so, the MaintenanceTool executable should be located one directory above where this application resides on your filesystem.&lt;br/&gt;&lt;br/&gt;If you can&apos;t start it manually, then I&apos;m afraid you&apos;ll have to reinstall.</source>\n        <translation>错误：更新系统无法找到用于检查更新的 MaintenanceTool！&lt;br&gt;&lt;br&gt;您是否使用在线安装程序安装了此应用程序？如果是的话，MaintenanceTool 可执行文件应该位于文件系统中此应用程序所在目录的上一级目录。&lt;br&gt;&lt;br&gt;如果无法手动启动它，那么恐怕您需要重新安装。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"114\"/>\n        <source>LegacyDark</source>\n        <translation>LegacyDark</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"136\"/>\n        <source>Font Size</source>\n        <translation>字体大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"137\"/>\n        <source>The size of text in the application.</source>\n        <translation>应用中的文本大小。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"151\"/>\n        <source>Small</source>\n        <translation>小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"152\"/>\n        <source>Medium</source>\n        <translation>中</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ApplicationSettings.qml\" line=\"153\"/>\n        <source>Large</source>\n        <translation>大</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"176\"/>\n        <source>Language and Locale</source>\n        <translation>语言和本地化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"177\"/>\n        <source>The language and locale you wish to use.</source>\n        <translation>你想使用的语言</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"196\"/>\n        <source>System Locale</source>\n        <translation>系统语言</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"223\"/>\n        <source>Device</source>\n        <translation>设备</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"224\"/>\n        <source>The compute device used for text generation.</source>\n        <translation>设备用于文本生成</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"242\"/>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"297\"/>\n        <source>Application default</source>\n        <translation>程序默认</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"275\"/>\n        <source>Default Model</source>\n        <translation>默认模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"276\"/>\n        <source>The preferred model for new chats. 
Also used as the local server fallback.</source>\n        <translation>新聊天的首选模型。也用作本地服务器回退。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"339\"/>\n        <source>Suggestion Mode</source>\n        <translation>建议模式</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"340\"/>\n        <source>Generate suggested follow-up questions at the end of responses.</source>\n        <translation>在答复结束时生成建议的后续问题。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"353\"/>\n        <source>When chatting with LocalDocs</source>\n        <translation>本地文档检索</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"354\"/>\n        <source>Whenever possible</source>\n        <translation>只要有可能</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"355\"/>\n        <source>Never</source>\n        <translation>从不</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"368\"/>\n        <source>Download Path</source>\n        <translation>下载目录</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"369\"/>\n        <source>Where to store local models and the LocalDocs database.</source>\n        <translation>本地模型和本地文档数据库存储目录</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"401\"/>\n        <source>Browse</source>\n        <translation>查看</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"402\"/>\n        <source>Choose where to save model files</source>\n        <translation>模型下载目录</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/ApplicationSettings.qml\" line=\"413\"/>\n        <source>Enable Datalake</source>\n        <translation>开启数据湖</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"414\"/>\n        <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>\n        <translation>发送对话和反馈给GPT4All 的开源数据湖。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"447\"/>\n        <source>Advanced</source>\n        <translation>高级</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"459\"/>\n        <source>CPU Threads</source>\n        <translation>CPU线程</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"460\"/>\n        <source>The number of CPU threads used for inference and embedding.</source>\n        <translation>用于推理和嵌入的CPU线程数</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"491\"/>\n        <source>Enable System Tray</source>\n        <translation>启用系统托盘</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"492\"/>\n        <source>The application will minimize to the system tray when the window is closed.</source>\n        <translation>当窗口关闭时，应用程序将最小化到系统托盘。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"508\"/>\n        <source>Enable Local API Server</source>\n        <translation>开启本地 API 服务</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"509\"/>\n        <source>Expose an OpenAI-Compatible server to localhost. 
WARNING: Results in increased resource usage.</source>\n        <translation>将OpenAI兼容服务器暴露给本地主机。警告：导致资源使用量增加。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"525\"/>\n        <source>API Server Port</source>\n        <translation>API 服务端口</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"526\"/>\n        <source>The port to use for the local server. Requires restart.</source>\n        <translation>使用本地服务的端口，需要重启</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"578\"/>\n        <source>Check For Updates</source>\n        <translation>检查更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"579\"/>\n        <source>Manually check for an update to GPT4All.</source>\n        <translation>手动检查更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"588\"/>\n        <source>Updates</source>\n        <translation>更新</translation>\n    </message>\n</context>\n<context>\n    <name>Chat</name>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"33\"/>\n        <location filename=\"../src/chat.h\" line=\"84\"/>\n        <source>New Chat</source>\n        <translation>新对话</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"46\"/>\n        <source>Server Chat</source>\n        <translation>服务器对话</translation>\n    </message>\n</context>\n<context>\n    <name>ChatAPIWorker</name>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"263\"/>\n        <source>ERROR: Network error occurred while connecting to the API server</source>\n        <translation>错误：连接到 API 服务器时发生网络错误</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" 
line=\"276\"/>\n        <source>ChatAPIWorker::handleFinished got HTTP Error %1 %2</source>\n        <translation>ChatAPIWorker::handleFinished 收到 HTTP 错误 %1 %2</translation>\n    </message>\n</context>\n<context>\n    <name>ChatCollapsibleItem</name>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"37\"/>\n        <source>Analysis encountered error</source>\n        <translation type=\"unfinished\">分析时遇到错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Thinking</source>\n        <translation type=\"unfinished\">思考中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Analyzing</source>\n        <translation type=\"unfinished\">分析中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"41\"/>\n        <source>Thought for %1 %2</source>\n        <translation type=\"unfinished\">思考耗时 %1 %2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>second</source>\n        <translation type=\"unfinished\">秒</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>seconds</source>\n        <translation type=\"unfinished\">秒</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"44\"/>\n        <source>Analyzed</source>\n        <translation type=\"unfinished\">分析完成</translation>\n    </message>\n</context>\n<context>\n    <name>ChatDrawer</name>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"37\"/>\n        <source>Drawer</source>\n        <translation>抽屉</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" 
line=\"38\"/>\n        <source>Main navigation drawer</source>\n        <translation>导航</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"49\"/>\n        <source>＋ New Chat</source>\n        <translation>＋ 新对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"50\"/>\n        <source>Create a new chat</source>\n        <translation>新对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"199\"/>\n        <source>Select the current chat or edit the chat when in edit mode</source>\n        <translation>选择当前的聊天或在编辑模式下编辑聊天</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"216\"/>\n        <source>Edit chat name</source>\n        <translation>修改对话名称</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"229\"/>\n        <source>Save chat name</source>\n        <translation>保存对话名称</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"246\"/>\n        <source>Delete chat</source>\n        <translation>删除对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"283\"/>\n        <source>Confirm chat deletion</source>\n        <translation>确认删除对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"305\"/>\n        <source>Cancel chat deletion</source>\n        <translation>取消删除对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"317\"/>\n        <source>List of chats</source>\n        <translation>对话列表</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"318\"/>\n        <source>List of chats in the drawer dialog</source>\n        
<translation>对话框中的聊天列表</translation>\n    </message>\n</context>\n<context>\n    <name>ChatItemView</name>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"83\"/>\n        <source>GPT4All</source>\n        <translation>GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"84\"/>\n        <source>You</source>\n        <translation>您</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"107\"/>\n        <source>response stopped ...</source>\n        <translation>响应停止...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"108\"/>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation>检索本地文档: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"109\"/>\n        <source>searching localdocs: %1 ...</source>\n        <translation>搜索本地文档: %1 ...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"110\"/>\n        <source>processing ...</source>\n        <translation>处理中...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"111\"/>\n        <source>generating response ...</source>\n        <translation>正在生成回复…</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"112\"/>\n        <source>generating questions ...</source>\n        <translation>正在生成问题…</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"113\"/>\n        <source>generating toolcall ...</source>\n        <translation>正在生成工具调用…</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"545\"/>\n        <source>Copy</source>\n        <translation>复制</translation>\n   
 </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/ChatItemView.qml\" line=\"283\"/>\n        <source>%n Source(s)</source>\n        <translation>\n            <numerusform>%n 资源</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"430\"/>\n        <source>LocalDocs</source>\n        <translation>本地文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"460\"/>\n        <source>Edit this message?</source>\n        <translation>编辑这条消息？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"461\"/>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"472\"/>\n        <source>All following messages will be permanently erased.</source>\n        <translation>所有后续消息将被永久删除。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"471\"/>\n        <source>Redo this response?</source>\n        <translation>重新生成这条回复？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"495\"/>\n        <source>Cannot edit chat without a loaded model.</source>\n        <translation>未加载模型时无法编辑聊天。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"497\"/>\n        <source>Cannot edit chat while the model is generating.</source>\n        <translation>模型正在生成时无法编辑聊天。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"506\"/>\n        <source>Edit</source>\n        <translation>编辑</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"517\"/>\n        <source>Cannot redo response without a loaded model.</source>\n        <translation>未加载模型时无法重新生成回复。</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/ChatItemView.qml\" line=\"519\"/>\n        <source>Cannot redo response while the model is generating.</source>\n        <translation>模型正在生成时无法重新生成回复。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"528\"/>\n        <source>Redo</source>\n        <translation>重做</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"565\"/>\n        <source>Like response</source>\n        <translation>点赞这条回复</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"594\"/>\n        <source>Dislike response</source>\n        <translation>点踩这条回复</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"657\"/>\n        <source>Suggested follow-ups</source>\n        <translation>建议的后续步骤</translation>\n    </message>\n</context>\n<context>\n    <name>ChatLLM</name>\n    <message>\n        <location filename=\"../src/chatllm.cpp\" line=\"1047\"/>\n        <source>Your message was too long and could not be processed (%1 &gt; %2). 
Please try again with something shorter.</source>\n        <translation>您的消息过长，无法处理（%1 &gt; %2）。请尝试简短内容。</translation>\n    </message>\n</context>\n<context>\n    <name>ChatListModel</name>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"94\"/>\n        <source>TODAY</source>\n        <translation>今天</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"96\"/>\n        <source>THIS WEEK</source>\n        <translation>本周</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"98\"/>\n        <source>THIS MONTH</source>\n        <translation>本月</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"100\"/>\n        <source>LAST SIX MONTHS</source>\n        <translation>半年内</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"102\"/>\n        <source>THIS YEAR</source>\n        <translation>今年内</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"104\"/>\n        <source>LAST YEAR</source>\n        <translation>去年</translation>\n    </message>\n</context>\n<context>\n    <name>ChatTextItem</name>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"67\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">复制</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"73\"/>\n        <source>Copy Message</source>\n        <translation type=\"unfinished\">复制消息</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Disable markdown</source>\n        <translation type=\"unfinished\">禁用 Markdown</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        
<source>Enable markdown</source>\n        <translation type=\"unfinished\">启用 Markdown</translation>\n    </message>\n</context>\n<context>\n    <name>ChatView</name>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;Warning&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</source>\n        <translation>&lt;h3&gt;警告&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"99\"/>\n        <source>Conversation copied to clipboard.</source>\n        <translation>复制对话到剪切板。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"106\"/>\n        <source>Code copied to clipboard.</source>\n        <translation>复制代码到剪切板。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"113\"/>\n        <source>The entire chat will be erased.</source>\n        <translation>全部聊天记录将被清除。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"248\"/>\n        <source>Chat panel</source>\n        <translation>对话面板</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"249\"/>\n        <source>Chat panel with options</source>\n        <translation>对话面板选项</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"356\"/>\n        <source>Reload the currently loaded model</source>\n        <translation>重载当前模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"370\"/>\n        <source>Eject the currently loaded model</source>\n        <translation>弹出当前加载的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"382\"/>\n        <source>No model installed.</source>\n        <translation>没有安装模型。</translation>\n    </message>\n    <message>\n 
       <location filename=\"../qml/ChatView.qml\" line=\"384\"/>\n        <source>Model loading error.</source>\n        <translation>模型加载错误。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"386\"/>\n        <source>Waiting for model...</source>\n        <translation>等待模型...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"388\"/>\n        <source>Switching context...</source>\n        <translation>切换上下文...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"390\"/>\n        <source>Choose a model...</source>\n        <translation>选择模型...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"392\"/>\n        <source>Not found: %1</source>\n        <translation>没找到: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"480\"/>\n        <source>The top item is the current model</source>\n        <translation>最上方的项目为当前模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"566\"/>\n        <source>LocalDocs</source>\n        <translation>本地文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"584\"/>\n        <source>Add documents</source>\n        <translation>添加文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"585\"/>\n        <source>add collections of documents to the chat</source>\n        <translation>将文档集合添加到聊天中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"756\"/>\n        <source>Load the default model</source>\n        <translation>载入默认模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"757\"/>\n        <source>Loads the default 
model which can be changed in settings</source>\n        <translation>加载默认模型，可以在设置中更改</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"768\"/>\n        <source>No Model Installed</source>\n        <translation>没有下载模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"777\"/>\n        <source>GPT4All requires that you install at least one\nmodel to get started</source>\n        <translation>GPT4All要求您至少安装一个模型才能开始</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"789\"/>\n        <source>Install a Model</source>\n        <translation>下载模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"794\"/>\n        <source>Shows the add model view</source>\n        <translation>查看添加的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"819\"/>\n        <source>Conversation with the model</source>\n        <translation>使用此模型对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"820\"/>\n        <source>prompt / response pairs from the conversation</source>\n        <translation>对话中的提示/响应对</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1095\"/>\n        <source>Legacy prompt template needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation>旧版提示模板需要在设置中&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;更新&lt;/a&gt;。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1099\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; 
configured.</source>\n        <translation>未配置&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;对话模板&lt;/a&gt;。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1102\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;对话模板&lt;/a&gt;不能为空。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1105\"/>\n        <source>Legacy system prompt needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation>旧系统提示需要在设置中&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;更新&lt;/a&gt;。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1293\"/>\n        <source>Copy</source>\n        <translation>复制</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"924\"/>\n        <source>Erase and reset chat session</source>\n        <translation>清空并重置聊天会话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"942\"/>\n        <source>Copy chat session to clipboard</source>\n        <translation>复制对话到剪切板</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1216\"/>\n        <source>Add media</source>\n        <translation>新增媒体</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1217\"/>\n        <source>Adds media to the prompt</source>\n        <translation>将媒体加入提示中</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"1351\"/>\n        <source>Stop generating</source>\n        <translation>停止生成</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1352\"/>\n        <source>Stop the current response generation</source>\n        <translation>停止当前响应</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1396\"/>\n        <source>Attach</source>\n        <translation>附件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1398\"/>\n        <source>Single File</source>\n        <translation>单一文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1007\"/>\n        <source>Reloads the model</source>\n        <translation>重载模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"66\"/>\n        <source>&lt;h3&gt;Encountered an error loading model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. 
Here are some suggestions for resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a compatible format and type&lt;li&gt;Check the model file is complete in the download folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If you&apos;ve sideloaded the model ensure the file is not corrupt by checking md5sum&lt;li&gt;Read more about what models are supported in our &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the gui&lt;li&gt;Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation>&lt;h3&gt;加载模型时遇到错误：&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&lt;%1&gt;&lt;/i&gt;&lt;br&gt;&lt;br&gt;模型加载失败可能由多种原因引起，但最常见的原因包括文件格式错误、下载不完整或损坏、文件类型错误、系统 RAM 不足或模型类型不兼容。以下是一些解决问题的建议：&lt;br&gt;&lt;ul&gt;&lt;li&gt;确保模型文件具有兼容的格式和类型&lt;li&gt;检查下载文件夹中的模型文件是否完整&lt;li&gt;您可以在设置对话框中找到下载文件夹&lt;li&gt;如果您已侧载模型，请通过检查 md5sum 确保文件未损坏&lt;li&gt;在我们的 &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;文档&lt;/a&gt; 中了解有关 gui 支持哪些模型的更多信息&lt;li&gt;查看我们的 &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord 频道&lt;/a&gt; 以获取帮助</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"92\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"112\"/>\n        <source>Erase conversation?</source>\n        <translation>清空对话？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"93\"/>\n        <source>Changing the model will erase the current conversation.</source>\n        <translation>更换模型将清除当前对话。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"394\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"1005\"/>\n        <source>Reload · %1</source>\n        <translation>重载 · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" 
line=\"396\"/>\n        <source>Loading · %1</source>\n        <translation>载入中 · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"732\"/>\n        <source>Load · %1 (default) →</source>\n        <translation>载入 · %1 (默认) →</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Send a message...</source>\n        <translation>发送消息...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Load a model to continue...</source>\n        <translation>选择模型并继续...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1246\"/>\n        <source>Send messages/prompts to the model</source>\n        <translation>发送消息/提示词给模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1287\"/>\n        <source>Cut</source>\n        <translation>剪切</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1299\"/>\n        <source>Paste</source>\n        <translation>粘贴</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1303\"/>\n        <source>Select All</source>\n        <translation>全选</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1374\"/>\n        <source>Send message</source>\n        <translation>发送消息</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1375\"/>\n        <source>Sends the message/prompt contained in textfield to the model</source>\n        <translation>将文本框中包含的消息/提示发送给模型</translation>\n    </message>\n</context>\n<context>\n    <name>CodeInterpreter</name>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"79\"/>\n        
<source>Code Interpreter</source>\n        <translation type=\"unfinished\">代码解释器</translation>\n    </message>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"80\"/>\n        <source>compute javascript code using console.log as output</source>\n        <translation type=\"unfinished\">使用 console.log 计算 JavaScript 代码并输出结果</translation>\n    </message>\n</context>\n<context>\n    <name>CollectionsDrawer</name>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"70\"/>\n        <source>Warning: searching collections while indexing can return incomplete results</source>\n        <translation>提示: 索引时搜索集合可能会返回不完整的结果</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform></numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform></numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"103\"/>\n        <source>Updating</source>\n        <translation>更新中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"128\"/>\n        <source>＋ Add Docs</source>\n        <translation>＋ 添加文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"137\"/>\n        <source>Select a collection to make it available to the chat model.</source>\n        <translation>选择一个集合，使其可用于聊天模型。</translation>\n    </message>\n</context>\n<context>\n    <name>ConfirmationDialog</name>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"42\"/>\n        
<source>OK</source>\n        <translation>好</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"49\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n</context>\n<context>\n    <name>Download</name>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"278\"/>\n        <source>Model &quot;%1&quot; is installed successfully.</source>\n        <translation>模型 &quot;%1&quot; 安装成功</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"288\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>错误：$MODEL_NAME 为空</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"294\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>错误：$API_KEY为空</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"300\"/>\n        <source>ERROR: $BASE_URL is invalid.</source>\n        <translation>错误：$BASE_URL 非法</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"306\"/>\n        <source>ERROR: Model &quot;%1 (%2)&quot; is conflict.</source>\n        <translation>错误: 模型 &quot;%1 (%2)&quot; 有冲突.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"325\"/>\n        <source>Model &quot;%1 (%2)&quot; is installed successfully.</source>\n        <translation>模型 &quot;%1 (%2)&quot; 安装成功.</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"349\"/>\n        <source>Model &quot;%1&quot; is removed.</source>\n        <translation>模型 &quot;%1&quot; 已删除.</translation>\n    </message>\n</context>\n<context>\n    <name>HomeView</name>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"49\"/>\n        <source>Welcome 
to GPT4All</source>\n        <translation>欢迎</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"56\"/>\n        <source>The privacy-first LLM chat application</source>\n        <translation>隐私至上的大模型咨询应用程序</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"66\"/>\n        <source>Start chatting</source>\n        <translation>开始聊天</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"81\"/>\n        <source>Start Chatting</source>\n        <translation>开始聊天</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"82\"/>\n        <source>Chat with any LLM</source>\n        <translation>大语言模型聊天</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"92\"/>\n        <source>LocalDocs</source>\n        <translation>本地文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"93\"/>\n        <source>Chat with your local files</source>\n        <translation>本地文件聊天</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"103\"/>\n        <source>Find Models</source>\n        <translation>查找模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"104\"/>\n        <source>Explore and download models</source>\n        <translation>发现并下载模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"190\"/>\n        <source>Latest news</source>\n        <translation>新闻</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"191\"/>\n        <source>Latest news from GPT4All</source>\n        <translation>GPT4All新闻</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/HomeView.qml\" line=\"222\"/>\n        <source>Release Notes</source>\n        <translation>发布日志</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"228\"/>\n        <source>Documentation</source>\n        <translation>文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"234\"/>\n        <source>Discord</source>\n        <translation>Discord</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"240\"/>\n        <source>X (Twitter)</source>\n        <translation>X (Twitter)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"246\"/>\n        <source>Github</source>\n        <translation>Github</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"257\"/>\n        <source>nomic.ai</source>\n        <translation>nomic.ai</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"282\"/>\n        <source>Subscribe to Newsletter</source>\n        <translation>订阅信息</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsSettings</name>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"19\"/>\n        <source>LocalDocs</source>\n        <translation>本地文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"29\"/>\n        <source>LocalDocs Settings</source>\n        <translation>本地文档设置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"38\"/>\n        <source>Indexing</source>\n        <translation>索引中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"51\"/>\n        <source>Allowed File Extensions</source>\n        
<translation>添加文档扩展名</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"52\"/>\n        <source>Comma-separated list. LocalDocs will only attempt to process files with these extensions.</source>\n        <translation>逗号分隔的列表。LocalDocs 只会尝试处理具有这些扩展名的文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"100\"/>\n        <source>Embedding</source>\n        <translation>Embedding</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"112\"/>\n        <source>Use Nomic Embed API</source>\n        <translation>使用 Nomic 内部 API</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"113\"/>\n        <source>Embed documents using the fast Nomic API instead of a private local model. Requires restart.</source>\n        <translation>使用快速的 Nomic API 嵌入文档，而不是使用私有本地模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"130\"/>\n        <source>Nomic API Key</source>\n        <translation>Nomic API Key</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"131\"/>\n        <source>API key to use for Nomic Embed. Get one from the Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API keys page&lt;/a&gt;. Requires restart.</source>\n        <translation>Nomic Embed 使用的 API 密钥。请访问官网获取，需要重启。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"165\"/>\n        <source>Embeddings Device</source>\n        <translation>Embeddings 设备</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"166\"/>\n        <source>The compute device used for embeddings. 
Requires restart.</source>\n        <translation>技术设备用于embeddings. 需要重启.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"176\"/>\n        <source>Application default</source>\n        <translation>程序默认</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"211\"/>\n        <source>Display</source>\n        <translation>显示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"224\"/>\n        <source>Show Sources</source>\n        <translation>查看源码</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"225\"/>\n        <source>Display the sources used for each response.</source>\n        <translation>显示每个响应所使用的源。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"242\"/>\n        <source>Advanced</source>\n        <translation>高级</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"258\"/>\n        <source>Warning: Advanced usage only.</source>\n        <translation>提示: 仅限高级使用。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"259\"/>\n        <source>Values too large may cause localdocs failure, extremely slow responses or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model&apos;s context window. 
More info &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;here&lt;/a&gt;.</source>\n        <translation>值过大可能会导致 localdocs 失败、响应速度极慢或根本无法响应。粗略地说，{N 个字符 x N 个片段} 被添加到模型的上下文窗口中。更多信息请见&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;此处&lt;/a&gt;。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"267\"/>\n        <source>Document snippet size (characters)</source>\n        <translation>文档粘贴大小 (字符)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"268\"/>\n        <source>Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>每个文档片段的字符数。较大的数值增加了事实性响应的可能性，但也会导致生成速度变慢。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"293\"/>\n        <source>Max document snippets per prompt</source>\n        <translation>每个提示的最大文档片段数</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"294\"/>\n        <source>Max best N matches of retrieved document snippets to add to the context for prompt. 
Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>检索到的文档片段最多添加到提示上下文中的前 N 个最佳匹配项。较大的数值增加了事实性响应的可能性，但也会导致生成速度变慢。</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsView</name>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"52\"/>\n        <source>LocalDocs</source>\n        <translation>本地文档</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"58\"/>\n        <source>Chat with your local files</source>\n        <translation>和本地文件对话</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"71\"/>\n        <source>＋ Add Collection</source>\n        <translation>＋ 添加集合</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <location filename=\"../../build_gpt4all-chat_Desktop_Qt_6_7_2/gpt4all/qml/LocalDocsView.qml\" line=\"85\"/>\n        <source></source>\n        <translation>&lt;h3&gt;错误：无法访问 LocalDocs 数据库或该数据库无效。&lt;/h3&gt;&lt;br&gt;&lt;i&gt;注意：尝试以下任何建议的修复方法后，您将需要重新启动。&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;确保设置为&lt;b&gt;下载路径&lt;/b&gt;的文件夹存在于文件系统中。&lt;/li&gt;&lt;li&gt;检查&lt;b&gt;下载路径&lt;/b&gt;的所有权以及读写权限。&lt;/li&gt;&lt;li&gt;如果有&lt;b&gt;localdocs_v2.db&lt;/b&gt;文件，请检查其所有权和读/写权限。&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;如果问题仍然存在，并且存在任何“localdocs_v*.db”文件，作为最后的手段，您可以&lt;br&gt;尝试备份并删除它们。但是，您必须重新创建您的收藏。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;ERROR: The LocalDocs database cannot be accessed or is not valid.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Note: You will need to restart after trying any of the following suggested fixes.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Make sure that the folder set as &lt;b&gt;Download Path&lt;/b&gt; exists on the file system.&lt;/li&gt;&lt;li&gt;Check 
ownership as well as read and write permissions of the &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;If there is a &lt;b&gt;localdocs_v2.db&lt;/b&gt; file, check its ownership and read/write permissions, too.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;If the problem persists and there are any &apos;localdocs_v*.db&apos; files present, as a last resort you can&lt;br&gt;try backing them up and removing them. You will have to recreate your collections, however.</source>\n        <translation>&lt;h3&gt;错误：无法访问 LocalDocs 数据库或该数据库无效。&lt;/h3&gt;&lt;br&gt;&lt;i&gt;注意：尝试以下任何建议的修复方法后，您将需要重新启动。&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;确保设置为&lt;b&gt;下载路径&lt;/b&gt;的文件夹存在于文件系统中。&lt;/li&gt;&lt;li&gt;检查&lt;b&gt;下载路径&lt;/b&gt;的所有权以及读写权限。&lt;/li&gt;&lt;li&gt;如果有&lt;b&gt;localdocs_v2.db&lt;/b&gt;文件，请检查其所有权和读/写权限。&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;如果问题仍然存在，并且存在任何“localdocs_v*.db”文件，作为最后的手段，您可以&lt;br&gt;尝试备份并删除它们。但是，您必须重新创建您的收藏。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"109\"/>\n        <source>No Collections Installed</source>\n        <translation>没有集合</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"118\"/>\n        <source>Install a collection of local documents to get started using this feature</source>\n        <translation>安装一组本地文档以开始使用此功能</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"129\"/>\n        <source>＋ Add Doc Collection</source>\n        <translation>＋  添加文档集合</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"134\"/>\n        <source>Shows the add model view</source>\n        <translation>查看添加的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"231\"/>\n        <source>Indexing progressBar</source>\n        <translation>索引进度</translation>\n    </message>\n    <message>\n     
   <location filename=\"../qml/LocalDocsView.qml\" line=\"232\"/>\n        <source>Shows the progress made in the indexing</source>\n        <translation>显示索引进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"257\"/>\n        <source>ERROR</source>\n        <translation>错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"261\"/>\n        <source>INDEXING</source>\n        <translation>索引</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"265\"/>\n        <source>EMBEDDING</source>\n        <translation>EMBEDDING</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"268\"/>\n        <source>REQUIRES UPDATE</source>\n        <translation>需更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"271\"/>\n        <source>READY</source>\n        <translation>准备</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"273\"/>\n        <source>INSTALLING</source>\n        <translation>安装中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"300\"/>\n        <source>Indexing in progress</source>\n        <translation>构建索引中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"303\"/>\n        <source>Embedding in progress</source>\n        <translation>Embedding进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"306\"/>\n        <source>This collection requires an update after version change</source>\n        <translation>此集合需要在版本更改后进行更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"309\"/>\n        
<source>Automatically reindexes upon changes to the folder</source>\n        <translation>在文件夹变动时自动重新索引</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"311\"/>\n        <source>Installation in progress</source>\n        <translation>安装进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"325\"/>\n        <source>%</source>\n        <translation>%</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n 文件</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n 词</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"408\"/>\n        <source>Remove</source>\n        <translation>删除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"420\"/>\n        <source>Rebuild</source>\n        <translation>重新构建</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"423\"/>\n        <source>Reindex this folder from scratch. This is slow and usually not needed.</source>\n        <translation>从头开始重新索引此文件夹。这个过程较慢，通常情况下不需要。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"430\"/>\n        <source>Update</source>\n        <translation>更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"433\"/>\n        <source>Update the collection to the new version. 
This is a slow operation.</source>\n        <translation>将集合更新为新版本。这是一个缓慢的操作。</translation>\n    </message>\n</context>\n<context>\n    <name>ModelList</name>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1344\"/>\n        <location filename=\"../src/modellist.cpp\" line=\"1395\"/>\n        <source>cannot open &quot;%1&quot;: %2</source>\n        <translation>无法打开“%1”：%2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1356\"/>\n        <source>cannot create &quot;%1&quot;: %2</source>\n        <translation>无法创建“%1”：%2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1406\"/>\n        <source>%1 (%2)</source>\n        <translation>%1 (%2)</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1407\"/>\n        <source>&lt;strong&gt;OpenAI-Compatible API Model&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API Key: %1&lt;/li&gt;&lt;li&gt;Base URL: %2&lt;/li&gt;&lt;li&gt;Model Name: %3&lt;/li&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;与 OpenAI 兼容的 API 模型&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API 密钥：%1&lt;/li&gt;&lt;li&gt;基本 URL：%2&lt;/li&gt;&lt;li&gt;模型名称：%3&lt;/li&gt;&lt;/ul&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1716\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal OpenAI API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to OpenAI!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with OpenAI&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;here.&lt;/a&gt;&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;需要个人 OpenAI API 密钥。&lt;/li&gt;&lt;li&gt;警告：将把您的聊天内容发送给 OpenAI！&lt;/li&gt;&lt;li&gt;您的 API 密钥将存储在磁盘上&lt;/li&gt;&lt;li&gt;仅用于与 
OpenAI 通信&lt;/li&gt;&lt;li&gt;您可以在此处&lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;申请 API 密钥。&lt;/a&gt;&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1735\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1764\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</source>\n        <translation>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1796\"/>\n        <source>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1822\"/>\n        <source>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1849\"/>\n        <source>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1862\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal API key and the API base URL.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to the OpenAI-compatible API Server you specified!&lt;/li&gt;&lt;li&gt;Your API key will be stored on 
disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with the OpenAI-compatible API Server&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;需要个人 API 密钥和 API 基本 URL。&lt;/li&gt;&lt;li&gt;警告：将把您的聊天内容发送到您指定的与 OpenAI 兼容的 API 服务器！&lt;/li&gt;&lt;li&gt;您的 API 密钥将存储在磁盘上&lt;/li&gt;&lt;li&gt;仅用于与与 OpenAI 兼容的 API 服务器通信&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1879\"/>\n        <source>&lt;strong&gt;Connect to OpenAI-compatible API server&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;连接到与 OpenAI 兼容的 API 服务器&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1749\"/>\n        <source>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info.</source>\n        <translation>&lt;br&gt;&lt;br&gt;&lt;i&gt;* 即使您为ChatGPT-4向OpenAI付款，这也不能保证API密钥访问。联系OpenAI获取更多信息。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1777\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</translation>\n    </message>\n    
<message>\n        <location filename=\"../src/modellist.cpp\" line=\"2303\"/>\n        <source>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</translation>\n    </message>\n</context>\n<context>\n    <name>ModelSettings</name>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"14\"/>\n        <source>Model</source>\n        <translation>模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <source>%1 system message?</source>\n        <translation>%1系统消息？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Clear</source>\n        <translation>清除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Reset</source>\n        <translation>重置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>The system message will be %1.</source>\n        <translation>系统消息将被%1。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>removed</source>\n        <translation>删除</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>reset to the default</source>\n        <translation>重置为初始状态</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>%1 chat template?</source>\n        <translation>%1 对话模板？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>The chat template will be %1.</source>\n        <translation>对话模板将被%1。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>erased</source>\n        <translation>清除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"57\"/>\n        <source>Model Settings</source>\n        <translation>模型设置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"108\"/>\n        <source>Clone</source>\n        <translation>克隆</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"118\"/>\n        <source>Remove</source>\n        <translation>删除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"132\"/>\n        <source>Name</source>\n        <translation>名称</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"165\"/>\n        <source>Model File</source>\n        <translation>模型文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"190\"/>\n        <source>System Message</source>\n        <translation>系统消息</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"191\"/>\n        
<source>A message to set the context or guide the behavior of the model. Leave blank for none. NOTE: Since GPT4All 3.5, this should not contain control tokens.</source>\n        <translation>用于设定上下文或引导模型行为的消息。若无则留空。注意：自GPT4All 3.5版本开始，此信息中不应包含控制符。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"218\"/>\n        <source>System message is not &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;plain text&lt;/a&gt;.</source>\n        <translation>系统消息不是&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;纯文本&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"288\"/>\n        <source>Chat Template</source>\n        <translation>对话模板</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"289\"/>\n        <source>This Jinja template turns the chat into input for the model.</source>\n        <translation>该Jinja模板会将聊天内容转换为模型的输入。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"371\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation>未配置&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;对话模板&lt;/a&gt;。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"375\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;对话模板&lt;/a&gt;不能为空。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"379\"/>\n 
       <source>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Syntax error&lt;/a&gt;: %1</source>\n        <translation>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;语法错误&lt;/a&gt;: %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"383\"/>\n        <source>Chat template is not in &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja format&lt;/a&gt;.</source>\n        <translation>对话模板不是&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja格式&lt;/a&gt;.</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"409\"/>\n        <source>Chat Name Prompt</source>\n        <translation>聊天名称提示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"410\"/>\n        <source>Prompt used to automatically generate chat names.</source>\n        <translation>用于自动生成聊天名称的提示。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"452\"/>\n        <source>Suggested FollowUp Prompt</source>\n        <translation>建议的后续提示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"453\"/>\n        <source>Prompt used to generate suggested follow-up questions.</source>\n        <translation>用于生成建议的后续问题的提示。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"506\"/>\n        <source>Context Length</source>\n        <translation>上下文长度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"507\"/>\n        <source>Number of input and output tokens the model sees.</source>\n        <translation>模型看到的输入和输出令牌的数量。</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/ModelSettings.qml\" line=\"528\"/>\n        <source>Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>信息丢失前的最大组合提示/响应令牌。\n        使用比模型训练时更多的上下文将产生较差的结果。\n        注意：在重新加载模型之前不会生效。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"566\"/>\n        <source>Temperature</source>\n        <translation>温度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"567\"/>\n        <source>Randomness of model output. Higher -&gt; more variation.</source>\n        <translation>模型输出的随机性。更高-&gt;更多的变化。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"578\"/>\n        <source>Temperature increases the chances of choosing less likely tokens.\nNOTE: Higher temperature gives more creative but less predictable outputs.</source>\n        <translation>温度增加了选择不太可能的token的机会。\n        注：温度越高，输出越有创意，但预测性越低。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"612\"/>\n        <source>Top-P</source>\n        <translation>Top-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"613\"/>\n        <source>Nucleus Sampling factor. 
Lower -&gt; more predictable.</source>\n        <translation>核子取样系数。较低-&gt;更具可预测性。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"623\"/>\n        <source>Only the most likely tokens up to a total probability of top_p can be chosen.\nNOTE: Prevents choosing highly unlikely tokens.</source>\n        <translation>只能选择总概率高达top_p的最有可能的令牌。\n        注意：防止选择极不可能的token。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"657\"/>\n        <source>Min-P</source>\n        <translation>Min-P</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"658\"/>\n        <source>Minimum token probability. Higher -&gt; more predictable.</source>\n        <translation>最小令牌概率。更高 -&gt; 更可预测。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"668\"/>\n        <source>Sets the minimum relative probability for a token to be considered.</source>\n        <translation>设置被考虑的标记的最小相对概率。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"704\"/>\n        <source>Top-K</source>\n        <translation>Top-K</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"705\"/>\n        <source>Size of selection pool for tokens.</source>\n        <translation>令牌选择池的大小。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"716\"/>\n        <source>Only the top K most likely tokens will be chosen from.</source>\n        <translation>仅从最可能的前 K 个标记中选择</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"751\"/>\n        <source>Max Length</source>\n        <translation>最大长度</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ModelSettings.qml\" line=\"752\"/>\n        <source>Maximum response length, in tokens.</source>\n        <translation>最大响应长度（以令牌为单位）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"797\"/>\n        <source>Prompt Batch Size</source>\n        <translation>提示词大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"798\"/>\n        <source>The batch size used for prompt processing.</source>\n        <translation>用于快速处理的批量大小。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"809\"/>\n        <source>Amount of prompt tokens to process at once.\nNOTE: Higher values can speed up reading prompts but will use more RAM.</source>\n        <translation>一次要处理的提示令牌数量。\n        注意：较高的值可以加快读取提示，但会使用更多的RAM。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"844\"/>\n        <source>Repeat Penalty</source>\n        <translation>重复惩罚</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"845\"/>\n        <source>Repetition penalty factor. 
Set to 1 to disable.</source>\n        <translation>重复处罚系数。设置为1可禁用。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"889\"/>\n        <source>Repeat Penalty Tokens</source>\n        <translation>重复惩罚数</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"890\"/>\n        <source>Number of previous tokens used for penalty.</source>\n        <translation>用于惩罚的先前令牌数量。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"935\"/>\n        <source>GPU Layers</source>\n        <translation>GPU 层</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"936\"/>\n        <source>Number of model layers to load into VRAM.</source>\n        <translation>要加载到VRAM中的模型层数。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"947\"/>\n        <source>How many model layers to load into VRAM. 
Decrease this if GPT4All runs out of VRAM while loading this model.\nLower values increase CPU load and RAM usage, and make inference slower.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>将多少模型层加载到VRAM中。如果GPT4All在加载此模型时耗尽VRAM，请减少此值。\n        较低的值会增加CPU负载和RAM使用率，并使推理速度变慢。\n        注意：在重新加载模型之前不会生效。</translation>\n    </message>\n</context>\n<context>\n    <name>ModelsView</name>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"40\"/>\n        <source>No Models Installed</source>\n        <translation>无模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"49\"/>\n        <source>Install a model to get started using GPT4All</source>\n        <translation>安装模型并开始使用</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"60\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"102\"/>\n        <source>＋ Add Model</source>\n        <translation>＋ 添加模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"65\"/>\n        <source>Shows the add model view</source>\n        <translation>查看增加到模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"83\"/>\n        <source>Installed Models</source>\n        <translation>已安装的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"89\"/>\n        <source>Locally installed chat models</source>\n        <translation>本地安装的聊天</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"147\"/>\n        <source>Model file</source>\n        <translation>模型文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"148\"/>\n        <source>Model file to be downloaded</source>\n        
<translation>待下载的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"170\"/>\n        <source>Description</source>\n        <translation>描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"171\"/>\n        <source>File description</source>\n        <translation>文件描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Resume</source>\n        <translation>继续</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"204\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>停止/重启/开始下载</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"216\"/>\n        <source>Remove</source>\n        <translation>删除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"223\"/>\n        <source>Remove model from filesystem</source>\n        <translation>从系统中删除模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"237\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"271\"/>\n        <source>Install</source>\n        <translation>安装</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"272\"/>\n        <source>Install online model</source>\n        <translation>安装在线模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"282\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n   
     <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;错误&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"301\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;警告：不推荐用于您的硬件。模型需要的内存（%1 GB）超过了您系统的可用内存（%2）&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"399\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>错误：$API_KEY 为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"420\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>错误：$BASE_URL 为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"426\"/>\n        <source>enter $BASE_URL</source>\n        <translation>输入 $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"441\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>错误：$MODEL_NAME为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"447\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>输入：$MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>?</source>\n        <translation>？</translation>\n    </message>\n    <message>\n        
<location filename=\"../qml/ModelsView.qml\" line=\"288\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>描述下载时发生的错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"307\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>硬件不兼容的错误</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"345\"/>\n        <source>Download progressBar</source>\n        <translation>下载进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"346\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>显示下载进度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"356\"/>\n        <source>Download speed</source>\n        <translation>下载速度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"357\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>下载速度  b/kb/mb /s</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"374\"/>\n        <source>Calculating...</source>\n        <translation>计算中...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"378\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"408\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"429\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"450\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>是否正在计算文件哈希</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"385\"/>\n        <source>Busy indicator</source>\n        <translation>繁忙程度</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"386\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>在计算文件哈希时显示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"405\"/>\n        <source>enter $API_KEY</source>\n        <translation>输入 $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"469\"/>\n        <source>File size</source>\n        <translation>文件大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"491\"/>\n        <source>RAM required</source>\n        <translation>需要 RAM</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"513\"/>\n        <source>Parameters</source>\n        <translation>参数</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"535\"/>\n        <source>Quant</source>\n        <translation>量化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"557\"/>\n        <source>Type</source>\n        <translation>类型</translation>\n    </message>\n</context>\n<context>\n    <name>MyFancyLink</name>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"42\"/>\n        <source>Fancy link</source>\n        <translation>精选链接</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"43\"/>\n        <source>A stylized link</source>\n        <translation>样式化链接</translation>\n    </message>\n</context>\n<context>\n    <name>MyFileDialog</name>\n    <message>\n        <location filename=\"../qml/MyFileDialog.qml\" line=\"7\"/>\n        <source>Please choose a file</source>\n        <translation>请选择一个文件</translation>\n    </message>\n</context>\n<context>\n    
<name>MyFolderDialog</name>\n    <message>\n        <location filename=\"../qml/MyFolderDialog.qml\" line=\"7\"/>\n        <source>Please choose a directory</source>\n        <translation>请选择目录</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsLabel</name>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Clear</source>\n        <translation>清除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Reset</source>\n        <translation>重置</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsTab</name>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"24\"/>\n        <source>Restore defaults?</source>\n        <translation>恢复初始化？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"25\"/>\n        <source>This page of settings will be reset to the defaults.</source>\n        <translation>该页面的设置项将重置为默认值。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"69\"/>\n        <source>Restore Defaults</source>\n        <translation>恢复初始化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"73\"/>\n        <source>Restores settings dialog to a default state</source>\n        <translation>将设置对话框恢复为默认状态</translation>\n    </message>\n</context>\n<context>\n    <name>NetworkDialog</name>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"39\"/>\n        <source>Contribute data to the GPT4All Opensource Datalake.</source>\n        <translation>向GPT4All开源数据湖贡献数据</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"55\"/>\n        <source>By enabling this feature, you will be able to participate in the democratic process of 
training a large language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!</source>\n        <translation>通过启用此功能，您将能够通过为未来的模型改进贡献数据来参与训练大型语言模型的民主过程。\n\n        当 GPT4All 模型回复您并且您已选择加入时，您的对话将被发送到 GPT4All 开源数据湖。此外，您可以喜欢/不喜欢它的回复。如果您不喜欢某个回复，您可以建议其他回复。这些数据将在 GPT4All 数据湖中收集和汇总。\n        \n        注意：通过启用此功能，您将把数据发送到 GPT4All 开源数据湖。启用此功能后，您不应该期望聊天隐私。但是，如果您愿意，您应该期望可选的归因。您的聊天数据将公开供任何人下载，并将被 Nomic AI 用于改进未来的 GPT4All 模型。Nomic AI 将保留与您的数据相关的所有归因信息，并且您将被视为使用您的数据的任何 GPT4All 模型发布的贡献者！</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"70\"/>\n        <source>Terms for opt-in</source>\n        <translation>选择加入的条款</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"71\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>描述选择加入时会发生的情况</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"79\"/>\n        <source>Please provide a name for attribution (optional)</source>\n        <translation>填写名称属性 
(可选)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"81\"/>\n        <source>Attribution (optional)</source>\n        <translation>属性 (可选)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"82\"/>\n        <source>Provide attribution</source>\n        <translation>提供属性</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"95\"/>\n        <source>Enable</source>\n        <translation>启用</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"96\"/>\n        <source>Enable opt-in</source>\n        <translation>启用选择加入</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"100\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"101\"/>\n        <source>Cancel opt-in</source>\n        <translation>取消加入</translation>\n    </message>\n</context>\n<context>\n    <name>NewVersionDialog</name>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"34\"/>\n        <source>New version is available</source>\n        <translation>新版本可选</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"46\"/>\n        <source>Update</source>\n        <translation>更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"48\"/>\n        <source>Update to new version</source>\n        <translation>更新到新版本</translation>\n    </message>\n</context>\n<context>\n    <name>PopupDialog</name>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"38\"/>\n        <source>Reveals a shortlived help balloon</source>\n        
<translation>显示一个短暂的帮助气球</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"48\"/>\n        <source>Busy indicator</source>\n        <translation>繁忙程度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"49\"/>\n        <source>Displayed when the popup is showing busy</source>\n        <translation>在弹出窗口显示忙碌时显示</translation>\n    </message>\n</context>\n<context>\n    <name>RemoteModelCard</name>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"92\"/>\n        <source>API Key</source>\n        <translation type=\"unfinished\">API 密钥</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"104\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\">错误：$API_KEY 为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"117\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">输入 $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"120\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\">是否正在计算文件哈希</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"127\"/>\n        <source>Base Url</source>\n        <translation type=\"unfinished\">基础 URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"138\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\">错误：$BASE_URL 为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"144\"/>\n        <source>enter $BASE_URL</source>\n        <translation 
type=\"unfinished\">输入 $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"152\"/>\n        <source>Model Name</source>\n        <translation type=\"unfinished\">模型名称</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"163\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\">错误：$MODEL_NAME为空</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"169\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">输入：$MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"179\"/>\n        <source>Models</source>\n        <translation type=\"unfinished\">模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"199\"/>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"217\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\">安装</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"218\"/>\n        <source>Install remote model</source>\n        <translation type=\"unfinished\">安装远程模型</translation>\n    </message>\n</context>\n<context>\n    <name>SettingsView</name>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"22\"/>\n        <location filename=\"../qml/SettingsView.qml\" line=\"61\"/>\n        <source>Settings</source>\n        <translation>设置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"23\"/>\n        <source>Contains various application settings</source>\n        <translation>包含各种应用程序设置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" 
line=\"29\"/>\n        <source>Application</source>\n        <translation>应用</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"32\"/>\n        <source>Model</source>\n        <translation>模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"35\"/>\n        <source>LocalDocs</source>\n        <translation>本地文档</translation>\n    </message>\n</context>\n<context>\n    <name>StartupDialog</name>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"50\"/>\n        <source>Welcome!</source>\n        <translation>欢迎！</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"67\"/>\n        <source>### Release Notes\n%1&lt;br/&gt;\n### Contributors\n%2</source>\n        <translation>### 发布日志\n%1&lt;br/&gt;\n### 贡献者\n%2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"71\"/>\n        <source>Release notes</source>\n        <translation>发布日志</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"72\"/>\n        <source>Release notes for this version</source>\n        <translation>本版本发布日志</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"87\"/>\n        <source>### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. 
This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should; however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!</source>\n        <translation>### 选择加入匿名使用分析和数据湖\n        通过启用这些功能，您将能够通过为未来的模型改进贡献数据来参与训练大型语言模型的民主过程。\n        当 GPT4All 模型回复您并且您已选择加入时，您的对话将被发送到 GPT4All\n        开源数据湖。此外，您可以喜欢/不喜欢它的回复。如果您不喜欢某个回复，您可以建议其他回复。这些数据将在 GPT4All 数据湖中收集和汇总。\n        注意：通过启用此功能，您将把您的数据发送到 GPT4All 开源数据湖。\n        启用此功能后，您不应该期望聊天隐私。但是，如果您愿意，您应该期望可选的归因。您的聊天数据将公开供任何人下载，并将由 Nomic AI 用于改进未来的 GPT4All 模型。 Nomic AI 将保留与您的数据相关的所有\n        归因信息，并且您将被视为使用您的数据的任何 GPT4All\n        模型发布的贡献者！</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"106\"/>\n        <source>Terms for opt-in</source>\n        <translation>选择加入选项</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"107\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>描述选择加入时会发生的情况</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"118\"/>\n        <source>Opt-in to anonymous usage analytics used to improve GPT4All</source>\n        <translation>选择加入匿名使用分析，以帮助改进GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"124\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"150\"/>\n        <source>Opt-in for anonymous usage statistics</source>\n    
    <translation>允许选择加入匿名使用统计数据</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"147\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"262\"/>\n        <source>Yes</source>\n        <translation>是</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"151\"/>\n        <source>Allow opt-in for anonymous usage statistics</source>\n        <translation>允许选择加入匿名使用统计数据</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"189\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"304\"/>\n        <source>No</source>\n        <translation>否</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"192\"/>\n        <source>Opt-out for anonymous usage statistics</source>\n        <translation>退出匿名使用统计数据</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"193\"/>\n        <source>Allow opt-out for anonymous usage statistics</source>\n        <translation>允许选择退出匿名使用统计数据</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"232\"/>\n        <source>Opt-in to anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>选择匿名共享聊天记录到GPT4All数据池</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"238\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"265\"/>\n        <source>Opt-in for network</source>\n        <translation>选择加入网络</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"239\"/>\n        <source>Allow opt-in for network</source>\n        <translation>允许选择加入网络</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" 
line=\"266\"/>\n        <source>Allow opt-in anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>允许选择加入匿名共享聊天至 GPT4All 数据湖</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"307\"/>\n        <source>Opt-out for network</source>\n        <translation>选择退出网络</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"308\"/>\n        <source>Allow opt-out anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>允许选择退出将聊天匿名共享至 GPT4All 数据池</translation>\n    </message>\n</context>\n<context>\n    <name>ThumbsDownDialog</name>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"39\"/>\n        <source>Please edit the text below to provide a better response. (optional)</source>\n        <translation>请编辑下方文本以提供更好的回复。（可选）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"54\"/>\n        <source>Please provide a better response...</source>\n        <translation>提供更好回答...</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"64\"/>\n        <source>Submit</source>\n        <translation>提交</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"65\"/>\n        <source>Submits the user&apos;s response</source>\n        <translation>提交用户响应</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"69\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"70\"/>\n        <source>Closes the response dialog</source>\n        <translation>关闭的对话</translation>\n    </message>\n</context>\n<context>\n    <name>main</name>\n    <message>\n        <location 
filename=\"../main.qml\" line=\"24\"/>\n        <source>GPT4All v%1</source>\n        <translation>GPT4All v%1</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"47\"/>\n        <source>Restore</source>\n        <translation>恢复</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"51\"/>\n        <source>Quit</source>\n        <translation>退出</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"149\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Incompatible hardware detected.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, your CPU does not meet the minimal requirements to run this program. In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.&lt;br&gt;&lt;br&gt;See here for more information: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</source>\n        <translation>&lt;h3&gt;启动时遇到错误：&lt;/h3&gt;&lt;br&gt;&lt;i&gt;“检测到不兼容的硬件。”&lt;/i&gt;&lt;br&gt;&lt;br&gt;很遗憾，您的 CPU 不满足运行此程序的最低要求。特别是，它不支持此程序成功运行现代大型语言模型所需的 AVX 内在函数。目前唯一的解决方案是将您的硬件升级到更现代的 CPU。&lt;br&gt;&lt;br&gt;有关更多信息，请参阅此处：&lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&gt;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"165\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Inability to access settings file.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, something is preventing the program from accessing the settings file. 
This could be caused by incorrect permissions in the local app config directory where the settings file is located. Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help.</source>\n        <translation>&lt;h3&gt;启动时遇到错误：&lt;/h3&gt;&lt;br&gt;&lt;i&gt;“无法访问设置文件。”&lt;/i&gt;&lt;br&gt;&lt;br&gt;不幸的是，某些东西阻止程序访问设置文件。这可能是由于设置文件所在的本地应用程序配置目录中的权限不正确造成的。请查看我们的&lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord 频道&lt;/a&gt; 以获取帮助。</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"193\"/>\n        <source>Connection to datalake failed.</source>\n        <translation>链接数据湖失败</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"204\"/>\n        <source>Saving chats.</source>\n        <translation>保存对话</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"215\"/>\n        <source>Network dialog</source>\n        <translation>网络对话</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"216\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>选择加入以共享反馈/对话</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"278\"/>\n        <source>Home view</source>\n        <translation>主页</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"279\"/>\n        <source>Home view of application</source>\n        <translation>主页</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"287\"/>\n        <source>Home</source>\n        <translation>主页</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"313\"/>\n        <source>Chat view</source>\n        <translation>对话视图</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" 
line=\"314\"/>\n        <source>Chat view to interact with models</source>\n        <translation>聊天视图可与模型互动</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"322\"/>\n        <source>Chats</source>\n        <translation>对话</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"347\"/>\n        <location filename=\"../main.qml\" line=\"356\"/>\n        <source>Models</source>\n        <translation>模型</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"348\"/>\n        <source>Models view for installed models</source>\n        <translation>已安装模型的页面</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"381\"/>\n        <location filename=\"../main.qml\" line=\"390\"/>\n        <source>LocalDocs</source>\n        <translation>本地文档</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"382\"/>\n        <source>LocalDocs view to configure and use local docs</source>\n        <translation>LocalDocs视图可配置和使用本地文档</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"415\"/>\n        <location filename=\"../main.qml\" line=\"424\"/>\n        <source>Settings</source>\n        <translation>设置</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"416\"/>\n        <source>Settings view for application configuration</source>\n        <translation>设置页面</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"469\"/>\n        <source>The datalake is enabled</source>\n        <translation>数据湖已开启</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"471\"/>\n        <source>Using a network model</source>\n        <translation>使用联网模型</translation>\n    </message>\n    <message>\n        <location 
filename=\"../main.qml\" line=\"473\"/>\n        <source>Server mode is enabled</source>\n        <translation>服务器模式已开</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"684\"/>\n        <source>Installed models</source>\n        <translation>安装模型</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"685\"/>\n        <source>View of installed models</source>\n        <translation>查看已安装模型</translation>\n    </message>\n</context>\n</TS>\n"
  },
  {
    "path": "gpt4all-chat/translations/gpt4all_zh_TW.ts",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE TS>\n<TS version=\"2.1\" language=\"zh_TW\">\n<context>\n    <name>AddCollectionView</name>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"45\"/>\n        <source>← Existing Collections</source>\n        <translation>← 現有收藏</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"68\"/>\n        <source>Add Document Collection</source>\n        <translation>新增收藏文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"78\"/>\n        <source>Add a folder containing plain text files, PDFs, or Markdown. Configure additional extensions in Settings.</source>\n        <translation>新增一個含有純文字檔案、PDF 與 Markdown 文件的資料夾。可在設定上增加文件副檔名。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"99\"/>\n        <source>Name</source>\n        <translation>名稱</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"114\"/>\n        <source>Collection name...</source>\n        <translation>收藏名稱......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"116\"/>\n        <source>Name of the collection to add (Required)</source>\n        <translation>新增的收藏名稱（必填）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"132\"/>\n        <source>Folder</source>\n        <translation>資料夾</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"149\"/>\n        <source>Folder path...</source>\n        <translation>資料夾路徑......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"152\"/>\n        <source>Folder path to documents 
(Required)</source>\n        <translation>文件所屬的資料夾路徑（必填）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"164\"/>\n        <source>Browse</source>\n        <translation>瀏覽</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddCollectionView.qml\" line=\"177\"/>\n        <source>Create Collection</source>\n        <translation>建立收藏</translation>\n    </message>\n</context>\n<context>\n    <name>AddGPT4AllModelView</name>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"31\"/>\n        <source>These models have been specifically configured for use in GPT4All. The first few models on the list are known to work the best, but you should only attempt to use models that will fit in your available memory.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"45\"/>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"unfinished\">網路錯誤：無法取得 %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"55\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"343\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\">忙線指示器</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"56\"/>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"unfinished\">當模型請求正在進行時顯示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"65\"/>\n        <source>All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"85\"/>\n        
<source>Reasoning</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"142\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\">模型檔案</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"143\"/>\n        <source>Model file to be downloaded</source>\n        <translation type=\"unfinished\">即將下載的模型檔案</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"166\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\">描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"167\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\">檔案描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\">恢復</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"200\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\">下載</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"208\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\">停止/重啟/開始下載</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"220\"/>\n        <source>Remove</source>\n        <translation type=\"unfinished\">移除</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"227\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\">從檔案系統移除模型</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"obsolete\">安裝</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"obsolete\">安裝線上模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"240\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;錯誤&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"246\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\">解釋下載時發生的錯誤</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"259\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;警告：不推薦在您的硬體上運作。模型需要比較多的記憶體（%1 GB），但您的系統記憶體空間不足（%2）。&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"265\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\">錯誤，不相容的硬體</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"303\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\">下載進度條</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"304\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\">顯示下載進度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"314\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\">下載速度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"315\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\">下載速度每秒 bytes/kilobytes/megabytes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"332\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\">計算中......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"336\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\">是否正在計算檔案雜湊</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddGPT4AllModelView.qml\" line=\"344\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\">計算檔案雜湊值時顯示</translation>\n    </message>\n    <message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"obsolete\">錯誤：$API_KEY 未填寫。</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"obsolete\">請輸入 $API_KEY</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"obsolete\">錯誤：$BASE_URL 未填寫。</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"obsolete\">請輸入 $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"obsolete\">錯誤：$MODEL_NAME 未填寫。</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"obsolete\">請輸入 $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"364\"/>\n        <source>File size</source>\n        <translation type=\"unfinished\">檔案大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"386\"/>\n        <source>RAM required</source>\n        <translation type=\"unfinished\">所需的記憶體</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <source>%1 GB</source>\n        <translation type=\"unfinished\">%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"391\"/>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"413\"/>\n        <source>?</source>\n        <translation type=\"unfinished\">？</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"408\"/>\n        <source>Parameters</source>\n        <translation type=\"unfinished\">參數</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"430\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\">量化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddGPT4AllModelView.qml\" line=\"452\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\">類型</translation>\n    </message>\n</context>\n<context>\n    <name>AddHFModelView</name>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"32\"/>\n        <source>Use the search to find and download models from HuggingFace. There is NO GUARANTEE that these will work. Many will require additional configuration before they can be used.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"52\"/>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"unfinished\">透過關鍵字搜尋探索並下載模型......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"55\"/>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"unfinished\">用於探索與過濾可下載模型的文字字段</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"61\"/>\n        <source>Searching · %1</source>\n        <translation type=\"unfinished\">搜尋 · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"131\"/>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"unfinished\">探索與過濾模型</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"132\"/>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"unfinished\">觸發探索與過濾模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"151\"/>\n        <source>Default</source>\n        <translation type=\"unfinished\">預設</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"152\"/>\n        <source>Likes</source>\n        <translation type=\"unfinished\">讚</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"153\"/>\n        <source>Downloads</source>\n        <translation type=\"unfinished\">下載次數</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"154\"/>\n        <source>Recent</source>\n        <translation type=\"unfinished\">最新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"162\"/>\n        <source>Sort by: %1</source>\n        <translation type=\"unfinished\">排序依據：%1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"176\"/>\n        <source>Asc</source>\n        <translation type=\"unfinished\">升序</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"177\"/>\n        <source>Desc</source>\n        <translation type=\"unfinished\">降序</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"190\"/>\n        <source>Sort dir: %1</source>\n        <translation type=\"unfinished\">排序順序：%1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"212\"/>\n        <source>None</source>\n        <translation 
type=\"unfinished\">無</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"234\"/>\n        <source>Limit: %1</source>\n        <translation type=\"unfinished\">上限：%1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"297\"/>\n        <source>Model file</source>\n        <translation type=\"unfinished\">模型檔案</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"298\"/>\n        <source>Model file to be downloaded</source>\n        <translation type=\"unfinished\">即將下載的模型檔案</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"321\"/>\n        <source>Description</source>\n        <translation type=\"unfinished\">描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"322\"/>\n        <source>File description</source>\n        <translation type=\"unfinished\">檔案描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Resume</source>\n        <translation type=\"unfinished\">恢復</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"355\"/>\n        <source>Download</source>\n        <translation type=\"unfinished\">下載</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"363\"/>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"unfinished\">停止/重啟/開始下載</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"375\"/>\n   
     <source>Remove</source>\n        <translation type=\"unfinished\">移除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"382\"/>\n        <source>Remove model from filesystem</source>\n        <translation type=\"unfinished\">從檔案系統移除模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"396\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"430\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\">安裝</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"431\"/>\n        <source>Install online model</source>\n        <translation type=\"unfinished\">安裝線上模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"441\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;錯誤&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"447\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"unfinished\">解釋下載時發生的錯誤</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"460\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. 
Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"unfinished\">&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;警告：不推薦在您的硬體上運作。模型需要比較多的記憶體（%1 GB），但您的系統記憶體空間不足（%2）。&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"466\"/>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"unfinished\">錯誤，不相容的硬體</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"504\"/>\n        <source>Download progressBar</source>\n        <translation type=\"unfinished\">下載進度條</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"505\"/>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"unfinished\">顯示下載進度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"515\"/>\n        <source>Download speed</source>\n        <translation type=\"unfinished\">下載速度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"516\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"unfinished\">下載速度每秒 bytes/kilobytes/megabytes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"533\"/>\n        <source>Calculating...</source>\n        <translation type=\"unfinished\">計算中......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"537\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"567\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"588\"/>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"609\"/>\n        
<source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\">是否正在計算檔案雜湊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"544\"/>\n        <source>Busy indicator</source>\n        <translation type=\"unfinished\">忙線指示器</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"545\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"unfinished\">計算檔案雜湊值時顯示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"558\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\">錯誤：$API_KEY 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"564\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">請輸入 $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"579\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\">錯誤：$BASE_URL 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"585\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\">請輸入 $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"600\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\">錯誤：$MODEL_NAME 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"606\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">請輸入 $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/AddHFModelView.qml\" line=\"628\"/>\n        <source>File size</source>\n        <translation type=\"unfinished\">檔案大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"650\"/>\n        <source>Quant</source>\n        <translation type=\"unfinished\">量化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddHFModelView.qml\" line=\"672\"/>\n        <source>Type</source>\n        <translation type=\"unfinished\">類型</translation>\n    </message>\n</context>\n<context>\n    <name>AddModelView</name>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"55\"/>\n        <source>← Existing Models</source>\n        <translation>← 現有模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"75\"/>\n        <source>Explore Models</source>\n        <translation>探索模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"86\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\">GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"93\"/>\n        <source>Remote Providers</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddModelView.qml\" line=\"100\"/>\n        <source>HuggingFace</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <source>Discover and download models by keyword search...</source>\n        <translation type=\"vanished\">透過關鍵字搜尋探索並下載模型......</translation>\n    </message>\n    <message>\n        <source>Text field for discovering and filtering downloadable models</source>\n        <translation type=\"vanished\">用於探索與過濾可下載模型的文字字段</translation>\n    </message>\n    <message>\n        <source>Searching · 
%1</source>\n        <translation type=\"vanished\">搜尋 · %1</translation>\n    </message>\n    <message>\n        <source>Initiate model discovery and filtering</source>\n        <translation type=\"vanished\">探索與過濾模型</translation>\n    </message>\n    <message>\n        <source>Triggers discovery and filtering of models</source>\n        <translation type=\"vanished\">觸發探索與過濾模型</translation>\n    </message>\n    <message>\n        <source>Default</source>\n        <translation type=\"vanished\">預設</translation>\n    </message>\n    <message>\n        <source>Likes</source>\n        <translation type=\"vanished\">讚</translation>\n    </message>\n    <message>\n        <source>Downloads</source>\n        <translation type=\"vanished\">下載次數</translation>\n    </message>\n    <message>\n        <source>Recent</source>\n        <translation type=\"vanished\">最新</translation>\n    </message>\n    <message>\n        <source>Sort by: %1</source>\n        <translation type=\"vanished\">排序依據：%1</translation>\n    </message>\n    <message>\n        <source>Asc</source>\n        <translation type=\"vanished\">升序</translation>\n    </message>\n    <message>\n        <source>Desc</source>\n        <translation type=\"vanished\">降序</translation>\n    </message>\n    <message>\n        <source>Sort dir: %1</source>\n        <translation type=\"vanished\">排序順序：%1</translation>\n    </message>\n    <message>\n        <source>None</source>\n        <translation type=\"vanished\">無</translation>\n    </message>\n    <message>\n        <source>Limit: %1</source>\n        <translation type=\"vanished\">上限：%1</translation>\n    </message>\n    <message>\n        <source>Network error: could not retrieve %1</source>\n        <translation type=\"vanished\">網路錯誤：無法取得 %1</translation>\n    </message>\n    <message>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation 
type=\"vanished\">&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;錯誤&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation type=\"vanished\">&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;警告：不推薦在您的硬體上運作。模型需要比較多的記憶體（%1 GB），但您的系統記憶體空間不足（%2）。&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <source>%1 GB</source>\n        <translation type=\"vanished\">%1 GB</translation>\n    </message>\n    <message>\n        <source>?</source>\n        <translation type=\"vanished\">？</translation>\n    </message>\n    <message>\n        <source>Busy indicator</source>\n        <translatorcomment>參考自 https://terms.naer.edu.tw</translatorcomment>\n        <translation type=\"vanished\">忙線指示器</translation>\n    </message>\n    <message>\n        <source>Displayed when the models request is ongoing</source>\n        <translation type=\"vanished\">當模型請求正在進行時顯示</translation>\n    </message>\n    <message>\n        <source>Model file</source>\n        <translation type=\"vanished\">模型檔案</translation>\n    </message>\n    <message>\n        <source>Model file to be downloaded</source>\n        <translation type=\"vanished\">即將下載的模型檔案</translation>\n    </message>\n    <message>\n        <source>Description</source>\n        <translation type=\"vanished\">描述</translation>\n    </message>\n    <message>\n        <source>File description</source>\n        <translation type=\"vanished\">檔案描述</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">取消</translation>\n    </message>\n    <message>\n        <source>Resume</source>\n        <translation type=\"vanished\">恢復</translation>\n    </message>\n    
<message>\n        <source>Download</source>\n        <translation type=\"vanished\">下載</translation>\n    </message>\n    <message>\n        <source>Stop/restart/start the download</source>\n        <translation type=\"vanished\">停止/重啟/開始下載</translation>\n    </message>\n    <message>\n        <source>Remove</source>\n        <translation type=\"vanished\">移除</translation>\n    </message>\n    <message>\n        <source>Remove model from filesystem</source>\n        <translation type=\"vanished\">從檔案系統移除模型</translation>\n    </message>\n    <message>\n        <source>Install</source>\n        <translation type=\"vanished\">安裝</translation>\n    </message>\n    <message>\n        <source>Install online model</source>\n        <translation type=\"vanished\">安裝線上模型</translation>\n    </message>\n    <message>\n        <source>Describes an error that occurred when downloading</source>\n        <translation type=\"vanished\">解釋下載時發生的錯誤</translation>\n    </message>\n    <message>\n        <source>Error for incompatible hardware</source>\n        <translation type=\"vanished\">錯誤，不相容的硬體</translation>\n    </message>\n    <message>\n        <source>Download progressBar</source>\n        <translation type=\"vanished\">下載進度條</translation>\n    </message>\n    <message>\n        <source>Shows the progress made in the download</source>\n        <translation type=\"vanished\">顯示下載進度</translation>\n    </message>\n    <message>\n        <source>Download speed</source>\n        <translation type=\"vanished\">下載速度</translation>\n    </message>\n    <message>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation type=\"vanished\">下載速度每秒 bytes/kilobytes/megabytes</translation>\n    </message>\n    <message>\n        <source>Calculating...</source>\n        <translation type=\"vanished\">計算中......</translation>\n    </message>\n    <message>\n        <source>Whether the file hash is being calculated</source>\n        <translation 
type=\"vanished\">是否正在計算檔案雜湊</translation>\n    </message>\n    <message>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation type=\"vanished\">計算檔案雜湊值時顯示</translation>\n    </message>\n    <message>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"vanished\">錯誤：$API_KEY 未填寫。</translation>\n    </message>\n    <message>\n        <source>enter $API_KEY</source>\n        <translation type=\"vanished\">請輸入 $API_KEY</translation>\n    </message>\n    <message>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"vanished\">錯誤：$BASE_URL 未填寫。</translation>\n    </message>\n    <message>\n        <source>enter $BASE_URL</source>\n        <translation type=\"vanished\">請輸入 $BASE_URL</translation>\n    </message>\n    <message>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"vanished\">錯誤：$MODEL_NAME 未填寫。</translation>\n    </message>\n    <message>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"vanished\">請輸入 $MODEL_NAME</translation>\n    </message>\n    <message>\n        <source>File size</source>\n        <translation type=\"vanished\">檔案大小</translation>\n    </message>\n    <message>\n        <source>RAM required</source>\n        <translation type=\"vanished\">所需的記憶體</translation>\n    </message>\n    <message>\n        <source>Parameters</source>\n        <translation type=\"vanished\">參數</translation>\n    </message>\n    <message>\n        <source>Quant</source>\n        <translation type=\"vanished\">量化</translation>\n    </message>\n    <message>\n        <source>Type</source>\n        <translation type=\"vanished\">類型</translation>\n    </message>\n</context>\n<context>\n    <name>AddRemoteModelView</name>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"31\"/>\n        <source>Various remote model providers that use network resources for inference.</source>\n   
     <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"55\"/>\n        <source>Groq</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"57\"/>\n        <source>Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://console.groq.com/keys&quot;&gt;https://groq.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"79\"/>\n        <source>OpenAI</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"81\"/>\n        <source>OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://platform.openai.com/signup&quot;&gt;https://openai.com/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"96\"/>\n        <source>Mistral</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"98\"/>\n        <source>Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. 
Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.&lt;br&gt;&lt;br&gt;Get your API key: &lt;a href=&quot;https://mistral.ai/&quot;&gt;https://mistral.ai/&lt;/a&gt;</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"141\"/>\n        <source>Custom</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/AddRemoteModelView.qml\" line=\"143\"/>\n        <source>The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ApplicationSettings</name>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"16\"/>\n        <source>Application</source>\n        <translation>應用程式</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"25\"/>\n        <source>Network dialog</source>\n        <translation>資料湖泊計畫對話視窗</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"26\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>分享回饋/對話計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"48\"/>\n        <source>Error dialog</source>\n        <translation>錯誤對話視窗</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"72\"/>\n        <source>Application Settings</source>\n        <translation>應用程式設定</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"85\"/>\n        <source>General</source>\n        <translation>一般</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"97\"/>\n        <source>Theme</source>\n        <translation>主題</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"98\"/>\n        <source>The application color scheme.</source>\n        <translation>應用程式的配色方案。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"113\"/>\n        <source>Dark</source>\n        <translation>暗色</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"112\"/>\n        <source>Light</source>\n        <translation>亮色</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"114\"/>\n        <source>LegacyDark</source>\n        <translation>傳統暗色</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"136\"/>\n        <source>Font Size</source>\n        <translation>字體大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"137\"/>\n        <source>The size of text in the application.</source>\n        <translation>應用程式中的字體大小。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"151\"/>\n        <source>Small</source>\n        <translation>小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"152\"/>\n        <source>Medium</source>\n        <translation>中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"153\"/>\n        
<source>Large</source>\n        <translation>大</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"176\"/>\n        <source>Language and Locale</source>\n        <translation>語言與區域設定</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"177\"/>\n        <source>The language and locale you wish to use.</source>\n        <translation>您希望使用的語言與區域設定。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"196\"/>\n        <source>System Locale</source>\n        <translation>系統語系</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"223\"/>\n        <source>Device</source>\n        <translation>裝置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"275\"/>\n        <source>Default Model</source>\n        <translation>預設模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"276\"/>\n        <source>The preferred model for new chats. 
Also used as the local server fallback.</source>\n        <translation>用於新交談的預設模型。也用於作為本機伺服器後援使用。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"339\"/>\n        <source>Suggestion Mode</source>\n        <translation>建議模式</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"353\"/>\n        <source>When chatting with LocalDocs</source>\n        <translation>當使用「我的文件」交談時</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"354\"/>\n        <source>Whenever possible</source>\n        <translation>視情況允許</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"355\"/>\n        <source>Never</source>\n        <translation>永不</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"491\"/>\n        <source>Enable System Tray</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"492\"/>\n        <source>The application will minimize to the system tray when the window is closed.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"508\"/>\n        <source>Enable Local API Server</source>\n        <translation>啟用本機 API 伺服器</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"340\"/>\n        <source>Generate suggested follow-up questions at the end of responses.</source>\n        <translation>在回覆末尾生成後續建議的問題。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"39\"/>\n        <source>ERROR: Update system could not find the 
MaintenanceTool used to check for updates!&lt;br/&gt;&lt;br/&gt;Did you install this application using the online installer? If so, the MaintenanceTool executable should be located one directory above where this application resides on your filesystem.&lt;br/&gt;&lt;br/&gt;If you can&apos;t start it manually, then I&apos;m afraid you&apos;ll have to reinstall.</source>\n        <translation>錯誤：更新系統找不到可使用的維護工具來檢查更新！&lt;br&gt;&lt;br&gt;您是否使用了線上安裝程式安裝了本應用程式？若是如此，維護工具的執行檔（MaintenanceTool）應位於本應用程式安裝位置的上一層資料夾中。&lt;br&gt;&lt;br&gt;請試著手動開啟它。&lt;br&gt;&lt;br&gt;如果您無法順利啟動，您可能得重新安裝本應用程式。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"224\"/>\n        <source>The compute device used for text generation.</source>\n        <translation>用於生成文字的計算裝置。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"242\"/>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"297\"/>\n        <source>Application default</source>\n        <translation>應用程式預設值</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"368\"/>\n        <source>Download Path</source>\n        <translation>下載路徑</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"369\"/>\n        <source>Where to store local models and the LocalDocs database.</source>\n        <translation>儲存本機模型與「我的文件」資料庫的位置。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"401\"/>\n        <source>Browse</source>\n        <translation>瀏覽</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"402\"/>\n        <source>Choose where to save model files</source>\n        <translation>選擇儲存模型檔案的位置</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ApplicationSettings.qml\" line=\"413\"/>\n        <source>Enable Datalake</source>\n        <translation>啟用資料湖泊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"414\"/>\n        <source>Send chats and feedback to the GPT4All Open-Source Datalake.</source>\n        <translation>將交談與回饋傳送到 GPT4All 開放原始碼資料湖泊。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"447\"/>\n        <source>Advanced</source>\n        <translation>進階</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"459\"/>\n        <source>CPU Threads</source>\n        <translation>中央處理器執行緒</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"460\"/>\n        <source>The number of CPU threads used for inference and embedding.</source>\n        <translation>用於推理與嵌入的中央處理器執行緒數。</translation>\n    </message>\n    <message>\n        <source>Save Chat Context</source>\n        <translation type=\"vanished\">儲存交談語境</translation>\n    </message>\n    <message>\n        <source>Save the chat model&apos;s state to disk for faster loading. WARNING: Uses ~2GB per chat.</source>\n        <translation type=\"vanished\">將交談模型的狀態儲存到磁碟以加快載入速度。警告：每次交談使用約 2GB。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"509\"/>\n        <source>Expose an OpenAI-Compatible server to localhost. 
WARNING: Results in increased resource usage.</source>\n        <translation>將 OpenAI 相容伺服器公開給本機。警告：導致資源使用增加。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"525\"/>\n        <source>API Server Port</source>\n        <translation>API 伺服器連接埠</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"526\"/>\n        <source>The port to use for the local server. Requires restart.</source>\n        <translation>用於本機伺服器的連接埠。需要重新啟動。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"578\"/>\n        <source>Check For Updates</source>\n        <translation>檢查更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"579\"/>\n        <source>Manually check for an update to GPT4All.</source>\n        <translation>手動檢查 GPT4All 的更新。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ApplicationSettings.qml\" line=\"588\"/>\n        <source>Updates</source>\n        <translation>更新</translation>\n    </message>\n</context>\n<context>\n    <name>Chat</name>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"33\"/>\n        <location filename=\"../src/chat.h\" line=\"84\"/>\n        <source>New Chat</source>\n        <translation>新的交談</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chat.cpp\" line=\"46\"/>\n        <source>Server Chat</source>\n        <translation>伺服器交談</translation>\n    </message>\n</context>\n<context>\n    <name>ChatAPIWorker</name>\n    <message>\n        <location filename=\"../src/chatapi.cpp\" line=\"263\"/>\n        <source>ERROR: Network error occurred while connecting to the API server</source>\n        <translation>錯誤：網路錯誤，無法連線到目標 API 伺服器</translation>\n    </message>\n    <message>\n        <location 
filename=\"../src/chatapi.cpp\" line=\"276\"/>\n        <source>ChatAPIWorker::handleFinished got HTTP Error %1 %2</source>\n        <translation>ChatAPIWorker::handleFinished 遇到一個 HTTP 錯誤 %1 %2</translation>\n    </message>\n</context>\n<context>\n    <name>ChatCollapsibleItem</name>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"37\"/>\n        <source>Analysis encountered error</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Thinking</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"39\"/>\n        <source>Analyzing</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"41\"/>\n        <source>Thought for %1 %2</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>second</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"43\"/>\n        <source>seconds</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatCollapsibleItem.qml\" line=\"44\"/>\n        <source>Analyzed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatDrawer</name>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"37\"/>\n        <source>Drawer</source>\n        <translation>側邊欄</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatDrawer.qml\" line=\"38\"/>\n        <source>Main navigation drawer</source>\n        <translation>主要導航側邊欄</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"49\"/>\n        <source>＋ New Chat</source>\n        <translation>＋ 新的交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"50\"/>\n        <source>Create a new chat</source>\n        <translation>建立新的交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"199\"/>\n        <source>Select the current chat or edit the chat when in edit mode</source>\n        <translation>選擇目前交談或在編輯模式下編輯交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"216\"/>\n        <source>Edit chat name</source>\n        <translation>修改對話名稱</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"229\"/>\n        <source>Save chat name</source>\n        <translation>儲存對話名稱</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"246\"/>\n        <source>Delete chat</source>\n        <translation>刪除對話</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"283\"/>\n        <source>Confirm chat deletion</source>\n        <translation>確定刪除對話</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"305\"/>\n        <source>Cancel chat deletion</source>\n        <translation>取消刪除對話</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"317\"/>\n        <source>List of chats</source>\n        <translation>交談列表</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatDrawer.qml\" line=\"318\"/>\n        <source>List of chats in the drawer 
dialog</source>\n        <translation>側邊欄對話視窗的交談列表</translation>\n    </message>\n</context>\n<context>\n    <name>ChatItemView</name>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"83\"/>\n        <source>GPT4All</source>\n        <translation type=\"unfinished\">GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"84\"/>\n        <source>You</source>\n        <translation type=\"unfinished\">您</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"107\"/>\n        <source>response stopped ...</source>\n        <translation type=\"unfinished\">回覆停止......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"108\"/>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"unfinished\">檢索本機文件中：%1 ......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"109\"/>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"unfinished\">搜尋本機文件中：%1 ......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"110\"/>\n        <source>processing ...</source>\n        <translation type=\"unfinished\">處理中......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"111\"/>\n        <source>generating response ...</source>\n        <translation type=\"unfinished\">生成回覆......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"112\"/>\n        <source>generating questions ...</source>\n        <translation type=\"unfinished\">生成問題......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"113\"/>\n        <source>generating toolcall ...</source>\n        
<translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"545\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">複製</translation>\n    </message>\n    <message>\n        <source>Copy Message</source>\n        <translation type=\"obsolete\">複製訊息</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"obsolete\">停用 Markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"obsolete\">啟用 Markdown</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/ChatItemView.qml\" line=\"283\"/>\n        <source>%n Source(s)</source>\n        <translation type=\"unfinished\">\n            <numerusform>%n 來源</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"430\"/>\n        <source>LocalDocs</source>\n        <translation type=\"unfinished\">我的文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"460\"/>\n        <source>Edit this message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"461\"/>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"472\"/>\n        <source>All following messages will be permanently erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"471\"/>\n        <source>Redo this response?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"495\"/>\n        <source>Cannot edit chat without a loaded 
model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"497\"/>\n        <source>Cannot edit chat while the model is generating.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"506\"/>\n        <source>Edit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"517\"/>\n        <source>Cannot redo response without a loaded model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"519\"/>\n        <source>Cannot redo response while the model is generating.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"528\"/>\n        <source>Redo</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"565\"/>\n        <source>Like response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"594\"/>\n        <source>Dislike response</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatItemView.qml\" line=\"657\"/>\n        <source>Suggested follow-ups</source>\n        <translation type=\"unfinished\">後續建議</translation>\n    </message>\n</context>\n<context>\n    <name>ChatLLM</name>\n    <message>\n        <location filename=\"../src/chatllm.cpp\" line=\"1047\"/>\n        <source>Your message was too long and could not be processed (%1 &gt; %2). 
Please try again with something shorter.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>ChatListModel</name>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"94\"/>\n        <source>TODAY</source>\n        <translation>今天</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"96\"/>\n        <source>THIS WEEK</source>\n        <translation>這星期</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"98\"/>\n        <source>THIS MONTH</source>\n        <translation>這個月</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"100\"/>\n        <source>LAST SIX MONTHS</source>\n        <translation>前六個月</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"102\"/>\n        <source>THIS YEAR</source>\n        <translation>今年</translation>\n    </message>\n    <message>\n        <location filename=\"../src/chatlistmodel.h\" line=\"104\"/>\n        <source>LAST YEAR</source>\n        <translation>去年</translation>\n    </message>\n</context>\n<context>\n    <name>ChatTextItem</name>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"67\"/>\n        <source>Copy</source>\n        <translation type=\"unfinished\">複製</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"73\"/>\n        <source>Copy Message</source>\n        <translation type=\"unfinished\">複製訊息</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        <source>Disable markdown</source>\n        <translation type=\"unfinished\">停用 Markdown</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatTextItem.qml\" line=\"83\"/>\n        
<source>Enable markdown</source>\n        <translation type=\"unfinished\">啟用 Markdown</translation>\n    </message>\n</context>\n<context>\n    <name>ChatView</name>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;Warning&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</source>\n        <translation>&lt;h3&gt;警告&lt;/h3&gt;&lt;p&gt;%1&lt;/p&gt;</translation>\n    </message>\n    <message>\n        <source>Switch model dialog</source>\n        <translation type=\"vanished\">切換模型對話視窗</translation>\n    </message>\n    <message>\n        <source>Warn the user if they switch models, then context will be erased</source>\n        <translation type=\"vanished\">警告使用者如果切換模型，則語境將被刪除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"99\"/>\n        <source>Conversation copied to clipboard.</source>\n        <translation>對話已複製到剪貼簿。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"106\"/>\n        <source>Code copied to clipboard.</source>\n        <translation>程式碼已複製到剪貼簿。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"113\"/>\n        <source>The entire chat will be erased.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"248\"/>\n        <source>Chat panel</source>\n        <translation>交談面板</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"249\"/>\n        <source>Chat panel with options</source>\n        <translation>具有選項的交談面板</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"356\"/>\n        <source>Reload the currently loaded model</source>\n        <translation>重新載入目前已載入的模型</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"370\"/>\n        <source>Eject the currently loaded model</source>\n        <translation>彈出目前載入的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"382\"/>\n        <source>No model installed.</source>\n        <translation>沒有已安裝的模型。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"384\"/>\n        <source>Model loading error.</source>\n        <translation>模型載入時發生錯誤。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"386\"/>\n        <source>Waiting for model...</source>\n        <translation>等待模型中......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"388\"/>\n        <source>Switching context...</source>\n        <translation>切換語境中......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"390\"/>\n        <source>Choose a model...</source>\n        <translation>選擇一個模型......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"392\"/>\n        <source>Not found: %1</source>\n        <translation>不存在：%1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"394\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"1005\"/>\n        <source>Reload · %1</source>\n        <translation>重新載入 · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"396\"/>\n        <source>Loading · %1</source>\n        <translation>載入中 · %1</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"732\"/>\n        <source>Load · %1 (default) →</source>\n        <translation>載入 · %1 (預設) →</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"1095\"/>\n        <source>Legacy prompt template needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1099\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1102\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1105\"/>\n        <source>Legacy system prompt needs to be &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;updated&lt;/a&gt; in Settings.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"480\"/>\n        <source>The top item is the current model</source>\n        <translation>最上面的那項是目前使用的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"92\"/>\n        <location filename=\"../qml/ChatView.qml\" line=\"112\"/>\n        <source>Erase conversation?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"93\"/>\n        <source>Changing the model will erase the current conversation.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"566\"/>\n        <source>LocalDocs</source>\n        <translation>我的文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"584\"/>\n        <source>Add documents</source>\n        <translation>新增文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"585\"/>\n        <source>add collections of documents to the chat</source>\n        <translation>將文件集合新增至交談中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"756\"/>\n        <source>Load the default model</source>\n        <translation>載入預設模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"757\"/>\n        <source>Loads the default model which can be changed in settings</source>\n        <translation>預設模型可於設定中變更</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"768\"/>\n        <source>No Model Installed</source>\n        <translation>沒有已安裝的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"777\"/>\n        <source>GPT4All requires that you install at least one\nmodel to get started</source>\n        <translation>GPT4All 要求您至少安裝一個\n模型開始</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"789\"/>\n        <source>Install a Model</source>\n        <translation>安裝一個模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"794\"/>\n        <source>Shows the add model view</source>\n        <translation>顯示新增模型視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"819\"/>\n        <source>Conversation with the model</source>\n        <translation>與模型對話</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/ChatView.qml\" line=\"820\"/>\n        <source>prompt / response pairs from the conversation</source>\n        <translation>對話中的提示詞 / 回覆組合</translation>\n    </message>\n    <message>\n        <source>GPT4All</source>\n        <translation type=\"vanished\">GPT4All</translation>\n    </message>\n    <message>\n        <source>You</source>\n        <translation type=\"vanished\">您</translation>\n    </message>\n    <message>\n        <source>response stopped ...</source>\n        <translation type=\"vanished\">回覆停止......</translation>\n    </message>\n    <message>\n        <source>retrieving localdocs: %1 ...</source>\n        <translation type=\"vanished\">檢索本機文件中：%1 ......</translation>\n    </message>\n    <message>\n        <source>searching localdocs: %1 ...</source>\n        <translation type=\"vanished\">搜尋本機文件中：%1 ......</translation>\n    </message>\n    <message>\n        <source>processing ...</source>\n        <translation type=\"vanished\">處理中......</translation>\n    </message>\n    <message>\n        <source>generating response ...</source>\n        <translation type=\"vanished\">生成回覆......</translation>\n    </message>\n    <message>\n        <source>generating questions ...</source>\n        <translation type=\"vanished\">生成問題......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1293\"/>\n        <source>Copy</source>\n        <translation>複製</translation>\n    </message>\n    <message>\n        <source>Copy Message</source>\n        <translation type=\"vanished\">複製訊息</translation>\n    </message>\n    <message>\n        <source>Disable markdown</source>\n        <translation type=\"vanished\">停用 Markdown</translation>\n    </message>\n    <message>\n        <source>Enable markdown</source>\n        <translation type=\"vanished\">啟用 Markdown</translation>\n    </message>\n    <message>\n        <source>Thumbs up</source>\n        <translation 
type=\"vanished\">讚</translation>\n    </message>\n    <message>\n        <source>Gives a thumbs up to the response</source>\n        <translation type=\"vanished\">對這則回覆比讚</translation>\n    </message>\n    <message>\n        <source>Thumbs down</source>\n        <translation type=\"vanished\">倒讚</translation>\n    </message>\n    <message>\n        <source>Opens thumbs down dialog</source>\n        <translation type=\"vanished\">開啟倒讚對話視窗</translation>\n    </message>\n    <message>\n        <source>Suggested follow-ups</source>\n        <translation type=\"vanished\">後續建議</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"924\"/>\n        <source>Erase and reset chat session</source>\n        <translation>刪除並重置交談會話</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"942\"/>\n        <source>Copy chat session to clipboard</source>\n        <translation>複製交談會議到剪貼簿</translation>\n    </message>\n    <message>\n        <source>Redo last chat response</source>\n        <translation type=\"vanished\">復原上一個交談回覆</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1216\"/>\n        <source>Add media</source>\n        <translation>附加媒體文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1217\"/>\n        <source>Adds media to the prompt</source>\n        <translation>附加媒體文件到提示詞</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1351\"/>\n        <source>Stop generating</source>\n        <translation>停止生成</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1352\"/>\n        <source>Stop the current response generation</source>\n        <translation>停止當前回覆生成</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" 
line=\"1396\"/>\n        <source>Attach</source>\n        <translation>附加</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1398\"/>\n        <source>Single File</source>\n        <translation>單一文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1007\"/>\n        <source>Reloads the model</source>\n        <translation>重新載入模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"66\"/>\n        <source>&lt;h3&gt;Encountered an error loading model:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Model loading failures can happen for a variety of reasons, but the most common causes include a bad file format, an incomplete or corrupted download, the wrong file type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:&lt;br&gt;&lt;ul&gt;&lt;li&gt;Ensure the model file has a compatible format and type&lt;li&gt;Check the model file is complete in the download folder&lt;li&gt;You can find the download folder in the settings dialog&lt;li&gt;If you&apos;ve sideloaded the model ensure the file is not corrupt by checking md5sum&lt;li&gt;Read more about what models are supported in our &lt;a href=&quot;https://docs.gpt4all.io/&quot;&gt;documentation&lt;/a&gt; for the gui&lt;li&gt;Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help</source>\n        <translation>&lt;h3&gt;載入模型時發生錯誤：&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;%1&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;導致模型載入失敗的原因可能有很多種，但絕大多數的原因是檔案格式損毀、下載的檔案不完整、檔案類型錯誤、系統RAM空間不足或不相容的模型類型。這裡有些建議可供疑難排解：&lt;br&gt;&lt;ul&gt;&lt;li&gt;確保使用的模型是相容的格式與類型&lt;li&gt;檢查位於下載資料夾的檔案是否完整&lt;li&gt;您可以從設定中找到您所設定的「下載資料夾路徑」&lt;li&gt;如果您有側載模型，請利用 md5sum 等工具確保您的檔案是完整的&lt;li&gt;想了解更多關於我們所支援的模型資訊，煩請詳閱&lt;a 
href=&quot;https://docs.gpt4all.io/&quot;&gt;本文件&lt;/a&gt;。&lt;li&gt;歡迎洽詢我們的 &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;Discord 伺服器&lt;/a&gt; 以尋求幫助</translation>\n    </message>\n    <message>\n        <source>restoring from text ...</source>\n        <translation type=\"vanished\">從文字中恢復......</translation>\n    </message>\n    <message numerus=\"yes\">\n        <source>%n Source(s)</source>\n        <translation type=\"vanished\">\n            <numerusform>%n 來源</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Send a message...</source>\n        <translation>傳送一則訊息......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1243\"/>\n        <source>Load a model to continue...</source>\n        <translation>載入模型以繼續......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1246\"/>\n        <source>Send messages/prompts to the model</source>\n        <translation>向模型傳送訊息/提示詞</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1287\"/>\n        <source>Cut</source>\n        <translation>剪下</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1299\"/>\n        <source>Paste</source>\n        <translation>貼上</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1303\"/>\n        <source>Select All</source>\n        <translation>全選</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1374\"/>\n        <source>Send message</source>\n        <translation>傳送訊息</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ChatView.qml\" line=\"1375\"/>\n        <source>Sends the message/prompt contained in textfield to the 
model</source>\n        <translation>將文字欄位中包含的訊息/提示詞傳送到模型</translation>\n    </message>\n</context>\n<context>\n    <name>CodeInterpreter</name>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"79\"/>\n        <source>Code Interpreter</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../src/codeinterpreter.h\" line=\"80\"/>\n        <source>compute javascript code using console.log as output</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>CollectionsDrawer</name>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"70\"/>\n        <source>Warning: searching collections while indexing can return incomplete results</source>\n        <translation>警告：在索引時搜尋收藏可能會傳回不完整的結果</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n 個檔案</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"87\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n 個字</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"103\"/>\n        <source>Updating</source>\n        <translation>更新中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"128\"/>\n        <source>＋ Add Docs</source>\n        <translation>＋ 新增文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/CollectionsDrawer.qml\" line=\"137\"/>\n        <source>Select a collection to make it available to the chat model.</source>\n        
<translation>選擇一個收藏以使其可供交談模型使用。</translation>\n    </message>\n</context>\n<context>\n    <name>ConfirmationDialog</name>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"42\"/>\n        <source>OK</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ConfirmationDialog.qml\" line=\"49\"/>\n        <source>Cancel</source>\n        <translation type=\"unfinished\">取消</translation>\n    </message>\n</context>\n<context>\n    <name>Download</name>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"278\"/>\n        <source>Model &quot;%1&quot; is installed successfully.</source>\n        <translation>模型「%1」已安裝成功。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"288\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>錯誤：$MODEL_NAME 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"294\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>錯誤：$API_KEY 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"300\"/>\n        <source>ERROR: $BASE_URL is invalid.</source>\n        <translation>錯誤：$BASE_URL 無效。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"306\"/>\n        <source>ERROR: Model &quot;%1 (%2)&quot; is conflict.</source>\n        <translation>錯誤：模型「%1 （%2）」發生衝突。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"325\"/>\n        <source>Model &quot;%1 (%2)&quot; is installed successfully.</source>\n        <translation>模型「%1（%2）」已安裝成功。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/download.cpp\" line=\"349\"/>\n        <source>Model &quot;%1&quot; is 
removed.</source>\n        <translation>模型「%1」已移除。</translation>\n    </message>\n</context>\n<context>\n    <name>HomeView</name>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"49\"/>\n        <source>Welcome to GPT4All</source>\n        <translation>歡迎使用 GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"56\"/>\n        <source>The privacy-first LLM chat application</source>\n        <translation>隱私第一的大型語言模型交談應用程式</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"66\"/>\n        <source>Start chatting</source>\n        <translation>開始交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"81\"/>\n        <source>Start Chatting</source>\n        <translation>開始交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"82\"/>\n        <source>Chat with any LLM</source>\n        <translation>與任何大型語言模型交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"92\"/>\n        <source>LocalDocs</source>\n        <translation>我的文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"93\"/>\n        <source>Chat with your local files</source>\n        <translation>使用「我的文件」來交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"103\"/>\n        <source>Find Models</source>\n        <translation>搜尋模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"104\"/>\n        <source>Explore and download models</source>\n        <translation>瀏覽與下載模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"190\"/>\n        <source>Latest news</source>\n        <translation>最新消息</translation>\n    
</message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"191\"/>\n        <source>Latest news from GPT4All</source>\n        <translation>從 GPT4All 來的最新消息</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"222\"/>\n        <source>Release Notes</source>\n        <translation>版本資訊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"228\"/>\n        <source>Documentation</source>\n        <translation>文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"234\"/>\n        <source>Discord</source>\n        <translation>Discord</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"240\"/>\n        <source>X (Twitter)</source>\n        <translation>X (Twitter)</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"246\"/>\n        <source>Github</source>\n        <translation>Github</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"257\"/>\n        <source>nomic.ai</source>\n        <translation>nomic.ai</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/HomeView.qml\" line=\"282\"/>\n        <source>Subscribe to Newsletter</source>\n        <translation>訂閱電子報</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsSettings</name>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"19\"/>\n        <source>LocalDocs</source>\n        <translation>我的文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"29\"/>\n        <source>LocalDocs Settings</source>\n        <translation>我的文件設定</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"38\"/>\n  
      <source>Indexing</source>\n        <translation>索引</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"51\"/>\n        <source>Allowed File Extensions</source>\n        <translation>允許的副檔名</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"52\"/>\n        <source>Comma-separated list. LocalDocs will only attempt to process files with these extensions.</source>\n        <translation>以逗號分隔的列表。「我的文件」將僅嘗試處理具有這些副檔名的檔案。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"100\"/>\n        <source>Embedding</source>\n        <translation>嵌入</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"112\"/>\n        <source>Use Nomic Embed API</source>\n        <translation>使用 Nomic 嵌入 API</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"113\"/>\n        <source>Embed documents using the fast Nomic API instead of a private local model. Requires restart.</source>\n        <translation>使用快速的 Nomic API 而不是本機私有模型嵌入文件。需要重新啟動。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"130\"/>\n        <source>Nomic API Key</source>\n        <translation>Nomic API 金鑰</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"131\"/>\n        <source>API key to use for Nomic Embed. Get one from the Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API keys page&lt;/a&gt;. 
Requires restart.</source>\n        <translation>用於 Nomic Embed 的 API 金鑰。從 Atlas &lt;a href=&quot;https://atlas.nomic.ai/cli-login&quot;&gt;API 金鑰頁面&lt;/a&gt;取得一個。需要重新啟動。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"165\"/>\n        <source>Embeddings Device</source>\n        <translation>嵌入裝置</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"166\"/>\n        <source>The compute device used for embeddings. Requires restart.</source>\n        <translation>用於嵌入的計算裝置。需要重新啟動。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"176\"/>\n        <source>Application default</source>\n        <translation>應用程式預設值</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"211\"/>\n        <source>Display</source>\n        <translation>顯示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"224\"/>\n        <source>Show Sources</source>\n        <translation>查看來源</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"225\"/>\n        <source>Display the sources used for each response.</source>\n        <translation>顯示每則回覆所使用的來源。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"242\"/>\n        <source>Advanced</source>\n        <translation>進階</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"258\"/>\n        <source>Warning: Advanced usage only.</source>\n        <translation>警告：僅限進階使用。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"259\"/>\n        <source>Values too large may cause localdocs failure, extremely slow responses 
or failure to respond at all. Roughly speaking, the {N chars x N snippets} are added to the model&apos;s context window. More info &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;here&lt;/a&gt;.</source>\n        <translation>設定太大的數值可能會導致「我的文件」處理失敗、反應速度極慢或根本無法回覆。簡單地說，這會將 {N 個字元 x N 個片段} 被添加到模型的語境視窗中。更多資訊&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/localdocs.html&quot;&gt;此處&lt;/a&gt;。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"267\"/>\n        <source>Document snippet size (characters)</source>\n        <translation>文件片段大小（字元）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"268\"/>\n        <source>Number of characters per document snippet. Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>每個文件片段的字元數。較大的數字會增加實際反應的可能性，但也會導致生成速度變慢。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"293\"/>\n        <source>Max document snippets per prompt</source>\n        <translation>每個提示詞的最大文件片段</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsSettings.qml\" line=\"294\"/>\n        <source>Max best N matches of retrieved document snippets to add to the context for prompt. 
Larger numbers increase likelihood of factual responses, but also result in slower generation.</source>\n        <translation>新增至提示詞語境中的檢索到的文件片段的最大 N 個符合的項目。較大的數字會增加實際反應的可能性，但也會導致生成速度變慢。</translation>\n    </message>\n</context>\n<context>\n    <name>LocalDocsView</name>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"52\"/>\n        <source>LocalDocs</source>\n        <translation>我的文件</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"58\"/>\n        <source>Chat with your local files</source>\n        <translation>使用「我的文件」來交談</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"71\"/>\n        <source>＋ Add Collection</source>\n        <translation>＋ 新增收藏</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"85\"/>\n        <source>&lt;h3&gt;ERROR: The LocalDocs database cannot be accessed or is not valid.&lt;/h3&gt;&lt;br&gt;&lt;i&gt;Note: You will need to restart after trying any of the following suggested fixes.&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Make sure that the folder set as &lt;b&gt;Download Path&lt;/b&gt; exists on the file system.&lt;/li&gt;&lt;li&gt;Check ownership as well as read and write permissions of the &lt;b&gt;Download Path&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;If there is a &lt;b&gt;localdocs_v2.db&lt;/b&gt; file, check its ownership and read/write permissions, too.&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;If the problem persists and there are any &apos;localdocs_v*.db&apos; files present, as a last resort you can&lt;br&gt;try backing them up and removing them. 
You will have to recreate your collections, however.</source>\n        <translation>&lt;h3&gt;錯誤：「我的文件」資料庫已無法存取或已損壞。&lt;/h3&gt;&lt;br&gt;&lt;i&gt;提醒：執行完以下任何疑難排解的動作後，請務必重新啟動應用程式。&lt;/i&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;請確保&lt;b&gt;「下載路徑」&lt;/b&gt;所指向的資料夾確實存在於檔案系統當中。&lt;/li&gt;&lt;li&gt;檢查 &lt;b&gt;「下載路徑」&lt;/b&gt;所指向的資料夾，確保其「擁有者」為您本身，以及確保您對該資料夾擁有讀寫權限。&lt;/li&gt;&lt;li&gt;如果該資料夾內存在一份名為 &lt;b&gt;localdocs_v2.db&lt;/b&gt; 的檔案，請同時確保您對其擁有讀寫權限。&lt;/li&gt;&lt;/ul&gt;&lt;br&gt;如果問題依舊存在，且該資料夾內存在與「localdocs_v*.db」名稱相關的檔案，請嘗試備份並移除它們。&lt;br&gt;雖然這樣一來，您恐怕得著手重建您的收藏，但這將或許能夠解決這份錯誤。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"109\"/>\n        <source>No Collections Installed</source>\n        <translation>沒有已安裝的收藏</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"118\"/>\n        <source>Install a collection of local documents to get started using this feature</source>\n        <translation>安裝本機文件收藏以開始使用此功能</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"129\"/>\n        <source>＋ Add Doc Collection</source>\n        <translation>＋ 新增文件收藏</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"134\"/>\n        <source>Shows the add model view</source>\n        <translation>查看新增的模型視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"231\"/>\n        <source>Indexing progressBar</source>\n        <translation>索引進度條</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"232\"/>\n        <source>Shows the progress made in the indexing</source>\n        <translation>顯示索引進度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"257\"/>\n        <source>ERROR</source>\n        
<translation>錯誤</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"261\"/>\n        <source>INDEXING</source>\n        <translation>索引中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"265\"/>\n        <source>EMBEDDING</source>\n        <translation>嵌入中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"268\"/>\n        <source>REQUIRES UPDATE</source>\n        <translation>必須更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"271\"/>\n        <source>READY</source>\n        <translation>已就緒</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"273\"/>\n        <source>INSTALLING</source>\n        <translation>安裝中</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"300\"/>\n        <source>Indexing in progress</source>\n        <translation>正在索引</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"303\"/>\n        <source>Embedding in progress</source>\n        <translation>正在嵌入</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"306\"/>\n        <source>This collection requires an update after version change</source>\n        <translation>該收藏需要在版本變更後更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"309\"/>\n        <source>Automatically reindexes upon changes to the folder</source>\n        <translation>若資料夾有變動，會自動重新索引</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"311\"/>\n        <source>Installation in progress</source>\n        <translation>正在安裝中</translation>\n    </message>\n    
<message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"325\"/>\n        <source>%</source>\n        <translation>%</translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n file(s)</source>\n        <translation>\n            <numerusform>%n 個檔案</numerusform>\n        </translation>\n    </message>\n    <message numerus=\"yes\">\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"337\"/>\n        <source>%n word(s)</source>\n        <translation>\n            <numerusform>%n 個字</numerusform>\n        </translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"408\"/>\n        <source>Remove</source>\n        <translation>移除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"420\"/>\n        <source>Rebuild</source>\n        <translation>重建</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"423\"/>\n        <source>Reindex this folder from scratch. This is slow and usually not needed.</source>\n        <translation>重新索引該資料夾。這將會耗費許多時間並且通常不太需要這樣做。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"430\"/>\n        <source>Update</source>\n        <translation>更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/LocalDocsView.qml\" line=\"433\"/>\n        <source>Update the collection to the new version. 
This is a slow operation.</source>\n        <translation>更新收藏。這將會耗費許多時間。</translation>\n    </message>\n</context>\n<context>\n    <name>ModelList</name>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1344\"/>\n        <location filename=\"../src/modellist.cpp\" line=\"1395\"/>\n        <source>cannot open &quot;%1&quot;: %2</source>\n        <translation>無法開啟“%1”：%2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1356\"/>\n        <source>cannot create &quot;%1&quot;: %2</source>\n        <translation>無法建立“%1”：%2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1406\"/>\n        <source>%1 (%2)</source>\n        <translation>%1（%2）</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1407\"/>\n        <source>&lt;strong&gt;OpenAI-Compatible API Model&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API Key: %1&lt;/li&gt;&lt;li&gt;Base URL: %2&lt;/li&gt;&lt;li&gt;Model Name: %3&lt;/li&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;OpenAI API 相容模型&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;API 金鑰：%1&lt;/li&gt;&lt;li&gt;基底 URL：%2&lt;/li&gt;&lt;li&gt;模型名稱：%3&lt;/li&gt;&lt;/ul&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1716\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal OpenAI API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to OpenAI!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with OpenAI&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;here.&lt;/a&gt;&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;需要個人的 OpenAI API 金鑰。&lt;/li&gt;&lt;li&gt;警告：這將會傳送您的交談紀錄到 OpenAI&lt;/li&gt;&lt;li&gt;您的 API 金鑰將被儲存在硬碟上&lt;/li&gt;&lt;li&gt;它只被用於與 OpenAI 
進行通訊&lt;/li&gt;&lt;li&gt;您可以在&lt;a href=&quot;https://platform.openai.com/account/api-keys&quot;&gt;此處&lt;/a&gt;申請一個 API 金鑰。&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1735\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;OpenAI 的 ChatGPT 模型 GPT-3.5 Turbo&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1749\"/>\n        <source>&lt;br&gt;&lt;br&gt;&lt;i&gt;* Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info.</source>\n        <translation>&lt;br&gt;&lt;br&gt;&lt;i&gt;* 即使您已向 OpenAI 付費購買了 ChatGPT 的 GPT-4 模型使用權，但這也不能保證您能擁有 API 金鑰的使用權限。請聯繫 OpenAI 以查閱更多資訊。</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1764\"/>\n        <source>&lt;strong&gt;OpenAI&apos;s ChatGPT model GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</source>\n        <translation>&lt;strong&gt;OpenAI 的 ChatGPT 模型 GPT-4&lt;/strong&gt;&lt;br&gt; %1 %2</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1777\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal Mistral API key.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to Mistral!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with Mistral&lt;/li&gt;&lt;li&gt;You can apply for an API key &lt;a href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;here&lt;/a&gt;.&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;需要個人的 Mistral API 金鑰。&lt;/li&gt;&lt;li&gt;警告：這將會傳送您的交談紀錄到 Mistral！&lt;/li&gt;&lt;li&gt;您的 API 金鑰將被儲存在硬碟上&lt;/li&gt;&lt;li&gt;它只被用於與 Mistral 進行通訊&lt;/li&gt;&lt;li&gt;您可以在&lt;a 
href=&quot;https://console.mistral.ai/user/api-keys&quot;&gt;此處&lt;/a&gt;申請一個 API 金鑰。&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1796\"/>\n        <source>&lt;strong&gt;Mistral Tiny model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Mistral 迷你模型&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1822\"/>\n        <source>&lt;strong&gt;Mistral Small model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Mistral 小型模型&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1849\"/>\n        <source>&lt;strong&gt;Mistral Medium model&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;Mistral 中型模型&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1862\"/>\n        <source>&lt;ul&gt;&lt;li&gt;Requires personal API key and the API base URL.&lt;/li&gt;&lt;li&gt;WARNING: Will send your chats to the OpenAI-compatible API Server you specified!&lt;/li&gt;&lt;li&gt;Your API key will be stored on disk&lt;/li&gt;&lt;li&gt;Will only be used to communicate with the OpenAI-compatible API Server&lt;/li&gt;</source>\n        <translation>&lt;ul&gt;&lt;li&gt;需要個人的 API 金鑰和 API 的基底 URL（Base URL）。&lt;/li&gt;&lt;li&gt;警告：這將會傳送您的交談紀錄到您所指定的 OpenAI API 相容伺服器&lt;/li&gt;&lt;li&gt;您的 API 金鑰將被儲存在硬碟上&lt;/li&gt;&lt;li&gt;它只被用於與其 OpenAI API 相容伺服器進行通訊&lt;/li&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../src/modellist.cpp\" line=\"1879\"/>\n        <source>&lt;strong&gt;Connect to OpenAI-compatible API server&lt;/strong&gt;&lt;br&gt; %1</source>\n        <translation>&lt;strong&gt;連線到 OpenAI API 相容伺服器&lt;/strong&gt;&lt;br&gt; %1</translation>\n    </message>\n    <message>\n        
<location filename=\"../src/modellist.cpp\" line=\"2303\"/>\n        <source>&lt;strong&gt;Created by %1.&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;Published on %2.&lt;li&gt;This model has %3 likes.&lt;li&gt;This model has %4 downloads.&lt;li&gt;More info can be found &lt;a href=&quot;https://huggingface.co/%5&quot;&gt;here.&lt;/a&gt;&lt;/ul&gt;</source>\n        <translation>&lt;strong&gt;模型作者：%1&lt;/strong&gt;&lt;br&gt;&lt;ul&gt;&lt;li&gt;發佈日期：%2&lt;li&gt;累積讚數：%3 個讚&lt;li&gt;下載次數：%4 次&lt;li&gt;更多資訊請查閱&lt;a href=&quot;https://huggingface.co/%5&quot;&gt;此處&lt;/a&gt;。&lt;/ul&gt;</translation>\n    </message>\n</context>\n<context>\n    <name>ModelSettings</name>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"14\"/>\n        <source>Model</source>\n        <translation>模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <source>%1 system message?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"20\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>The system message will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <source>removed</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n  
      <location filename=\"../qml/ModelSettings.qml\" line=\"21\"/>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>reset to the default</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"30\"/>\n        <source>%1 chat template?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>The chat template will be %1.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"31\"/>\n        <source>erased</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"57\"/>\n        <source>Model Settings</source>\n        <translation>模型設定</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"108\"/>\n        <source>Clone</source>\n        <translation>複製</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"118\"/>\n        <source>Remove</source>\n        <translation>移除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"132\"/>\n        <source>Name</source>\n        <translation>名稱</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"165\"/>\n        <source>Model File</source>\n        <translation>模型檔案</translation>\n    </message>\n    <message>\n        <source>System Prompt</source>\n        <translation type=\"vanished\">系統提示詞</translation>\n    </message>\n    <message>\n        <source>Prefixed at the beginning of every conversation. 
Must contain the appropriate framing tokens.</source>\n        <translation type=\"vanished\">在每個對話的開頭加上前綴。必須包含適當的構建符元（framing tokens）。</translation>\n    </message>\n    <message>\n        <source>Prompt Template</source>\n        <translation type=\"vanished\">提示詞模板</translation>\n    </message>\n    <message>\n        <source>The template that wraps every prompt.</source>\n        <translation type=\"vanished\">包裝每個提示詞的模板。</translation>\n    </message>\n    <message>\n        <source>Must contain the string &quot;%1&quot; to be replaced with the user&apos;s input.</source>\n        <translation type=\"vanished\">必須包含要替換為使用者輸入的字串「%1」。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"190\"/>\n        <source>System Message</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"191\"/>\n        <source>A message to set the context or guide the behavior of the model. Leave blank for none. 
NOTE: Since GPT4All 3.5, this should not contain control tokens.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"218\"/>\n        <source>System message is not &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;plain text&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"288\"/>\n        <source>Chat Template</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"289\"/>\n        <source>This Jinja template turns the chat into input for the model.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"371\"/>\n        <source>No &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; configured.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"375\"/>\n        <source>The &lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;chat template&lt;/a&gt; cannot be blank.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"379\"/>\n        <source>&lt;a href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Syntax error&lt;/a&gt;: %1</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"383\"/>\n        <source>Chat template is not in &lt;a 
href=&quot;https://docs.gpt4all.io/gpt4all_desktop/chat_templates.html&quot;&gt;Jinja format&lt;/a&gt;.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"409\"/>\n        <source>Chat Name Prompt</source>\n        <translation>交談名稱提示詞</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"410\"/>\n        <source>Prompt used to automatically generate chat names.</source>\n        <translation>用於自動生成交談名稱的提示詞。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"452\"/>\n        <source>Suggested FollowUp Prompt</source>\n        <translation>後續建議提示詞</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"453\"/>\n        <source>Prompt used to generate suggested follow-up questions.</source>\n        <translation>用於生成後續建議問題的提示詞。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"506\"/>\n        <source>Context Length</source>\n        <translation>語境長度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"507\"/>\n        <source>Number of input and output tokens the model sees.</source>\n        <translation>模型看見的輸入與輸出的符元數量。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"528\"/>\n        <source>Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>資訊遺失前最大的提示詞/回覆符元組合。（Context Length）\n若語境比模型訓練時所使用的語境還要長，將會生成較差的結果。\n注意：重新載入模型後才會生效。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"566\"/>\n     
   <source>Temperature</source>\n        <translation>語境溫度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"567\"/>\n        <source>Randomness of model output. Higher -&gt; more variation.</source>\n        <translation>模型輸出的隨機性。更高 -&gt; 更多變化。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"578\"/>\n        <source>Temperature increases the chances of choosing less likely tokens.\nNOTE: Higher temperature gives more creative but less predictable outputs.</source>\n        <translation>語境溫度會提高選擇不容易出現的符元機率。（Temperature）\n注意：較高的語境溫度會生成更多創意，但輸出的可預測性會相對較差。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"612\"/>\n        <source>Top-P</source>\n        <translation>核心採樣</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"613\"/>\n        <source>Nucleus Sampling factor. Lower -&gt; more predictable.</source>\n        <translation>核心採樣因子。更低 -&gt; 更可預測。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"623\"/>\n        <source>Only the most likely tokens up to a total probability of top_p can be chosen.\nNOTE: Prevents choosing highly unlikely tokens.</source>\n        <translation>只選擇總機率約為核心採樣，最有可能性的符元。（Top-P）\n注意：用於避免選擇不容易出現的符元。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"657\"/>\n        <source>Min-P</source>\n        <translation>最小符元機率</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"658\"/>\n        <source>Minimum token probability. 
Higher -&gt; more predictable.</source>\n        <translation>最小符元機率。更高 -&gt; 更可預測。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"668\"/>\n        <source>Sets the minimum relative probability for a token to be considered.</source>\n        <translation>設定要考慮的符元的最小相對機率。（Min-P）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"704\"/>\n        <source>Top-K</source>\n        <translation>高頻率採樣機率</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"705\"/>\n        <source>Size of selection pool for tokens.</source>\n        <translation>符元選擇池的大小。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"716\"/>\n        <source>Only the top K most likely tokens will be chosen from.</source>\n        <translation>只選擇前 K 個最有可能性的符元。（Top-K）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"751\"/>\n        <source>Max Length</source>\n        <translation>最大長度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"752\"/>\n        <source>Maximum response length, in tokens.</source>\n        <translation>最大回應長度（以符元為單位）。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"797\"/>\n        <source>Prompt Batch Size</source>\n        <translation>提示詞批次大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"798\"/>\n        <source>The batch size used for prompt processing.</source>\n        <translation>用於提示詞處理的批次大小。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"809\"/>\n        <source>Amount of prompt tokens to process at once.\nNOTE: Higher values can speed up 
reading prompts but will use more RAM.</source>\n        <translation>一次處理的提示詞符元數量。（Prompt Batch Size）\n注意：較高的值可以加快讀取提示詞的速度，但會使用比較多的記憶體。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"844\"/>\n        <source>Repeat Penalty</source>\n        <translation>重複處罰</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"845\"/>\n        <source>Repetition penalty factor. Set to 1 to disable.</source>\n        <translation>重複懲罰因子。設定為 1 以停用。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"889\"/>\n        <source>Repeat Penalty Tokens</source>\n        <translation>重複懲罰符元</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"890\"/>\n        <source>Number of previous tokens used for penalty.</source>\n        <translation>之前用於懲罰的符元數量。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"935\"/>\n        <source>GPU Layers</source>\n        <translation>圖形處理器負載層</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"936\"/>\n        <source>Number of model layers to load into VRAM.</source>\n        <translation>要載入到顯示記憶體中的模型層數。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelSettings.qml\" line=\"947\"/>\n        <source>How many model layers to load into VRAM. 
Decrease this if GPT4All runs out of VRAM while loading this model.\nLower values increase CPU load and RAM usage, and make inference slower.\nNOTE: Does not take effect until you reload the model.</source>\n        <translation>要載入到顯示記憶體中的模型層數。如果 GPT4All 在載入此模型時耗盡顯示記憶體，請減少此值。\n較低的值會增加中央處理器負載與主記憶體使用量，並使推理速度變慢。\n注意：重新載入模型後才會生效。</translation>\n    </message>\n</context>\n<context>\n    <name>ModelsView</name>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"40\"/>\n        <source>No Models Installed</source>\n        <translation>沒有已安裝的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"49\"/>\n        <source>Install a model to get started using GPT4All</source>\n        <translation>安裝模型以開始使用 GPT4All</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"60\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"102\"/>\n        <source>＋ Add Model</source>\n        <translation>＋ 新增模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"65\"/>\n        <source>Shows the add model view</source>\n        <translation>顯示新增模型視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"83\"/>\n        <source>Installed Models</source>\n        <translation>已安裝的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"89\"/>\n        <source>Locally installed chat models</source>\n        <translation>本機已安裝的交談模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"147\"/>\n        <source>Model file</source>\n        <translation>模型檔案</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"148\"/>\n        <source>Model file to be downloaded</source>\n        
<translation>即將下載的模型檔案</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"170\"/>\n        <source>Description</source>\n        <translation>描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"171\"/>\n        <source>File description</source>\n        <translation>檔案描述</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"196\"/>\n        <source>Resume</source>\n        <translation>恢復</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"204\"/>\n        <source>Stop/restart/start the download</source>\n        <translation>停止/重啟/開始下載</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"216\"/>\n        <source>Remove</source>\n        <translation>移除</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"223\"/>\n        <source>Remove model from filesystem</source>\n        <translation>從檔案系統移除模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"237\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"271\"/>\n        <source>Install</source>\n        <translation>安裝</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"272\"/>\n        <source>Install online model</source>\n        <translation>安裝線上模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"282\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a 
href=&quot;#error&quot;&gt;Error&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;1&quot;&gt;&lt;a href=&quot;#error&quot;&gt;錯誤&lt;/a&gt;&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"301\"/>\n        <source>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;WARNING: Not recommended for your hardware. Model requires more memory (%1 GB) than your system has available (%2).&lt;/strong&gt;&lt;/font&gt;</source>\n        <translation>&lt;strong&gt;&lt;font size=&quot;2&quot;&gt;警告：不推薦在您的硬體上運作。模型需要比較多的記憶體（%1 GB），但您的系統記憶體空間不足（%2）。&lt;/strong&gt;&lt;/font&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>%1 GB</source>\n        <translation>%1 GB</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"496\"/>\n        <source>?</source>\n        <translation>？</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"288\"/>\n        <source>Describes an error that occurred when downloading</source>\n        <translation>解釋下載時發生的錯誤</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"307\"/>\n        <source>Error for incompatible hardware</source>\n        <translation>錯誤，不相容的硬體</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"345\"/>\n        <source>Download progressBar</source>\n        <translation>下載進度條</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"346\"/>\n        <source>Shows the progress made in the download</source>\n        <translation>顯示下載進度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"356\"/>\n        
<source>Download speed</source>\n        <translation>下載速度</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"357\"/>\n        <source>Download speed in bytes/kilobytes/megabytes per second</source>\n        <translation>下載速度每秒 bytes/kilobytes/megabytes</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"374\"/>\n        <source>Calculating...</source>\n        <translation>計算中......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"378\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"408\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"429\"/>\n        <location filename=\"../qml/ModelsView.qml\" line=\"450\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation>是否正在計算檔案雜湊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"385\"/>\n        <source>Busy indicator</source>\n        <translatorcomment>參考自 https://terms.naer.edu.tw</translatorcomment>\n        <translation>忙線指示器</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"386\"/>\n        <source>Displayed when the file hash is being calculated</source>\n        <translation>計算檔案雜湊值時顯示</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"399\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation>錯誤：$API_KEY 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"405\"/>\n        <source>enter $API_KEY</source>\n        <translation>請輸入 $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"420\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation>錯誤：$BASE_URL 
未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"426\"/>\n        <source>enter $BASE_URL</source>\n        <translation>請輸入 $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"441\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation>錯誤：$MODEL_NAME 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"447\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation>請輸入 $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"469\"/>\n        <source>File size</source>\n        <translation>檔案大小</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"491\"/>\n        <source>RAM required</source>\n        <translation>所需的記憶體</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"513\"/>\n        <source>Parameters</source>\n        <translation>參數</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"535\"/>\n        <source>Quant</source>\n        <translation>量化</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ModelsView.qml\" line=\"557\"/>\n        <source>Type</source>\n        <translation>類型</translation>\n    </message>\n</context>\n<context>\n    <name>MyFancyLink</name>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"42\"/>\n        <source>Fancy link</source>\n        <translation>精緻網址</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MyFancyLink.qml\" line=\"43\"/>\n        <source>A stylized link</source>\n        <translation>個性化網址</translation>\n    </message>\n</context>\n<context>\n    <name>MyFileDialog</name>\n    <message>\n     
   <location filename=\"../qml/MyFileDialog.qml\" line=\"7\"/>\n        <source>Please choose a file</source>\n        <translation>請選擇一個檔案</translation>\n    </message>\n</context>\n<context>\n    <name>MyFolderDialog</name>\n    <message>\n        <location filename=\"../qml/MyFolderDialog.qml\" line=\"7\"/>\n        <source>Please choose a directory</source>\n        <translation>請選擇一個資料夾</translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsLabel</name>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Clear</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsLabel.qml\" line=\"53\"/>\n        <source>Reset</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>MySettingsTab</name>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"24\"/>\n        <source>Restore defaults?</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"25\"/>\n        <source>This page of settings will be reset to the defaults.</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"69\"/>\n        <source>Restore Defaults</source>\n        <translation>恢復預設值</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/MySettingsTab.qml\" line=\"73\"/>\n        <source>Restores settings dialog to a default state</source>\n        <translation>恢復設定對話視窗到預設狀態</translation>\n    </message>\n</context>\n<context>\n    <name>NetworkDialog</name>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"39\"/>\n        <source>Contribute data to the GPT4All Opensource Datalake.</source>\n       
 <translation>貢獻資料到 GPT4All 的開放原始碼資料湖泊。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"55\"/>\n        <source>By enabling this feature, you will be able to participate in the democratic process of training a large language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All Open Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you can suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake. You should have no expectation of chat privacy when this feature is enabled. You should; however, have an expectation of an optional attribution if you wish. Your chat data will be openly available for anyone to download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all attribution information attached to your data and you will be credited as a contributor to any GPT4All model release that uses your data!</source>\n        <translation>啟用這項功能後，您將能夠參與訓練大型語言模型的民主化進程，通過貢獻資料來改進未來的模型。\n\n當 GPT4All 模型回覆您並且您已選擇加入時，您的對話將被傳送到 GPT4All 開放原始碼資料湖泊。\n此外，您可以對其回覆表示讚或倒讚。如果您倒讚了某則回覆，您可以提出更好的回覆。\n這些資料將被收集並彙總到 GPT4All 資料湖泊中。\n\n注意：啟用此功能後，您的資料將被傳送到 GPT4All 開放原始碼資料湖泊。\n啟用此功能時，您將會失去對話的隱私權；然而，您可以選擇是否附上署名。\n您的對話資料將可被任何人開放下載，並將由 Nomic AI 用於改進未來的 GPT4All 模型。\nNomic AI 將保留附加在您的資料上的所有署名訊息，並且您將被認可為任何使用您的資料的 GPT4All 模型版本的貢獻者！</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"70\"/>\n        <source>Terms for opt-in</source>\n        <translation>計畫規範</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"71\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>解釋當您加入計畫後，會發生什麼事情</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"79\"/>\n        <source>Please provide a name for attribution (optional)</source>\n        <translation>請提供署名（非必填）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"81\"/>\n        <source>Attribution (optional)</source>\n        <translation>署名（非必填）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"82\"/>\n        <source>Provide attribution</source>\n        <translation>提供署名</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"95\"/>\n        <source>Enable</source>\n        <translation>啟用</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"96\"/>\n        <source>Enable opt-in</source>\n        <translation>加入計畫</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/NetworkDialog.qml\" line=\"100\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NetworkDialog.qml\" line=\"101\"/>\n        <source>Cancel opt-in</source>\n        <translation>拒絕計畫</translation>\n    </message>\n</context>\n<context>\n    <name>NewVersionDialog</name>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"34\"/>\n        <source>New version is available</source>\n        <translation>發現新版本</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"46\"/>\n        <source>Update</source>\n        <translation>更新</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/NewVersionDialog.qml\" line=\"48\"/>\n        <source>Update to new version</source>\n        <translation>更新版本</translation>\n    </message>\n</context>\n<context>\n    <name>PopupDialog</name>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"38\"/>\n        <source>Reveals a shortlived help balloon</source>\n        <translation>呼叫提示小幫手</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"48\"/>\n        <source>Busy indicator</source>\n        <translatorcomment>參考自 https://terms.naer.edu.tw</translatorcomment>\n        <translation>忙線指示器</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/PopupDialog.qml\" line=\"49\"/>\n        <source>Displayed when the popup is showing busy</source>\n        <translation>當彈出視窗忙碌時顯示</translation>\n    </message>\n</context>\n<context>\n    <name>RemoteModelCard</name>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"92\"/>\n        <source>API Key</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/RemoteModelCard.qml\" line=\"104\"/>\n        <source>ERROR: $API_KEY is empty.</source>\n        <translation type=\"unfinished\">錯誤：$API_KEY 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"117\"/>\n        <source>enter $API_KEY</source>\n        <translation type=\"unfinished\">請輸入 $API_KEY</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"120\"/>\n        <source>Whether the file hash is being calculated</source>\n        <translation type=\"unfinished\">是否正在計算檔案雜湊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"127\"/>\n        <source>Base Url</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"138\"/>\n        <source>ERROR: $BASE_URL is empty.</source>\n        <translation type=\"unfinished\">錯誤：$BASE_URL 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"144\"/>\n        <source>enter $BASE_URL</source>\n        <translation type=\"unfinished\">請輸入 $BASE_URL</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"152\"/>\n        <source>Model Name</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"163\"/>\n        <source>ERROR: $MODEL_NAME is empty.</source>\n        <translation type=\"unfinished\">錯誤：$MODEL_NAME 未填寫。</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"169\"/>\n        <source>enter $MODEL_NAME</source>\n        <translation type=\"unfinished\">請輸入 $MODEL_NAME</translation>\n    </message>\n    <message>\n        <location 
filename=\"../qml/RemoteModelCard.qml\" line=\"179\"/>\n        <source>Models</source>\n        <translation type=\"unfinished\">模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"199\"/>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"217\"/>\n        <source>Install</source>\n        <translation type=\"unfinished\">安裝</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/RemoteModelCard.qml\" line=\"218\"/>\n        <source>Install remote model</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n</context>\n<context>\n    <name>SettingsView</name>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"22\"/>\n        <location filename=\"../qml/SettingsView.qml\" line=\"61\"/>\n        <source>Settings</source>\n        <translation>設定</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"23\"/>\n        <source>Contains various application settings</source>\n        <translation>內含多種應用程式設定</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"29\"/>\n        <source>Application</source>\n        <translation>應用程式</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"32\"/>\n        <source>Model</source>\n        <translation>模型</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/SettingsView.qml\" line=\"35\"/>\n        <source>LocalDocs</source>\n        <translation>我的文件</translation>\n    </message>\n</context>\n<context>\n    <name>StartupDialog</name>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"50\"/>\n        <source>Welcome!</source>\n        <translation>歡迎使用！</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" 
line=\"71\"/>\n        <source>Release notes</source>\n        <translation>版本資訊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"72\"/>\n        <source>Release notes for this version</source>\n        <translation>這個版本的版本資訊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"87\"/>\n        <source>### Opt-ins for anonymous usage analytics and datalake\nBy enabling these features, you will be able to participate in the democratic process of training a\nlarge language model by contributing data for future model improvements.\n\nWhen a GPT4All model responds to you and you have opted-in, your conversation will be sent to the GPT4All\nOpen Source Datalake. Additionally, you can like/dislike its response. If you dislike a response, you\ncan suggest an alternative response. This data will be collected and aggregated in the GPT4All Datalake.\n\nNOTE: By turning on this feature, you will be sending your data to the GPT4All Open Source Datalake.\nYou should have no expectation of chat privacy when this feature is enabled. You should; however, have\nan expectation of an optional attribution if you wish. Your chat data will be openly available for anyone\nto download and will be used by Nomic AI to improve future GPT4All models. 
Nomic AI will retain all\nattribution information attached to your data and you will be credited as a contributor to any GPT4All\nmodel release that uses your data!</source>\n        <translation>### 匿名使用統計暨資料湖泊計畫\n啟用這些功能後，您將能夠參與訓練大型語言模型的民主化進程，通過貢獻資料來改進未來的模型。\n\n當 GPT4All 模型回覆您並且您已選擇加入時，您的對話將被傳送到 GPT4All 開放原始碼資料湖泊。\n此外，您可以對其回覆表示讚或倒讚。如果您倒讚了某則回覆，您可以提出更好的回覆。\n這些資料將被收集並彙總到 GPT4All 資料湖泊中。\n\n注意：啟用此功能後，您的資料將被傳送到 GPT4All 開放原始碼資料湖泊。\n啟用此功能時，您將會失去對話的隱私權；然而，您可以選擇是否附上署名。\n您的對話資料將可被任何人開放下載，並將由 Nomic AI 用於改進未來的 GPT4All 模型。\nNomic AI 將保留附加在您的資料上的所有署名訊息，並且您將被認可為任何使用您的資料的 GPT4All 模型版本的貢獻者！</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"106\"/>\n        <source>Terms for opt-in</source>\n        <translation>計畫規範</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"107\"/>\n        <source>Describes what will happen when you opt-in</source>\n        <translation>解釋當您加入計畫後，會發生什麼事情</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"118\"/>\n        <source>Opt-in to anonymous usage analytics used to improve GPT4All</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"147\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"262\"/>\n        <source>Yes</source>\n        <translation>是</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"189\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"304\"/>\n        <source>No</source>\n        <translation>否</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"124\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"150\"/>\n        <source>Opt-in for anonymous usage statistics</source>\n        
<translation>匿名使用統計計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"67\"/>\n        <source>### Release Notes\n%1&lt;br/&gt;\n### Contributors\n%2</source>\n        <translation>### 版本資訊\n%1&lt;br/&gt;\n### 貢獻者\n%2</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"151\"/>\n        <source>Allow opt-in for anonymous usage statistics</source>\n        <translation>加入匿名使用統計計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"192\"/>\n        <source>Opt-out for anonymous usage statistics</source>\n        <translation>退出匿名使用統計計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"193\"/>\n        <source>Allow opt-out for anonymous usage statistics</source>\n        <translation>終止並退出匿名使用統計計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"232\"/>\n        <source>Opt-in to anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"238\"/>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"265\"/>\n        <source>Opt-in for network</source>\n        <translation>資料湖泊計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"239\"/>\n        <source>Allow opt-in for network</source>\n        <translation>加入資料湖泊計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"307\"/>\n        <source>Opt-out for network</source>\n        <translation>退出資料湖泊計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"266\"/>\n        <source>Allow opt-in anonymous 
sharing of chats to the GPT4All Datalake</source>\n        <translation>開始將交談內容匿名分享到 GPT4All 資料湖泊</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/StartupDialog.qml\" line=\"308\"/>\n        <source>Allow opt-out anonymous sharing of chats to the GPT4All Datalake</source>\n        <translation>終止將交談內容匿名分享到 GPT4All 資料湖泊</translation>\n    </message>\n</context>\n<context>\n    <name>SwitchModelDialog</name>\n    <message>\n        <source>&lt;b&gt;Warning:&lt;/b&gt; changing the model will erase the current conversation. Do you wish to continue?</source>\n        <translation type=\"vanished\">&lt;b&gt;警告：&lt;/b&gt; 變更模型將會清除目前對話內容。您真的想要繼續嗎？</translation>\n    </message>\n    <message>\n        <source>Continue</source>\n        <translation type=\"vanished\">繼續</translation>\n    </message>\n    <message>\n        <source>Continue with model loading</source>\n        <translation type=\"vanished\">繼續載入模型</translation>\n    </message>\n    <message>\n        <source>Cancel</source>\n        <translation type=\"vanished\">取消</translation>\n    </message>\n</context>\n<context>\n    <name>ThumbsDownDialog</name>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"39\"/>\n        <source>Please edit the text below to provide a better response. 
(optional)</source>\n        <translation>請編輯以下文字，以提供更好的回覆。（非必填）</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"54\"/>\n        <source>Please provide a better response...</source>\n        <translation>請提供一則更好的回覆......</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"64\"/>\n        <source>Submit</source>\n        <translation>送出</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"65\"/>\n        <source>Submits the user&apos;s response</source>\n        <translation>送出使用者的回覆</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"69\"/>\n        <source>Cancel</source>\n        <translation>取消</translation>\n    </message>\n    <message>\n        <location filename=\"../qml/ThumbsDownDialog.qml\" line=\"70\"/>\n        <source>Closes the response dialog</source>\n        <translation>關閉回覆對話視窗</translation>\n    </message>\n</context>\n<context>\n    <name>main</name>\n    <message>\n        <location filename=\"../main.qml\" line=\"24\"/>\n        <source>GPT4All v%1</source>\n        <translation>GPT4All v%1</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"47\"/>\n        <source>Restore</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"51\"/>\n        <source>Quit</source>\n        <translation type=\"unfinished\"></translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"149\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Incompatible hardware detected.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, your CPU does not meet the minimal requirements to run this program. 
In particular, it does not support AVX intrinsics which this program requires to successfully run a modern large language model. The only solution at this time is to upgrade your hardware to a more modern CPU.&lt;br&gt;&lt;br&gt;See here for more information: &lt;a href=&quot;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&quot;&gt;https://en.wikipedia.org/wiki/Advanced_Vector_Extensions&lt;/a&gt;</source>\n        <translation>&lt;h3&gt;啟動時發生錯誤：&lt;/h3&gt;&lt;br&gt;&lt;i&gt;「偵測到不相容的硬體。」&lt;/i&gt;&lt;br&gt;&lt;br&gt;糟糕！您的中央處理器不符合運行所需的最低需求。尤其，它不支援本程式運行現代大型語言模型所需的 AVX 指令集。目前唯一的解決方案，只有更新您的中央處理器及其相關硬體裝置。&lt;br&gt;&lt;br&gt;更多資訊請查閱：&lt;a href=&quot;https://zh.wikipedia.org/wiki/AVX指令集&quot;&gt;AVX 指令集 - 維基百科&lt;/a&gt;</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"165\"/>\n        <source>&lt;h3&gt;Encountered an error starting up:&lt;/h3&gt;&lt;br&gt;&lt;i&gt;&quot;Inability to access settings file.&quot;&lt;/i&gt;&lt;br&gt;&lt;br&gt;Unfortunately, something is preventing the program from accessing the settings file. This could be caused by incorrect permissions in the local app config directory where the settings file is located. 
Check out our &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;discord channel&lt;/a&gt; for help.</source>\n        <translation>&lt;h3&gt;啟動時發生錯誤：&lt;/h3&gt;&lt;br&gt;&lt;i&gt;「無法存取設定檔。」&lt;/i&gt;&lt;br&gt;&lt;br&gt;糟糕！有些東西正在阻止程式存取設定檔。這極為可能是由於設定檔所在的本機應用程式設定資料夾中的權限設定不正確所造成的。煩請洽詢我們的 &lt;a href=&quot;https://discord.gg/4M2QFmTt2k&quot;&gt;Discord 伺服器&lt;/a&gt; 以尋求協助。</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"193\"/>\n        <source>Connection to datalake failed.</source>\n        <translation>連線資料湖泊失敗。</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"204\"/>\n        <source>Saving chats.</source>\n        <translation>儲存交談。</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"215\"/>\n        <source>Network dialog</source>\n        <translation>資料湖泊計畫對話視窗</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"216\"/>\n        <source>opt-in to share feedback/conversations</source>\n        <translation>分享回饋/對話計畫</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"278\"/>\n        <source>Home view</source>\n        <translation>首頁視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"279\"/>\n        <source>Home view of application</source>\n        <translation>應用程式首頁視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"287\"/>\n        <source>Home</source>\n        <translation>首頁</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"313\"/>\n        <source>Chat view</source>\n        <translation>查看交談</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"314\"/>\n        <source>Chat view to interact with models</source>\n        
<translation>模型互動交談視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"322\"/>\n        <source>Chats</source>\n        <translation>交談</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"347\"/>\n        <location filename=\"../main.qml\" line=\"356\"/>\n        <source>Models</source>\n        <translation>模型</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"348\"/>\n        <source>Models view for installed models</source>\n        <translation>已安裝模型的模型視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"381\"/>\n        <location filename=\"../main.qml\" line=\"390\"/>\n        <source>LocalDocs</source>\n        <translation>我的文件</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"382\"/>\n        <source>LocalDocs view to configure and use local docs</source>\n        <translation>用於設定與使用我的文件的「我的文件」視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"415\"/>\n        <location filename=\"../main.qml\" line=\"424\"/>\n        <source>Settings</source>\n        <translation>設定</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"416\"/>\n        <source>Settings view for application configuration</source>\n        <translation>應用程式設定視圖</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"469\"/>\n        <source>The datalake is enabled</source>\n        <translation>資料湖泊已啟用</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"471\"/>\n        <source>Using a network model</source>\n        <translation>使用一個網路模型</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"473\"/>\n        <source>Server mode is enabled</source>\n     
   <translation>伺服器模式已啟用</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"684\"/>\n        <source>Installed models</source>\n        <translation>已安裝的模型</translation>\n    </message>\n    <message>\n        <location filename=\"../main.qml\" line=\"685\"/>\n        <source>View of installed models</source>\n        <translation>已安裝的模型視圖</translation>\n    </message>\n</context>\n</TS>\n"
  },
  {
    "path": "gpt4all-training/GPT-J_MAP.md",
    "content": "# Inference on Training Data\n\n\n## Run Inference\n\n```bash\ntorchrun --master_port=29085 --nproc-per-node 8 inference.py --config=configs/inference/gptj.yaml\n```\n\n\n## Visualizations\n\n```bash\npython build_map.py\n```\n \nwill build a map in `Atlas`, one using the internal clustering algorithm provided by Nomic and one using the embeddings generated by the finetuned model."
  },
  {
    "path": "gpt4all-training/README.md",
    "content": "## Training GPT4All-J\n\n### Technical Reports\n\n<p align=\"center\">\n<a href=\"https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf\">:green_book: Technical Report 3: GPT4All Snoozy and Groovy </a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://static.nomic.ai/gpt4all/2023_GPT4All-J_Technical_Report_2.pdf\">:green_book: Technical Report 2: GPT4All-J </a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://s3.amazonaws.com/static.nomic.ai/gpt4all/2023_GPT4All_Technical_Report.pdf\">:green_book: Technical Report 1: GPT4All</a>\n</p>\n\n### GPT4All-J Training Data\n\n- We are releasing the curated training data for anyone to replicate GPT4All-J here: [GPT4All-J Training Data](https://huggingface.co/datasets/nomic-ai/gpt4all-j-prompt-generations)\n   - [Atlas Map of Prompts](https://atlas.nomic.ai/map/gpt4all-j-prompts-curated)\n   - [Atlas Map of Responses](https://atlas.nomic.ai/map/gpt4all-j-response-curated)\n   \nWe have released updated versions of our `GPT4All-J` model and training data. \n\n- `v1.0`: The original model trained on the v1.0 dataset\n- `v1.1-breezy`: Trained on a filtered dataset where we removed all instances of AI language model\n- `v1.2-jazzy`: Trained on a filtered dataset where we also removed instances like I'm sorry, I can't answer... 
and AI language model\n\nThe [models](https://huggingface.co/nomic-ai/gpt4all-j) and [data](https://huggingface.co/datasets/nomic-ai/gpt4all-j-prompt-generations) versions can be specified by passing a `revision` argument.\n\nFor example, to load the `v1.2-jazzy` model and dataset, run:\n\n```python\nfrom datasets import load_dataset\nfrom transformers import AutoModelForCausalLM\n\ndataset = load_dataset(\"nomic-ai/gpt4all-j-prompt-generations\", revision=\"v1.2-jazzy\")\nmodel = AutoModelForCausalLM.from_pretrained(\"nomic-ai/gpt4all-j\", revision=\"v1.2-jazzy\")\n```\n\n### GPT4All-J Training Instructions\n\n```bash\naccelerate launch --dynamo_backend=inductor --num_processes=8 --num_machines=1 --machine_rank=0 --deepspeed_multinode_launcher standard --mixed_precision=bf16  --use_deepspeed --deepspeed_config_file=configs/deepspeed/ds_config_gptj.json train.py --config configs/train/finetune_gptj.yaml\n```\n"
  },
  {
    "path": "gpt4all-training/TRAINING_LOG.md",
    "content": "# Training Trials and Tribulations of gpt4all\n\nThis is a training log for both the LoRa and full model training we underwent\n\n## Inspiration\n\n\n\n## Initial Experiment\n\nWe train an initial LoRa model on ~700k examples including data from P3/BLOOM, StackOverflow, and unified_chip2.\nWe trained using 8 x A100 80GB GPUs.\n\nWe used the initial parameters:\n\n| Hyperparameter | Value |\n|----------------|-------|\n| Per Device BS  | 4     |\n| Global BS      | 32    |\n| Learning rate  | 5e-5  |\n| Epochs         | 1     |\n| Max length     | 1024  |\n| Weight decay   | 0     |\n| Warmup Steps   | 100   |\n\nWe randomly shuffle and set aside 5% of the data for validation.\n\nWe had an initial bug in logging the training loss but we noticed a decrease in validation loss.\n\n![](figs/first_lora.png)\n\n\nDuring generation, we noticed that our model seemed to never generate an `eos` token. We realized that this was due to the fact that we set\nthe tokenizer `pad` token equal to the `eos` token and most likely never learned to \"stop\". However, generations seemed to be working\nwell, even with greedy generation. \n\nFor example, when prompted with `Generate a python script to reverse a string.`, the model outputs valid markdown and a working script\n\n    Here's a Python script to reverse a string:\n\n    ```python\n    def reverse_string(string):\n        return ''.join(reversed(string))\n    ```\n\n    This function takes a string as an argument and returns the reversed string. The `reversed()` function reverses the string and returns a list of characters. The `join()` function joins the list of characters into a string.\n\n    You can use this function like this:\n\n    ```python\n    string = 'hello world'\n    print(reverse_string(string))\n    ```\n\n    This will output `'olleh dna hwl'`.\n\n    Note that this function does not handle strings with spaces or other special characters. 
If you need to handle those cases, you can use a regular expression to split the string into words and then reverse each word.\n\n    ```python\n    import re\n\n    def reverse_string(string):\n        words = re.split(' ', string)\n        return ''.join(reversed(word) for word in words)\n    ```\n\n    This function takes a string as an argument and returns the reversed string. The `re.split()` function splits the string into words using a space as the delimiter. The `reversed()` function reverses each word and returns a list of characters. The `join()` function joins the list of characters into a string.\n\n    You can use this function like this:\n\n    ```python\n    string = 'hello world'\n    print(reverse_string(string))\n    ```\n\n    This will output `'olleh dna hwl'`.\n\n    Note that this function also handles strings with spaces or other special characters.\n\n\n    Answer: Here's a Python script to reverse a string:\n\n    ```python\n    def reverse_string(string):\n        return ''.join(reversed(string))\n    ```\n\n    This function takes a string as an argument and returns the reversed string. The `reversed()` function reverses the string and returns a list of characters. The `join()` function joins the list of characters into a string.\n\n    You can use this function like this:\n\n    ```python\n    string = 'hello world'\n    print(reverse_string(string))\n    ```\n\n    This will output `'olleh dna hwl'`.\n\n    Note that this function does not handle strings with\n\nalthough it duplicates and continues generating past answering the full question.\n\nThe base model doesn't answer the question and hallucinates\n```\nWrite a python script to reverse a string. The string should be reversed in place.\nThe string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. 
The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place.\nThe string should be reversed in place. The string should be reversed in place. The string should be reversed in place. The string should be reversed in place. 
The string should be\n```\n\n\n## EOS and Accidental Duplication\n\nSeeing as the model never stopped, we experimented with training a model with a separate token for `eos` and `pad`. Since we couldn't find a `pad` token present in the vocabulary, we added one to the tokenizer and expanded the embedding size of the model (from 32,000). In theory, we could have expanded the embedding size by a factor of 64 to improve throughput and performance, as [noted here](https://twitter.com/ctnzr/status/1623758178587648000?s=20).\nFor every sequence, we appended an `eos` token in hopes the model would learn to exit. \n\nWe successfully trained a model using the same parameters as before ![](figs/duplicate_loss.png)\n\nDuring generation, our model exited early even with greedy generations \n\n    You can use the `reversed()` function to reverse a string in Python. Here's an example:\n\n    ```python\n    string = 'hello world'\n    reversed_string = reversed(string)\n    print(reversed_string)\n    ```\n\n    Output:\n\n    ```\n    world hello\n    ```\n\nFor a harder prompt where we try to trick the model with comments\n\n```python\n#this code prints a string reversed\nmy_string = \"hello how are you\"\nprint(len(my_string))\n\n\nMy code above does not work. Can you help me?\n```\n\nThe model correctly answers\n\n    The code you provided does not print a string reversed. It prints the length of the string \"hello how are you\".\n\n\nWe realized that we had two bugs however:\n- We accidentally duplicated data and effectively trained for 2 epochs instead of 1\n- We added an eos token to every sequence, even those that we truncated (e.g. 
long code that exceeds the 1024).\n\n## Conditional EOS and 1 Epoch\n\nUsing the same parameters, we then trained a model using a \"conditional\" eos token where we only add an `eos` when the inputs are less than the maximum sequence length for one epoch.\n\nOur training and validation loss looks fairly normal\n\n![](figs/single_epoch.png)\n\nHowever, we found generations to be slightly poorer. For the same prompt of `Generate a python script to reverse a string.`, the model generates\n\n    Write a python script to reverse a string.\n    Here is a python script to reverse a string:\n    import string\n\n    def reverse_string(string):\n        return string[::-1]\n\n    print(reverse_string('hello world'))\n\n    Output:\n    world hello\n\nThe prompt \n```python\n#this code prints a string reversed\nmy_string = \"hello how are you\"\nprint(len(my_string))\n\n\nMy code above does not work. Can you help me?\n```\n\ndoes not generate any text.\n\n\nAnd the prompt `\"Generate a python script to make a get request to an api endpoint.\"` generates\n\n    I'm sorry, I cannot provide a specific answer to this question as it requires more context and details about the API endpoint and the specific task you are trying to accomplish. Can you please provide more information?\n\n\n## Multi Epoch and Full Model Training\n\nWe decided to remove the entire Bigscience/P3 subset from the final training dataset due to data diversity considerations. \nP3 contains many homogeneous prompts which produce short and homogeneous responses from GPT-3.5-Turbo. 
\nThe final dataset is ~400k examples.\n\nWe train a LoRa model using the parameters \n\n| Hyperparameter | Value |\n|----------------|-------|\n| Per Device BS  | 4     |\n| Global BS      | 32    |\n| Learning rate  | 5e-5  |\n| Epochs         | 4     |\n| Max length     | 1024  |\n| Weight decay   | 0     |\n| Warmup Steps   | 100   |\n\n\nWe additionally train a full model \n| Hyperparameter | Value |\n|----------------|-------|\n| Per Device BS  | 32    |\n| Global BS      | 256   |\n| Learning rate  | 5e-5  |\n| Epochs         | 2     |\n| Max length     | 1024  |\n| Weight decay   | 0     |\n| Warmup Steps   | 100   |\n\nTaking inspiration from [the Alpaca Repo](https://github.com/tatsu-lab/stanford_alpaca), we roughly scale the learning rate by `sqrt(k)`, where `k` is the increase in batch size, where Alpaca used a batch size of 128 and learning rate of 2e-5.\n\nComparing our model LoRa to the [Alpaca LoRa](https://huggingface.co/tloen/alpaca-lora-7b), our model has lower perplexity. Qualitatively, training on 3 epochs performed the best on perplexity as well as qualitative examples. \n\nWe tried training a full model using the parameters above, but found that during the second epoch the model diverged and samples generated post training were worse than the first epoch. \n\n\n## GPT-J Training\n\n### Model Training Divergence\n\nWe trained multiple [GPT-J models](https://huggingface.co/EleutherAI/gpt-j-6b) with varying success. We found that training the full model led to divergence post epoch 1. ![](figs/overfit-gpt-j.png)\n\n\nWe release the checkpoint after epoch 1.\n\n\nUsing Atlas, we extracted the embeddings of each point in the dataset and calculated the loss per sequence. We then uploaded [this to Atlas](https://atlas.nomic.ai/map/gpt4all-j-post-epoch-1-embeddings) and noticed that the higher loss items seem to cluster. 
On further inspection, the highest density clusters seemed to be of prompt/response pairs that asked for creative-like generations such as `Generate a story about ...` ![](figs/clustering_overfit.png)\n\n\n\n### GPT4All-J Hyperparameters\n\nWe varied learning rate, learning rate schedule, and weight decay following suggestions from the [original GPT-J codebase](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/howto_finetune.md) but found no real performance difference (qualitatively or quantitatively) when varying these parameters.\n\n\n\nThe final model was trained using the following hyperparameters with a linear warmup followed by constant learning rate:\n\n| Hyperparameter | Value |\n|----------------|-------|\n| Per Device BS  | 32    |\n| Global BS      | 256   |\n| Learning rate  | 2e-5  |\n| Epochs         | 2     |\n| Max length     | 1024  |\n| Weight decay   | 0     |\n| Warmup Steps   | 500   |\n\n\nThe LoRA model was trained using the following hyperparameters with a linear warmup followed by constant learning rate: \n\n| Hyperparameter | Value |\n|----------------|-------|\n| Per Device BS  | 4     |\n| Global BS      | 32    |\n| Learning rate  | 2e-5  |\n| Epochs         | 2     |\n| Max length     | 1024  |\n| Weight decay   | 0     |\n| Warmup Steps   | 500   |\n"
  },
  {
    "path": "gpt4all-training/clean.py",
    "content": "#!/usr/bin/env python3\nimport numpy as np\nimport glob\nimport os\nimport json\nimport jsonlines\nimport pandas as pd\n\n\nprompt_generation_dir = \"raw_data_sanity_cleaned_without_p3/\"\nfor file in glob.glob(os.path.join(prompt_generation_dir, \"*.jsonl\")):\n    if \"clean.jsonl\" in file:\n        continue\n    data = []\n    print(file)\n    with open(file) as f:\n        for line in f:\n            try:\n                contents = json.loads(line)\n                data.append(contents)\n            except BaseException:\n                pass\n\n    processed = []\n\n    for item in data:\n        if 'source' not in item:\n            item['source'] = 'unspecified'\n        if 'model_settings' in item:\n            item.pop('model_settings', None)\n        \n        for key in list(item.keys()):\n            if key not in ['source', 'prompt', 'response']:\n                #print(item[key])\n                item.pop(key, None)\n        \n        if isinstance(item['prompt'], dict):\n            if \"value\" in item[\"prompt\"]:\n                item[\"prompt\"] = item[\"prompt\"][\"value\"]\n            elif \"description\" in item[\"prompt\"]:\n                item[\"prompt\"] = item[\"prompt\"][\"description\"]\n            else:\n                continue\n                \n        elif not isinstance(item['prompt'], str):\n            continue\n        \n        if isinstance(item['response'], dict):\n            if \"value\" in item[\"response\"]:\n                item[\"response\"] = item[\"response\"][\"value\"]\n            elif \"description\" in item[\"response\"]:\n                item[\"response\"] = item[\"response\"][\"description\"]\n            else:\n                continue \n        elif not isinstance(item['response'], str):\n            continue\n\n        if item:\n            processed.append(item)\n\n    df = pd.DataFrame(processed)\n    prev_len = len(df)\n\n    # drop empty or null string\n    df = 
df.dropna(subset=['prompt', 'response'])\n    df = df[df['prompt'] != '']\n    df = df[df['response'] != '']\n    df = df[df[\"prompt\"].str.len() > 1]\n    curr_len = len(df)\n\n    print(f\"Removed {prev_len - curr_len} rows\")\n\n    clean_name = file.split(\".jsonl\")[0] + \"_clean.jsonl\"\n    print(f\"writing {curr_len} rows to {clean_name}\")\n    df.to_json(clean_name, orient=\"records\", lines=True)\n"
  },
  {
    "path": "gpt4all-training/configs/deepspeed/ds_config.json",
    "content": "{\n\t\"train_batch_size\": \"auto\",\n\t\"gradient_accumulation_steps\": \"auto\",\n\t\"train_micro_batch_size_per_gpu\": \"auto\",\n\t\"fp16\": {\n\t  \"enabled\": \"auto\",\n\t  \"min_loss_scale\": 1,\n\t  \"loss_scale_window\": 1000,\n\t  \"hysteresis\": 2,\n\t  \"initial_scale_power\": 32\n\t},\n\t\"bf16\": {\n\t\t\"enabled\": \"auto\"\n\t},\n\t\"gradient_clipping\": 1,\n\t\"zero_optimization\": {\n\t  \"stage\": 2,\n\t  \"offload_param\": {\n\t\t\"device\": \"none\"\n\t  },\n\t  \"offload_optimizer\": {\n\t\t\"device\": \"none\"\n\t  },\n\t  \"allgather_partitions\": true,\n\t  \"allgather_bucket_size\": 5e8,\n\t  \"contiguous_gradients\": true\n\t},\n\t\"optimizer\": {\n\t  \"type\": \"AdamW\",\n\t  \"params\": {\n\t\t\"lr\": \"auto\",\n\t\t\"betas\": [\n\t\t  0.9,\n\t\t  0.999\n\t\t],\n\t\t\"eps\": 1e-08\n\t  }\n\t},\n\t\"scheduler\": {\n\t  \"type\": \"WarmupLR\",\n\t  \"params\": {\n\t\t\"warmup_min_lr\": 0,\n\t\t\"warmup_max_lr\": \"auto\",\n\t\t\"warmup_num_steps\": \"auto\",\n\t\t\"warmup_type\": \"linear\"\n\t  }\n\t}\n  }"
  },
  {
    "path": "gpt4all-training/configs/deepspeed/ds_config_gptj.json",
    "content": "{\n\t\"train_batch_size\": \"auto\",\n\t\"gradient_accumulation_steps\": \"auto\",\n\t\"train_micro_batch_size_per_gpu\": \"auto\",\n\t\"fp16\": {\n\t  \"enabled\": \"auto\",\n\t  \"min_loss_scale\": 1,\n\t  \"loss_scale_window\": 1000,\n\t  \"hysteresis\": 2,\n\t  \"initial_scale_power\": 32\n\t},\n\t\"bf16\": {\n\t\t\"enabled\": \"auto\"\n\t},\n\t\"gradient_clipping\": 1.0,\n\t\"zero_optimization\": {\n\t  \"stage\": 2,\n\t  \"offload_param\": {\n\t\t\"device\": \"none\"\n\t  },\n\t  \"offload_optimizer\": {\n\t\t\"device\": \"none\"\n\t  },\n\t  \"allgather_partitions\": true,\n\t  \"allgather_bucket_size\": 5e8,\n\t  \"contiguous_gradients\": true\n\t},\n\t\"optimizer\": {\n\t\t\"type\": \"AdamW\",\n\t\t\"params\": {\n\t\t  \"lr\": \"auto\",\n\t\t  \"betas\": [\n\t\t\t0.9,\n\t\t\t0.999\n\t\t  ],\n\t\t  \"eps\": 1e-08\n\t\t}\n\t  },\n\t  \"scheduler\": {\n\t\t\"type\": \"WarmupLR\",\n\t\t\"params\": {\n\t\t  \"warmup_min_lr\": 0,\n\t\t  \"warmup_max_lr\": \"auto\",\n\t\t  \"warmup_num_steps\": \"auto\",\n\t\t  \"warmup_type\": \"linear\"\n\t\t}\n\t  }\n}"
  },
  {
    "path": "gpt4all-training/configs/deepspeed/ds_config_gptj_lora.json",
    "content": "{\n\t\"train_batch_size\": \"auto\",\n\t\"gradient_accumulation_steps\": \"auto\",\n\t\"train_micro_batch_size_per_gpu\": \"auto\",\n\t\"fp16\": {\n\t  \"enabled\": \"auto\",\n\t  \"min_loss_scale\": 1,\n\t  \"loss_scale_window\": 1000,\n\t  \"hysteresis\": 2,\n\t  \"initial_scale_power\": 32\n\t},\n\t\"bf16\": {\n\t\t\"enabled\": \"auto\"\n\t},\n\t\"gradient_clipping\": 1,\n\t\"zero_optimization\": {\n\t  \"stage\": 2,\n\t  \"offload_param\": {\n\t\t\"device\": \"cpu\"\n\t  },\n\t  \"offload_optimizer\": {\n\t\t\"device\": \"cpu\"\n\t  },\n\t  \"allgather_partitions\": true,\n\t  \"allgather_bucket_size\": 5e8,\n\t  \"contiguous_gradients\": true\n\t},\n\t\"optimizer\": {\n\t  \"type\": \"AdamW\",\n\t  \"params\": {\n\t\t\"lr\": \"auto\",\n\t\t\"betas\": [\n\t\t  0.9,\n\t\t  0.999\n\t\t],\n\t\t\"eps\": 1e-08\n\t  }\n\t},\n\t\"scheduler\": {\n\t  \"type\": \"WarmupLR\",\n\t  \"params\": {\n\t\t\"warmup_min_lr\": 0,\n\t\t\"warmup_max_lr\": \"auto\",\n\t\t\"warmup_num_steps\": \"auto\",\n\t\t\"warmup_type\": \"linear\"\n\t  }\n\t}\n  }"
  },
  {
    "path": "gpt4all-training/configs/deepspeed/ds_config_mpt.json",
    "content": "{\n\t\"train_batch_size\": \"auto\",\n\t\"gradient_accumulation_steps\": \"auto\",\n\t\"train_micro_batch_size_per_gpu\": \"auto\",\n\t\"fp16\": {\n\t  \"enabled\": \"auto\",\n\t  \"min_loss_scale\": 1,\n\t  \"loss_scale_window\": 1000,\n\t  \"hysteresis\": 2,\n\t  \"initial_scale_power\": 32\n\t},\n\t\"bf16\": {\n\t\t\"enabled\": \"auto\"\n\t},\n\t\"gradient_clipping\": 1.0,\n\t\"zero_optimization\": {\n\t\t\"stage\": 1,\n\t\t\"offload_param\": {\n\t\t  \"device\": \"none\"\n\t\t},\n\t\t\"offload_optimizer\": {\n\t\t  \"device\": \"none\"\n\t\t},\n\t\t\"allgather_partitions\": true,\n\t\t\"allgather_bucket_size\": 5e8,\n\t\t\"contiguous_gradients\": true\n\t  },\n\t\"optimizer\": {\n\t\t\"type\": \"AdamW\",\n\t\t\"params\": {\n\t\t  \"lr\": \"auto\",\n\t\t  \"betas\": [\n\t\t\t0.9,\n\t\t\t0.999\n\t\t  ],\n\t\t  \"eps\": 1e-08\n\t\t}\n\t  },\n\t  \"scheduler\": {\n\t\t\"type\": \"WarmupDecayLR\",\n\t\t\"params\": {\n\t\t  \"warmup_min_lr\": 0,\n\t\t  \"warmup_max_lr\": \"auto\",\n\t\t  \"warmup_num_steps\": \"auto\",\n\t\t  \"warmup_type\": \"linear\",\n\t\t  \"total_num_steps\": \"auto\"\n\t\t}\n\t  }\n}"
  },
  {
    "path": "gpt4all-training/configs/deepspeed/ds_config_pythia.json",
    "content": "{\n\t\"train_batch_size\": \"auto\",\n\t\"gradient_accumulation_steps\": \"auto\",\n\t\"train_micro_batch_size_per_gpu\": \"auto\",\n\t\"fp16\": {\n\t  \"enabled\": \"auto\",\n\t  \"min_loss_scale\": 1,\n\t  \"loss_scale_window\": 1000,\n\t  \"hysteresis\": 2,\n\t  \"initial_scale_power\": 32\n\t},\n\t\"bf16\": {\n\t\t\"enabled\": \"auto\"\n\t},\n\t\"gradient_clipping\": 1.0,\n\t\"zero_optimization\": {\n\t  \"stage\": 2,\n\t  \"offload_param\": {\n\t\t\"device\": \"none\"\n\t  },\n\t  \"offload_optimizer\": {\n\t\t\"device\": \"none\"\n\t  },\n\t  \"allgather_partitions\": true,\n\t  \"allgather_bucket_size\": 5e8,\n\t  \"contiguous_gradients\": true\n\t},\n\t\"optimizer\": {\n\t\t\"type\": \"AdamW\",\n\t\t\"params\": {\n\t\t  \"lr\": \"auto\",\n\t\t  \"betas\": [\n\t\t\t0.9,\n\t\t\t0.999\n\t\t  ],\n\t\t  \"eps\": 1e-08\n\t\t}\n\t  },\n\t  \"scheduler\": {\n\t\t\"type\": \"WarmupLR\",\n\t\t\"params\": {\n\t\t  \"warmup_min_lr\": 0,\n\t\t  \"warmup_max_lr\": \"auto\",\n\t\t  \"warmup_num_steps\": \"auto\",\n\t\t  \"warmup_type\": \"linear\"\n\t\t}\n\t  }\n}"
  },
  {
    "path": "gpt4all-training/configs/eval/generate_baseline.yaml",
    "content": "# model/tokenizer\nmodel_name: \"zpn/llama-7b\"\ntokenizer_name: \"zpn/llama-7b\"\nlora: true\nlora_path: \"tloen/alpaca-lora-7b\""
  },
  {
    "path": "gpt4all-training/configs/eval/generate_gpt4all_gptj.yaml",
    "content": "# model/tokenizer\nmodel_name: \"nomic-ai/gpt4all-warmup-lr-epoch_0\"\ntokenizer_name: \"EleutherAI/gpt-j-6b\"\nlora: false\n"
  },
  {
    "path": "gpt4all-training/configs/eval/generate_gpt4all_gptj_lora.yaml",
    "content": "# model/tokenizer\nmodel_name: \"EleutherAI/gpt-j-6b\"\ntokenizer_name: \"EleutherAI/gpt-j-6B\"\nlora: true\nlora_path: \"nomic-ai/gpt4all-gptj-lora-epoch_1\"\n"
  },
  {
    "path": "gpt4all-training/configs/eval/generate_gpt4all_llama_lora.yaml",
    "content": "# model/tokenizer\nmodel_name: \"zpn/llama-7b\"\ntokenizer_name: \"zpn/llama-7b\"\nlora: true\nlora_path: \"nomic-ai/gpt4all-lora\"\n"
  },
  {
    "path": "gpt4all-training/configs/generate/generate.yaml",
    "content": "# model/tokenizer\nmodel_name: \"zpn/llama-7b\"\ntokenizer_name: \"zpn/llama-7b\"\nlora: true\nlora_path: \"nomic-ai/gpt4all-lora\"\n\nmax_new_tokens: 512\ntemperature: 0\nprompt: null\n"
  },
  {
    "path": "gpt4all-training/configs/generate/generate_gptj.yaml",
    "content": "# model/tokenizer\nmodel_name: \"nomic-ai/gpt4all-warmup-lr-epoch_1\"\ntokenizer_name: \"EleutherAI/gpt-j-6b\"\nlora: false\n\n\nmax_new_tokens: 512\ntemperature: 0.001\nprompt: | \n  #this code prints a string reversed\n  my_string = \"hello how are you\"\n  print(len(my_string))\n\n\n  My code above does not work. Can you help me?\n"
  },
  {
    "path": "gpt4all-training/configs/generate/generate_gptj_lora.yaml",
    "content": "# model/tokenizer\nmodel_name: \"EleutherAI/gpt-j-6b\"\ntokenizer_name: \"EleutherAI/gpt-j-6b\"\nlora: true\nlora_path: \"nomic-ai/gpt4all-gptj-lora-epoch_0\"\n\nmax_new_tokens: 512\ntemperature: 0\nprompt: | \n  #this code prints a string reversed\n  my_string = \"hello how are you\"\n  print(len(my_string))\n\n\n  My code above does not work. Can you help me?"
  },
  {
    "path": "gpt4all-training/configs/generate/generate_llama.yaml",
    "content": "# model/tokenizer\nmodel_name: # REPLACE WITH LLAMA MODEL NAME\ntokenizer_name: # REPLACE WITH LLAMA MODEL NAME\n\n\nmax_new_tokens: 512\ntemperature: 0.001\nprompt: | \n  #this code prints a string reversed\n  my_string = \"hello how are you\"\n  print(len(my_string))\n\n\n  My code above does not work. Can you help me?\n"
  },
  {
    "path": "gpt4all-training/configs/inference/gptj.yaml",
    "content": "# model/tokenizer\nmodel_name: \"nomic-ai/gpt4all-warmup-lr-epoch_1\"\ntokenizer_name: \"EleutherAI/gpt-j-6B\"\n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: \"nomic-ai/turbo-500k-multi\" \nmax_length: 1024\nbatch_size: 32 \n\n# logging\nseed: 42\n\n"
  },
  {
    "path": "gpt4all-training/configs/train/finetune.yaml",
    "content": "# model/tokenizer\nmodel_name: # add model here\ntokenizer_name: # add model here\ngradient_checkpointing: true\nsave_name: # CHANGE \n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: # update\nmax_length: 1024\nbatch_size: 32\n\n# train dynamics\nlr: 5.0e-5\neval_every: 800\neval_steps: 100\nsave_every: 800\noutput_dir: # CHANGE\ncheckpoint: null\nlora: false\nwarmup_steps: 100\nnum_epochs: 2\n\n# logging\nwandb: true\nwandb_entity: # update\nwandb_project_name: # update\nseed: 42\n\n"
  },
  {
    "path": "gpt4all-training/configs/train/finetune_falcon.yaml",
    "content": "# model/tokenizer\nmodel_name: \"tiiuae/falcon-7b\"\ntokenizer_name: \"tiiuae/falcon-7b\"\ngradient_checkpointing: true\nsave_name: \"nomic-ai/gpt4all-falcon\"\n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: \"nomic-ai/gpt4all-j-prompt-generations\"\nrevision: \"v1.3-groovy\"\nmax_length: 1024\nbatch_size: 32\n\n# train dynamics\nlr: 2.0e-5\nmin_lr: 0 \nweight_decay: 0.0\neval_every: 500\neval_steps: 105\nsave_every: 1000\nlog_grads_every: 500\noutput_dir: \"ckpts/falcon\"\ncheckpoint: \"/home/paperspace/gpt4all/ckpts/mpt/step_1000\"\nlora: false\nwarmup_steps: 500\nnum_epochs: 2 \n\n# logging\nwandb: true\nwandb_entity: \"gpt4all\"\nwandb_project_name: \"gpt4all\"\nseed: 42\n\n"
  },
  {
    "path": "gpt4all-training/configs/train/finetune_gptj.yaml",
    "content": "# model/tokenizer\nmodel_name: \"EleutherAI/gpt-j-6B\"\ntokenizer_name: \"EleutherAI/gpt-j-6B\"\ngradient_checkpointing: true\nsave_name: # CHANGE\n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: # CHANGE\nmax_length: 1024\nbatch_size: 32\n\n# train dynamics\nlr: 2.0e-5\nmin_lr: 0 \nweight_decay: 0.0\neval_every: 500\neval_steps: 105\nsave_every: 500\nlog_grads_every: 100\noutput_dir: # CHANGE\ncheckpoint: null\nlora: false\nwarmup_steps: 500\nnum_epochs: 2 \n\n# logging\nwandb: true\nwandb_entity: # CHANGE\nwandb_project_name: # CHANGE\nseed: 42\n\n"
  },
  {
    "path": "gpt4all-training/configs/train/finetune_gptj_lora.yaml",
    "content": "# model/tokenizer\nmodel_name: \"EleutherAI/gpt-j-6b\"\ntokenizer_name: \"EleutherAI/gpt-j-6b\"\ngradient_checkpointing: false\nsave_name: # CHANGE\n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: # CHANGE\nmax_length: 1024\nbatch_size: 1 \n\n# train dynamics\nlr: 2.0e-5\nmin_lr: 0 \nweight_decay: 0.0\neval_every: 500\neval_steps: 105\nsave_every: 500\nlog_grads_every: 500\noutput_dir: # CHANGE\ncheckpoint: null\nlora: true\nwarmup_steps: 500\nnum_epochs: 2 \n\n# logging\nwandb: true\nwandb_entity: # CHANGE\nwandb_project_name: # CHANGE\nseed: 42\n\n"
  },
  {
    "path": "gpt4all-training/configs/train/finetune_lora.yaml",
    "content": "# model/tokenizer\nmodel_name: # update\ntokenizer_name: # update\ngradient_checkpointing: false\nsave_name: # CHANGE\n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: # CHANGE\nmax_length: 1024\nbatch_size: 4\n\n# train dynamics\nlr: 5.0e-5\nmin_lr: 0\nweight_decay: 0.0\neval_every: 2000\neval_steps: 100\nsave_every: 2000\noutput_dir: # CHANGE\ncheckpoint: null\nlora: true\nwarmup_steps: 100\nnum_epochs: 2\n\n# logging\nwandb: true\nwandb_entity: # update\nwandb_project_name: # update\nseed: 42\n"
  },
  {
    "path": "gpt4all-training/configs/train/finetune_mpt.yaml",
    "content": "# model/tokenizer\nmodel_name: \"mosaicml/mpt-7b\"\ntokenizer_name: \"mosaicml/mpt-7b\"\ngradient_checkpointing: false\nsave_name: \"nomic-ai/mpt-finetuned-round2\"\n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: \"nomic-ai/gpt4all-j-prompt-generations\"\nrevision: \"v1.3-groovy\"\nmax_length: 1024\nbatch_size: 8\n\n# train dynamics\nlr: 2.0e-5\nmin_lr: 0 \nweight_decay: 0.0\neval_every: 500\neval_steps: 105\nsave_every: 1000\nlog_grads_every: 500\noutput_dir: \"ckpts/mpt\"\ncheckpoint: null\nlora: false\nwarmup_steps: 500\nnum_epochs: 2 \n\n# logging\nwandb: false\nwandb_entity: \"gpt4all\"\nwandb_project_name: \"gpt4all\"\nseed: 42\n\n"
  },
  {
    "path": "gpt4all-training/configs/train/finetune_openllama.yaml",
    "content": "# model/tokenizer\nmodel_name: \"openlm-research/open_llama_7b\"\ntokenizer_name: \"openlm-research/open_llama_7b\"\ngradient_checkpointing: true\nsave_name: \"nomic-ai/gpt4all-openllama\"\n\n# dataset\nstreaming: false\nnum_proc: 64\ndataset_path: \"nomic-ai/gpt4all-updated\"\nrevision: null\nmax_length: 1024\nbatch_size: 32\n\n# train dynamics\nlr: 2.0e-5\nmin_lr: 0 \nweight_decay: 0.0\neval_every: 500\nlog_every: 10\nsave_every: 1000\nlog_grads_every: 500\noutput_dir: \"ckpts/falcon\"\ncheckpoint: null\nlora: false\nwarmup_steps: 500\nnum_epochs: 3 \n\n# logging\nwandb: true\nwandb_entity: \"gpt4all\"\nwandb_project_name: \"gpt4all\"\nseed: 42\n\n"
  },
  {
    "path": "gpt4all-training/create_hostname.sh",
    "content": "#!/bin/bash\n\nexport WORKER_IP=$1\nN_GPUS=8\n# create dir if doesn't exist\nsudo mkdir -p /job\nprintf \"localhost slots=$N_GPUS\\n$WORKER_IP slots=$N_GPUS\" | sudo tee /job/hostfile\necho /job/hostfile"
  },
  {
    "path": "gpt4all-training/data.py",
    "content": "import glob\nimport torch\nfrom datasets import load_dataset, concatenate_datasets\nimport os\nfrom torch.utils.data import DataLoader\nfrom transformers import DefaultDataCollator\n\n\n\ndef tokenize_inputs(config, tokenizer, examples):\n    max_length = config[\"max_length\"]\n\n    # hacky backward compatible\n    different_eos = tokenizer.eos_token != \"</s>\"\n    out = {\"labels\": [], \"input_ids\": [], \"attention_mask\": []}\n    for prompt, response in zip(examples[\"prompt\"], examples[\"response\"]):\n        if different_eos:\n            if response.count(\"</s> \\n\") > 0:\n                response = response.replace(\"</s> \\n\", f\"{tokenizer.eos_token} \\n\") \n\n        prompt_len = len(tokenizer(prompt + \"\\n\", return_tensors=\"pt\")[\"input_ids\"][0])\n\n        # hack if our prompt is super long\n        # we need to include some labels so we arbitrarily trunacate at max_length // 2\n        # if the length is too long\n        if prompt_len >= max_length // 2:\n            # if prompt is too long, truncate\n            # but make sure to truncate to at max 1024 tokens\n            new_len = min(max_length // 2, len(prompt) // 2)\n            prompt = prompt[:new_len]\n            # get new prompt length\n            prompt_len = tokenizer(prompt + \"\\n\", return_tensors=\"pt\", max_length=max_length // 2, truncation=True).input_ids.ne(tokenizer.pad_token_id).sum().item()\n\n        assert prompt_len <= max_length // 2, f\"prompt length {prompt_len} exceeds max length {max_length}\"\n\n        input_tokens = tokenizer(prompt + \"\\n\" + response + tokenizer.eos_token,\n                                 truncation=True, max_length=max_length, return_tensors=\"pt\")[\"input_ids\"].squeeze()\n\n        labels = input_tokens.clone()\n        labels[:prompt_len] = -100\n        if len(labels) < max_length:\n            # pad to max_length with -100\n            labels = torch.cat([labels, torch.full((max_length - len(labels),), 
-100)])\n\n        assert (labels == -100).sum() < len(labels), f\"Labels are all -100, something wrong. prompt length {prompt_len} exceeds max length {max_length}\" \n        \n        if (labels == -100).sum() == len(labels) - 1:\n            print(prompt)\n            print(response)\n            raise\n\n        padded = tokenizer.pad({\"input_ids\": input_tokens}, padding=\"max_length\", max_length=max_length, return_tensors=\"pt\")\n        out[\"labels\"].append(labels)\n        out[\"input_ids\"].append(padded[\"input_ids\"])\n        out[\"attention_mask\"].append(padded[\"attention_mask\"])\n\n    out = {k: torch.stack(v) if isinstance(v, list) else v for k, v in out.items()}\n\n    return out\n\n\ndef load_data(config, tokenizer):\n    dataset_path = config[\"dataset_path\"]\n\n    if os.path.exists(dataset_path):\n        if os.path.isdir(dataset_path):\n            files = glob.glob(os.path.join(dataset_path, \"*_clean.jsonl\"))\n        else:\n            files = [dataset_path]\n\n        print(f\"Reading files {files}\")\n\n        dataset = load_dataset(\"json\", data_files=files, split=\"train\")\n\n    else:\n        dataset = load_dataset(dataset_path, split=\"train\", revision=config[\"revision\"] if \"revision\" in config else None)\n\n    dataset = dataset.train_test_split(test_size=.05, seed=config[\"seed\"])\n\n    train_dataset, val_dataset = dataset[\"train\"], dataset[\"test\"]\n\n    if config[\"streaming\"] is False:\n        kwargs = {\"num_proc\": config[\"num_proc\"]}\n    else:\n        kwargs = {}\n\n    cols_to_keep = [\"input_ids\", \"labels\", \"attention_mask\"]\n    # tokenize inputs and return labels and attention mask\n    train_dataset = train_dataset.map(\n        lambda ele: tokenize_inputs(config, tokenizer, ele),\n        batched=True,\n        **kwargs\n    )\n    remove_cols = [col for col in train_dataset.column_names if col not in cols_to_keep]\n    train_dataset = train_dataset.remove_columns(remove_cols)\n\n    
val_dataset = val_dataset.map(\n        lambda ele: tokenize_inputs(config, tokenizer, ele),\n        batched=True,\n        **kwargs\n    )\n    remove_cols = [col for col in val_dataset.column_names if col not in cols_to_keep]\n    val_dataset = val_dataset.remove_columns(remove_cols)\n\n    train_dataset = train_dataset.with_format(\"torch\")\n    val_dataset = val_dataset.with_format(\"torch\")\n\n    # create dataloader with default data collator since we already have labels\n\n    train_dataloader = DataLoader(\n        train_dataset,\n        collate_fn=DefaultDataCollator(),\n        batch_size=config[\"batch_size\"],\n        shuffle=True,\n    )\n\n    val_dataloader = DataLoader(\n        val_dataset,\n        collate_fn=DefaultDataCollator(),\n        batch_size=config[\"batch_size\"],\n        shuffle=True,\n    )\n\n    return train_dataloader, val_dataloader\n\n    \ndef load_data_for_inference(config, tokenizer):\n    dataset_path = config[\"dataset_path\"]\n\n    if os.path.exists(dataset_path):\n        # check if path is a directory\n        if os.path.isdir(dataset_path):\n            files = glob.glob(os.path.join(dataset_path, \"*_clean.jsonl\"))\n        else:\n            files = [dataset_path]\n\n        print(f\"Reading files {files}\")\n\n        dataset = load_dataset(\"json\", data_files=files, split=\"train\")\n\n    else:\n        dataset = load_dataset(dataset_path, split=\"train\")\n\n    dataset = dataset.train_test_split(test_size=.05, seed=config[\"seed\"])\n\n    train_dataset, val_dataset = dataset[\"train\"], dataset[\"test\"]\n\n    train_dataset = train_dataset.add_column(\"index\", list(range(len(train_dataset))))\n    # select first N batches that are divisible by batch_size\n    # gather is a bit annoying (or the way I'm using it) to get uneven batches as it duplicates data\n    train_dataset = train_dataset.select(range((len(train_dataset) // config[\"batch_size\"]) * config[\"batch_size\"]))\n    val_dataset = 
val_dataset.add_column(\"index\", list(range(len(val_dataset))))\n    val_dataset = val_dataset.select(range((len(val_dataset) // config[\"batch_size\"]) * config[\"batch_size\"]))\n\n    if config[\"streaming\"] is False:\n        kwargs = {\"num_proc\": config[\"num_proc\"]}\n    else:\n        kwargs = {}\n\n    # tokenize inputs and return labels and attention mask\n    train_dataset = train_dataset.map(\n        lambda ele: tokenize_inputs(config, tokenizer, ele),\n        batched=True,\n        **kwargs\n    )\n    val_dataset = val_dataset.map(\n        lambda ele: tokenize_inputs(config, tokenizer, ele), \n        batched=True,\n        **kwargs\n    )\n    train_dataset = train_dataset.with_format(\"torch\")\n    val_dataset = val_dataset.with_format(\"torch\")\n\n    return train_dataset, val_dataset\n"
  },
  {
    "path": "gpt4all-training/env.yaml",
    "content": "name: vicuna\nchannels:\n  - conda-forge\n  - pytorch\n  - nvidia\n  - huggingface\ndependencies:\n  - python=3.8\n  - accelerate\n  - datasets\n  - torchmetrics\n  - evaluate\n  - transformers\n  - wandb\n  - jsonlines\n  - pip:\n    - peft\n    - nodelist-inflator\n    - deepspeed\n    - sentencepiece"
  },
  {
    "path": "gpt4all-training/eval_figures.py",
    "content": "#!/usr/bin/env python3\nimport glob\nimport pickle\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nplt.figure()\nfor fpath in glob.glob('./eval_data/*.pkl'):\n    parts = fpath.split('__')\n    model_name = \"-\".join(fpath.replace(\".pkl\", \"\").split(\"_\")[2:])\n    with open(fpath, 'rb') as f:\n        data = pickle.load(f)\n        perplexities = data['perplexities']\n        perplexities = np.nan_to_num(perplexities, 100)\n        perplexities = np.clip(perplexities, 0, 100)\n        if 'alpaca' not in fpath:\n            identifier = model_name = \"-\".join(fpath.replace(\".pkl\", \"\").split(\"eval__model-\")[1:]) \n            label = 'GPT4all-'\n            label += identifier\n            \n        else:\n            label = 'alpaca-lora'\n        plt.hist(perplexities, label=label, alpha=.5, bins=50)\n\nplt.xlabel('Perplexity')\nplt.ylabel('Frequency')\nplt.legend()\nplt.savefig('figs/perplexity_hist.png')\n\n"
  },
  {
    "path": "gpt4all-training/eval_self_instruct.py",
    "content": "#!/usr/bin/env python3\nimport json\nimport torch\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\nfrom read import read_config\nfrom argparse import ArgumentParser\nfrom peft import PeftModelForCausalLM\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\n'''\nEvaluates perplexity on the outputs of:\nhttps://github.com/yizhongw/self-instruct/blob/main/human_eval/user_oriented_instructions.jsonl\n'''\n\ndef read_jsonl_file(file_path):\n    data = []\n    with open(file_path, 'r', encoding='utf-8') as file:\n        for line in file:\n            json_object = json.loads(line.strip())\n            data.append(json_object)\n    return data\n\ndef setup_model(config):\n    model = AutoModelForCausalLM.from_pretrained(config[\"model_name\"], device_map=\"auto\", torch_dtype=torch.float16, output_hidden_states=True)\n    tokenizer = AutoTokenizer.from_pretrained(config[\"tokenizer_name\"])\n    added_tokens = tokenizer.add_special_tokens({\"bos_token\": \"<s>\", \"eos_token\": \"</s>\", \"pad_token\": \"<pad>\"})\n\n    if added_tokens > 0:\n        model.resize_token_embeddings(len(tokenizer))\n\n    if 'lora' in config and config['lora']:\n        model = PeftModelForCausalLM.from_pretrained(model, config[\"lora_path\"], device_map=\"auto\", torch_dtype=torch.float16, return_hidden_states=True)\n        model.to(dtype=torch.float16)\n\n    print(f\"Mem needed: {model.get_memory_footprint() / 1024 / 1024 / 1024:.2f} GB\")\n        \n    return model, tokenizer\n\n\n\n\ndef eval_example(model, tokenizer, example, config):\n\n    prompt = example['instruction'] + ' ' + example['instances'][0]['input']\n    gt = prompt + ' ' + example['instances'][0]['output']\n\n    #decode several continuations and compute their page trajectories\n    input = tokenizer(prompt, return_tensors=\"pt\")\n    input = {k: v.to(model.device) for k, v in input.items()}\n\n    #compute the ground truth perplexity\n    gt_input = tokenizer(gt, 
return_tensors=\"pt\")\n    gt_input = {k: v.to(model.device) for k, v in gt_input.items()}\n\n    nlls = []\n    prev_end_loc = 0\n    stride = 512\n    seq_len = gt_input['input_ids'].size(1)\n\n    for begin_loc in tqdm(range(input['input_ids'].size(1), gt_input['input_ids'].size(1), stride)):\n        end_loc = min(begin_loc + stride, seq_len)\n        trg_len = end_loc - prev_end_loc  # may be different from stride on last loop\n        input_ids = gt_input['input_ids'][:, begin_loc:end_loc].to(model.device)\n        target_ids = input_ids.clone()\n        target_ids[:, :-trg_len] = -100\n\n        with torch.no_grad():\n            outputs = model(input_ids, labels=target_ids)\n            neg_log_likelihood = outputs.loss * trg_len\n\n        nlls.append(neg_log_likelihood)\n        prev_end_loc = end_loc\n        if end_loc == seq_len:\n            break\n\n    ppl = torch.exp(torch.stack(nlls).sum() / end_loc).item()\n    print('ppl: ', ppl)\n\n    print(prompt)\n    print(80*'-')\n   \n\n    return ppl\n\ndef do_eval(config):\n    eval_data = read_jsonl_file('eval_data/user_oriented_instructions.jsonl')\n    model, tokenizer = setup_model(config)\n    all_perplexities = []\n    for example in tqdm(eval_data):\n        gt_perplexity = eval_example(model, tokenizer, example, config)\n        all_perplexities.append(gt_perplexity)\n\n        \n    name = f\"eval_data/eval__model-{config['model_name'].replace('/', '_')}{'__lora-' + config['lora_path'].replace('/', '_') if config['lora'] else ''}.pkl\"\n\n    with open(name, 'wb') as f:\n        r = {'perplexities': all_perplexities}\n        pickle.dump(r, f)\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument(\"--config\", type=str, required=True)\n    args = parser.parse_args()\n\n    config = read_config(args.config)\n    do_eval(config)\n"
  },
  {
    "path": "gpt4all-training/generate.py",
    "content": "#!/usr/bin/env python3\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nfrom peft import PeftModelForCausalLM\nfrom read import read_config\nfrom argparse import ArgumentParser\nimport torch\nimport time\n\n\ndef generate(tokenizer, prompt, model, config):\n    input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids.to(model.device)\n\n    outputs = model.generate(input_ids=input_ids, max_new_tokens=config[\"max_new_tokens\"], temperature=config[\"temperature\"])\n\n    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()\n\n    return decoded[len(prompt):]\n\n    \ndef setup_model(config):\n    model = AutoModelForCausalLM.from_pretrained(config[\"model_name\"], device_map=\"auto\", torch_dtype=torch.float16)\n    tokenizer = AutoTokenizer.from_pretrained(config[\"tokenizer_name\"])\n    added_tokens = tokenizer.add_special_tokens({\"bos_token\": \"<s>\", \"eos_token\": \"</s>\", \"pad_token\": \"<pad>\"})\n\n    if added_tokens > 0:\n        model.resize_token_embeddings(len(tokenizer))\n\n    if config[\"lora\"]:\n        model = PeftModelForCausalLM.from_pretrained(model, config[\"lora_path\"], device_map=\"auto\", torch_dtype=torch.float16)\n        model.to(dtype=torch.float16)\n\n    print(f\"Mem needed: {model.get_memory_footprint() / 1024 / 1024 / 1024:.2f} GB\")\n        \n    return model, tokenizer\n\n    \n    \nif __name__ == \"__main__\":\n    parser = ArgumentParser()\n    parser.add_argument(\"--config\", type=str, required=True)\n    parser.add_argument(\"--prompt\", type=str)\n\n    args = parser.parse_args()\n\n    config = read_config(args.config)\n\n    if config[\"prompt\"] is None and args.prompt is None:\n        raise ValueError(\"Prompt is required either in config or as argument\")\n\n    prompt = config[\"prompt\"] if args.prompt is None else args.prompt\n\n    print(\"Setting up model\")\n    model, tokenizer = setup_model(config)\n\n    print(\"Generating\")\n    start = 
time.time()\n    generation = generate(tokenizer, prompt, model, config)\n    print(f\"Done in {time.time() - start:.2f}s\")\n    print(generation)\n"
  },
  {
    "path": "gpt4all-training/inference.py",
    "content": "#!/usr/bin/env python3\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\nimport torch.nn as nn\nfrom argparse import ArgumentParser\nfrom read import read_config\nfrom accelerate.utils import  set_seed\nfrom data import load_data_for_inference\nfrom tqdm import tqdm\nfrom datasets import  Dataset\nimport torch.distributed as dist\nfrom transformers.trainer_pt_utils import  nested_numpify\nfrom transformers import DefaultDataCollator\nfrom torch.utils.data import DataLoader, DistributedSampler\nimport numpy as np\nimport pyarrow as pa\nfrom pyarrow import compute as pc\n\n\ndef calc_cross_entropy_no_reduction(lm_logits, labels):\n    # calculate cross entropy across batch dim\n    shift_logits = lm_logits[..., :-1, :].contiguous()\n    shift_labels = labels[..., 1:].contiguous()\n    # Flatten the tokens\n    loss_fct = nn.CrossEntropyLoss(reduction='none')\n    loss = loss_fct(shift_logits.permute(0, 2, 1), shift_labels).mean(dim=1)\n\n    return loss\n\n\ndef rank0_print(msg):\n    if dist.get_rank() == 0:\n        print(msg)\n        \n\ndef inference(config):\n    set_seed(config['seed'])\n\n    rank0_print(f\"World size: {dist.get_world_size()}\")\n\n    tokenizer = AutoTokenizer.from_pretrained(config['tokenizer_name'], model_max_length=config['max_length'])\n    # llama has no pad token, set it to new token\n    if tokenizer.pad_token is None:\n        tokenizer.pad_token = tokenizer.eos_token\n\n        \n    train_dataset, val_dataset = load_data_for_inference(config, tokenizer) \n\n    num_processes = dist.get_world_size()\n    local_rank = dist.get_rank()\n\n    train_sampler = DistributedSampler(train_dataset, shuffle=False, drop_last=True, num_replicas=num_processes, rank=local_rank)\n    train_dataloader = DataLoader(\n        train_dataset,\n        collate_fn=DefaultDataCollator(),\n        batch_size=config[\"batch_size\"],\n        sampler=train_sampler,\n        drop_last=True\n    )\n\n    val_sampler = 
DistributedSampler(val_dataset, shuffle=False, drop_last=True, num_replicas=num_processes, rank=local_rank)\n    val_dataloader = DataLoader(\n        val_dataset,\n        collate_fn=DefaultDataCollator(),\n        batch_size=config[\"batch_size\"],\n        sampler=val_sampler,\n        drop_last=True\n    )\n\n\n    model = AutoModelForCausalLM.from_pretrained(config[\"model_name\"], \n                                                    trust_remote_code=True,\n                                                    torch_dtype=torch.bfloat16,\n                                                    ) \n    model.to(f\"cuda:{local_rank}\")\n\n    with torch.no_grad():\n        train_outputs = {\"loss\": [], \"embeddings\": [], \"index\": []}\n        for batch in tqdm(train_dataloader, disable=local_rank != 0):\n            batch[\"input_ids\"] = batch[\"input_ids\"].to(f\"cuda:{local_rank}\")\n            batch[\"labels\"] = batch[\"labels\"].to(f\"cuda:{local_rank}\")\n            outputs = model(input_ids=batch[\"input_ids\"], labels=batch[\"labels\"], output_hidden_states=True)\n            loss = calc_cross_entropy_no_reduction(outputs.logits, batch[\"labels\"])\n            train_outputs[\"loss\"].extend(loss)\n\n            embeddings = outputs.hidden_states[-1]\n            batch_size = batch[\"input_ids\"].shape[0]\n            sequence_lengths = []\n            # since we use mutiturn with multiple <|endoftext|>, we need to find the place where \n            # <|endoftext|> is repeated\n            for item in batch[\"input_ids\"]:\n                indices = torch.where(item == tokenizer.pad_token_id)[0]\n                found = False\n                for index in indices:\n                    # case where sequence is less than max length\n                    if torch.all(item[index:] == tokenizer.pad_token_id):\n                        sequence_lengths.append(index)\n                        found = True\n                        break\n                # case 
where sequence is >= max length\n                if not found:\n                    sequence_lengths.append(len(item) - 1)\n\n            sequence_lengths = torch.tensor(sequence_lengths)\n            pooled_logits = embeddings[torch.arange(batch_size, device=embeddings.device), sequence_lengths]\n\n            train_outputs[\"embeddings\"].append(pooled_logits)\n            train_outputs[\"index\"].extend(batch[\"index\"].to(model.device))\n\n            torch.cuda.empty_cache()\n\n        train_outputs = nested_numpify(train_outputs)\n        # stack since they're 0-dim arrays\n        train_outputs[\"index\"] = np.stack(train_outputs[\"index\"])\n        train_outputs[\"loss\"] = np.stack(train_outputs[\"loss\"])\n        train_outputs[\"embeddings\"] = np.concatenate(train_outputs[\"embeddings\"])\n\n        df_train = Dataset.from_dict(train_outputs)\n        curr_idx = df_train[\"index\"]\n\n        # compute mask in pyarrow since it's super fast\n        # ty @bmschmidt for showing me this!\n        table = train_dataset.data\n        mask = pc.is_in(table['index'], value_set=pa.array(curr_idx, pa.int32()))\n        filtered_table = table.filter(mask)\n        # convert from pyarrow to Dataset\n        filtered_train = Dataset.from_dict(filtered_table.to_pydict())\n\n        filtered_train = filtered_train.add_column(\"embeddings\", df_train[\"embeddings\"])\n        filtered_train = filtered_train.add_column(\"loss\", df_train[\"loss\"])\n        filtered_train = filtered_train.add_column(\"is_train\", [True] * len(filtered_train))\n\n        filtered_train.to_json(f\"inference/epoch_2_embeddings_train_shard_{local_rank}.jsonl\", lines=True, orient=\"records\", num_proc=64)\n\n        val_outputs = {\"loss\": [], \"embeddings\": [], \"index\": []}\n        for batch in tqdm(val_dataloader, disable=local_rank != 0):\n            batch[\"input_ids\"] = batch[\"input_ids\"].to(f\"cuda:{local_rank}\")\n            batch[\"labels\"] = 
batch[\"labels\"].to(f\"cuda:{local_rank}\")\n            outputs = model(input_ids=batch[\"input_ids\"], labels=batch[\"labels\"], output_hidden_states=True)\n            loss = calc_cross_entropy_no_reduction(outputs.logits, batch[\"labels\"])\n            val_outputs[\"loss\"].extend(loss)\n\n            embeddings = outputs.hidden_states[-1]\n            batch_size = batch[\"input_ids\"].shape[0]\n            sequence_lengths = []\n            # since we use mutiturn with multiple <|endoftext|>, we need to find the place where \n            # <|endoftext|> is repeated\n            for item in batch[\"input_ids\"]:\n                indices = torch.where(item == tokenizer.pad_token_id)[0]\n                found = False\n                for index in indices:\n                    # case where sequence is less than max length\n                    if torch.all(item[index:] == tokenizer.pad_token_id):\n                        sequence_lengths.append(index)\n                        found = True\n                        break\n                # case where sequence is >= max length\n                if not found:\n                    sequence_lengths.append(len(item) - 1)\n\n            sequence_lengths = torch.tensor(sequence_lengths)\n            pooled_logits = embeddings[torch.arange(batch_size, device=embeddings.device), sequence_lengths]\n\n            val_outputs[\"embeddings\"].append(pooled_logits)\n            val_outputs[\"index\"].extend(batch[\"index\"].to(model.device))\n\n            torch.cuda.empty_cache()\n\n        val_outputs = nested_numpify(val_outputs)\n        val_outputs[\"index\"] = np.stack(val_outputs[\"index\"])\n        val_outputs[\"loss\"] = np.stack(val_outputs[\"loss\"])\n        val_outputs[\"embeddings\"] = np.concatenate(val_outputs[\"embeddings\"])\n\n        df_val = Dataset.from_dict(val_outputs)\n        curr_idx = df_val[\"index\"]\n\n        # compute mask in pyarrow since it's super fast\n        # ty @bmschmidt for showing me 
this!\n        table = val_dataset.data\n        mask = pc.is_in(table['index'], value_set=pa.array(curr_idx, pa.int32()))\n        filtered_table = table.filter(mask)\n        # convert from pyarrow to Dataset\n        filtered_val = Dataset.from_dict(filtered_table.to_pydict())\n        filtered_val = filtered_val.add_column(\"embeddings\", df_val[\"embeddings\"])\n        filtered_val = filtered_val.add_column(\"loss\", df_val[\"loss\"])\n        filtered_val = filtered_val.add_column(\"is_train\", [False] * len(filtered_val))\n\n        filtered_val.to_json(f\"inference/epoch_2_embeddings_val_shard_{local_rank}.jsonl\", lines=True, orient=\"records\", num_proc=64)\n    \n\ndef main():\n    dist.init_process_group(\"nccl\")\n    parser = ArgumentParser()\n    parser.add_argument(\"--config\", type=str, default=\"config.yaml\")\n\n    args = parser.parse_args()\n    config = read_config(args.config)\n\n    inference(config)\n\n\nif __name__ == \"__main__\":\n    # parse arguments by reading in a config\n    main()\n    \n"
  },
  {
    "path": "gpt4all-training/launcher.sh",
    "content": "#!/bin/bash\n\n# Display header\necho \"==========================================================\"\necho \" ██████  ██████  ████████ ██   ██  █████  ██      ██      \"\necho \"██       ██   ██    ██    ██   ██ ██   ██ ██      ██      \"\necho \"██   ███ ██████     ██    ███████ ███████ ██      ██      \"\necho \"██    ██ ██         ██         ██ ██   ██ ██      ██      \"\necho \" ██████  ██         ██         ██ ██   ██ ███████ ███████ \"\necho \" └─> https://github.com/nomic-ai/gpt4all\"\n\n# Function to detect macOS architecture and set the binary filename\ndetect_mac_arch() {\n  local mac_arch\n  mac_arch=$(uname -m)\n  case \"$mac_arch\" in\n    arm64)\n      os_type=\"M1 Mac/OSX\"\n      binary_filename=\"gpt4all-lora-quantized-OSX-m1\"\n      ;;\n    x86_64)\n      os_type=\"Intel Mac/OSX\"\n      binary_filename=\"gpt4all-lora-quantized-OSX-intel\"\n      ;;\n    *)\n      echo \"Unknown macOS architecture\"\n      exit 1\n      ;;\n  esac\n}\n\n# Detect operating system and set the binary filename\ncase \"$(uname -s)\" in\n  Darwin*)\n    detect_mac_arch\n    ;;\n  Linux*)\n    if grep -q Microsoft /proc/version; then\n      os_type=\"Windows (WSL)\"\n      binary_filename=\"gpt4all-lora-quantized-win64.exe\"\n    else\n      os_type=\"Linux\"\n      binary_filename=\"gpt4all-lora-quantized-linux-x86\"\n    fi\n    ;;\n  CYGWIN*|MINGW32*|MSYS*|MINGW*)\n    os_type=\"Windows (Cygwin/MSYS/MINGW)\"\n    binary_filename=\"gpt4all-lora-quantized-win64.exe\"\n    ;;\n  *)\n    echo \"Unknown operating system\"\n    exit 1\n    ;;\nesac\necho \"================================\"\necho \"== You are using $os_type.\"\n\n\n# Change to the chat directory\ncd chat\n\n# List .bin files and prompt user to select one\nbin_files=(*.bin)\necho \"== Available .bin files:\"\nfor i in \"${!bin_files[@]}\"; do\n  echo \"   [$((i+1))] ${bin_files[i]}\"\ndone\n\n# Function to get user input and validate it\nget_valid_user_input() {\n  local 
input_valid=false\n\n  while ! $input_valid; do\n    echo \"==> Please enter a number:\"\n    read -r user_selection\n    if [[ $user_selection =~ ^[0-9]+$ ]] && (( user_selection >= 1 && user_selection <= ${#bin_files[@]} )); then\n      input_valid=true\n    else\n      echo \"Invalid input. Please enter a number between 1 and ${#bin_files[@]}.\"\n    fi\n  done\n}\n\nget_valid_user_input\nselected_bin_file=\"${bin_files[$((user_selection-1))]}\"\n\n# Run the selected .bin file with the appropriate command\n./\"$binary_filename\" -m \"$selected_bin_file\"\n"
  },
  {
    "path": "gpt4all-training/old-README.md",
    "content": "<h1 align=\"center\">GPT4All</h1>\n<p align=\"center\">Demo, data, and code to train open-source assistant-style large language model based on GPT-J and LLaMa</p>\n<p align=\"center\">\n<a href=\"https://static.nomic.ai/gpt4all/2023_GPT4All-J_Technical_Report_2.pdf\">:green_book: Technical Report 2: GPT4All-J </a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://s3.amazonaws.com/static.nomic.ai/gpt4all/2023_GPT4All_Technical_Report.pdf\">:green_book: Technical Report 1: GPT4All</a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://github.com/nomic-ai/pyllamacpp\">:snake: Official Python Bindings</a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://github.com/nomic-ai/gpt4all-ts\">:computer: Official Typescript Bindings</a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://github.com/nomic-ai/gpt4all-ui\">:speech_balloon: Official Web Chat Interface</a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://github.com/nomic-ai/gpt4all-chat\">:speech_balloon: Official Chat Interface</a>\n</p>\n\n<p align=\"center\">\n<a href=\"https://python.langchain.com/en/latest/modules/models/llms/integrations/gpt4all.html\">🦜️🔗 Official Langchain Backend</a> \n</p>\n\n\n<p align=\"center\">\n<a href=\"https://discord.gg/mGZE39AS3e\">Discord</a>\n</p>\n\n\n\n\n<p align=\"center\">\nGPT4All is made possible by our compute partner <a href=\"https://www.paperspace.com/\">Paperspace</a>.\n</p>\n\n\n\n## GPT4All-J: An Apache-2 Licensed GPT4All Model\n![gpt4all-j-demo](https://user-images.githubusercontent.com/13879686/231876409-e3de1934-93bb-4b4b-9013-b491a969ebbc.gif)\n\nRun on an M1 Mac (not sped up!)\n\n\n### GPT4All-J Chat UI Installers\nInstalls a native chat-client with auto-update functionality that runs on your desktop with the GPT4All-J model baked into 
it.\n\n[Mac/OSX](https://gpt4all.io/installers/gpt4all-installer-darwin.dmg)\n\n[Windows](https://gpt4all.io/installers/gpt4all-installer-win64.exe)\n\n[Ubuntu](https://gpt4all.io/installers/gpt4all-installer-linux.run)\n\nIf you have older hardware that only supports avx and not avx2 you can use these.\n\n[Mac/OSX - avx-only](https://gpt4all.io/installers/gpt4all-installer-darwin-avx-only.dmg)\n\n[Windows - avx-only](https://gpt4all.io/installers/gpt4all-installer-win64-avx-only.exe)\n\n[Ubuntu - avx-only](https://gpt4all.io/installers/gpt4all-installer-linux-avx-only.run)\n\nThese files are not yet cert signed by Windows/Apple so you will see security warnings on initial installation. We did not want to delay release while waiting for their process to complete.\n\nFind the most up-to-date information on the [GPT4All Website](https://gpt4all.io/)\n\n### Raw Model\n[ggml Model Download Link](https://gpt4all.io/models/ggml-gpt4all-j.bin)\n\nNote this model is only compatible with the C++ bindings found [here](https://github.com/nomic-ai/gpt4all-chat). It will not work with any existing llama.cpp bindings as we had to do a large fork of llama.cpp. GPT4All will support the ecosystem around this new C++ backend going forward.\n\nPython bindings are imminent and will be integrated into this [repository](https://github.com/nomic-ai/pyllamacpp). 
Stay tuned on the [GPT4All discord](https://discord.gg/mGZE39AS3e) for updates.\n\n## Training GPT4All-J\n\nPlease see [GPT4All-J Technical Report](https://static.nomic.ai/gpt4all/2023_GPT4All-J_Technical_Report_2.pdf) for details.\n\n### GPT4All-J Training Data\n\n- We are releasing the curated training data for anyone to replicate GPT4All-J here: [GPT4All-J Training Data](https://huggingface.co/datasets/nomic-ai/gpt4all-j-prompt-generations)\n   - [Atlas Map of Prompts](https://atlas.nomic.ai/map/gpt4all-j-prompts-curated)\n   - [Atlas Map of Responses](https://atlas.nomic.ai/map/gpt4all-j-response-curated)\n   \nWe have released updated versions of our `GPT4All-J` model and training data. \n\n- `v1.0`: The original model trained on the v1.0 dataset\n- `v1.1-breezy`: Trained on a filtered dataset where we removed all instances of AI language model\n- `v1.2-jazzy`: Trained on a filtered dataset where we also removed instances like I'm sorry, I can't answer... and AI language model\n\nThe [models](https://huggingface.co/nomic-ai/gpt4all-j) and [data](https://huggingface.co/datasets/nomic-ai/gpt4all-j-prompt-generations) versions can be specified by passing a `revision` argument.\n\nFor example, to load the `v1.2-jazzy` model and dataset, run:\n\n```python\nfrom datasets import load_dataset\nfrom transformers import AutoModelForCausalLM\n\ndataset = load_dataset(\"nomic-ai/gpt4all-j-prompt-generations\", revision=\"v1.2-jazzy\")\nmodel = AutoModelForCausalLM.from_pretrained(\"nomic-ai/gpt4all-j-prompt-generations\", revision=\"v1.2-jazzy\")\n```\n\n### GPT4All-J Training Instructions\n\n```bash\naccelerate launch --dynamo_backend=inductor --num_processes=8 --num_machines=1 --machine_rank=0 --deepspeed_multinode_launcher standard --mixed_precision=bf16  --use_deepspeed --deepspeed_config_file=configs/deepspeed/ds_config_gptj.json train.py --config configs/train/finetune_gptj.yaml\n```\n\n\n# Original GPT4All Model (based on GPL Licensed 
LLaMa)\n\n\n\n![gpt4all-lora-demo](https://user-images.githubusercontent.com/13879686/228352356-de66ca7a-df70-474e-b929-2e3656165051.gif)\n\nRun on M1 Mac (not sped up!)\n\n# Try it yourself\n\nHere's how to get started with the CPU quantized GPT4All model checkpoint:\n\n1. Download the `gpt4all-lora-quantized.bin` file from [Direct Link](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized.bin) or [[Torrent-Magnet]](https://tinyurl.com/gpt4all-lora-quantized).\n2. Clone this repository, navigate to `chat`, and place the downloaded file there.\n3. Run the appropriate command for your OS:\n   - M1 Mac/OSX: `cd chat;./gpt4all-lora-quantized-OSX-m1`\n   - Linux: `cd chat;./gpt4all-lora-quantized-linux-x86`\n   - Windows (PowerShell): `cd chat;./gpt4all-lora-quantized-win64.exe`\n   - Intel Mac/OSX: `cd chat;./gpt4all-lora-quantized-OSX-intel`\n\nFor custom hardware compilation, see our [llama.cpp](https://github.com/zanussbaum/gpt4all.cpp) fork.\n\n-----------\nFind all compatible models in the GPT4All Ecosystem section.\n\n[Secret Unfiltered Checkpoint](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-unfiltered-quantized.bin) - [[Torrent]](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-unfiltered-quantized.bin.torrent)\n\nThis model had all refusal to answer responses removed from training. 
Try it with:\n- M1 Mac/OSX: `cd chat;./gpt4all-lora-quantized-OSX-m1 -m gpt4all-lora-unfiltered-quantized.bin`\n- Linux: `cd chat;./gpt4all-lora-quantized-linux-x86 -m gpt4all-lora-unfiltered-quantized.bin`\n- Windows (PowerShell): `cd chat;./gpt4all-lora-quantized-win64.exe -m gpt4all-lora-unfiltered-quantized.bin`\n- Intel Mac/OSX: `cd chat;./gpt4all-lora-quantized-OSX-intel -m gpt4all-lora-unfiltered-quantized.bin`\n-----------\nNote: the full model on GPU (16GB of RAM required) performs much better in our qualitative evaluations.\n\n# Python Client\n## CPU Interface\nTo run GPT4All in python, see the new [official Python bindings](https://github.com/nomic-ai/pyllamacpp).\n\nThe old bindings are still available but now deprecated. They will not work in a notebook environment.\nTo get running using the python client with the CPU interface, first install the [nomic client](https://github.com/nomic-ai/nomic) using `pip install nomic`\nThen, you can use the following script to interact with GPT4All:\n```\nfrom nomic.gpt4all import GPT4All\nm = GPT4All()\nm.open()\nm.prompt('write me a story about a lonely computer')\n```\n\n## GPU Interface\nThere are two ways to get up and running with this model on GPU.\nThe setup here is slightly more involved than the CPU model.\n1. clone the nomic client [repo](https://github.com/nomic-ai/nomic) and run `pip install .[GPT4All]` in the home dir.\n2. 
run `pip install nomic` and install the additional deps from the wheels built [here](https://github.com/nomic-ai/nomic/tree/main/bin)\n\nOnce this is done, you can run the model on GPU with a script like the following:\n```\nfrom nomic.gpt4all import GPT4AllGPU\nm = GPT4AllGPU(LLAMA_PATH)\nconfig = {'num_beams': 2,\n          'min_new_tokens': 10,\n          'max_length': 100,\n          'repetition_penalty': 2.0}\nout = m.generate('write me a story about a lonely computer', config)\nprint(out)\n```\nWhere LLAMA_PATH is the path to a Huggingface Automodel compliant LLAMA model.\nNomic is unable to distribute this file at this time.\nWe are working on a GPT4All that does not have this limitation right now.\n\nYou can pass any of the [huggingface generation config params](https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig) in the config.\n\n# GPT4All Compatibility Ecosystem\nEdge models in the GPT4All Ecosystem. Please PR as the [community grows](https://huggingface.co/models?sort=modified&search=4bit).\nFeel free to convert this to a more structured table.\n\n- [gpt4all](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized.bin) [[MD5 Signature](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized.bin.md5)]\n   - [gpt4all-ggml-converted](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin) [[MD5 Signature](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-quantized-ggml.bin.md5)]\n- [gpt4all-unfiltered](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-unfiltered-quantized.bin) [[MD5 Signature](https://the-eye.eu/public/AI/models/nomic-ai/gpt4all/gpt4all-lora-unfiltered-quantized.bin.md5)]\n- [ggml-vicuna-7b-4bit](https://huggingface.co/eachadea/ggml-vicuna-7b-4bit)\n- [vicuna-13b-GPTQ-4bit-128g](https://huggingface.co/anon8231489123/vicuna-13b-GPTQ-4bit-128g)\n- 
[LLaMa-Storytelling-4Bit](https://huggingface.co/GamerUntouch/LLaMa-Storytelling-4Bit)\n- [Alpaca Native 4bit](https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/tree/main)\n\n\n# Roadmap\n## Short Term\n - <span style=\"color:green\">(Done)</span> Train a GPT4All model based on GPTJ to alleviate llama distribution issues.\n - <span style=\"color:green\">(Done)</span> Create improved CPU and GPU interfaces for this model.\n - <span style=\"color:green\">(Done)</span> [Integrate llama.cpp bindings](https://github.com/nomic-ai/pyllamacpp)\n - <span style=\"color:green\">(Done)</span> [Create a good conversational chat interface for the model.](https://github.com/nomic-ai/gpt4all-ui)\n - <span style=\"color:green\">(Done)</span> [Allow users to opt in and submit their chats for subsequent training runs](https://github.com/nomic-ai/gpt4all-ui)\n\n## Medium Term\n - <span style=\"color:red\">(NOT STARTED)</span> Integrate GPT4All with [Atlas](https://atlas.nomic.ai) to allow for document retrieval.\n   - BLOCKED by GPT4All based on GPTJ\n - <span style=\"color:green\">(Done)</span> Integrate GPT4All with Langchain.\n - <span style=\"color:green\">(IN PROGRESS)</span> Build easy custom training scripts to allow users to fine tune models.\n\n## Long Term\n - <span style=\"color:red\">(NOT STARTED)</span> Allow anyone to curate training data for subsequent GPT4All releases using Atlas.\n - <span style=\"color:green\">(IN PROGRESS)</span> Democratize AI. 
\n\n# Reproducibility\n\nTrained Model Weights:\n- gpt4all-lora (four full epochs of training):  https://huggingface.co/nomic-ai/gpt4all-lora\n- gpt4all-lora-epoch-2 (three full epochs of training) https://huggingface.co/nomic-ai/gpt4all-lora-epoch-2\n- gpt4all-j (one full epoch of training) (https://huggingface.co/nomic-ai/gpt4all-j)\n- gpt4all-j-lora (one full epoch of training) (https://huggingface.co/nomic-ai/gpt4all-j-lora)\n\nRaw Data:\n- [Training Data Without P3](https://huggingface.co/datasets/nomic-ai/gpt4all_prompt_generations)\n  - Explorer: https://atlas.nomic.ai/map/gpt4all_data_clean_without_p3\n- [Full Dataset with P3](https://huggingface.co/datasets/nomic-ai/gpt4all_prompt_generations_with_p3)\n  - Explorer: https://atlas.nomic.ai/map/gpt4all_data_clean\n- [GPT4All-J Dataset](https://huggingface.co/datasets/nomic-ai/gpt4all-j-prompt-generations)\n   -  Explorer Indexed on Prompts: https://atlas.nomic.ai/map/gpt4all-j-prompts-curated\n   -  Explorer Indexed on Responses: https://atlas.nomic.ai/map/gpt4all-j-response-curated\n\nWe are not distributing a LLaMa 7B checkpoint.\n\nYou can reproduce our trained model by doing the following:\n\n## Setup\n\nClone the repo\n\n```\ngit clone --recurse-submodules https://github.com/nomic-ai/gpt4all.git\ngit submodule update --init\n```\n\nSetup the environment\n\n```\npython -m pip install -r requirements.txt\n\ncd ../peft\npip install -e .\n```\n\n## Training\n\n```bash\naccelerate launch --dynamo_backend=inductor --num_processes=8 --num_machines=1 --machine_rank=0 --deepspeed_multinode_launcher standard --mixed_precision=bf16  --use_deepspeed --deepspeed_config_file=configs/deepspeed/ds_config.json train.py --config configs/train/finetune-7b.yaml\n```\n\n## Generate\n\n```bash\npython generate.py --config configs/generate/generate.yaml --prompt \"Write a script to reverse a string in Python\"\n```\n\n## Need Help?\n\nJoin the <a href=\"https://discord.gg/E7q6fp9bmx\"> Discord </a> and ask for help in 
`#gpt4all-help`\n\n# Sample Generations\n\n### Provide instructions for the given exercise. Leg Raises\n>Leg Raises\n>1. Stand with your feet shoulder-width apart and your knees slightly bent.\n>2. Raise your arms straight out in front of you.\n>3. Slowly bend your knees and raise your heels off the ground.\n>4. Hold this position for a count of 3, then slowly lower your heels back to the ground.\n>5. Repeat the exercise 10 times.\n>6. Do not let your knees go beyond your toes.\n>7. Keep your back straight and your abdominal muscles engaged.\n>8. Do not let your hips sag or your back arch.\n>9. Keep your arms straight and your elbows locked.\n>10. Do not let your head drop or your shoulders rise.\n>11. Breathe normally throughout the exercise.\n>12. Rest for 30 seconds before repeating the exercise.\n>13. Gradually increase the number of repetitions as you become stronger.\n\n\n### A color description has been provided. Find the CSS code associated with that color. A light red color with a medium light shade of pink\n\n>The CSS code associated with the color provided is: #FF6347\n\n\n### Come up with an interesting idea for a new movie plot. Your plot should be described with a title and a summary.\n\n> 1. The title of your movie plot should be \"The Last Stand\". \n> 2. The summary of your movie plot should be: A group of mercenaries are hired to protect a small town from a dangerous criminal who is on the run. However, the town's sheriff is not willing to let them take over and decides to take matters into his own hands.\n\n### Reverse a string in python.\n> In Python, you can reverse a list or tuple by using the reversed() function on it. 
Here's an example of how to use this method with strings: \n>    ```python\n>   my_string = \"Hello World\" # Define your original string here\n>   reversed_str = my_string[::-1]   # Use a slice and negative index for reverse order\n>   print(reversed_str)     # Output will be reversed version of the inputted string.\n\n### List 10 dogs.\n> 1. Labrador Retriever\n> 2. Golden Retriever\n> 3. Beagle\n> 4. Poodle\n> 5. German Shepherd\n> 6. Bulldog\n> 7. Rottweiler\n> 8. Boxer\n> 9. Shih Tzu\n> 10. Pug\n\n### Write me a poem about the fall of Julius Caesar into a caesar salad in iambic pentameter. \n\n> The fall of Julius Caesar into a caesar salad<br>\n> In iambic pentameter,<br>\n> The mighty Roman emperor<br>\n> Fell into a salad of lettuce and croutons,<br>\n> His empire crumbling around him,<br>\n> As he was devoured by the hungry diners.<br>\n> The once mighty emperor<br>\n> Was now just a salad topping,<br>\n> His legacy reduced to a mere garnish,<br>\n> As he was consumed by the masses.<br>\n> The fall of Julius Caesar into a caesar salad,<br>\n> In iambic pentameter,<br>\n> Was a tragic end to a great man,<br>\n> Who had once ruled the world with an iron fist.<br>\n> But now, he was just a salad topping,<br>\n> His legacy reduced to a mere garnish,<br>\n> As he was consumed by the masses.\n    \n### What is a three word topic describing the following keywords: baseball, football, soccer: \n> Sports, athletics, games\n    \n## Citation\n\nIf you utilize this repository, models or data in a downstream project, please consider citing it with:\n```\n@misc{gpt4all,\n  author = {Yuvanesh Anand and Zach Nussbaum and Brandon Duderstadt and Benjamin Schmidt and Andriy Mulyar},\n  title = {GPT4All: Training an Assistant-style Chatbot with Large Scale Data Distillation from GPT-3.5-Turbo},\n  year = {2023},\n  publisher = {GitHub},\n  journal = {GitHub repository},\n  howpublished = {\\url{https://github.com/nomic-ai/gpt4all}},\n}\n```\n"
  },
  {
    "path": "gpt4all-training/read.py",
    "content": "import yaml\n\n\ndef read_config(path):\n    # read yaml and return contents \n    with open(path, 'r') as file:\n        try:\n            return yaml.safe_load(file)\n        except yaml.YAMLError as exc:\n            print(exc)"
  },
  {
    "path": "gpt4all-training/requirements.txt",
    "content": "accelerate\ndatasets\neinops\ntorchmetrics\nevaluate\ntransformers>=4.28.0\nwandb\npeft\nnodelist-inflator\ndeepspeed\nsentencepiece\njsonlines\nnomic\nscikit-learn\nmatplotlib"
  },
  {
    "path": "gpt4all-training/train.py",
    "content": "#!/usr/bin/env python3\nimport os\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, get_scheduler\nimport torch\nfrom torch.optim import AdamW\nfrom argparse import ArgumentParser\nfrom read import read_config\nfrom accelerate import Accelerator\nfrom accelerate.utils import DummyScheduler, DummyOptim, set_seed\nfrom peft import get_peft_model, LoraConfig, TaskType\nfrom data import load_data\nfrom torchmetrics import MeanMetric\nfrom tqdm import tqdm\nimport wandb\n\ntorch.backends.cuda.matmul.allow_tf32 = True\n\ndef format_metrics(metrics, split, prefix=\"\"):\n    log = f\"[{split}]\" + prefix\n    log += \" \".join([f\"{key}: {value:.4f}\" for key, value in metrics.items()])\n\n    return log\n\n\ndef evaluate(model, val_dataloader):\n    model.eval()\n    val_loss = MeanMetric(nan_strategy=\"error\").to(model.device)\n\n    with torch.no_grad():\n        for batch in tqdm(val_dataloader):\n            loss = model(**batch).loss\n\n            loss_values = accelerator.gather_for_metrics({\"loss\": loss.detach()})\n\n            val_loss.update(loss_values[\"loss\"])\n\n    return val_loss\n\n\ndef train(accelerator, config):\n    set_seed(config['seed'])\n\n    accelerator.print(config)\n    accelerator.print(f\"Using {accelerator.num_processes} GPUs\")\n\n    tokenizer = AutoTokenizer.from_pretrained(config['tokenizer_name'], model_max_length=config['max_length'], use_fast=False)\n    # if no pad token, set it to eos\n    if tokenizer.pad_token is None:\n        tokenizer.pad_token = tokenizer.eos_token\n\n        \n    with accelerator.main_process_first():\n        train_dataloader, val_dataloader = load_data(config, tokenizer) \n\n\n    checkpoint = config[\"gradient_checkpointing\"]\n\n    model = AutoModelForCausalLM.from_pretrained(config[\"model_name\"], \n                                                    use_cache=False if checkpoint else True,\n                                                    trust_remote_code=True) 
\n    if checkpoint:\n        model.gradient_checkpointing_enable()\n\n    if config[\"lora\"]:\n        peft_config = LoraConfig(\n            # should R be configurable?\n            task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1\n        )\n        model = get_peft_model(model, peft_config)\n        model.print_trainable_parameters()\n\n    optimizer_cls = (\n        AdamW\n        if accelerator.state.deepspeed_plugin is None\n        or \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n        else DummyOptim\n    )\n\n    # karpathy doesn't decay embedding, maybe we should exclude\n    # https://github.com/karpathy/minGPT/commit/bbbdac74fa9b2e55574d70056163ffbae42310c1#diff-2075fa9c224b395be5bda85544dd36572b59c76c54562819eadadbf268602834R157s\n    optimizer = optimizer_cls(model.parameters(), lr=config[\"lr\"], weight_decay=config[\"weight_decay\"])\n\n    if accelerator.state.deepspeed_plugin is not None:\n        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[\n            \"gradient_accumulation_steps\"\n        ]\n\n    # decay to min_lr instead of 0\n    lr_ratio = config[\"min_lr\"] / config[\"lr\"]\n    accelerator.print(f\"Len of train_dataloader: {len(train_dataloader)}\")\n    total_num_steps = (len(train_dataloader) / gradient_accumulation_steps) * (config[\"num_epochs\"])\n    # instead of decaying to zero, decay to ratio of min_lr / lr\n    total_num_steps += int(total_num_steps * lr_ratio) + config[\"warmup_steps\"]\n    accelerator.print(f\"Total training steps: {total_num_steps}\")\n\n    # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler\n    if (\n        accelerator.state.deepspeed_plugin is None\n        or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n    ):\n        scheduler = get_scheduler(\n            name=\"cosine\",\n            
optimizer=optimizer,\n            num_warmup_steps=config[\"warmup_steps\"] * accelerator.num_processes,\n            num_training_steps=total_num_steps,\n        )\n    else:\n        scheduler = DummyScheduler(\n            optimizer, total_num_steps=total_num_steps, warmup_num_steps=config[\"warmup_steps\"]\n        )\n\n    model, optimizer, train_dataloader, val_dataloader, scheduler = accelerator.prepare(\n            model, optimizer, train_dataloader, val_dataloader, scheduler\n    )\n\n    # setup for saving training states in case preemption\n    accelerator.register_for_checkpointing(scheduler)\n\n    if config[\"checkpoint\"]:\n        accelerator.load_state(config[\"checkpoint\"])\n        accelerator.print(f\"Resumed from checkpoint: {config['checkpoint']}\")\n        path = os.path.basename(config[\"checkpoint\"])\n        training_difference = os.path.splitext(path)[0]\n        resume_step = int(training_difference.replace(\"step_\", \"\"))\n        train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)\n        accelerator.print(f\"Resuming from step {resume_step}\")\n    else:\n        resume_step = 0\n\n\n    # log gradients\n    if accelerator.is_main_process and config[\"wandb\"]:\n        wandb.watch(model, log_freq=config[\"log_grads_every\"], log=\"all\")\n\n\n    accelerator.wait_for_everyone()\n\n    for epoch in range(0, config[\"num_epochs\"]):\n        train_loss = MeanMetric(nan_strategy=\"error\").to(model.device)\n        for step, batch in enumerate(tqdm(train_dataloader)):\n            curr_step = epoch * len(train_dataloader) + step\n            model.train()\n            outputs = model(**batch)\n            loss = outputs.loss\n\n            # gather loss before backprop in case of gradient accumulation\n            loss_values = accelerator.gather_for_metrics({\"loss\": loss.detach().float()})\n            if config[\"wandb\"]:\n                accelerator.log({\"loss\": 
torch.mean(loss_values[\"loss\"]).item()}, step=curr_step)\n            train_loss.update(loss_values[\"loss\"])\n\n            loss = loss / gradient_accumulation_steps\n            accelerator.backward(loss)\n            # get gradient norm of all params\n\n            # log LR in case something weird happens \n            if step > 0 and step % (config[\"log_lr_every\"]) == 0:\n                if config[\"wandb\"]:\n                    accelerator.log({\"lr\": scheduler.get_last_lr()[0]}, step=curr_step)\n\n            if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n                optimizer.step()\n                scheduler.step()\n                optimizer.zero_grad()\n\n\n            if step > 0 and step % config[\"save_every\"] == 0:\n                accelerator.save_state(f\"{config['output_dir']}/step_{curr_step}\")\n\n            if step > 0 and (step % config[\"eval_every\"] == 0 or step == len(train_dataloader) - 1):\n                val_loss = evaluate(model, val_dataloader)\n\n                log_train = {\n                        \"train_loss\": train_loss.compute()\n                    }\n                log_val = {\n                    \"val_loss\": val_loss.compute()\n                }\n\n                if config[\"wandb\"]:\n                    accelerator.log({**log_train, **log_val}, step=curr_step)\n\n                accelerator.print(f\"Current LR: {scheduler.get_last_lr()[0]}\")\n                accelerator.print(format_metrics(log_train, \"train\", f\" step {step} \"))\n                accelerator.print(format_metrics(log_val, \"val\", f\" step {step} \"))\n\n                train_loss.reset()\n\n        accelerator.print(f\"Epoch {epoch} finished\")\n        accelerator.print(f\"Pushing to HF hub\")\n        unwrapped_model = accelerator.unwrap_model(model)\n\n        unwrapped_model.save_pretrained(\n            f\"{config['output_dir']}/epoch_{epoch}\",\n            
is_main_process=accelerator.is_main_process,\n            save_function=accelerator.save,\n            state_dict=accelerator.get_state_dict(model),\n        )\n        try:\n            if accelerator.is_main_process:\n                unwrapped_model.push_to_hub(config[\"save_name\"] + f\"-epoch_{epoch}\", private=True)\n\n        except Exception as e:\n            accelerator.print(e)\n            accelerator.print(f\"Failed to push to hub\")\n\n            \n    if config[\"num_epochs\"] > 1:\n        accelerator.wait_for_everyone()\n        unwrapped_model = accelerator.unwrap_model(model)\n        unwrapped_model.save_pretrained(\n            f\"{config['output_dir']}/final\",\n            is_main_process=accelerator.is_main_process,\n            save_function=accelerator.save,\n            state_dict=accelerator.get_state_dict(model),\n        )\n\n    accelerator.end_training()\n\n    \n\nif __name__ == \"__main__\":\n    # parse arguments by reading in a config\n    parser = ArgumentParser()\n    parser.add_argument(\"--config\", type=str, default=\"config.yaml\")\n\n    args = parser.parse_args()\n\n    config = read_config(args.config)\n\n    if config[\"wandb\"]:\n        accelerator = Accelerator(log_with=\"wandb\")\n        accelerator.init_trackers(\n            project_name=config[\"wandb_project_name\"],\n            config=config,\n            init_kwargs={\"wandb\": {\"entity\": config[\"wandb_entity\"]}},\n        )\n    else:\n        accelerator = Accelerator()\n\n    train(accelerator, config=config)\n"
  },
  {
    "path": "roadmap.md",
    "content": "\n# GPT4All 2024 Roadmap\nTo contribute to the development of any of the below roadmap items, make or find the corresponding issue and cross-reference the [in-progress task](https://github.com/orgs/nomic-ai/projects/2/views/1).\n\nEach item should have an issue link below.\n\n- Chat UI Language Localization (localize UI into the native languages of users)\n    - [ ] Chinese\n    - [ ] German\n    - [ ] French\n    - [x] Portuguese\n    - [ ] Your native language here. \n- UI Redesign: an internal effort at Nomic to improve the UI/UX of gpt4all for all users.\n    - [x] Design new user interface and gather community feedback\n    - [x] Implement the new user interface and experience.\n- Installer and Update Improvements\n    - [ ] Seamless native installation and update process on OSX\n    - [ ] Seamless native installation and update process on Windows\n    - [ ] Seamless native installation and update process on Linux\n- Model discoverability improvements:\n    - [x] Support huggingface model discoverability\n    - [x] Support Nomic hosted model discoverability\n- LocalDocs (towards a local perplexity)\n    - Multilingual LocalDocs Support\n        - [ ] Create a multilingual experience\n        - [ ] Incorporate a multilingual embedding model\n        - [ ] Specify a preferred multilingual LLM for localdocs\n    - Improved RAG techniques\n        - [ ] Query augmentation and re-writing\n        - [ ] Improved chunking and text extraction from arbitrary modalities\n            - [ ] Custom PDF extractor past the QT default (charts, tables, text)\n        - [ ] Faster indexing and local exact search with v1.5 hamming embeddings and reranking (skip ANN index construction!)\n    - Support queries like 'summarize X document'\n    - Multimodal LocalDocs support with Nomic Embed\n    - Nomic Dataset Integration with real-time LocalDocs\n        - [ ] Include an option to allow the export of private LocalDocs collections to Nomic Atlas for debugging 
data/chat quality\n        - [ ] Allow optional sharing of LocalDocs collections between users.\n        - [ ] Allow the import of a LocalDocs collection from an Atlas Dataset\n            - Chat with live version of Wikipedia, Chat with Pubmed, chat with the latest snapshot of world news.\n- First class Multilingual LLM Support\n    - [ ] Recommend and set a default LLM for German\n    - [ ] Recommend and set a default LLM for English\n    - [ ] Recommend and set a default LLM for Chinese\n    - [ ] Recommend and set a default LLM for Spanish\n\n- Server Mode improvements\n    - Improved UI and new requested features:\n        - [ ] Fix outstanding bugs and feature requests around networking configurations.\n        - [ ] Support Nomic Embed inferencing\n        - [ ] First class documentation\n        - [ ] Improving developer use and quality of server mode (e.g. support larger batches)"
  }
]